Info.plist
<key>NSAppTransportSecurity</key>
<dict>
    <key>NSExceptionDomains</key>
    <dict>
        <key>qcloud.com</key>
        <dict>
            <key>NSExceptionAllowsInsecureHTTPLoads</key>
            <true/>
            <key>NSExceptionMinimumTLSVersion</key>
            <string>TLSv1.2</string>
            <key>NSIncludesSubdomains</key>
            <true/>
            <key>NSRequiresCertificateTransparency</key>
            <false/>
        </dict>
    </dict>
</dict>
<key>NSMicrophoneUsageDescription</key><string>Your microphone is required to capture audio</string>
#import <QCloudSDK/QCloudSDK.h>

// 1. Create a QCloudConfig instance (direct authentication with appId/secretId/secretKey/projectId).
QCloudConfig *config = [[QCloudConfig alloc] initWithAppId:kQDAppId
                                                  secretId:kQDSecretId
                                                 secretKey:kQDSecretKey
                                                 projectId:kQDProjectId];
config.sliceTime = 600;                      // Voice segmentation duration: 600 ms per slice
config.enableDetectVolume = YES;             // Whether to detect volume
config.endRecognizeWhenDetectSilence = YES;  // Stop recognition when silence is detected

// 2. Create the recognizer with the built-in recorder and set its delegate
//    (self must conform to QCloudRealTimeRecognizerDelegate).
QCloudRealTimeRecognizer *recognizer = [[QCloudRealTimeRecognizer alloc] initWithConfig:config];
recognizer.delegate = self;

// 3. Start real-time recognition (begins capturing from the microphone).
[recognizer start];

// 4. Stop real-time recognition when finished.
[recognizer stop];
#import <QCloudSDK/QCloudSDK.h>

// 1. Create a QCloudConfig instance (direct authentication with appId/secretId/secretKey/projectId).
QCloudConfig *config = [[QCloudConfig alloc] initWithAppId:kQDAppId
                                                  secretId:kQDSecretId
                                                 secretKey:kQDSecretKey
                                                 projectId:kQDProjectId];
config.sliceTime = 600;                      // Voice segmentation duration: 600 ms per slice
config.enableDetectVolume = YES;             // Whether to detect volume
config.endRecognizeWhenDetectSilence = YES;  // Stop recognition when silence is detected

// 2. Create a custom voice data source (must implement the QCloudAudioDataSource protocol)
//    and initialize the recognizer with it instead of the built-in recorder.
QCloudDemoAudioDataSource *dataSource = [[QCloudDemoAudioDataSource alloc] init];
QCloudRealTimeRecognizer *recognizer = [[QCloudRealTimeRecognizer alloc] initWithConfig:config
                                                                             dataSource:dataSource];
recognizer.delegate = self;

// 3. Start real-time recognition (the SDK pulls audio from the data source).
[recognizer start];

// 4. Stop real-time recognition when finished.
[recognizer stop];
/**
 * Initialization method where the built-in recorder is used to capture audio.
 * @param config Configuration parameters; see the QCloudConfig definition.
 */
- (instancetype)initWithConfig:(QCloudConfig *)config;

/**
 * Initialization method where the caller supplies the voice data.
 * @param config Configuration parameters; see the QCloudConfig definition.
 * @param dataSource Voice data source, which must implement the QCloudAudioDataSource protocol.
 */
- (instancetype)initWithConfig:(QCloudConfig *)config dataSource:(id<QCloudAudioDataSource>)dataSource;
/**
 * Initialization method - direct authentication.
 * @param appid Tencent Cloud `appId`
 * @param secretId Tencent Cloud `secretId`
 * @param secretKey Tencent Cloud `secretKey`
 * @param projectId Tencent Cloud `projectId`
 */
- (instancetype)initWithAppId:(NSString *)appid
                     secretId:(NSString *)secretId
                    secretKey:(NSString *)secretKey
                    projectId:(NSString *)projectId;

/**
 * Initialization method - authentication through STS temporary credentials.
 * @param appid Tencent Cloud `appId`
 * @param secretId Tencent Cloud temporary `secretId`
 * @param secretKey Tencent Cloud temporary `secretKey`
 * @param token Corresponding `token`
 */
- (instancetype)initWithAppId:(NSString *)appid
                     secretId:(NSString *)secretId
                    secretKey:(NSString *)secretKey
                        token:(NSString *)token;
/**
 * Real-time recording recognition is divided into multiple flows. Each flow can be
 * understood as a sentence; a recognition session can include multiple sentences.
 * Each flow contains multiple seq voice data packets; each flow's seq starts from 0.
 */
@protocol QCloudRealTimeRecognizerDelegate <NSObject>

@required

/**
 * Fragmented recognition result of each voice packet.
 * @param recognizer Real-time ASR instance.
 * @param response Recognition result of the voice fragment.
 */
- (void)realTimeRecognizerOnSliceRecognize:(QCloudRealTimeRecognizer *)recognizer response:(QCloudRealTimeResponse *)response;

@optional

/**
 * Callback for a successful single recognition.
 * @param recognizer Real-time ASR instance.
 * @param result Total text from a single recognition.
 */
- (void)realTimeRecognizerDidFinish:(QCloudRealTimeRecognizer *)recognizer result:(NSString *)result;

/**
 * Callback for a failed single recognition.
 * @param recognizer Real-time ASR instance.
 * @param error Error message.
 * @param voiceId If the error is returned from the backend, includes the voiceId.
 */
- (void)realTimeRecognizerDidError:(QCloudRealTimeRecognizer *)recognizer error:(NSError *)error voiceId:(NSString *_Nullable)voiceId;

#pragma mark - Recording callbacks

/**
 * Callback for recording start.
 * @param recognizer Real-time ASR instance.
 * @param error Error message if recording failed to start.
 */
- (void)realTimeRecognizerDidStartRecord:(QCloudRealTimeRecognizer *)recognizer error:(NSError *)error;

/**
 * Callback for recording end.
 * @param recognizer Real-time ASR instance.
 */
- (void)realTimeRecognizerDidStopRecord:(QCloudRealTimeRecognizer *)recognizer;

/**
 * Real-time callback for recording volume.
 * @param recognizer Real-time ASR instance.
 * @param volume Audio volume level in the range of -40 to 0.
 */
- (void)realTimeRecognizerDidUpdateVolume:(QCloudRealTimeRecognizer *)recognizer volume:(float)volume;

#pragma mark - Flow recognition callbacks

/**
 * Start recognition of the voice stream.
 * @param recognizer Real-time ASR instance.
 * @param voiceId The voiceId corresponding to the voice stream, a unique identifier.
 * @param seq The sequence number of the flow.
 */
- (void)realTimeRecognizerOnFlowRecognizeStart:(QCloudRealTimeRecognizer *)recognizer voiceId:(NSString *)voiceId seq:(NSInteger)seq;

/**
 * End recognition of the voice stream.
 * @param recognizer Real-time ASR instance.
 * @param voiceId The voiceId corresponding to the voice stream, a unique identifier.
 * @param seq The sequence number of the flow.
 */
- (void)realTimeRecognizerOnFlowRecognizeEnd:(QCloudRealTimeRecognizer *)recognizer voiceId:(NSString *)voiceId seq:(NSInteger)seq;

#pragma mark - Flow lifecycle callbacks

/**
 * Voice stream recognition started.
 * @param recognizer Real-time ASR instance.
 * @param voiceId The voiceId corresponding to the voice stream, a unique identifier.
 * @param seq The sequence number of the flow.
 */
- (void)realTimeRecognizerOnFlowStart:(QCloudRealTimeRecognizer *)recognizer voiceId:(NSString *)voiceId seq:(NSInteger)seq;

/**
 * Voice stream recognition ended.
 * @param recognizer Real-time ASR instance.
 * @param voiceId The voiceId corresponding to the voice stream, a unique identifier.
 * @param seq The sequence number of the flow.
 */
- (void)realTimeRecognizerOnFlowEnd:(QCloudRealTimeRecognizer *)recognizer voiceId:(NSString *)voiceId seq:(NSInteger)seq;

@end
/**
 * Voice data source. If you need to provide your own voice data, implement all
 * methods in this protocol and supply audio that meets the following requirements:
 * - Sampling rate: 16k
 * - Audio format: PCM
 * - Encoding: 16-bit, single channel
 */
@protocol QCloudAudioDataSource <NSObject>

@required

/**
 * Indicates whether the data source is working.
 * Set to YES after executing start; set to NO after executing stop.
 */
@property (nonatomic, assign) BOOL running;

/**
 * The SDK will call the start method. Classes implementing this protocol
 * need to initialize the data source.
 * @param completion Called with whether the data source started and any error.
 */
- (void)start:(void (^)(BOOL didStart, NSError *error))completion;

/**
 * The SDK will call the stop method. Classes implementing this protocol
 * need to stop providing data.
 */
- (void)stop;

/**
 * The SDK will call this method on the object implementing this protocol to read
 * voice data. If there is not enough voice data for the expected length, return nil.
 * @param expectLength The expected number of bytes to read. If the returned NSData
 *        is shorter than the expected length, the SDK will throw an exception.
 * @return The requested audio bytes, or nil when insufficient data is available.
 */
- (nullable NSData *)readData:(NSInteger)expectLength;

@end
Was this page helpful?