speech
AVSpeechUtterance 語音話語
包括: 語種(eg:zh-CN)、內容(文字)
@property(nonatomic) NSTimeInterval preUtteranceDelay; // Default is 0.0 讀一段話之前的停頓
@property(nonatomic) NSTimeInterval postUtteranceDelay; // Default is 0.0 讀完一段后的停頓時間
@property(nonatomic) float rate;// Values are pinned between AVSpeechUtteranceMinimumSpeechRate and AVSpeechUtteranceMaximumSpeechRate.
@property(nonatomic) float pitchMultiplier; // [0.5 - 2] Default = 1 音高
@property(nonatomic) float volume; // [0-1] Default = 1
初始化
+ (instancetype)speechUtteranceWithString:(NSString *)string;
+ (instancetype)speechUtteranceWithAttributedString:(NSAttributedString *)string API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0));
- (instancetype)initWithString:(NSString *)string;
- (instancetype)initWithAttributedString:(NSAttributedString *)string API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0));
創建發言對象通過內容
@property(nonatomic, readonly) NSString *speechString;
@property(nonatomic, readonly) NSAttributedString *attributedSpeechString API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0));
iOS 10 之後新增支持的 API;
@property(nonatomic, retain, nullable) AVSpeechSynthesisVoice *voice;
聲明為retain ,其實應該對應的就是一個位置的結構體類似;
語種的設置
@interface AVSpeechSynthesisVoice : NSObject<NSSecureCoding>
+ (NSArray<AVSpeechSynthesisVoice *> *)speechVoices; // 所有支持的語種
+ (NSString *)currentLanguageCode; // 語種的代碼
初始化 (通過語種代碼)
+ (nullable AVSpeechSynthesisVoice *)voiceWithLanguage:(nullable NSString *)languageCode;
這個方式也是通過identifier來進行初始化; (應該是可以進行自定義一些內容)
+ (nullable AVSpeechSynthesisVoice *)voiceWithIdentifier:(NSString *)identifier NS_AVAILABLE_IOS(9_0);
讀取的屬性
@property(nonatomic, readonly) NSString *language;
@property(nonatomic, readonly) NSString *identifier NS_AVAILABLE_IOS(9_0);
@property(nonatomic, readonly) NSString *name NS_AVAILABLE_IOS(9_0);
@property(nonatomic, readonly) AVSpeechSynthesisVoiceQuality quality NS_AVAILABLE_IOS(9_0);
@end
AVSpeechSynthesizer 語音合成器
@property(nonatomic, weak, nullable) id<AVSpeechSynthesizerDelegate> delegate; 代理
@property(nonatomic, readonly, getter=isSpeaking) BOOL speaking; // 是否正在播放
@property(nonatomic, readonly, getter=isPaused) BOOL paused; // 暫停
- (void)speakUtterance:(AVSpeechUtterance *)utterance;
播放語音
調用 stopSpeakingAtBoundary: 按指定邊界停止播放,會打斷當前語音並清除隊列;
/* Call stopSpeakingAtBoundary: to interrupt current speech and clear the queue. */
- (BOOL)stopSpeakingAtBoundary:(AVSpeechBoundary)boundary;
- (BOOL)pauseSpeakingAtBoundary:(AVSpeechBoundary)boundary;
- (BOOL)continueSpeaking;
typedef NS_ENUM(NSInteger, AVSpeechBoundary) {
AVSpeechBoundaryImmediate, //立刻停止
AVSpeechBoundaryWord // 讀完當前的字詞後停止
} NS_ENUM_AVAILABLE_IOS(7_0);
@property(nonatomic, retain, nullable) NSArray<AVAudioSessionChannelDescription *> *outputChannels API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0));
指定輸出的渠道,這裡是 audio channel,在當前的 audio route 上;複製到指定的渠道裡面
rate 相關常量 (AVSpeechUtteranceMinimumSpeechRate / AVSpeechUtteranceMaximumSpeechRate / AVSpeechUtteranceDefaultSpeechRate)
個人測試的值是: 最小是 0.0,最大是 1.0,默認是 0.5
覺得這里不夠清楚,可以看官方文檔,這里是對官方文檔的一些理解