cisen opened 6 years ago
An example of calling this function to speak:
import AVFoundation

class SpeechModal {
    // We have a speaker, we know which languages it speaks and what to say, so let's start talking. Hence the function beginSpeech().
    final func beginSpeech() {
        for (i, string) in speechStrings.enumerated() {
            let utterance = AVSpeechUtterance(string: string)
            // Pick one of the available voices (alternating zh-CN / en-US)
            utterance.voice = voices[i % 2]
            // Speech rate, 0.0 - 1.0
            utterance.rate = 0.5
            // Pitch, 0.5 - 2.0
            utterance.pitchMultiplier = 0.5
            synthesizer.speak(utterance)
        }
    }

    // First, we need someone to do the talking.
    // AVSpeechSynthesizer: the object that speaks.
    private lazy var synthesizer = AVSpeechSynthesizer()

    // Next, in which languages should it speak?
    // So we build an array of voices using AVSpeechSynthesisVoice.
    private lazy var voices: [AVSpeechSynthesisVoice] = [
        AVSpeechSynthesisVoice(language: "zh-CN")!,
        AVSpeechSynthesisVoice(language: "en-US")!
    ]

    // And you have to tell it what to say, right? Fine, let's say this.
    lazy var speechStrings: [String] = ["我爱你", "Hello World !", "你说你是不是傻", "Are you stop angry now ?"]
}
Finally, call SpeechModal.init().beginSpeech() anywhere and it will start speaking.
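For example, a minimal usage sketch (the SpeechViewController name is made up here). Holding the model in a property keeps the synthesizer alive; a throwaway instance may be deallocated early, which can cut the speech short:

import UIKit

class SpeechViewController: UIViewController {
    // Keep a strong reference so the synthesizer is not deallocated mid-speech
    private let speechModal = SpeechModal()

    override func viewDidLoad() {
        super.viewDidLoad()
        speechModal.beginSpeech()
    }
}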
Example
In the Swift file:
import UIKit
import AVFoundation
class Camera: UIViewController {
    // @IBOutlet weak var captureButton: UIButton!
    // @IBOutlet weak var messageLabel: UILabel!

    // View setup (e.g. in viewDidLoad()):
    // captureButton.layer.cornerRadius = captureButton.frame.size.width / 2
    // captureButton.clipsToBounds = true
    // messageLabel.isHidden = true
}
extension Camera: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ captureOutput: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?,
                     previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
                     resolvedSettings: AVCaptureResolvedPhotoSettings,
                     bracketSettings: AVCaptureBracketedStillImageSettings?,
                     error: Error?) {
        // Make sure we got a photo sample buffer
        guard error == nil, let photoSampleBuffer = photoSampleBuffer else {
            print("Error capturing photo: \(String(describing: error))")
            return
        }
        // photoSampleBuffer can now be converted into image data (see the sketch below)
    }
}
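The callback above only fires after a capture is requested from an AVCapturePhotoOutput, and the original snippet stops at the guard. A minimal, self-contained sketch of that flow under my own assumptions (the PhotoCaptureHandler name, the photoOutput property, and the takePhoto() trigger are made up for illustration; the JPEG unpacking uses the pre-iOS 11 class method that matches this callback signature):

import UIKit
import AVFoundation

class PhotoCaptureHandler: NSObject, AVCapturePhotoCaptureDelegate {
    let photoOutput = AVCapturePhotoOutput()

    // Call this (e.g. from captureButton) once photoOutput is attached to a running session
    func takePhoto() {
        photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
    }

    func photoOutput(_ captureOutput: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?,
                     previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
                     resolvedSettings: AVCaptureResolvedPhotoSettings,
                     bracketSettings: AVCaptureBracketedStillImageSettings?,
                     error: Error?) {
        guard error == nil, let photoSampleBuffer = photoSampleBuffer else {
            print("Error capturing photo: \(String(describing: error))")
            return
        }
        // Unpack the JPEG data with the class method that matches this (pre-iOS 11) callback
        if let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(
                forJPEGSampleBuffer: photoSampleBuffer,
                previewPhotoSampleBuffer: previewPhotoSampleBuffer),
           let image = UIImage(data: imageData) {
            // Hand the image to whatever needs it, e.g. save it to the photo library
            UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
        }
    }
}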
extension Camera: AVCaptureMetadataOutputObjectsDelegate {
    func metadataOutput(_ captureOutput: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        // Check that the metadataObjects array contains at least one object.
        // if metadataObjects.count == 0 {
        //     qrCodeFrameView?.frame = CGRect.zero
        //     messageLabel.isHidden = true
        //     return
        // }
        //
        // // Get the metadata object.
        // let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
        //
        // if metadataObj.type == AVMetadataObject.ObjectType.qr {
        //     // If the found metadata is QR-code metadata, update the status label's text and set the bounds
        //     let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        //     qrCodeFrameView?.frame = barCodeObject!.bounds
        //
        //     if metadataObj.stringValue != nil {
        //         messageLabel.isHidden = false
        //         messageLabel.text = metadataObj.stringValue
        //     }
        // }
    }
}
extension UIInterfaceOrientation {
    var videoOrientation: AVCaptureVideoOrientation? {
        switch self {
        case .portraitUpsideDown: return .portraitUpsideDown
        case .landscapeRight: return .landscapeRight
        case .landscapeLeft: return .landscapeLeft
        case .portrait: return .portrait
        default: return nil
        }
    }
}
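The delegate code above reads videoPreviewLayer, qrCodeFrameView, and messageLabel and assumes a running capture session, but that wiring is not shown in the snippet. Below is a minimal sketch of the setup under those assumptions (the QRScannerViewController name and the property names are chosen here only to match the references above; it also uses the videoOrientation helper to orient the preview):

import UIKit
import AVFoundation

class QRScannerViewController: UIViewController {
    let captureSession = AVCaptureSession()
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var qrCodeFrameView: UIView?
    let messageLabel = UILabel()

    override func viewDidLoad() {
        super.viewDidLoad()

        // Camera input
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device),
              captureSession.canAddInput(input) else { return }
        captureSession.addInput(input)

        // QR-code metadata output; the delegate callback fires on the main queue
        let metadataOutput = AVCaptureMetadataOutput()
        guard captureSession.canAddOutput(metadataOutput) else { return }
        captureSession.addOutput(metadataOutput)
        metadataOutput.setMetadataObjectsDelegate(self, queue: .main)
        metadataOutput.metadataObjectTypes = [.qr]

        // Preview layer that transformedMetadataObject(for:) is called on
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.videoGravity = .resizeAspectFill
        previewLayer.frame = view.layer.bounds
        if let orientation = UIApplication.shared.statusBarOrientation.videoOrientation {
            previewLayer.connection?.videoOrientation = orientation
        }
        view.layer.addSublayer(previewLayer)
        videoPreviewLayer = previewLayer

        // Label and green frame drawn over a detected QR code
        messageLabel.frame = CGRect(x: 0, y: view.bounds.height - 40, width: view.bounds.width, height: 40)
        view.addSubview(messageLabel)

        let frameView = UIView()
        frameView.layer.borderColor = UIColor.green.cgColor
        frameView.layer.borderWidth = 2
        view.addSubview(frameView)
        qrCodeFrameView = frameView

        captureSession.startRunning()
    }
}

extension QRScannerViewController: AVCaptureMetadataOutputObjectsDelegate {
    // The same callback shown above, reduced to the essentials
    func metadataOutput(_ captureOutput: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        guard let metadataObj = metadataObjects.first as? AVMetadataMachineReadableCodeObject,
              metadataObj.type == .qr,
              let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj) else {
            qrCodeFrameView?.frame = .zero
            return
        }
        qrCodeFrameView?.frame = barCodeObject.bounds
        messageLabel.text = metadataObj.stringValue
    }
}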