
AVFoundation #31


cisen commented 6 years ago

Examples

  1. A basic, working version. Requires iOS 10.0+ and the camera usage permission; add the following to Info.plist:
    <key>NSCameraUsageDescription</key>
    <string>App需要您的同意,才能访问相机</string>
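
    The Info.plist entry only declares why the camera is needed; at runtime it is also worth checking or requesting camera authorization before configuring the session. A minimal sketch (not part of the original example; requestCameraAccess is a made-up helper name):

    func requestCameraAccess(_ completion: @escaping (Bool) -> Void) {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized:
            // Already granted
            completion(true)
        case .notDetermined:
            // First launch: ask the user (shows the NSCameraUsageDescription prompt)
            AVCaptureDevice.requestAccess(for: .video) { granted in
                DispatchQueue.main.async { completion(granted) }
            }
        default:
            // .denied or .restricted
            completion(false)
        }
    }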

    Then, in the Swift file:

import UIKit
import AVFoundation

class Camera: UIViewController, AVCaptureMetadataOutputObjectsDelegate, AVCapturePhotoCaptureDelegate {

@IBOutlet weak var previewView: UIView!

// @IBOutlet weak var captureButton: UIButton!
// @IBOutlet weak var messageLabel: UILabel!

var captureSession: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?

var capturePhotoOutput: AVCapturePhotoOutput?
var qrCodeFrameView: UIView?

override func viewDidLoad() {
    super.viewDidLoad()
    let previewView = UIView(frame: CGRect(x: 20, y: 20, width: 50, height: 50))
    previewView.backgroundColor = UIColor.green
    self.view.addSubview(previewView)

    // captureButton.layer.cornerRadius = captureButton.frame.size.width / 2
    // captureButton.clipsToBounds = true

    // Get an instance of the AVCaptureDevice class to initialize a device object and provide the video as the media type parameter
    guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
        fatalError("No video device found")
    }

    do {
        // Get an instance of the AVCaptureDeviceInput class using the previous device object
        let input = try AVCaptureDeviceInput(device: captureDevice)

        // Initialize the captureSession object
        captureSession = AVCaptureSession()

        // Set the input device on the capture session
        captureSession?.addInput(input)

        // Get an instance of the AVCapturePhotoOutput class
        capturePhotoOutput = AVCapturePhotoOutput()
        capturePhotoOutput?.isHighResolutionCaptureEnabled = true

        // Set the output on the capture session
        captureSession?.addOutput(capturePhotoOutput!)

        // Initialize an AVCaptureMetadataOutput object and add it as an output to the capture session
        let captureMetadataOutput = AVCaptureMetadataOutput()
        captureSession?.addOutput(captureMetadataOutput)

        // Set delegate and use the default dispatch queue to execute the callback
        captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]

        // Initialise the video preview layer and add it as a sublayer to the previewView's layer
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        videoPreviewLayer?.frame = view.layer.bounds
        previewView.layer.addSublayer(videoPreviewLayer!)

        //start video capture
        captureSession?.startRunning()

        // messageLabel.isHidden = true

        //Initialize QR Code Frame to highlight the QR code
        qrCodeFrameView = UIView()

        if let qrCodeFrameView = qrCodeFrameView {
            qrCodeFrameView.layer.borderColor = UIColor.green.cgColor
            qrCodeFrameView.layer.borderWidth = 2
            view.addSubview(qrCodeFrameView)
            view.bringSubview(toFront: qrCodeFrameView)
        }
    } catch {
        //If any error occurs, simply print it out
        print(error)
        return
    }

}

override func viewDidLayoutSubviews() {
    videoPreviewLayer?.frame = view.bounds
    if let connection = videoPreviewLayer?.connection, connection.isVideoOrientationSupported {
        connection.videoOrientation = UIApplication.shared.statusBarOrientation.videoOrientation ?? .portrait
    }
}

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}

@IBAction func onTapTakePhoto(_ sender: Any) {
    // Make sure capturePhotoOutput is valid
    guard let capturePhotoOutput = self.capturePhotoOutput else { return }

    // Get an instance of AVCapturePhotoSettings class
    let photoSettings = AVCapturePhotoSettings()

    // Set photo settings for our need
    photoSettings.isAutoStillImageStabilizationEnabled = true
    photoSettings.isHighResolutionPhotoEnabled = true
    photoSettings.flashMode = .auto

    // Call capturePhoto method by passing our photo settings and a delegate implementing AVCapturePhotoCaptureDelegate
    capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
}
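
// A sketch (not in the original): stop the capture session when the view goes away,
// so the camera is released promptly.
override func viewWillDisappear(_ animated: Bool) {
    super.viewWillDisappear(animated)
    captureSession?.stopRunning()
}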

}

extension Camera {

func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
    // Make sure we got a photo sample buffer
    guard error == nil, let photoSampleBuffer = photoSampleBuffer else {
        print("Error capturing photo: \(String(describing: error))")
        return
    }

    // Convert the photo sample buffer to JPEG image data by using AVCapturePhotoOutput
    guard let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
        return
    }

    // Initialise an UIImage with our image data
    let capturedImage = UIImage.init(data: imageData , scale: 1.0)
    if let image = capturedImage {
        // Save our captured image to photos album
        UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
    }
}

}

extension Camera {
    func metadataOutput(_ captureOutput: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        // Check that the metadataObjects array contains at least one object.
        // if metadataObjects.count == 0 {
        //     qrCodeFrameView?.frame = CGRect.zero
        //     messageLabel.isHidden = true
        //     return
        // }
        //
        // // Get the metadata object.
        // let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
        //
        // if metadataObj.type == AVMetadataObject.ObjectType.qr {
        //     // If the found metadata is equal to the QR code metadata then update the status label's text and set the bounds
        //     let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        //     qrCodeFrameView?.frame = barCodeObject!.bounds
        //
        //     if metadataObj.stringValue != nil {
        //         messageLabel.isHidden = false
        //         messageLabel.text = metadataObj.stringValue
        //     }
        // }
    }
}

extension UIInterfaceOrientation {
    var videoOrientation: AVCaptureVideoOrientation? {
        switch self {
        case .portraitUpsideDown: return .portraitUpsideDown
        case .landscapeRight: return .landscapeRight
        case .landscapeLeft: return .landscapeLeft
        case .portrait: return .portrait
        default: return nil
        }
    }
}
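
A side note on the photo callback above: the photoOutput(_:didFinishProcessingPhoto:previewPhoto:resolvedSettings:bracketSettings:error:) form and AVCapturePhotoOutput.jpegPhotoDataRepresentation are the iOS 10-era API and are deprecated from iOS 11, where AVCapturePhoto carries the encoded data directly. A minimal sketch of the newer delegate method (not part of the original code):

func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    // fileDataRepresentation() returns the encoded image data (JPEG/HEIF) for this capture
    guard error == nil, let imageData = photo.fileDataRepresentation() else {
        print("Error capturing photo: \(String(describing: error))")
        return
    }
    if let image = UIImage(data: imageData) {
        UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
    }
}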

cisen commented 6 years ago

An example of making the device speak with AVSpeechSynthesizer

import AVFoundation

class SpeechModal {
    // We have a speaker, we know which languages to use and what to say, so let's start talking: hence beginSpeech()
    final func beginSpeech() {

        for i in 0 ..< self.speechStrings.count {
            let utterance = AVSpeechUtterance.init(string: self.speechStrings[i] as! String)
            // Choose a voice (alternating between the two languages)
            utterance.voice = self.voices[i % 2] as? AVSpeechSynthesisVoice
            // Speech rate, 0.0 - 1.0
            utterance.rate = 0.5
            // Pitch multiplier, 0.5 - 2.0
            utterance.pitchMultiplier = 0.5
            self.synthesizer.speak(utterance)
        }
    }
    //  First, we need something that can speak
    //  AVSpeechSynthesizer: the object that does the talking
    private lazy var synthesizer: AVSpeechSynthesizer = {
        let temp = AVSpeechSynthesizer.init()
        return temp
    }()
    //  Next, what should it say, and in which voice?
    //  So we build an array of voices using AVSpeechSynthesisVoice
    private lazy var voices: NSArray = {
        let temp : NSArray = [AVSpeechSynthesisVoice.init(language: "zh-CN")!
            ,AVSpeechSynthesisVoice.init(language: "en-US")!
        ]
        return temp
    }()
    // Finally, it needs something to say, so here are the strings to speak
    lazy var speechStrings: NSArray = {
        let temp : NSArray = ["我爱你","Hello World !","你说你是不是傻","Are you stop angry now ?"]
        return temp
    }()
}

Finally, call SpeechModal.init().beginSpeech() anywhere to start speaking.
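
For example (a minimal sketch; SpeechViewController is just a hypothetical caller), keeping a strong reference to the model so the synthesizer is not deallocated while it is still speaking:

import UIKit

class SpeechViewController: UIViewController {
    // Keep the model alive for the duration of the speech
    private let speech = SpeechModal()

    override func viewDidLoad() {
        super.viewDidLoad()
        speech.beginSpeech()
    }
}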