iOS Camera Operations
All camera devices can be obtained via [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo], which returns an array of AVCaptureDevice objects such as:

"AVCaptureFigVideoDevice: 0x14fd07620 [Back Camera][com.apple.avfoundation.avcapturedevice.built-in_video:0]",
"AVCaptureFigVideoDevice: 0x14fd079e0 [Front Camera][com.apple.avfoundation.avcapturedevice.built-in_video:1]"

PS: As the list shows, it contains the back camera (built-in_video:0) and the front camera (built-in_video:1).
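Two caveats worth noting. First, devicesWithMediaType: has been deprecated since iOS 10 in favor of AVCaptureDeviceDiscoverySession. Second, rather than hard-coding an array index as the code below does, it is safer to select a device by its position property, since the ordering of the array is not part of the API contract. A minimal sketch (the variable name is illustrative):

AVCaptureDevice *frontCamera = nil;
for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
    // AVCaptureDevicePositionFront / AVCaptureDevicePositionBack identify the camera.
    if (device.position == AVCaptureDevicePositionFront) {
        frontCamera = device;
        break;
    }
}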
Capturing and Displaying Camera Data
#import <AVFoundation/AVFoundation.h>

- (void)initCapture {
    // Index 1 is the front camera in the device list shown above; error handling
    // is omitted here (pass an NSError ** instead of nil in production code).
    AVCaptureDeviceInput *captureInput =
        [AVCaptureDeviceInput deviceInputWithDevice:[AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo][1]
                                              error:nil];

    AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    // Drop frames that arrive while the delegate is still processing the previous one.
    captureOutput.alwaysDiscardsLateVideoFrames = YES;
    //captureOutput.minFrameDuration = CMTimeMake(1, 10);

    // Deliver sample buffers on a dedicated serial queue, not the main thread.
    dispatch_queue_t queue = dispatch_queue_create("cameraQueue", NULL);
    [captureOutput setSampleBufferDelegate:self queue:queue];

    // Request BGRA pixel buffers so the raw bytes map directly onto a CGBitmapContext.
    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
    [captureOutput setVideoSettings:videoSettings];

    // Wire the input and output into the session and start capturing.
    self.captureSession = [[AVCaptureSession alloc] init];
    [self.captureSession addInput:captureInput];
    [self.captureSession addOutput:captureOutput];
    [self.captureSession startRunning];

    // A plain CALayer whose contents are set manually from each frame;
    // rotated 90° because raw frames arrive in landscape orientation.
    self.customLayer = [CALayer layer];
    self.customLayer.frame = self.view.bounds;
    self.customLayer.transform = CATransform3DRotate(CATransform3DIdentity, M_PI / 2.0f, 0, 0, 1);
    self.customLayer.contentsGravity = kCAGravityResizeAspectFill;
    [self.view.layer addSublayer:self.customLayer];

    // A UIImageView fed with UIImage objects built from each frame.
    self.imageView = [[UIImageView alloc] init];
    self.imageView.frame = CGRectMake(0, 0, 100, 100);
    [self.view addSubview:self.imageView];

    // The standard preview layer, shown alongside for comparison.
    self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
    self.prevLayer.frame = CGRectMake(100, 0, 100, 100);
    self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.view.layer addSublayer:self.prevLayer];
}

#pragma mark -
#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate

// Called on cameraQueue for every captured frame.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock the pixel buffer so its base address stays valid while we read it.
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Wrap the BGRA bytes in a bitmap context and snapshot it as a CGImage.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace,
                                                    kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);
    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);

    // UI updates must happen on the main thread; the layer and the image view
    // each retain what they are given, so newImage can be released afterwards.
    [self.customLayer performSelectorOnMainThread:@selector(setContents:)
                                       withObject:(__bridge id)newImage
                                    waitUntilDone:YES];
    UIImage *image = [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationRight];
    CGImageRelease(newImage);
    [self.imageView performSelectorOnMainThread:@selector(setImage:)
                                     withObject:image
                                  waitUntilDone:YES];
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
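The code above assumes the view controller declares matching properties and adopts the sample-buffer delegate protocol. A sketch of the interface it implies (the class name is illustrative; the property names match the usage in initCapture):

@interface CameraViewController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, strong) AVCaptureSession *captureSession;
@property (nonatomic, strong) CALayer *customLayer;                  // fed manually with CGImages
@property (nonatomic, strong) UIImageView *imageView;                // fed with UIImages
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *prevLayer; // standard preview
@end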
Summary
As the code above shows, the pattern is: add an input and an output to an AVCaptureSession, call startRunning, receive the image data through the delegate callback, and display it via layers (and a UIImageView).
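Boiled down to its skeleton, the pipeline looks like this (a minimal sketch; the canAddInput:/canAddOutput: checks are recommended practice and not part of the original code):

AVCaptureSession *session = [[AVCaptureSession alloc] init];
AVCaptureDevice *camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
NSError *error = nil;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:&error];
AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
[output setSampleBufferDelegate:self queue:dispatch_queue_create("cameraQueue", NULL)];
if ([session canAddInput:input])   [session addInput:input];    // camera frames in
if ([session canAddOutput:output]) [session addOutput:output];  // sample buffers out
[session startRunning]; // frames now arrive via captureOutput:didOutputSampleBuffer:fromConnection: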