In previous versions of Swift (with UIKit), you create an AVCaptureVideoPreviewLayer in a ViewController and add it to the default view using view.layer.addSublayer(previewLayer).
How is this done in SwiftUI ContentView? None of the View types in SwiftUI appear to have an addSublayer. There is no Text("Hello World").layer.addSublayer....
I have tried adding previewLayer to various views in ContentView
import Foundation
import AVFoundation
import Combine
import SwiftUI
class Scanner: NSObject, AVCaptureMetadataOutputObjectsDelegate, ObservableObject {
@Published var captureSession: AVCaptureSession!
@Published var previewLayer: AVCaptureVideoPreviewLayer!
@Published var previewView: UIView
override init() {
captureSession = AVCaptureSession()
previewLayer = nil
//previewView = UIView()
super.init()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr]
} else {
failed()
return
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
//previewView.layer.addSublayer(previewLayer)
}
import SwiftUI
import Combine
/// Hosts the scanner's AVCaptureVideoPreviewLayer inside SwiftUI.
/// SwiftUI views have no `.layer`, so the layer must be attached to a UIKit
/// view wrapped in a `UIViewRepresentable`.
struct ScannerPreview: UIViewRepresentable {
    let scanner: Scanner

    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        // previewLayer stays nil if Scanner's setup bailed out early, so unwrap safely.
        if let layer = scanner.previewLayer {
            layer.frame = view.bounds
            view.layer.addSublayer(layer)
        }
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
        // Keep the layer sized to the hosting view as SwiftUI relays out.
        scanner.previewLayer?.frame = uiView.bounds
    }
}

struct ContentView: View {
    // NOTE(review): on iOS 14+ prefer @StateObject so the Scanner is not
    // recreated when ContentView is reinitialized.
    @ObservedObject var scanner = Scanner()

    var body: some View {
        ScannerPreview(scanner: scanner)
            // `body` must be a View expression; side effects belong in .onAppear.
            .onAppear {
                scanner.captureSession.startRunning()
            }
    }
}
Compile errors trying to add previewLayer
You can't add a layer directly. That's why people currently bottle up the whole thing inside a UIView(Controller)Representable,
like many other things.
I managed to put the captured image onto the SwiftUI view.
In the view component, simply put a
Image(uiImage: cameraManager.capturedImage)
And for CameraManager, using the frame-capture function, after transforming the sample buffer to a UIImage, simply set capturedImage to that UIImage.
(Referring to https://medium.com/ios-os-x-development/ios-camera-frames-extraction-d2c0f80ed05a)
// Owns the capture pipeline and publishes the latest camera frame for SwiftUI.
class CameraManager: NSObject, ObservableObject{
... // capture-session setup elided by the answer's author
// Latest frame; any SwiftUI view reading Image(uiImage:) re-renders when this changes.
@Published public var capturedImage: UIImage = UIImage()
... // remaining members elided
}
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraManager: AVCaptureVideoDataOutputSampleBufferDelegate {
    /// Shared rendering context. Creating a CIContext is expensive; Apple's
    /// documentation recommends allocating one and reusing it rather than
    /// building a new context per frame, as the original code did.
    private static let ciContext = CIContext()

    /// Converts each delivered sample buffer to a UIImage and publishes it.
    ///
    /// Called on the video data output's delegate queue for every frame.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Transform the sample buffer into a UIImage.
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        guard let cgImage = Self.ciContext.createCGImage(ciImage, from: ciImage.extent) else { return }
        let frame = UIImage(cgImage: cgImage)
        // @Published properties must be mutated on the main thread.
        DispatchQueue.main.async {
            self.capturedImage = frame
        }
    }
}
I intended to capture every frame for later image processing. Although it can preview now, I'm not sure whether adding a computer-vision algorithm will affect the frame capturing. For now, I expect the model would run on another thread, so...
Well, open to comment
If you found this helpful, you can donate to us via PayPal or buy me a coffee so we can maintain and grow. Thank you!
Donate Us With