What is the most efficient way to capture frames from an MTKView? If possible, I would like to save a .mov file from the frames in real time. Is it possible to render into an AVPlayer frame or something?
It is currently drawing with this code (based on @warrenm's PerformanceShaders project):
func draw(in view: MTKView) {
    _ = inflightSemaphore.wait(timeout: DispatchTime.distantFuture)

    updateBuffers()

    let commandBuffer = commandQueue.makeCommandBuffer()!

    commandBuffer.addCompletedHandler { [weak self] commandBuffer in
        if let strongSelf = self {
            strongSelf.inflightSemaphore.signal()
        }
    }

    // Dispatch the current kernel to perform the selected image filter
    selectedKernel.encode(commandBuffer: commandBuffer,
                          sourceTexture: kernelSourceTexture!,
                          destinationTexture: kernelDestTexture!)

    if let renderPassDescriptor = view.currentRenderPassDescriptor, let currentDrawable = view.currentDrawable {
        let clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 1)
        renderPassDescriptor.colorAttachments[0].clearColor = clearColor

        let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor)!
        renderEncoder.label = "Main pass"
        renderEncoder.pushDebugGroup("Draw textured square")
        renderEncoder.setFrontFacing(.counterClockwise)
        renderEncoder.setCullMode(.back)
        renderEncoder.setRenderPipelineState(pipelineState)
        renderEncoder.setVertexBuffer(vertexBuffer, offset: MBEVertexDataSize * bufferIndex, index: 0)
        renderEncoder.setVertexBuffer(uniformBuffer, offset: MBEUniformDataSize * bufferIndex, index: 1)
        renderEncoder.setFragmentTexture(kernelDestTexture, index: 0)
        renderEncoder.setFragmentSamplerState(sampler, index: 0)
        renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
        renderEncoder.popDebugGroup()
        renderEncoder.endEncoding()

        commandBuffer.present(currentDrawable)
    }

    bufferIndex = (bufferIndex + 1) % MBEMaxInflightBuffers
    commandBuffer.commit()
}
Here's a small class that performs the essential functions of writing out a movie file that captures the contents of a Metal view:
import AVFoundation
import Metal
import QuartzCore

class MetalVideoRecorder {
    var isRecording = false
    var recordingStartTime = TimeInterval(0)

    private var assetWriter: AVAssetWriter
    private var assetWriterVideoInput: AVAssetWriterInput
    private var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor

    init?(outputURL url: URL, size: CGSize) {
        do {
            assetWriter = try AVAssetWriter(outputURL: url, fileType: .m4v)
        } catch {
            return nil
        }

        let outputSettings: [String: Any] = [ AVVideoCodecKey : AVVideoCodecType.h264,
                                              AVVideoWidthKey : size.width,
                                              AVVideoHeightKey : size.height ]

        assetWriterVideoInput = AVAssetWriterInput(mediaType: .video, outputSettings: outputSettings)
        assetWriterVideoInput.expectsMediaDataInRealTime = true

        let sourcePixelBufferAttributes: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String : kCVPixelFormatType_32BGRA,
            kCVPixelBufferWidthKey as String : size.width,
            kCVPixelBufferHeightKey as String : size.height ]

        assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
                                                                           sourcePixelBufferAttributes: sourcePixelBufferAttributes)

        assetWriter.add(assetWriterVideoInput)
    }

    func startRecording() {
        assetWriter.startWriting()
        assetWriter.startSession(atSourceTime: .zero)

        recordingStartTime = CACurrentMediaTime()
        isRecording = true
    }

    func endRecording(_ completionHandler: @escaping () -> ()) {
        isRecording = false

        assetWriterVideoInput.markAsFinished()
        assetWriter.finishWriting(completionHandler: completionHandler)
    }

    func writeFrame(forTexture texture: MTLTexture) {
        if !isRecording {
            return
        }

        // Spin until the writer can accept another sample. Since the input is
        // configured for real-time media, this should rarely (if ever) block.
        while !assetWriterVideoInput.isReadyForMoreMediaData {}

        guard let pixelBufferPool = assetWriterPixelBufferInput.pixelBufferPool else {
            print("Pixel buffer asset writer input did not have a pixel buffer pool available; cannot retrieve frame")
            return
        }

        var maybePixelBuffer: CVPixelBuffer? = nil
        let status = CVPixelBufferPoolCreatePixelBuffer(nil, pixelBufferPool, &maybePixelBuffer)
        if status != kCVReturnSuccess {
            print("Could not get pixel buffer from asset writer input; dropping frame...")
            return
        }

        guard let pixelBuffer = maybePixelBuffer else { return }

        CVPixelBufferLockBaseAddress(pixelBuffer, [])
        let pixelBufferBytes = CVPixelBufferGetBaseAddress(pixelBuffer)!

        // Use the bytes per row value from the pixel buffer since its stride may be rounded up to be 16-byte aligned
        let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
        let region = MTLRegionMake2D(0, 0, texture.width, texture.height)

        // Copy the rendered frame out of the texture and into the pixel buffer
        texture.getBytes(pixelBufferBytes, bytesPerRow: bytesPerRow, from: region, mipmapLevel: 0)

        let frameTime = CACurrentMediaTime() - recordingStartTime
        let presentationTime = CMTimeMakeWithSeconds(frameTime, preferredTimescale: 240)
        assetWriterPixelBufferInput.append(pixelBuffer, withPresentationTime: presentationTime)

        CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
    }
}
After initializing one of these and calling startRecording(), you can add a completed handler to the command buffer containing your rendering commands and call writeFrame from it (register the handler after you end encoding, but before presenting the drawable or committing the buffer):
let texture = currentDrawable.texture
commandBuffer.addCompletedHandler { commandBuffer in
    self.recorder.writeFrame(forTexture: texture)
}
When you're done recording, just call endRecording, and the video file will be finalized and closed.
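For example, a minimal sketch (assuming outputURL is the URL the recorder was initialized with):

recorder?.endRecording {
    // The movie at outputURL is now finalized; it's safe to play, move, or share it.
    print("Finished writing video to \(outputURL)")
}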
Caveats:
This class assumes the source texture to be of the default format, .bgra8Unorm. If it isn't, you'll get crashes or corruption. If necessary, convert the texture with a compute or fragment shader, or use Accelerate; one possible conversion pass is sketched below.
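For instance, MetalPerformanceShaders can perform the channel reordering on the GPU. Here's a minimal sketch, assuming a .rgba8Unorm source texture (rgbaTexture) and a .bgra8Unorm destination texture (bgraTexture) of the same size; both texture names are hypothetical:

import MetalPerformanceShaders

// Hypothetical conversion pass: rewrites the rgba8Unorm source into the
// bgra8Unorm channel order the recorder expects, before CPU readback.
let conversion = MPSImageConversion(device: device,
                                    srcAlpha: .alphaIsOne,
                                    destAlpha: .alphaIsOne,
                                    backgroundColor: nil,
                                    conversionInfo: nil)
conversion.encode(commandBuffer: commandBuffer,
                  sourceTexture: rgbaTexture,
                  destinationTexture: bgraTexture)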
This class also assumes that the texture is the same size as the video frame. If this isn't the case (if the drawable size changes, or your screen autorotates), the output will be corrupted and you may see crashes. Mitigate this by scaling or cropping the source texture as your application requires.
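One more practical point: copying the drawable's texture back with getBytes(_:bytesPerRow:from:mipmapLevel:) requires that the texture be CPU-readable, and by default an MTKView's drawables are framebuffer-only. Assuming metalView is your MTKView, you'll want to disable that when you configure the view:

// A framebuffer-only drawable can only be rendered to and displayed;
// it cannot be read back, so writeFrame would produce no pixel data.
metalView.framebufferOnly = false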