I'm quite new to OpenGL and Metal, and I'm trying to understand some fundamental concepts. Within our app, we use CIFilter to filter videos. I saw a WWDC 2017 video explaining that you can wrap a Metal kernel in a CIFilter and use it like any regular filter. I'm trying to understand how to convert the following OpenGL (Shadertoy) video effect to Metal, so I can use it as a reference point for future effects:
// Shadertoy-style VHS glitch effect: chromatic aberration (red/blue channels
// sampled at offset coordinates) plus a horizontal "tear" above a scan line
// that sweeps across the image on a 2-second cycle.
void mainImage(out vec4 fragColor, in vec2 fragCoord) {
// Time-varying channel offset; scaled to +/-0.03 after the *= 0.3 below.
float amount = sin(iTime) * 0.1;
// uv coords (normalized to 0..1)
vec2 uv = fragCoord / iResolution.xy;
amount *= 0.3;
// Scan line position: goes from 1 down to 0 every 2 seconds, then wraps.
float split = 1. - fract(iTime / 2.);
float scanOffset = 0.01;
// Red channel is shifted horizontally, blue channel vertically.
vec2 uv1 = vec2(uv.x + amount, uv.y);
vec2 uv2 = vec2(uv.x, uv.y + amount);
// Everything past the scan line gets an extra horizontal shift (the tear).
if (uv.y > split) {
uv.x += scanOffset;
uv1.x += scanOffset;
uv2.x += scanOffset;
}
// Sample each color channel at its own coordinate for the aberration look.
float r = texture(iChannel0, uv1).r;
float g = texture(iChannel0, uv).g;
float b = texture(iChannel0, uv2).b;
fragColor = vec4(r, g, b, 1.);
}
Which produces:
After converting the OpenGL code to Metal, I'm using the CIFilter wrapper to apply it to an AVPlayerItem:
/// A `CIFilter` wrapping the Metal Core Image kernel "vhs" compiled into
/// the app's default metallib.
class MetalFilter: CIFilter {
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    /// Compiled Core Image kernel (Metal function "vhs" in default.metallib).
    private let kernel: CIKernel

    /// The image to filter; set by the caller before reading `outputImage`.
    var inputImage: CIImage?

    override init() {
        // Force-unwrap/try! are acceptable here: a missing or invalid bundled
        // metallib is a programmer error that should crash in development.
        let url = Bundle.main.url(forResource: "default", withExtension: "metallib")!
        let data = try! Data(contentsOf: url)
        kernel = try! CIKernel(functionName: "vhs", fromMetalLibraryData: data)
        super.init()
    }

    // FIX: `outputImage` must be an overridden *property* — `CIFilter`
    // declares `var outputImage: CIImage?`, and clients (e.g. an AVPlayerItem
    // video composition) read that property. The original `func outputImage()`
    // never overrides it, so the filter appeared to do nothing.
    override var outputImage: CIImage? {
        guard let inputImage = inputImage else { return nil }
        let sourceSize = inputImage.extent.size
        return kernel.apply(
            extent: CGRect(origin: .zero, size: sourceSize),
            // NOTE(review): returning `destRect` unchanged is only correct if
            // the kernel never samples outside the destination pixel. If the
            // kernel offsets its sample coordinates (as a glitch effect
            // typically does), the ROI must be expanded accordingly — confirm
            // against the kernel's sampling pattern.
            roiCallback: { _, destRect in destRect },
            arguments: [inputImage,
                        NSNumber(value: Float(1.0 / sourceSize.width)),
                        NSNumber(value: Float(1.0 / sourceSize.height)),
                        NSNumber(value: 60.0)])
    }
}
Any help will be highly appreciated!
I gave it a try. Here's the kernel code:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>
extern "C" { namespace coreimage {
// VHS glitch kernel: chromatic aberration (red/blue sampled at offset
// coordinates) plus a horizontal tear past a scan line that sweeps over the
// image on a 2-second cycle of `time`. Port of the Shadertoy original.
// - src:    source sampler (half-precision)
// - time:   animation time driving the offset and the scan line
// - amount: overall strength of the channel offset
float4 vhs(sampler_h src, float time, float amount) {
// Signed channel offset in relative (0..1) texture coordinates.
const float magnitude = sin(time) * 0.1 * amount;
float2 greenCoord = src.coord(); // this is already in relative coords; no need to divide by image size
// Scan line position: goes from 1 down to 0 every 2 time units, then wraps.
const float split = 1.0 - fract(time / 2.0);
const float scanOffset = 0.01;
// Red is sampled with a horizontal offset, blue with a vertical one.
float2 redCoord = float2(greenCoord.x + magnitude, greenCoord.y);
float2 blueCoord = float2(greenCoord.x, greenCoord.y + magnitude);
// Pixels past the scan line get an extra horizontal shift (the tear).
if (greenCoord.y > split) {
greenCoord.x += scanOffset;
redCoord.x += scanOffset;
blueCoord.x += scanOffset;
}
float r = src.sample(redCoord).r;
float g = src.sample(greenCoord).g;
float b = src.sample(blueCoord).b;
return float4(r, g, b, 1.0);
}
}}
And here are some slight adjustments to the `outputImage` implementation in your filter:
override var outputImage: CIImage? {
    guard let inputImage = self.inputImage else { return nil }

    // Could be exposed as filter parameters.
    let inputTime: NSNumber = 60
    let inputAmount: NSNumber = 0.3

    // Maximum sampling offset in relative texture coordinates.
    // FIX: `sin` can be (and for time = 60 actually is) negative, in which
    // case the kernel samples to the LEFT of / BELOW the destination pixel.
    // The original ROI only grew to the right/top — and a negative magnitude
    // even *shrank* the rect's height — so those samples fell outside the
    // region Core Image provided. Use the absolute value and expand the
    // ROI symmetrically; a slightly-too-large ROI is always safe.
    let magnitude = CGFloat(abs(sin(inputTime.floatValue) * 0.1 * inputAmount.floatValue))
    let scanOffset: CGFloat = 0.01 // matches the kernel's constant tear offset
    let inputExtent = inputImage.extent

    // Tell Core Image which input pixels are needed for a given output rect.
    let roiCallback: CIKernelROICallback = { _, rect in
        rect.insetBy(dx: -(magnitude + scanOffset) * inputExtent.width,
                     dy: -magnitude * inputExtent.height)
    }

    return self.kernel.apply(extent: inputExtent,
                             roiCallback: roiCallback,
                             arguments: [inputImage, inputTime, inputAmount])
}
If you found this helpful, you can donate via PayPal or buy us a coffee so we can maintain and grow. Thank you!