I'm getting the wrong orientation on a video exported with AVAssetExportSession, but only when it was recorded with the front camera. I followed this tutorial https://stackoverflow.com/a/35368649/3764365 but ended up with this scenario. I don't think the orientation itself is wrong; rather, the image is cut in half. I tried changing the video layer and the render layer, but had no luck. My code looks like this:
let composition = AVMutableComposition()
let vidAsset = AVURLAsset(url: path)
// get the video track
let vtrack = vidAsset.tracks(withMediaType: AVMediaTypeVideo)
let videoTrack: AVAssetTrack = vtrack[0]
_ = videoTrack.timeRange.duration
let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
var _: NSError?
let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
do {
    try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: kCMTimeZero)
} catch let error {
    print(error.localizedDescription)
}
// get the audio track and add it to the composition
let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let audioTrack = vidAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
    try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, vidAsset.duration), of: audioTrack, at: kCMTimeZero)
} catch {
    print(error)
}
let size = videoTrack.naturalSize
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
parentlayer.addSublayer(videolayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = CGSize(width: size.height, height: size.width)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
// instruction for watermark
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
layerinstruction.setTransform(videoTrack.preferredTransform, at: kCMTimeZero)
// create new file to receive data
let movieDestinationUrl = UIImage.outPut()
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720)!
assetExport.videoComposition = layercomposition
assetExport.outputFileType = AVFileTypeQuickTimeMovie
assetExport.outputURL = movieDestinationUrl
Setting movieFileOutputConnection?.isVideoMirrored from true to false fixed the issue for me. It's a weird bug, in my opinion.
if self.currentCamera == .front {
    movieFileOutputConnection?.isVideoMirrored = false
}
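The snippet above is missing its surrounding context, so here is a minimal sketch of where that flag would be set, assuming a standard AVCaptureMovieFileOutput recording pipeline. The names movieFileOutput, currentCamera, outputURL and the delegate conformance are placeholders, not from the original code:

// Hypothetical recording setup: configure the video connection of the
// movie file output just before starting to record.
let movieFileOutputConnection = movieFileOutput.connection(withMediaType: AVMediaTypeVideo)
movieFileOutputConnection?.videoOrientation = .portrait
if self.currentCamera == .front, movieFileOutputConnection?.isVideoMirroringSupported == true {
    // Leaving mirroring off for the front camera avoids the flipped /
    // half-cut export described in the question.
    movieFileOutputConnection?.automaticallyAdjustsVideoMirroring = false
    movieFileOutputConnection?.isVideoMirrored = false
}
movieFileOutput.startRecording(toOutputFileURL: outputURL, recordingDelegate: self)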
I'll share the code I used to solve this issue.
func addImagesToVideo(path: URL, labelImageViews: [LabelImageView]) {
SVProgressHUD.show()
let composition = AVMutableComposition()
let vidAsset = AVURLAsset(url: path)
// get the video track
let vtrack = vidAsset.tracks(withMediaType: AVMediaTypeVideo)
let videoTrack: AVAssetTrack = vtrack[0]
_ = videoTrack.timeRange.duration
let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
var _: NSError?
let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
do {
    try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: kCMTimeZero)
} catch let error {
    print(error.localizedDescription)
}
// get the audio track and add it to the composition
let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let audioTrack = vidAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
    try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, vidAsset.duration), of: audioTrack, at: kCMTimeZero)
} catch {
    print(error)
}
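// naturalSize reports the track's dimensions before preferredTransform is
// applied, so for a portrait recording the width and height are swapped
// below when building the layer tree and the render size.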
let size = videoTrack.naturalSize
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
parentlayer.addSublayer(videolayer)
if labelImageViews.count != 0 {
    // flatten the label/image views into a single overlay image
    // and place it above the video layer
    let blankImage = self.clearImage(size: videolayer.frame.size)
    let image = self.saveImage(imageOne: blankImage, labelImageViews: labelImageViews)
    let imglayer = CALayer()
    imglayer.contents = image.cgImage
    imglayer.frame = CGRect(origin: CGPoint.zero, size: videolayer.frame.size)
    imglayer.opacity = 1
    parentlayer.addSublayer(imglayer)
}
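// The animation tool below tells the export session to render the video
// frames into videolayer and then composite the whole parentlayer
// (video plus any overlay layers) into the output.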
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = CGSize(width: size.height, height: size.width)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
// instruction for watermark
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
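// preferredTransform carries the recording orientation. A transform of
// [0 1; -1 0] or [0 -1; 1 0] (a 90-degree rotation either way) means the
// asset was captured in portrait, which is what the checks below detect.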
var isVideoAssetPortrait = false
let videoTransform = videoTrack.preferredTransform
if videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0 {
    isVideoAssetPortrait = true
}
if videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0 {
    isVideoAssetPortrait = true
}
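// For a portrait asset, preferredTransform alone places the frames correctly
// in the portrait render size. For a landscape asset the track is additionally
// shifted down by a hard-coded 560 points so it lands inside the portrait
// frame; that offset presumably matches this particular render size.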
if isVideoAssetPortrait {
    let FirstAssetScaleFactor = CGAffineTransform(scaleX: 1, y: 1)
    layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(FirstAssetScaleFactor), at: kCMTimeZero)
} else {
    let FirstAssetScaleFactor = CGAffineTransform(scaleX: 1, y: 1)
    layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(FirstAssetScaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 560)), at: kCMTimeZero)
}
// create new file to receive data
let movieDestinationUrl = UIImage.outPut()
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720)!
assetExport.videoComposition = layercomposition
assetExport.outputFileType = AVFileTypeQuickTimeMovie
assetExport.outputURL = movieDestinationUrl
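// exportAsynchronously may complete on a background queue, which is why the
// success path below hops back to the main queue before updating the UI.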
assetExport.exportAsynchronously(completionHandler: {
    switch assetExport.status {
    case AVAssetExportSessionStatus.failed:
        print("failed \(assetExport.error!)")
    case AVAssetExportSessionStatus.cancelled:
        print("cancelled \(assetExport.error!)")
    default:
        print("Movie complete")
        // play video
        OperationQueue.main.addOperation({ () -> Void in
            let output = UIImage.outPut()
            UIImage.compress(inputURL: movieDestinationUrl as NSURL, outputURL: output as NSURL) {
                UISaveVideoAtPathToSavedPhotosAlbum(output.relativePath, nil, nil, nil)
                print("Done Converting")
                DispatchQueue.main.async {
                    SVProgressHUD.dismiss()
                }
            }
        })
    }
})
}
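For completeness, a rough sketch of how the method above might be called once recording finishes. It uses the same Swift 3-era AVFoundation APIs as the rest of the answer; the CameraViewController type and the labelImageViews property are hypothetical placeholders, not part of the original code:

// Hypothetical call site: once AVCaptureMovieFileOutput finishes writing the
// raw clip, hand the file URL and the on-screen overlays to the export.
extension CameraViewController: AVCaptureFileOutputRecordingDelegate {
    func capture(_ captureOutput: AVCaptureFileOutput!,
                 didFinishRecordingToOutputFileAt outputFileURL: URL!,
                 fromConnections connections: [Any]!,
                 error: Error!) {
        guard error == nil else {
            print("recording failed: \(error!)")
            return
        }
        // labelImageViews would be whatever LabelImageView overlays the user added
        self.addImagesToVideo(path: outputFileURL, labelImageViews: self.labelImageViews)
    }
}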