快速视频中的图像/文本叠加

Posted

技术标签:

【中文标题】快速视频中的图像/文本叠加【英文标题】:Image/Text overlay in video swift 【发布时间】:2015-09-08 06:07:29 【问题描述】:

我正在使用 swift 处理视频中的水印效果的图像叠加。我为此使用AVFoundation,但不知何故我没有成功。

以下是我的叠加图像/文本代码

    let path = NSBundle.mainBundle().pathForResource("sample_movie", ofType:"mp4")
    let fileURL = NSURL(fileURLWithPath: path!)

    let composition = AVMutableComposition()
    var vidAsset = AVURLAsset(URL: fileURL, options: nil)

    // get video track
    let vtrack =  vidAsset.tracksWithMediaType(AVMediaTypeVideo)
    let videoTrack:AVAssetTrack = vtrack[0] as! AVAssetTrack
    let vid_duration = videoTrack.timeRange.duration
    let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)

    var error: NSError?
    let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
    compositionvideoTrack.insertTimeRange(vid_timerange, ofTrack: videoTrack, atTime: kCMTimeZero, error: &error)

    compositionvideoTrack.preferredTransform = videoTrack.preferredTransform

    // Watermark Effect
    let size = videoTrack.naturalSize

    let imglogo = UIImage(named: "image.png")
    let imglayer = CALayer()
    imglayer.contents = imglogo?.CGImage
    imglayer.frame = CGRectMake(5, 5, 100, 100)
    imglayer.opacity = 0.6

    // create text Layer
    let titleLayer = CATextLayer()
    titleLayer.backgroundColor = UIColor.whiteColor().CGColor
    titleLayer.string = "Dummy text"
    titleLayer.font = UIFont(name: "Helvetica", size: 28)
    titleLayer.shadowOpacity = 0.5
    titleLayer.alignmentMode = kCAAlignmentCenter
    titleLayer.frame = CGRectMake(0, 50, size.width, size.height / 6)

    let videolayer = CALayer()
    videolayer.frame = CGRectMake(0, 0, size.width, size.height)

    let parentlayer = CALayer()
    parentlayer.frame = CGRectMake(0, 0, size.width, size.height)
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(imglayer)
    parentlayer.addSublayer(titleLayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(1, 30)
    layercomposition.renderSize = size
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, inLayer: parentlayer)

    // instruction for watermark
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
    let videotrack = composition.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    instruction.layerInstructions = NSArray(object: layerinstruction) as [AnyObject]
    layercomposition.instructions = NSArray(object: instruction) as [AnyObject]

    //  create new file to receive data
    let dirPaths = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)
    let docsDir: AnyObject = dirPaths[0]
    let movieFilePath = docsDir.stringByAppendingPathComponent("result.mov")
    let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)       

    // use AVAssetExportSession to export video
    let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality)
    assetExport.outputFileType = AVFileTypeQuickTimeMovie 
    assetExport.outputURL = movieDestinationUrl
    assetExport.exportAsynchronouslyWithCompletionHandler(
        switch assetExport.status
        case  AVAssetExportSessionStatus.Failed:
            println("failed \(assetExport.error)")
        case AVAssetExportSessionStatus.Cancelled:
            println("cancelled \(assetExport.error)")
        default:
            println("Movie complete")


            // play video
            NSOperationQueue.mainQueue().addOperationWithBlock( () -> Void in
                self.playVideo(movieDestinationUrl!)
            )
        
    )    

通过这段代码,我没有实现覆盖……我不知道我做错了什么……

问题:

此代码中是否缺少任何内容?或者这段代码有什么问题? 此代码是否仅适用于录制的视频或所有视频,包括图库中的视频?

【问题讨论】:

你曾经解决过这个问题吗?寻找相同的答案。 你能在哪里解决这个问题? 【参考方案1】:

@El Captain 提供的代码可以使用。它只是缺少:

    assetExport.videoComposition = layercomposition

您可以在 AVAssetExportSession

实例化之后立即添加它

注意:最初提供的代码只会导出视频轨道,而不是音频轨道。如果您需要音轨,您可以在配置 compositionvideoTrack 后添加类似的内容:

let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
    for audioTrack in audioTracks 
        try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, ofTrack: audioTrack, atTime: kCMTimeZero)
    

【讨论】:

甜蜜!希望它可以帮助其他有同样问题的人。【参考方案2】:

以下是 Swift 4 中的更新:

import UIKit
import AVFoundation
import AVKit
import Photos

class ViewController: UIViewController 

var myurl: URL?

override func viewDidLoad() 
    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.



@IBAction func saveVideoTapper(_ sender: Any) 

    let path = Bundle.main.path(forResource: "sample_video", ofType:"mp4")
    let fileURL = NSURL(fileURLWithPath: path!)

    let composition = AVMutableComposition()
    let vidAsset = AVURLAsset(url: fileURL as URL, options: nil)

    // get video track
    let vtrack =  vidAsset.tracks(withMediaType: AVMediaType.video)
    let videoTrack: AVAssetTrack = vtrack[0]
    let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)

    let tr: CMTimeRange = CMTimeRange(start: CMTime.zero, duration: CMTime(seconds: 10.0, preferredTimescale: 600))
    composition.insertEmptyTimeRange(tr)

    let trackID:CMPersistentTrackID = CMPersistentTrackID(kCMPersistentTrackID_Invalid)

    if let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: trackID) 

        do 
            try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
         catch 
            print("error")
        

        compositionvideoTrack.preferredTransform = videoTrack.preferredTransform

     else 
        print("unable to add video track")
        return
    


    // Watermark Effect
    let size = videoTrack.naturalSize

    let imglogo = UIImage(named: "image.png")
    let imglayer = CALayer()
    imglayer.contents = imglogo?.cgImage
    imglayer.frame = CGRect(x: 5, y: 5, width: 100, height: 100)
    imglayer.opacity = 0.6

    // create text Layer
    let titleLayer = CATextLayer()
    titleLayer.backgroundColor = UIColor.white.cgColor
    titleLayer.string = "Dummy text"
    titleLayer.font = UIFont(name: "Helvetica", size: 28)
    titleLayer.shadowOpacity = 0.5
    titleLayer.alignmentMode = CATextLayerAlignmentMode.center
    titleLayer.frame = CGRect(x: 0, y: 50, width: size.width, height: size.height / 6)


    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)

    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(imglayer)
    parentlayer.addSublayer(titleLayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    layercomposition.renderSize = size
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

    // instruction for watermark
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: composition.duration)
    let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    instruction.layerInstructions = NSArray(object: layerinstruction) as [AnyObject] as! [AVVideoCompositionLayerInstruction]
    layercomposition.instructions = NSArray(object: instruction) as [AnyObject] as! [AVVideoCompositionInstructionProtocol]

    //  create new file to receive data
    let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
    let docsDir = dirPaths[0] as NSString
    let movieFilePath = docsDir.appendingPathComponent("result.mov")
    let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)

    // use AVAssetExportSession to export video
    let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality)
    assetExport?.outputFileType = AVFileType.mov
    assetExport?.videoComposition = layercomposition

    // Check exist and remove old file
    FileManager.default.removeItemIfExisted(movieDestinationUrl as URL)

    assetExport?.outputURL = movieDestinationUrl as URL
    assetExport?.exportAsynchronously(completionHandler: 
        switch assetExport!.status 
        case AVAssetExportSession.Status.failed:
            print("failed")
            print(assetExport?.error ?? "unknown error")
        case AVAssetExportSession.Status.cancelled:
            print("cancelled")
            print(assetExport?.error ?? "unknown error")
        default:
            print("Movie complete")

            self.myurl = movieDestinationUrl as URL

            phphotoLibrary.shared().performChanges(
                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
            )  saved, error in
                if saved 
                    print("Saved")
                
            

            self.playVideo()

        
    )




func playVideo() 
    let player = AVPlayer(url: myurl!)
    let playerLayer = AVPlayerLayer(player: player)
    playerLayer.frame = self.view.bounds
    self.view.layer.addSublayer(playerLayer)
    player.play()
    print("playing...")







extension FileManager 
func removeItemIfExisted(_ url:URL) -> Void 
    if FileManager.default.fileExists(atPath: url.path) 
        do 
            try FileManager.default.removeItem(atPath: url.path)
        
        catch 
            print("Failed to delete file")
        
    


【讨论】:

【参考方案3】:

对我来说(我在你的代码中看到的),你没有将parentlayer 添加到屏幕上。

您创建一个 CALayer() 以将 videolayerimglayertitleLayer 添加到新层中,但您没有在屏幕上添加最后一层。

yourView.layer.addSublayer(parentlayer)

希望对你有帮助

【讨论】:

检查layercomposition.animationTool这行...它使用父层处理视频的位置 尝试在let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality)中使用layercomposition而不是composition 您能否添加完整的解决方案作为答案。 如果以后有类似问题的人发现这个问题,有答案可能对他们有用。你问了这个问题,你现在有了答案。 嗨@Dolwen 你知道如何纠正视频中的模糊吗?这里的问题:***.com/questions/34912050/…。感谢您的宝贵时间!【参考方案4】:

@Rey Hernandez 这对我帮助很大!如果有人想进一步说明如何在视频中添加音频资产,这里是合并它们的代码

    let vtrack =  vidAsset.tracksWithMediaType(AVMediaTypeVideo)
    let videoTrack:AVAssetTrack = vtrack[0] 
    let vid_duration = videoTrack.timeRange.duration
    let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)

    let atrack =  vidAsset.tracksWithMediaType(AVMediaTypeAudio)
    let audioTrack:AVAssetTrack = atrack[0]
    let audio_duration = audioTrack.timeRange.duration
    let audio_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)

    do 
        let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())

        try compositionvideoTrack.insertTimeRange(vid_timerange, ofTrack: videoTrack, atTime: kCMTimeZero)

        compositionvideoTrack.preferredTransform = videoTrack.preferredTransform



        let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
        try! compositionAudioTrack.insertTimeRange(audio_timerange, ofTrack: audioTrack, atTime: kCMTimeZero)

        compositionvideoTrack.preferredTransform = audioTrack.preferredTransform

     catch 
        print(error)
    

【讨论】:

【参考方案5】:

作为补充,这里有一个函数,它基于通过复制其旋转、缩放和字体提供的数组 UITextViews 创建 CATextLayers。只需将这些添加到提供给 AVVideoCompositionCoreAnimationTool 的容器层:

private static func createTextLayer(totalSize: CGSize,
                                        textView: UITextView) -> CATextLayer 
        let textLayer: CACenteredTextLayer = CACenteredTextLayer()
        textLayer.backgroundColor = UIColor.clear
        textLayer.foregroundColor = textView.textColor?.cgColor
        textLayer.masksToBounds = false
        textLayer.isWrapped = true

        let scale: CGFloat = UIScreen.main.scale

        if let font: UIFont = textView.font 
            let upscaledFont: UIFont = font.withSize(font.pointSize * scale)
            let attributedString = NSAttributedString(
                string: textView.text,
                attributes: [NSAttributedString.Key.font: upscaledFont,
                             NSAttributedString.Key.foregroundColor: textView.textColor ?? UIColor.white])
            textLayer.string = attributedString
        

        // Set text alignment
        let alignment: CATextLayerAlignmentMode
        switch textView.textAlignment 
        case NSTextAlignment.left:
            alignment = CATextLayerAlignmentMode.left
        case NSTextAlignment.center:
            alignment = CATextLayerAlignmentMode.center
        default:
            alignment = CATextLayerAlignmentMode.right
        
        textLayer.alignmentMode = alignment

        let originalFrame: CGRect = textView.frame

        // Also take scale into consideration
        let targetSize: CGSize = CGSize(width: originalFrame.width * scale,
                                        height: originalFrame.height * scale)

        // The CALayer positioning is inverted on the Y-axes, so apply this
        let origin: CGPoint = CGPoint(x: originalFrame.origin.x * scale,
                                      y: (totalSize.height - (originalFrame.origin.y * scale)) - targetSize.height)

        textLayer.frame = CGRect(x: origin.x,
                                 y: origin.y,
                                 width: targetSize.width,
                                 height: targetSize.height)

        // Determine the scale
        textLayer.anchorPoint = CGPoint(x: 0.5,
                                        y: 0.5)

        var newTransform: CATransform3D = CATransform3DMakeScale(textView.transform.xScale,
                                                                 textView.transform.yScale,
                                                                 0)

        // Convert to degrees, invert the amount and convert back to radians to apply
        newTransform = CATransform3DRotate(newTransform,
                                           textView.transform.radiansFor3DTransform,
                                           0,
                                           0,
                                           1)
        textLayer.transform = newTransform

        return textLayer

将它与 CATextLayer 的这个子类结合起来,使文本垂直居中:

final class CACenteredTextLayer: CATextLayer 
    override func draw(in ctx: CGContext) 
        guard let attributedString = string as? NSAttributedString else  return 

        let height = self.bounds.size.height
        let boundingRect: CGRect = attributedString.boundingRect(
            with: CGSize(width: bounds.width,
                         height: CGFloat.greatestFiniteMagnitude),
            options: NSStringDrawingOptions.usesLineFragmentOrigin,
            context: nil)
        let yDiff: CGFloat = (height - boundingRect.size.height) / 2

        ctx.saveGState()
        ctx.translateBy(x: 0.0, y: yDiff)
        super.draw(in: ctx)
        ctx.restoreGState()
    


private extension CGAffineTransform 
    var xScale: CGFloat 
        return sqrt((a*a) + (c*c))
    

    var yScale: CGFloat 
        return sqrt((b*b) + (d*d))
    

    var radiansFor3DTransform: CGFloat 
        let radians: CGFloat = atan2(b, a);
        let degrees: CGFloat = -(radians * 180 / CGFloat.pi)
        let convertedRadians: CGFloat = CGFloat(degrees * (CGFloat.pi / 180))
        return convertedRadians
    

【讨论】:

嘿,我尝试了您的解决方案,但没有运气。可以分享一下xScale、yScale、radiansFor3DTransform 我已经附加了它们。

以上是关于快速视频中的图像/文本叠加的主要内容,如果未能解决你的问题,请参考以下文章

通过单击图像叠加开始自动播放视频。需要为多个视频工作

AVAsset 在 iOS 中使用图像叠加保存视频会破坏视频

IOS safari 中的画布叠加在视频上

添加到视频合成并导出时,叠加图像会变得像素化

opencv视频逐帧提取,然后图像叠加得出一个图像要用到啥函数?

单击叠加图像以启动视频后,阻止页面移至顶部