AVAssetExportSession AVFoundationErrorDomain Code -11800 操作无法完成,NSOSStatusErrorDomain Code=-12780 "(null)"
Posted
技术标签:
【中文标题】AVAssetExportSession AVFoundationErrorDomain Code -11800 操作无法完成,NSOSStatusErrorDomain Code=-12780 "(null) in Swift iOS【英文标题】:AVAssetExportSession AVFoundationErrorDomain Code -11800 The operation could not be completed, NSOSStatusErrorDomain Code=-12780 "(null) in Swift iOS 【发布时间】:2019-11-17 17:07:58 【问题描述】:我正在用 Swift 开发基于视频的应用程序。我正在导出带有水印徽标和淡入淡出效果的视频剪辑。这是我的代码:
/// Exports `videoAsset` with an optional watermark image/text overlay and a one-second
/// fade-in/fade-out, trims it to the range described by `videoModal`, and optionally
/// saves the result to the photo library.
/// - Parameters:
///   - videoAsset: source asset; must contain at least one video track.
///   - videoModal: supplies the output file name (`fileID`) and trim range.
///   - text: optional watermark caption (only rendered when `name` is non-nil).
///   - name: optional bundled image name for the watermark logo.
///   - flag: when true, the exported file is also saved to the photo library.
///   - completion: invoked on the main queue with the export status, the session,
///     and the output URL (`nil` status/URL when the asset has no video track or export fails).
func watermark(video videoAsset: AVAsset, videoModal: VideoModel, watermarkText text: String!, imageName name: String!, saveToLibrary flag: Bool, watermarkPosition position: PDWatermarkPosition, withMode mode: SpeedoVideoMode, completion: ((_ status: AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL: URL?) -> ())?) {
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    let servicemodel = ServiceModel() // NOTE(review): unused in this function — confirm ServiceModel() has no required side effects before removing.
    _ = servicemodel
    DispatchQueue.global(qos: .default).sync {
        let mixComposition = AVMutableComposition()
        let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo,
                                                                   preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        // Bail out early when the asset has no video track at all.
        guard let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo).first else {
            completion?(nil, nil, nil)
            return
        }
        // Robustness fix: the original force-cast `videoAsset as! AVURLAsset` crashed for
        // non-URL assets (e.g. compositions); audio is now skipped for those instead.
        if let urlAsset = videoAsset as? AVURLAsset {
            self.addAudioTrack(composition: mixComposition, videoAsset: urlAsset, withMode: mode, videoModal: videoModal)
        }
        do {
            try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                                      of: clipVideoTrack, at: kCMTimeZero)
        } catch {
            print(error.localizedDescription)
        }

        let videoSize = clipVideoTrack.naturalSize
        // Layer tree for AVVideoCompositionCoreAnimationTool: video frames render into
        // `videoLayer`; watermark layers are siblings above it inside `parentLayer`.
        let parentLayer = CALayer()
        let videoLayer = CALayer()
        parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
        videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
        parentLayer.addSublayer(videoLayer)

        if name != nil {
            let watermarkImage = UIImage(named: name)
            let imageLayer = CALayer()
            imageLayer.contents = watermarkImage?.cgImage
            var xPosition: CGFloat = 0.0
            var yPosition: CGFloat = 0.0
            let imageSize: CGFloat = 150 // logo is rendered as a 150x150 square
            switch position {
            case .TopLeft:
                xPosition = 0
                yPosition = 0
            case .TopRight:
                xPosition = videoSize.width - imageSize - 100
                yPosition = 80
            case .BottomLeft:
                xPosition = 0
                yPosition = videoSize.height - imageSize
            case .BottomRight, .Default:
                xPosition = videoSize.width - imageSize
                yPosition = videoSize.height - imageSize
            }
            imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
            imageLayer.opacity = 0.75
            parentLayer.addSublayer(imageLayer)

            // The caption layer reuses `yPosition`/`imageSize`, so it only exists
            // when an image watermark was requested.
            if text != nil {
                let titleLayer = CATextLayer()
                titleLayer.backgroundColor = UIColor.clear.cgColor
                titleLayer.string = text
                titleLayer.font = "Helvetica" as CFTypeRef
                titleLayer.fontSize = 20
                titleLayer.alignmentMode = kCAAlignmentRight
                titleLayer.frame = CGRect(x: 0, y: yPosition - imageSize, width: videoSize.width - imageSize/2 - 4, height: 57)
                titleLayer.foregroundColor = UIColor.lightGray.cgColor
                parentLayer.addSublayer(titleLayer)
            }
        }

        let videoComp = AVMutableVideoComposition()
        videoComp.renderSize = videoSize
        videoComp.frameDuration = CMTimeMake(1, 30) // 30 fps
        videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)

        let instruction = AVMutableVideoCompositionInstruction()
        instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
        // NOTE(review): the accepted fix for the -11800/-12780 failure is to build the
        // layer instruction from the source `clipVideoTrack` (an AVAssetTrack) instead of
        // the composition track — switch to `clipVideoTrack` once
        // `videoCompositionInstructionForTrack` accepts an AVAssetTrack parameter.
        let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)

        // Fade in over the first second of the clip…
        let fadeInRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1000),
                                      end: CMTime(seconds: 1, preferredTimescale: 1000))
        layerInstruction.setOpacityRamp(fromStartOpacity: 0.1, toEndOpacity: 1.0, timeRange: fadeInRange)
        // …and fade out over the last second.
        let fadeOutRange = CMTimeRange(start: CMTime(seconds: videoAsset.duration.seconds - 1, preferredTimescale: 1000),
                                       end: CMTime(seconds: videoAsset.duration.seconds, preferredTimescale: 1000))
        layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.1, timeRange: fadeOutRange)

        arrayLayerInstructions.append(layerInstruction)
        instruction.layerInstructions = arrayLayerInstructions
        videoComp.instructions = [instruction]

        // Export to Documents/<fileID>.mov, deleting any stale file first
        // (AVAssetExportSession fails when the output URL already exists).
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
        let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("\(videoModal.fileID).mov")
        let fileManager = FileManager.default
        do {
            if fileManager.fileExists(atPath: url.path) {
                print("FILE AVAILABLE")
                try fileManager.removeItem(atPath: url.path)
            } else {
                print("FILE NOT AVAILABLE")
            }
        } catch {
            // Bug fix: the original `catch _` silently swallowed removal failures,
            // which later surfaces as an opaque export error.
            print(error.localizedDescription)
        }

        let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter?.outputURL = url
        exporter?.outputFileType = AVFileTypeQuickTimeMovie
        exporter?.timeRange = CMTimeRange(start: CMTime(seconds: Double(videoModal.leftRangeValue), preferredTimescale: 1000),
                                          end: CMTime(seconds: Double(videoModal.rightRangeValue), preferredTimescale: 1000))
        exporter?.shouldOptimizeForNetworkUse = false
        exporter?.videoComposition = videoComp
        // Bug fix: the original called exportAsynchronously() with no completion handler
        // and then read `exporter.status` on an immediate main-queue hop, racing the
        // export and reporting failure before the session had finished. The status is
        // now inspected inside the export completion handler.
        exporter?.exportAsynchronously {
            DispatchQueue.main.async {
                guard exporter?.status == AVAssetExportSessionStatus.completed else {
                    // Export failed or was cancelled.
                    completion?(exporter?.status, exporter, nil)
                    return
                }
                let outputURL = exporter?.outputURL
                if flag, let path = outputURL?.path, UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(path) {
                    // Bug fix: `phphotoLibrary` → `PHPhotoLibrary` (typo did not compile).
                    PHPhotoLibrary.shared().performChanges({
                        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
                    }) { saved, error in
                        // The original reported .completed whether or not the library save
                        // succeeded; that behavior is preserved (save failure is non-fatal).
                        completion?(AVAssetExportSessionStatus.completed, exporter, outputURL)
                    }
                } else {
                    // Robustness fix: the original never invoked completion when `flag` was
                    // true but the file was album-incompatible, or when `flag` was false.
                    completion?(AVAssetExportSessionStatus.completed, exporter, outputURL)
                }
            }
        }
    }
}
/// Copies every audio track of `videoAsset` into a single mutable audio track of
/// `composition`, each inserted at time zero.
/// - Note: `mode` and `videoModal` are accepted for interface compatibility but are
///   not read in this implementation.
func addAudioTrack(composition: AVMutableComposition, videoAsset: AVURLAsset, withMode mode: SpeedoVideoMode, videoModal: VideoFileModel) {
    // Consistency/bug fix: use kCMPersistentTrackID_Invalid (as the video-track creation
    // in watermark() does) so AVFoundation assigns a fresh track ID; the original
    // `CMPersistentTrackID()` produced track ID 0 on every call.
    let compositionAudioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio,
                                                                                       preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    for audioTrack in videoAsset.tracks(withMediaType: AVMediaTypeAudio) {
        do {
            // NOTE(review): inserting `audioTrack.timeRange` (whose start may not be zero)
            // at kCMTimeZero can misalign audio for some assets — confirm whether
            // CMTimeRangeMake(kCMTimeZero, audioTrack.timeRange.duration) is intended.
            try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
        } catch {
            // Bug fix: the original `try!` crashed the app on insertion failure;
            // log and continue with the remaining tracks instead.
            print(error.localizedDescription)
        }
    }
}
/// Builds a layer instruction that scales (and, for rotated footage, re-orients)
/// `track` so the asset's first video track renders upright at screen width.
/// - Parameter track: generalized from `AVCompositionTrack` to its superclass
///   `AVAssetTrack` (the fix discussed for the -11800 export failure) so the original
///   source track can be passed directly; existing callers passing a composition
///   track still compile unchanged.
/// - Returns: a layer instruction with the computed transform set at time zero.
func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
    let transform = assetTrack.preferredTransform
    let assetInfo = orientationFromTransform(transform: transform)
    // 375 appears to be a design reference width in points — TODO confirm against the app's layout.
    var scaleToFitRatio = UIScreen.main.bounds.width / 375
    if assetInfo.isPortrait {
        // Portrait footage stores its visual width in naturalSize.height.
        scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: kCMTimeZero)
    } else {
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        // Cleanup: dropped the original's no-op concatenation with a (0, 0) translation.
        var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
        if assetInfo.orientation == .down {
            // Flip upside-down footage 180° and translate it back into the render area.
            let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
            let windowBounds = UIScreen.main.bounds
            let yFix = 375 + windowBounds.height // NOTE(review): 375 looks like the same design width — verify.
            let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: CGFloat(yFix))
            concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
        }
        instruction.setTransform(concat, at: kCMTimeZero)
    }
    return instruction
}
/// Derives the display orientation encoded in a video track's preferred transform,
/// and whether that orientation is portrait. Unrecognized matrices are reported as
/// upright landscape (`.up`, false).
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
    switch (transform.a, transform.b, transform.c, transform.d) {
    case (0, 1.0, -1.0, 0):
        return (.right, true)   // rotated 90° clockwise
    case (0, -1.0, 1.0, 0):
        return (.left, true)    // rotated 90° counter-clockwise
    case (1.0, 0, 0, 1.0):
        return (.up, false)     // identity matrix
    case (-1.0, 0, 0, -1.0):
        return (.down, false)   // rotated 180°
    default:
        return (.up, false)     // fallback mirrors the original defaults
    }
}
我的代码适用于某些视频,但有时它也不适用于某些视频。由于 AVAssetExportSessionStatus 失败,我收到以下错误:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSLocalizedFailureReason=An unknown error occurred (-12780), NSLocalizedDescription=The operation could not be completed, NSUnderlyingError=0x28262c240 {Error Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}}
谁能帮我解决这个问题?提前谢谢你。
【问题讨论】:
您的问题与时间有关。视频没有得到适当的时间,这个方法也是做什么的,你能发帖吗?let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
?您能否提供一个演示,然后它也会有所帮助
让我检查一下,我会更新你!
你又错过了addAudioTrack
的方法,你能不能给我发一份没有任何第三方东西或自定义东西的文件,让我帮忙!
哦,好的,现在更新@SohilR.Memon 请检查
最后一个方法orientationFromTransform
,可以更新一下吗?
【参考方案1】:
这个方法func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction
是错误的,因为你需要提供和AVAssetTrack
有实际的视频。
但是,您传递的不是那个,而是仍然需要组合的 AVCompositionTrack
,所以用这个 func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction
替换您的方法。
现在调用实际方法时,需要传递clipVideoTrack
,即let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset)
。
如果您仍然面临错误,请告诉我!
【讨论】:
以上是关于AVAssetExportSession AVFoundationErrorDomain Code -11800 操作无法完成,NSOSStatusErrorDomain Code=-12780 "(null)" 的主要内容,如果未能解决你的问题,请参考以下文章
AVAssetExportSession 错误 -11820
AVAssetExportSession - 如何从视频持续时间中修剪毫秒