Decompression error: Error Domain=NSOSStatusErrorDomain Code=-12909
0 votes
/ 14 February 2020

I am using H.264. Below is the link I am referring to for decompressing video with the VideoToolbox framework:

{ link }

Not all, but a few videos get stuck at a specific position.

Below are the error logs I get while decompressing a frame:

NALU Raw: 00, 00, 00, 01, 41, 9a, 00, 18
~~~~~~~ Received NALU Type "1: Coded slice of a non-IDR picture (VCL)" ~~~~~~~~
Decompressed error: Error Domain=NSOSStatusErrorDomain Code=-12909 "(null)"
  • Xcode version: 11.2.1
  • Deployment target: 10.0

If you need any more information, feel free to ask.

My VideoDecoder class code is below:

// imports added; the snippet as posted omitted them
#import "VideoDecoder.h"   // assumed header; also assumed to declare the delegate used below
#import <AVFoundation/AVFoundation.h>
#import <VideoToolbox/VideoToolbox.h>
#import <CoreImage/CoreImage.h>
#import <UIKit/UIKit.h>

@interface VideoDecoder () {
    AVSampleBufferDisplayLayer *mVideoLayer;
    long videoTimestamp;
}
@property (nonatomic, assign) CMVideoFormatDescriptionRef formatDesc;
@property (nonatomic, assign) VTDecompressionSessionRef decompressionSession;
@property (nonatomic, assign) int spsSize;
@property (nonatomic, assign) int ppsSize;
@property (nonatomic, assign) BOOL startDumpData; // was missing as posted; set in -init, read in -receivedRawVideoFrame:
@property (nonatomic, retain) NSMutableData* streamVideoData;
@end

// forward declaration so -createDecompSession can take the callback's address
void decompressionSessionDecodeFrameCallback(void *, void *, OSStatus,
        VTDecodeInfoFlags, CVImageBufferRef, CMTime, CMTime);

@implementation VideoDecoder



NSString * const naluTypesStrings[] = {
    @"0: Unspecified (non-VCL)",
    @"1: Coded slice of a non-IDR picture (VCL)", // P frame
    @"2: Coded slice data partition A (VCL)",
    @"3: Coded slice data partition B (VCL)",
    @"4: Coded slice data partition C (VCL)",
    @"5: Coded slice of an IDR picture (VCL)", // I frame
    @"6: Supplemental enhancement information (SEI) (non-VCL)",
    @"7: Sequence parameter set (non-VCL)", // SPS parameter
    @"8: Picture parameter set (non-VCL)", // PPS parameter
    @"9: Access unit delimiter (non-VCL)",
    @"10: End of sequence (non-VCL)",
    @"11: End of stream (non-VCL)",
    @"12: Filler data (non-VCL)",
    @"13: Sequence parameter set extension (non-VCL)",
    @"14: Prefix NAL unit (non-VCL)",
    @"15: Subset sequence parameter set (non-VCL)",
    @"16: Reserved (non-VCL)",
    @"17: Reserved (non-VCL)",
    @"18: Reserved (non-VCL)",
    @"19: Coded slice of an auxiliary coded picture without partitioning (non-VCL)",
    @"20: Coded slice extension (non-VCL)",
    @"21: Coded slice extension for depth view components (non-VCL)",
    @"22: Reserved (non-VCL)",
    @"23: Reserved (non-VCL)",
    @"24: STAP-A Single-time aggregation packet (non-VCL)",
    @"25: STAP-B Single-time aggregation packet (non-VCL)",
    @"26: MTAP16 Multi-time aggregation packet (non-VCL)",
    @"27: MTAP24 Multi-time aggregation packet (non-VCL)",
    @"28: FU-A Fragmentation unit (non-VCL)",
    @"29: FU-B Fragmentation unit (non-VCL)",
    @"30: Unspecified (non-VCL)",
    @"31: Unspecified (non-VCL)",
};

- (instancetype)init {
    self = [super init];
    if (self) {
//        _videoLayer = [[AVSampleBufferDisplayLayer alloc] init];
////        _videoLayer.frame = self.view.frame;
////        _videoLayer.bounds = self.view.bounds;
//        _videoLayer.frame = CGRectMake(0, 0, 1280, 720);
//        _videoLayer.bounds = CGRectMake(0, 0, 1280, 720);
//        _videoLayer.videoGravity = AVLayerVideoGravityResizeAspect;
//
//        // set Timebase, you may need this if you need to display frames at specific times
//        // I didn't need it so I haven't verified that the timebase is working
//        CMTimebaseRef controlTimebase;
//        CMTimebaseCreateWithMasterClock(CFAllocatorGetDefault(), CMClockGetHostTimeClock(), &controlTimebase);
//
//        //videoLayer.controlTimebase = controlTimebase;
//        CMTimebaseSetTime(_videoLayer.controlTimebase, kCMTimeZero);
//        CMTimebaseSetRate(_videoLayer.controlTimebase, 1.0);

        self.streamVideoData = nil;
        self.startDumpData = NO;
        videoTimestamp = 0;
    }
    return self;
}

- (void)initDebugStreamingVideo
{
    self.streamVideoData = [[NSMutableData alloc] init];
}

- (void)releaseVideoDecorder
{
    if (_decompressionSession != NULL)
    {
        VTDecompressionSessionInvalidate(_decompressionSession);
        _decompressionSession = NULL;
    }

    videoTimestamp = 0;
}

- (long)getVideoTimeStamp
{
    return videoTimestamp;
}

- (void)setVideoTimestamp:(long)timestamp
{
    videoTimestamp = timestamp;

#ifdef DEBUG
    NSLog(@"(sync)video: %2.1f", (float)videoTimestamp/1000000.);
#endif
}

- (NSString*)getLiveStreamingMP4Path
{
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
                                                         NSUserDomainMask, YES);
    NSString *rootOfCachepath = [paths objectAtIndex:0];
    NSString* nalFilePath = [self getUniqueFilePath:rootOfCachepath FileNamePrefix:@"liveRecord" FileNameSubfix:@"MP4"];

    return nalFilePath;
}

- (NSString*)closeAndSaveDebugStreamingVideo
{
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
                                                         NSUserDomainMask, YES);
    NSString *rootOfCachepath = [paths objectAtIndex:0];
    NSString* nalFilePath = [self getUniqueFilePath:rootOfCachepath FileNamePrefix:@"liveRecord" FileNameSubfix:@"264"];

    if (self.streamVideoData.length > 0)
        [self.streamVideoData writeToFile:nalFilePath atomically:YES];

    self.streamVideoData = [[NSMutableData alloc] init];

    return nalFilePath;
}

- (NSString*)getUniqueFilePath:(NSString*)parentFolder FileNamePrefix:(NSString*)fileName FileNameSubfix:(NSString*)subFix
{
    NSString* fullFilePath = nil;
    NSString* memoFileName = @"";

    NSDate *currentDate = [NSDate date];
    NSDateFormatter *currentDateFormat = [[NSDateFormatter alloc] init];
    [currentDateFormat setDateFormat:@"yyyyMMddHHmmss"];
    NSString *currentDateString = [currentDateFormat stringFromDate:currentDate];

    memoFileName = [NSString stringWithFormat:@"%@_%@.%@", fileName, currentDateString, subFix];

    fullFilePath = [parentFolder stringByAppendingPathComponent:memoFileName];

    return fullFilePath;
}


//- (void)setVideoLayer:(AVSampleBufferDisplayLayer *)layer {
//    mVideoLayer = layer;
//    CMTimebaseRef controlTimebase;
//    CMTimebaseCreateWithMasterClock(CFAllocatorGetDefault(), CMClockGetHostTimeClock(), &controlTimebase);
//
//    mVideoLayer.controlTimebase = controlTimebase;
//    CMTimebaseSetTime(mVideoLayer.controlTimebase, kCMTimeZero);
//    CMTimebaseSetRate(mVideoLayer.controlTimebase, 1.0);
//}

- (BOOL)checkIfThisIsIDRFrame:(uint8_t *)frame withSize:(uint32_t)frameSize {

    BOOL isIDRFrame = NO;
    int startCodeIndex = 0;

    int pi = 0;
    if (frame[pi] == 0x00 && frame[pi+1] == 0x00 && frame[pi+2] == 0x00 && frame[pi+3] == 0x01 && frame[pi+4] == 0x09 && frame[pi+5] == 0x50)
        startCodeIndex = 6;

//    NSLog(@"NALU Raw: %02X, %02x, %02x, %02x, %02X, %02x, %02x, %02x", frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],frame[6],frame[7]);
    int nalu_type = (frame[startCodeIndex + 4] & 0x1F);
//    NSLog(@"~~~~~~~ Received NALU Type \"%@\" ~~~~~~~~", naluTypesStrings[nalu_type]);

    // if we haven't already set up our format description with our SPS and PPS parameters,
    // we can't process any frames except type 7, which carries those parameters
    if (nalu_type != 7 && _formatDesc == NULL) {
        NSLog(@"Video error: Frame is not an I Frame and format description is null");
        return isIDRFrame;
    }

    // NALU type 7 is the SPS parameter NALU
    if (nalu_type == 7) {
        isIDRFrame = YES;
    }

    return isIDRFrame;
}

- (void)receivedRawVideoFrame:(uint8_t *)frame withSize:(uint32_t)frameSize {
    OSStatus status = 0;

    uint8_t *data = NULL;
    uint8_t *pps = NULL;
    uint8_t *sps = NULL;

#if defined(DEBUG) || defined(_RECORD_USE_LIVE_PACKAGE)
    if (self.startDumpData && self.streamVideoData && frameSize <= 512*1024)
        [self.streamVideoData appendBytes:(const void*)frame length:frameSize];
    //if (self.streamVideoData && frameSize <= 512*1024)
    //    [self.streamVideoData appendBytes:(const void*)frame length:frameSize];
#endif

    // I know what my H.264 data source's NALUs look like, so I know the start code index is always 0.
    // if you don't know where it starts, you can use a for loop similar to how I find the 2nd and 3rd start codes
    int startCodeIndex = 0;
    int secondStartCodeIndex = 0;
    int thirdStartCodeIndex = 0;

//#ifdef DEBUG
    int pi = 0;
    if (frame[pi] == 0x00 && frame[pi+1] == 0x00 && frame[pi+2] == 0x00 && frame[pi+3] == 0x01 && frame[pi+4] == 0x09 && frame[pi+5] == 0x50)
        startCodeIndex = 6;
//#endif

    long blockLength = 0;
    BOOL withSPSPPS = NO;

    CMSampleBufferRef sampleBuffer = NULL;
    CMBlockBufferRef blockBuffer = NULL;

    NSLog(@"NALU Raw: %02X, %02x, %02x, %02x, %02X, %02x, %02x, %02x", frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],frame[6],frame[7]);
    int nalu_type = (frame[startCodeIndex + 4] & 0x1F);
    NSLog(@"~~~~~~~ Received NALU Type \"%@\" ~~~~~~~~", naluTypesStrings[nalu_type]);

    // if we haven't already set up our format description with our SPS and PPS parameters,
    // we can't process any frames except type 7, which carries those parameters
    if (nalu_type != 7 && _formatDesc == NULL) {
        NSLog(@"Video error: Frame is not an I Frame and format description is null");
        return;
    }

    // NALU type 7 is the SPS parameter NALU
    if (nalu_type == 7) {
        // find where the second PPS start code begins, (the 0x00 00 00 01 code)
        // from which we also get the length of the first SPS code
        for (int i = startCodeIndex + 4; i < startCodeIndex + 40; i++) {
            if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01) {
                secondStartCodeIndex = i;
                //_spsSize = secondStartCodeIndex;   // includes the header in the size
//#ifdef DEBUG
                _spsSize = secondStartCodeIndex - startCodeIndex;   // includes the header in the size
//#endif

                break;
            }
        }

        // find what the second NALU type is
        nalu_type = (frame[secondStartCodeIndex + 4] & 0x1F);
//        NSLog(@"~~~n7~~ Received NALU Type \"%@\" ~~~~~~~~", naluTypesStrings[nalu_type]);
    }


    // type 8 is the PPS parameter NALU
    if(nalu_type == 8) {

        // find where the NALU after this one starts so we know how long the PPS parameter is
//#ifdef DEBUG
        for (int i = _spsSize + 4 + startCodeIndex; i < _spsSize + 30; i++) {
//#endif
        //for (int i = _spsSize + 4; i < _spsSize + 30; i++) {
            if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01) {
                thirdStartCodeIndex = i;
                //_ppsSize = thirdStartCodeIndex - _spsSize;
//#ifdef DEBUG
                _ppsSize = thirdStartCodeIndex - _spsSize - startCodeIndex;
//#endif
                break;
            }
        }

        // allocate enough data to fit the SPS and PPS parameters into our data objects.
        // VTD doesn't want you to include the start code header (4 bytes long), so we subtract 4 here
        sps = malloc(_spsSize - 4);
        pps = malloc(_ppsSize - 4);

        // copy in the actual sps and pps values, again ignoring the 4 byte header
//#ifdef DEBUG
        memcpy (sps, &frame[4+startCodeIndex], _spsSize-4);
        memcpy (pps, &frame[_spsSize+4+startCodeIndex], _ppsSize-4);
        NSLog(@"SPS Raw: %02X, %02x, %02x, %02x, %02X, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x", sps[0],sps[1],sps[2],sps[3],sps[4],sps[5],sps[6],sps[7],sps[8],sps[9],sps[10],sps[11],sps[12],sps[13],sps[14],sps[15]);
        NSLog(@"PPS Raw: %02X, %02x, %02x, %02x", pps[0],pps[1],pps[2],pps[3]);
//#endif
//        memcpy (sps, &frame[4], _spsSize-4);
//        memcpy (pps, &frame[_spsSize+4], _ppsSize-4);

        // now we set our H264 parameters
        uint8_t*  parameterSetPointers[2] = {sps, pps};
        size_t parameterSetSizes[2] = {_spsSize-4, _ppsSize-4};

        status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault, 2,
                                                                     (const uint8_t *const*)parameterSetPointers,
                                                                     parameterSetSizes, 4,
                                                                     &_formatDesc);

//        NSLog(@"\t\t Creation of CMVideoFormatDescription: %@", (status == noErr) ? @"successful!" : @"failed...");
        if(status != noErr)
            NSLog(@"\t\t Format Description ERROR type: %d", (int)status);

        // See if decomp session can convert from previous format description
        // to the new one, if not we need to remake the decomp session.
        // This snippet was not necessary for my applications but it could be for yours
        /*BOOL needNewDecompSession = (VTDecompressionSessionCanAcceptFormatDescription(_decompressionSession, _formatDesc) == NO);
         if(needNewDecompSession)
         {
         [self createDecompSession];
         }*/

        // now lets handle the IDR frame that (should) come after the parameter sets
        // I say "should" because that's how I expect my H264 stream to work, YMMV
        nalu_type = (frame[thirdStartCodeIndex + 4] & 0x1F);
//        NSLog(@"~~~n8~~ Received NALU Type \"%@\" ~~~~~~~~", naluTypesStrings[nalu_type]);

        withSPSPPS = YES;
    }

    // create our VTDecompressionSession.  This isn't necessary if you choose to use AVSampleBufferDisplayLayer
    //
    if (videoTimestamp == 0 && _decompressionSession != NULL)
    {
        VTDecompressionSessionInvalidate(_decompressionSession);
        _decompressionSession = NULL;
    }
    if ((status == noErr) && (_decompressionSession == NULL)) {
        [self createDecompSession];
    }

    // type 5 is an IDR frame NALU.  The SPS and PPS NALUs should always be followed by an IDR (or IFrame) NALU, as far as I know
    if(nalu_type == 5) {
        // find the offset, or where the SPS and PPS NALUs end and the IDR frame NALU begins
//#ifdef DEBUG
        int offset = _spsSize + _ppsSize + startCodeIndex;
        NSLog(@"Start IDR at %d", offset);
//#endif
//        int offset = _spsSize + _ppsSize;
        blockLength = frameSize - offset;
        //        NSLog(@"Block Length : %ld", blockLength);
        data = malloc(blockLength);
        data = memcpy(data, &frame[offset], blockLength);

        // replace the start code header on this NALU with its size.
        // AVCC format requires that you do this.
        // htonl converts the unsigned int from host to network byte order
        uint32_t dataLength32 = htonl (blockLength - 4);
        memcpy (data, &dataLength32, sizeof (uint32_t));

        // create a block buffer from the IDR NALU
        status = CMBlockBufferCreateWithMemoryBlock(NULL, data,  // memoryBlock to hold buffered data
                                                    blockLength,  // block length of the mem block in bytes.
                                                    kCFAllocatorNull, NULL,
                                                    0, // offsetToData
                                                    blockLength,   // dataLength of relevant bytes, starting at offsetToData
                                                    0, &blockBuffer);

//        NSLog(@"\t\t BlockBufferCreation: \t %@", (status == kCMBlockBufferNoErr) ? @"successful!" : @"failed...");
    }

    // NALU type 1 is non-IDR (or PFrame) picture
    if (nalu_type == 1) {
        // non-IDR frames do not have an offset due to SPS and PPS, so the approach
        // is similar to the IDR frames, just without the offset
//#ifdef DEBUG
        if (withSPSPPS)
        {
            blockLength = frameSize-(_spsSize + _ppsSize + startCodeIndex);
            data = malloc(blockLength);
            data = memcpy(data, &frame[0+startCodeIndex+ _spsSize + _ppsSize], blockLength);
        }
        else
        {
            blockLength = frameSize-startCodeIndex;
            data = malloc(blockLength);
            data = memcpy(data, &frame[0+startCodeIndex], blockLength);

        }
//#endif
//        blockLength = frameSize;
//        data = malloc(blockLength);
//        data = memcpy(data, &frame[0], blockLength);

        // again, replace the start header with the size of the NALU
        uint32_t dataLength32 = htonl (blockLength - 4);
        memcpy (data, &dataLength32, sizeof (uint32_t));

        status = CMBlockBufferCreateWithMemoryBlock(NULL, data,  // memoryBlock to hold data. If NULL, block will be alloc when needed
                                                    blockLength,  // overall length of the mem block in bytes
                                                    kCFAllocatorNull, NULL,
                                                    0,     // offsetToData
                                                    blockLength,  // dataLength of relevant data bytes, starting at offsetToData
                                                    0, &blockBuffer);

//        NSLog(@"\t\t BlockBufferCreation: \t %@", (status == kCMBlockBufferNoErr) ? @"successful!" : @"failed...");
    }

    // now create our sample buffer from the block buffer,
    if(status == noErr) {
        // here I'm not bothering with any timing specifics since in my case we displayed all frames immediately
        const size_t sampleSize = blockLength;

        status = CMSampleBufferCreate(kCFAllocatorDefault,
                                      blockBuffer, true, NULL, NULL,
                                      _formatDesc, 1, 0, NULL, 1,
                                      &sampleSize, &sampleBuffer);

//        NSLog(@"\t\t SampleBufferCreate: \t %@", (status == noErr) ? @"successful!" : @"failed...");
    }

    if(status == noErr) {
        // set some values of the sample buffer's attachments
        CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, YES);
        CFMutableDictionaryRef dict = (CFMutableDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
        CFDictionarySetValue(dict, kCMSampleAttachmentKey_DisplayImmediately, kCFBooleanTrue);

        // either send the samplebuffer to a VTDecompressionSession or to an AVSampleBufferDisplayLayer
        [self render:sampleBuffer];
    }

    // free memory to avoid a memory leak; as the comment says, do the same for
    // sps, pps and blockBuffer (the sample buffer holds its own reference)
    if (NULL != data) {
        free (data);
        data = NULL;
    }
    if (NULL != sps)  { free (sps);  sps = NULL; }
    if (NULL != pps)  { free (pps);  pps = NULL; }
    if (NULL != blockBuffer) { CFRelease(blockBuffer); blockBuffer = NULL; }
    return;
}

- (void) createDecompSession {
    // make sure to destroy the old VTD session
    _decompressionSession = NULL;
    VTDecompressionOutputCallbackRecord callBackRecord;
    callBackRecord.decompressionOutputCallback = decompressionSessionDecodeFrameCallback;

    // this is necessary if you need to make calls to Objective-C "self" from within the callback method.
    callBackRecord.decompressionOutputRefCon = (__bridge void *)self;

    // you can set some desired attributes for the destination pixel buffer.  I didn't use this but you may
    // if you need to set some attributes, be sure to uncomment the dictionary in VTDecompressionSessionCreate
    /*NSDictionary *destinationImageBufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
     [NSNumber numberWithBool:YES],
     (id)kCVPixelBufferOpenGLESCompatibilityKey,
     nil];*/

    OSStatus status =  VTDecompressionSessionCreate(NULL, _formatDesc, NULL,
                                                    NULL, // (__bridge CFDictionaryRef)(destinationImageBufferAttributes)
                                                    &callBackRecord, &_decompressionSession);
    NSLog(@"Video Decompression Session Create: \t %@", (status == noErr) ? @"successful!" : @"failed...");
    if(status != noErr)
        NSLog(@"\t\t VTD ERROR type: %d", (int)status);
}

void decompressionSessionDecodeFrameCallback(void *decompressionOutputRefCon,
                                             void *sourceFrameRefCon,
                                             OSStatus status,
                                             VTDecodeInfoFlags infoFlags,
                                             CVImageBufferRef imageBuffer,
                                             CMTime presentationTimeStamp,
                                             CMTime presentationDuration) {
    if (status != noErr) {
        NSError *error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
        NSLog(@"Decompressed error: %@", error);
    }
    else {
        //NSLog(@"Decompressed sucessfully: pts: %f", CMTimeGetSeconds(presentationTimeStamp));

//        NSLog(@"Decompressed sucessfully");
        CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
        CIContext *temporaryContext = [CIContext contextWithOptions:nil];
        CGImageRef videoImage = [temporaryContext
                                 createCGImage:ciImage
                                 fromRect:CGRectMake(0, 0,
                                                     CVPixelBufferGetWidth(imageBuffer),
                                                     CVPixelBufferGetHeight(imageBuffer))];

        UIImage *image = [[UIImage alloc] initWithCGImage:videoImage];
        CGImageRelease(videoImage);

        VideoDecoder *decoder = (__bridge VideoDecoder *)decompressionOutputRefCon;
        [decoder.delegate videoDecoderImage:image];
//        [decoder renderImage:image];

    }
}

- (void)renderImage:(UIImage *)img {
    dispatch_async(dispatch_get_main_queue(), ^{
//        [self->mVideoImageView setImage:img];
        [self->_delegate videoDecoderImage:img];
    });
}

- (void)render:(CMSampleBufferRef)sampleBuffer {

     VTDecodeFrameFlags flags = kVTDecodeFrame_EnableAsynchronousDecompression;
     VTDecodeInfoFlags flagOut;
     NSDate* currentTime = [NSDate date];
     OSStatus status = VTDecompressionSessionDecodeFrame(_decompressionSession, sampleBuffer, flags,
     (void*)CFBridgingRetain(currentTime), &flagOut);

    if (noErr != status)
        NSLog(@"video decode error: %d", status);

     CFRelease(sampleBuffer);

    // if you're using AVSampleBufferDisplayLayer, you only need to use this line of code
//    if (mVideoLayer) {
//        [mVideoLayer enqueueSampleBuffer:sampleBuffer];
//    }
}

@end

Thanks in advance.

1 Answer

0 votes
/ 14 February 2020

Whenever you get error -12909 (kVTVideoDecoderBadDataErr), make sure you can successfully create the block buffer via CMBlockBufferCreateWithMemoryBlock using the SPS & PPS values.
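A minimal sketch of that check, reusing the variable names from the question's type-8 branch (the early return on failure is an addition; the question's version only logs the status):

// Bail out as soon as the SPS/PPS-derived format description fails to build:
// decoding the following frames against a missing or stale format description
// is what later shows up as -12909 on the non-IDR frames.
OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
        kCFAllocatorDefault, 2,
        (const uint8_t *const *)parameterSetPointers,
        parameterSetSizes, 4, &_formatDesc);
if (status != noErr) {
    NSLog(@"Format description creation failed: %d", (int)status);
    return;
}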

For some NAL units (those containing an IDR frame) you won't get the SPS + PPS + IDR sequence; instead you may get SPS + PPS + SEI + IDR.
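For illustration, assuming 4-byte Annex-B start codes (the header bytes 0x67/0x68/0x06/0x65 are typical values; the low five bits are the NALU type):

00 00 00 01 67 ...   SPS  (0x67 & 0x1F == 7)
00 00 00 01 68 ...   PPS  (0x68 & 0x1F == 8)
00 00 00 01 65 ...   IDR  (0x65 & 0x1F == 5)

versus

00 00 00 01 67 ...   SPS
00 00 00 01 68 ...   PPS
00 00 00 01 06 ...   SEI  (0x06 & 0x1F == 6)   <- extra non-VCL unit
00 00 00 01 65 ...   IDR

In the second layout, a fixed offset of _spsSize + _ppsSize (as computed in the question's type-5 branch) lands on the SEI header instead of the IDR slice.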

So if you follow the reference link you mentioned above, it may not work for an SPS + PPS + SEI + IDR NALU: because the CMBlockBufferCreateWithMemoryBlock creation fails (or never happens), the subsequent non-IDR frames may fail to decompress.

As for the SEI, since it is non-VCL you can simply iterate over it to find the next start code; you don't have to do anything with the SEI itself for the decompression to succeed, as sketched below.
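A minimal sketch of that iteration, assuming 4-byte (00 00 00 01) start codes; findNextStartCode and offsetOfFirstVCLNALU are hypothetical helpers, not part of the question's code:

// Scan forward for the next 00 00 00 01 start code; returns -1 if none.
static int findNextStartCode(const uint8_t *buf, int from, int size) {
    for (int i = from; i + 3 < size; i++) {
        if (buf[i] == 0x00 && buf[i+1] == 0x00 &&
            buf[i+2] == 0x00 && buf[i+3] == 0x01)
            return i;
    }
    return -1;
}

// Return the offset of the first VCL NALU (type 1 or 5), stepping over any
// non-VCL units (SPS 7, PPS 8, SEI 6, AUD 9, ...) that precede it.
static int offsetOfFirstVCLNALU(const uint8_t *frame, int frameSize) {
    int offset = 0;
    while (offset >= 0 && offset + 4 < frameSize) {
        int nalu_type = frame[offset + 4] & 0x1F;
        if (nalu_type == 1 || nalu_type == 5)
            return offset;              // found the coded slice
        offset = findNextStartCode(frame, offset + 4, frameSize);
    }
    return -1;                          // no VCL NALU in this buffer
}

Using the offset returned here in the type-5 branch makes the extraction independent of whether a SEI (or an AUD) sits between the PPS and the IDR slice.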

...