mov.c
{ MKTAG( 'm', 'o', 'o', 'v' ), mov_read_moov },
{ MKTAG( 'm', 'v', 'h', 'd' ), mov_read_mvhd },
{ MKTAG( 'S', 'M', 'I', ' ' ), mov_read_smi }, /* Sorenson extension ??? */
{ MKTAG( 'a', 'l', 'a', 'c' ), mov_read_extradata }, /* alac specific atom */
{ MKTAG( 'a', 'v', 'c', 'C' ), mov_read_glbl },
{ MKTAG( 's', 't', 'b', 'l' ), mov_read_default },
{ MKTAG( 's', 't', 'c', 'o' ), mov_read_stco },
{ MKTAG( 's', 't', 's', 'c' ), mov_read_stsc },
{ MKTAG( 's', 't', 's', 'd' ), mov_read_stsd }, /* sample description */
{ MKTAG( 's', 't', 's', 's' ), mov_read_stss }, /* sync sample */
{ MKTAG( 's', 't', 's', 'z' ), mov_read_stsz }, /* sample size */
{ MKTAG( 's', 't', 't', 's' ), mov_read_stts },
{ MKTAG( 't', 'k', 'h', 'd' ), mov_read_tkhd }, /* track header */
{ MKTAG( 't', 'r', 'a', 'k' ), mov_read_trak },
{ MKTAG( 'u', 'd', 't', 'a' ), mov_read_udta },
{ MKTAG( 'w', 'a', 'v', 'e' ), mov_read_wave },
{ MKTAG( 'e', 's', 'd', 's' ), mov_read_esds },
{ MKTAG( 'w', 'i', 'd', 'e' ), mov_read_wide }, /* place holder */
{ MKTAG( 'c', 'm', 'o', 'v' ), mov_read_cmov },
{ 0, NULL }
};

/* XXX: is it sufficient ? */
static int mov_probe(AVProbeData *p)
{
    unsigned int offset;
    uint32_t tag;
    int score = 0;

    /* check file header */
    offset = 0;
    for(;;) {
        /* ignore invalid offset */
        if ((offset + 8) > (unsigned int)p->buf_size)
            return score;
        tag = AV_RL32(p->buf + offset + 4);
        switch(tag) {
        /* check for obvious tags */
        case MKTAG( 'j', 'P', ' ', ' ' ): /* jpeg 2000 signature */
        case MKTAG( 'm', 'o', 'o', 'v' ):
        case MKTAG( 'm', 'd', 'a', 't' ):
        case MKTAG( 'p', 'n', 'o', 't' ): /* detect movs with preview pics like ew.mov and april.mov */
        case MKTAG( 'u', 'd', 't', 'a' ): /* Packet Video PVAuthor adds this and a lot of more junk */
            return AVPROBE_SCORE_MAX;
        /* those are more common words, so rate then a bit less */
        case MKTAG( 'e', 'd', 'i', 'w' ): /* xdcam files have reverted first tags */
        case MKTAG( 'w', 'i', 'd', 'e' ):
        case MKTAG( 'f', 'r', 'e', 'e' ):
        case MKTAG( 'j', 'u', 'n', 'k' ):
        case MKTAG( 'p', 'i', 'c', 't' ):
            return AVPROBE_SCORE_MAX - 5;
        case MKTAG( 'f', 't', 'y', 'p' ):
        case MKTAG( 's', 'k', 'i', 'p' ):
        case MKTAG( 'u', 'u', 'i', 'd' ):
            offset = AV_RB32(p->buf+offset) + offset;
            /* if we only find those cause probedata is too small at least rate them */
            score = AVPROBE_SCORE_MAX - 50;
            break;
        default:
            /* unrecognized tag */
            return score;
        }
    }
    return score;
}

static void mov_build_index(MOVContext *mov, AVStream *st)
{
    MOVStreamContext *sc = st->priv_data;
    offset_t current_offset;
    int64_t current_dts = 0;
    unsigned int stts_index = 0;
    unsigned int stsc_index = 0;
    unsigned int stss_index = 0;
    unsigned int i, j, k;

    if (sc->sample_sizes || st->codec->codec_type == CODEC_TYPE_VIDEO || sc->audio_cid == -2) {
        unsigned int current_sample = 0;
        unsigned int stts_sample = 0;
        unsigned int keyframe, sample_size;
        unsigned int distance = 0;

        st->nb_frames = sc->sample_count;
        for (i = 0; i < sc->chunk_count; i++) {
            current_offset = sc->chunk_offsets[i];
            if (stsc_index + 1 < sc->sample_to_chunk_sz &&
                i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
                stsc_index++;
            for (j = 0; j < sc->sample_to_chunk[stsc_index].count; j++) {
                if (current_sample >= sc->sample_count) {
                    av_log(mov->fc, AV_LOG_ERROR, "wrong sample count\n");
                    goto out;
                }
                keyframe = !sc->keyframe_count || current_sample + 1 == sc->keyframes[stss_index];
                if (keyframe) {
                    distance = 0;
                    if (stss_index + 1 < sc->keyframe_count)
                        stss_index++;
                }
                sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
                dprintf(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
                        "size %d, distance %d, keyframe %d\n", st->index, current_sample,
                        current_offset, current_dts, sample_size, distance, keyframe);
                if(sc->sample_to_chunk[stsc_index].id - 1 == sc->pseudo_stream_id)
                    av_add_index_entry(st, current_offset, current_dts, sample_size, distance,
                                       keyframe ? AVINDEX_KEYFRAME : 0);
                current_offset += sample_size;
                assert(sc->stts_data[stts_index].duration % sc->time_rate == 0);
                current_dts += sc->stts_data[stts_index].duration / sc->time_rate;
                distance++;
                stts_sample++;
                current_sample++;
                if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
                    stts_sample = 0;
                    stts_index++;
                }
            }
        }
    } else { /* read whole chunk */
        unsigned int chunk_samples, chunk_size, chunk_duration;
        unsigned int frames = 1;
        for (i = 0; i < sc->chunk_count; i++) {
            current_offset = sc->chunk_offsets[i];
            if (stsc_index + 1 < sc->sample_to_chunk_sz &&
                i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
                stsc_index++;
            chunk_samples = sc->sample_to_chunk[stsc_index].count;
            /* get chunk size */
            if (sc->sample_size > 1 || st->codec->codec_id == CODEC_ID_PCM_U8 ||
                st->codec->codec_id == CODEC_ID_PCM_S8)
                chunk_size = chunk_samples * sc->sample_size;
            else if (sc->samples_per_frame > 0 &&
                     (chunk_samples * sc->bytes_per_frame % sc->samples_per_frame == 0)) {
                if (sc->samples_per_frame < 1024)
                    chunk_size = chunk_samples * sc->bytes_per_frame / sc->samples_per_frame;
                else {
                    chunk_size = sc->bytes_per_frame;
                    frames = chunk_samples / sc->samples_per_frame;
                    chunk_samples = sc->samples_per_frame;
                }
            } else { /* workaround to find nearest next chunk offset */
                chunk_size = INT_MAX;
                for (j = 0; j < mov->fc->nb_streams; j++) {
                    MOVStreamContext *msc = mov->fc->streams[j]->priv_data;
                    for (k = msc->next_chunk; k < msc->chunk_count; k++) {
                        if (msc->chunk_offsets[k] > current_offset &&
                            msc->chunk_offsets[k] - current_offset < chunk_size) {
                            chunk_size = msc->chunk_offsets[k] - current_offset;
                            msc->next_chunk = k;
                            break;
                        }
                    }
                }
                /* check for last chunk */
                if (chunk_size == INT_MAX)
                    for (j = 0; j < mov->mdat_count; j++) {
                        dprintf(mov->fc, "mdat %d, offset %"PRIx64", size %"PRId64", current offset %"PRIx64"\n",
                                j, mov->mdat_list[j].offset, mov->mdat_list[j].size, current_offset);
                        if (mov->mdat_list[j].offset <= current_offset &&
                            mov->mdat_list[j].offset + mov->mdat_list[j].size > current_offset)
                            chunk_size = mov->mdat_list[j].offset + mov->mdat_list[j].size - current_offset;
                    }
                assert(chunk_size != INT_MAX);
                for (j = 0; j < mov->fc->nb_streams; j++) {
                    MOVStreamContext *msc = mov->fc->streams[j]->priv_data;
                    msc->next_chunk = 0;
                }
            }
            for (j = 0; j < frames; j++) {
                av_add_index_entry(st, current_offset, current_dts, chunk_size, 0, AVINDEX_KEYFRAME);
                /* get chunk duration */
                chunk_duration = 0;
                while (chunk_samples > 0) {
                    if (chunk_samples < sc->stts_data[stts_index].count) {
                        chunk_duration += sc->stts_data[stts_index].duration * chunk_samples;
                        sc->stts_data[stts_index].count -= chunk_samples;
                        break;
                    } else {
                        chunk_duration += sc->stts_data[stts_index].duration * chunk_samples;
                        chunk_samples -= sc->stts_data[stts_index].count;
                        if (stts_index + 1 < sc->stts_count)
                            stts_index++;
                    }
                }
                current_offset += sc->bytes_per_frame;
                dprintf(mov->fc, "AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", size %d, "
                        "duration %d\n", st->index, i, current_offset, current_dts, chunk_size, chunk_duration);
                assert(chunk_duration % sc->time_rate == 0);
                current_dts += chunk_duration / sc->time_rate;
            }
        }
    }
 out:
    /* adjust sample count to avindex entries */
    sc->sample_count = st->nb_index_entries;
}

static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    MOVContext *mov = s->priv_data;
    ByteIOContext *pb = s->pb;
    int i, err;
    MOV_atom_t atom = { 0, 0, 0 };

    mov->fc = s;
    if(!url_is_streamed(pb)) /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
        atom.size = url_fsize(pb);
    else
        atom.size = INT64_MAX;

    /* check MOV header */
    err = mov_read_default(mov, pb, atom);
    if (err<0 || (!mov->found_moov && !mov->found_mdat)) {
        av_log(s, AV_LOG_ERROR, "mov: header not found !!! (err:%d, moov:%d, mdat:%d) pos:%"PRId64"\n",
               err, mov->found_moov, mov->found_mdat, url_ftell(pb));
        return -1;
    }
    dprintf(mov->fc, "on_parse_exit_offset=%d\n", (int) url_ftell(pb));

    for(i=0; i<s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MOVStreamContext *sc = st->priv_data;
        /* sanity checks */
        if(!sc->stts_count || !sc->chunk_count || !sc->sample_to_chunk_sz ||
           (!sc->sample_size && !sc->sample_count)){
            av_log(s, AV_LOG_ERROR, "missing mandatory atoms, broken header\n");
            sc->sample_count = 0; //ignore track
            continue;
        }
        if(!sc->time_rate)
            sc->time_rate=1;
        if(!sc->time_scale)
            sc->time_scale= mov->time_scale;
        av_set_pts_info(st, 64, sc->time_rate, sc->time_scale);

        if (st->codec->codec_type == CODEC_TYPE_AUDIO && sc->stts_count == 1)
            st->codec->frame_size = sc->stts_data[0].duration;

        if(st->duration != AV_NOPTS_VALUE){
            assert(st->duration % sc->time_rate == 0);
            st->duration /= sc->time_rate;
        }
        sc->ffindex = i;
        mov_build_index(mov, st);
    }

    for(i=0; i<s->nb_streams; i++) {
        MOVStreamContext *sc = s->streams[i]->priv_data;
        /* Do not need those anymore. */
        av_freep(&sc->chunk_offsets);
        av_freep(&sc->sample_to_chunk);
        av_freep(&sc->sample_sizes);
        av_freep(&sc->keyframes);
        av_freep(&sc->stts_data);
    }
    av_freep(&mov->mdat_list);
    return 0;
}

static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MOVContext *mov = s->priv_data;
    MOVStreamContext *sc = 0;
    AVIndexEntry *sample = 0;
    int64_t best_dts = INT64_MAX;
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MOVStreamContext *msc = st->priv_data;
        if (st->discard != AVDISCARD_ALL && msc->current_sample < msc->sample_count) {
            AVIndexEntry *current_sample = &st->index_entries[msc->current_sample];
            int64_t dts = av_rescale(current_sample->timestamp * (int64_t)msc->time_rate,
                                     AV_TIME_BASE, msc->time_scale);
            dprintf(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
            if (!sample || (url_is_streamed(s->pb) && current_sample->pos < sample->pos) ||
                (!url_is_streamed(s->pb) &&
                 ((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
                  (FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))) {
                sample = current_sample;
                best_dts = dts;
                sc = msc;
            }
        }
    }
    if (!sample)
        return -1;
    /* must be done just before reading, to avoid infinite loop on sample */
    sc->current_sample++;
    if (url_fseek(s->pb, sample->pos, SEEK_SET) != sample->pos) {
        av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n",
               sc->ffindex, sample->pos);
        return -1;
    }
    av_get_packet(s->pb, pkt, sample->size);
#ifdef CONFIG_DV_DEMUXER
    if (mov->dv_demux && sc->dv_audio_container) {
        dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
        av_free(pkt->data);
        pkt->size = 0;
        if (dv_get_packet(mov->dv_demux, pkt) < 0)
            return -1;
    }
#endif
    pkt->stream_index = sc->ffindex;
    pkt->dts = sample->timestamp;
    if (sc->ctts_data) {
        assert(sc->ctts_data[sc->sample_to_ctime_index].duration % sc->time_rate == 0);
        pkt->pts = pkt->dts + sc->ctts_data[sc->sample_to_ctime_index].duration / sc->time_rate;
        /* update ctts context */
        sc->sample_to_ctime_sample++;
        if (sc->sample_to_ctime_index < sc->ctts_count &&
            sc->ctts_data[sc->sample_to_ctime_index].count == sc->sample_to_ctime_sample) {
            sc->sample_to_ctime_index++;
            sc->sample_to_ctime_sample = 0;
        }
    } else {
        pkt->pts = pkt->dts;
    }
    pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
    pkt->pos = sample->pos;
    dprintf(s, "stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n",
            pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
    return 0;
}

static int mov_seek_stream(AVStream *st, int64_t timestamp, int flags)
{
    MOVStreamContext *sc = st->priv_data;
    int sample, time_sample;
    int i;

    sample = av_index_search_timestamp(st, timestamp, flags);
    dprintf(st->codec, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
    if (sample < 0) /* not sure what to do */
        return -1;
    sc->current_sample = sample;
    dprintf(st->codec, "stream %d, found sample %d\n", st->index, sc->current_sample);
    /* adjust ctts index */
    if (sc->ctts_data) {
        time_sample = 0;
        for (i = 0; i < sc->ctts_count; i++) {
            int next = time_sample + sc->ctts_data[i].count;
            if (next > sc->current_sample) {
                sc->sample_to_ctime_index = i;
                sc->sample_to_ctime_sample = sc->current_sample - time_sample;
                break;
            }
            time_sample = next;
        }
    }
    return sample;
}

static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
    AVStream *st;
    int64_t seek_timestamp, timestamp;
    int sample;
    int i;

    if (stream_index >= s->nb_streams)
        return -1;

    st = s->streams[stream_index];
    sample = mov_seek_stream(st, sample_time, flags);
    if (sample < 0)
        return -1;

    /* adjust seek timestamp to found sample timestamp */
    seek_timestamp = st->index_entries[sample].timestamp;

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (stream_index == i || st->discard == AVDISCARD_ALL)
            continue;

        timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base);
        mov_seek_stream(st, timestamp, flags);
    }
    return 0;
}

static int mov_read_close(AVFormatContext *s)
{
    int i;
    MOVContext *mov = s->priv_data;
    for(i=0; i<s->nb_streams; i++) {
        MOVStreamContext *sc = s->streams[i]->priv_data;
        av_freep(&sc->ctts_data);
    }
    if(mov->dv_demux){
        for(i=0; i<mov->dv_fctx->nb_streams; i++){
            av_freep(&mov->dv_fctx->streams[i]->codec);
            av_freep(&mov->dv_fctx->streams[i]);
        }
        av_freep(&mov->dv_fctx);
        av_freep(&mov->dv_demux);
    }
    return 0;
}

AVInputFormat mov_demuxer = {
    "mov,mp4,m4a,3gp,3g2,mj2",
    "QuickTime/MPEG4/Motion JPEG 2000 format",
    sizeof(MOVContext),
    mov_probe,
    mov_read_header,
    mov_read_packet,
    mov_read_close,
    mov_read_seek,
};
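For context, here is a minimal sketch of how an application built against the libavformat API of the same era might drive this demuxer: opening the file runs mov_probe() and mov_read_header() (which builds the per-stream index), and each av_read_frame() call ends up in mov_read_packet(). The program itself is not part of mov.c; the file name and the printed fields are purely illustrative, and it assumes the old av_open_input_file()/av_find_stream_info() entry points rather than the later avformat_* names.

/* Illustrative driver program (assumption: same-era libavformat API). */
#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    const char *filename = argc > 1 ? argv[1] : "input.mov"; /* hypothetical input */
    unsigned int i;

    av_register_all(); /* registers mov_demuxer among all other (de)muxers */

    /* probing + header parsing: mov_probe(), then mov_read_header() */
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) != 0) {
        fprintf(stderr, "could not open %s\n", filename);
        return 1;
    }
    if (av_find_stream_info(ic) < 0) {
        fprintf(stderr, "could not find stream info\n");
        av_close_input_file(ic);
        return 1;
    }

    for (i = 0; i < ic->nb_streams; i++)
        printf("stream %u: time_base %d/%d\n", i,
               ic->streams[i]->time_base.num, ic->streams[i]->time_base.den);

    /* each call lands in mov_read_packet(), which picks the stream with the
       lowest dts (or the lowest file offset when the input is streamed) */
    while (av_read_frame(ic, &pkt) >= 0) {
        printf("stream %d, dts %"PRId64", size %d\n",
               pkt.stream_index, pkt.dts, pkt.size);
        av_free_packet(&pkt);
    }

    av_close_input_file(ic);
    return 0;
}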