FFmpegFrameGrabber in FFmpeg + JavaCPP

  • 1. FFmpegFrameGrabber
    • [1.1 Demo Usage](#1.1 Demo使用)
    • [1.2 Audio](#1.2 音频相关)
    • [1.3 Video](#1.3 视频相关)
  • 2. Frame Properties
    • [2.1 Video frame properties](#2.1 视频帧属性)
    • [2.2 Audio frame properties](#2.2 音频帧属性)
    • [2.3 Distinguishing audio and video](#2.3 音频视频区分)

References: JavaCV 1.5.12 API, JavaCPP Presets for FFmpeg 7.1.1-1.5.12 API

1. FFmpegFrameGrabber
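
Before walking through the source, here is a minimal grab loop as a usage sketch. The "input.mp4" path is a placeholder, and the try-with-resources form relies on FrameGrabber implementing Closeable (as it does in JavaCV 1.5.x); treat it as an illustration, not a complete player.

```java
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegLogCallback;
import org.bytedeco.javacv.Frame;

public class GrabberDemo {
    public static void main(String[] args) throws Exception {
        FFmpegLogCallback.set(); // forward FFmpeg's native log; makes grabber exceptions more informative
        try (FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4")) { // placeholder path
            grabber.start(); // opens the input, finds streams, opens decoders
            System.out.println(grabber.getFormat() + ", "
                    + grabber.getImageWidth() + "x" + grabber.getImageHeight()
                    + " @ " + grabber.getFrameRate() + " fps");
            Frame frame;
            while ((frame = grabber.grab()) != null) { // null signals end of stream
                if (frame.image != null) {
                    // decoded video frame (BGR24 by default, see getPixelFormat())
                } else if (frame.samples != null) {
                    // decoded audio samples (16-bit signed PCM by default)
                }
            }
            grabber.stop();
        }
    }
}
```

grab() returns null at end of stream; grabImage() and grabSamples() restrict the loop to a single media type.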

org/bytedeco/javacv/FFmpegFrameGrabber.java

```java
public class FFmpegFrameGrabber extends FrameGrabber {

    public static class Exception extends FrameGrabber.Exception {
        public Exception(String message) { super(message + " (For more details, make sure FFmpegLogCallback.set() has been called.)"); }
        public Exception(String message, Throwable cause) { super(message, cause); }
    }

    public static String[] getDeviceDescriptions() throws Exception {
        tryLoad();
        throw new UnsupportedOperationException("Device enumeration not supported by FFmpeg.");
    }

    public static FFmpegFrameGrabber createDefault(File deviceFile)   throws Exception { return new FFmpegFrameGrabber(deviceFile); }
    public static FFmpegFrameGrabber createDefault(String devicePath) throws Exception { return new FFmpegFrameGrabber(devicePath); }
    public static FFmpegFrameGrabber createDefault(int deviceNumber)  throws Exception { throw new Exception(FFmpegFrameGrabber.class + " does not support device numbers."); }

    private static Exception loadingException = null;
    public static void tryLoad() throws Exception {
        if (loadingException != null) {
            throw loadingException;
        } else {
            try {
                Loader.load(org.bytedeco.ffmpeg.global.avutil.class);
                Loader.load(org.bytedeco.ffmpeg.global.swresample.class);
                Loader.load(org.bytedeco.ffmpeg.global.avcodec.class);
                Loader.load(org.bytedeco.ffmpeg.global.avformat.class);
                Loader.load(org.bytedeco.ffmpeg.global.swscale.class);

                // Register all formats and codecs
                av_jni_set_java_vm(Loader.getJavaVM(), null);
//                avcodec_register_all();
//                av_register_all();
                avformat_network_init();

                Loader.load(org.bytedeco.ffmpeg.global.avdevice.class);
                avdevice_register_all();
            } catch (Throwable t) {
                if (t instanceof Exception) {
                    throw loadingException = (Exception)t;
                } else {
                    throw loadingException = new Exception("Failed to load " + FFmpegFrameGrabber.class, t);
                }
            }
        }
    }
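
    // tryLoad() caches a failure in loadingException and rethrows the same instance on
    // every subsequent call, so once native loading fails in a process it stays failed.
    // A sketch for failing fast at application startup (hypothetical call site):
    //
    //     FFmpegLogCallback.set();      // route FFmpeg's native log through Java first
    //     FFmpegFrameGrabber.tryLoad(); // throws here instead of at the first grab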

    static {
        try {
            tryLoad();
//            FFmpegLockCallback.init();
        } catch (Exception ex) { }
    }

    public FFmpegFrameGrabber(URL url) {
        this(url.toString());
    }
    public FFmpegFrameGrabber(File file) {
        this(file.getAbsolutePath());
    }
    public FFmpegFrameGrabber(String filename) {
        this.filename = filename;
        this.pixelFormat = AV_PIX_FMT_NONE;
        this.sampleFormat = AV_SAMPLE_FMT_NONE;
    }
    /** Calls {@code FFmpegFrameGrabber(inputStream, Integer.MAX_VALUE - 8)}
     *  so that the whole input stream is seekable. */
    public FFmpegFrameGrabber(InputStream inputStream) {
        this(inputStream, Integer.MAX_VALUE - 8);
    }
    /** Set maximumSize to 0 to disable seek and minimize startup time. */
    public FFmpegFrameGrabber(InputStream inputStream, int maximumSize) {
        this.inputStream = inputStream;
        this.closeInputStream = true;
        this.pixelFormat = AV_PIX_FMT_NONE;
        this.sampleFormat = AV_SAMPLE_FMT_NONE;
        this.maximumSize = maximumSize;
    }
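
    // The InputStream constructors read through a custom AVIOContext (see the read/seek
    // callbacks below). maximumSize is passed to InputStream.mark(), so it bounds how far
    // back FFmpeg can seek; a sketch for a live, non-seekable source (assuming some
    // "socketStream" variable):
    //
    //     FFmpegFrameGrabber g = new FFmpegFrameGrabber(socketStream, 0); // 0 disables seeking
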
    public void release() throws Exception {
        synchronized (org.bytedeco.ffmpeg.global.avcodec.class) {
            releaseUnsafe();
        }
    }
    public synchronized void releaseUnsafe() throws Exception {
        started = false;

        if (plane_ptr != null && plane_ptr2 != null) {
            plane_ptr.releaseReference();
            plane_ptr2.releaseReference();
            plane_ptr = plane_ptr2 = null;
        }

        if (pkt != null) {
            if (pkt.stream_index() != -1) {
                av_packet_unref(pkt);
            }
            pkt.releaseReference();
            pkt = null;
        }

        if (default_layout != null) {
            default_layout.releaseReference();
            default_layout = null;
        }

        // Free the RGB image
        if (image_ptr != null) {
            for (int i = 0; i < image_ptr.length; i++) {
                if (imageMode != ImageMode.RAW) {
                    av_free(image_ptr[i]);
                }
            }
            image_ptr = null;
        }
        if (picture_rgb != null) {
            av_frame_free(picture_rgb);
            picture_rgb = null;
        }

        // Free the native format picture frame
        if (picture != null) {
            av_frame_free(picture);
            picture = null;
        }

        // Close the video codec
        if (video_c != null) {
            avcodec_free_context(video_c);
            video_c = null;
        }

        // Free the audio samples frame
        if (samples_frame != null) {
            av_frame_free(samples_frame);
            samples_frame = null;
        }

        // Close the audio codec
        if (audio_c != null) {
            avcodec_free_context(audio_c);
            audio_c = null;
        }

        // Close the video file
        if (inputStream == null && oc != null && !oc.isNull()) {
            avformat_close_input(oc);
            oc = null;
        }

        if (img_convert_ctx != null) {
            sws_freeContext(img_convert_ctx);
            img_convert_ctx = null;
        }

        if (samples_ptr_out != null) {
            for (int i = 0; i < samples_ptr_out.length; i++) {
                av_free(samples_ptr_out[i].position(0));
            }
            samples_ptr_out = null;
            samples_buf_out = null;
        }

        if (samples_convert_ctx != null) {
            swr_free(samples_convert_ctx);
            samples_convert_ctx.releaseReference();
            samples_convert_ctx = null;
        }

        frameGrabbed  = false;
        frame         = null;
        timestamp     = 0;
        frameNumber   = 0;

        if (inputStream != null) {
            try {
                if (oc == null) {
                    // when called a second time
                    if (closeInputStream) {
                        inputStream.close();
                    }
                } else if (maximumSize > 0) {
                    try {
                        inputStream.reset();
                    } catch (IOException ex) {
                        // "Resetting to invalid mark", give up?
                        System.err.println("Error on InputStream.reset(): " + ex);
                    }
                }
            } catch (IOException ex) {
                throw new Exception("Error on InputStream.close(): ", ex);
            } finally {
                inputStreams.remove(oc);
                if (avio != null) {
                    if (avio.buffer() != null) {
                        av_free(avio.buffer());
                        avio.buffer(null);
                    }
                    av_free(avio);
                    avio = null;
                }
                if (oc != null) {
                    avformat_close_input(oc);
                    oc = null;
                }
            }
        }
    }
    @Override protected void finalize() throws Throwable {
        super.finalize();
        release();
    }

    static Map<Pointer,InputStream> inputStreams = Collections.synchronizedMap(new HashMap<Pointer,InputStream>());

    static class ReadCallback extends Read_packet_Pointer_BytePointer_int {
        @Override public int call(Pointer opaque, BytePointer buf, int buf_size) {
            try {
                byte[] b = new byte[buf_size];
                InputStream is = inputStreams.get(opaque);
                int size = is.read(b, 0, buf_size);
                if (size < 0) {
                    return AVERROR_EOF();
                } else {
                    buf.put(b, 0, size);
                    return size;
                }
            }
            catch (Throwable t) {
                System.err.println("Error on InputStream.read(): " + t);
                return -1;
            }
        }
    }

    static class SeekCallback extends Seek_Pointer_long_int {
        @Override public long call(Pointer opaque, long offset, int whence) {
            try {
                InputStream is = inputStreams.get(opaque);
                long size = 0;
                switch (whence) {
                    case 0: is.reset(); break; // SEEK_SET
                    case 1: break;             // SEEK_CUR
                    case 2:                    // SEEK_END
                        is.reset();
                        while (true) {
                            long n = is.skip(Long.MAX_VALUE);
                            if (n == 0) break;
                            size += n;
                        }
                        offset += size;
                        is.reset();
                        break;
                    case AVSEEK_SIZE:
                        long remaining = 0;
                        while (true) {
                            long n = is.skip(Long.MAX_VALUE);
                            if (n == 0) break;
                            remaining += n;
                        }
                        is.reset();
                        while (true) {
                            long n = is.skip(Long.MAX_VALUE);
                            if (n == 0) break;
                            size += n;
                        }
                        offset = size - remaining;
                        is.reset();
                        break;
                    default: return -1;
                }
                long remaining = offset;
                while (remaining > 0) {
                    long skipped = is.skip(remaining);
                    if (skipped == 0) break; // end of the stream
                    remaining -= skipped;
                }
                return whence == AVSEEK_SIZE ? size : 0;
            } catch (Throwable t) {
                System.err.println("Error on InputStream.reset() or skip(): " + t);
                return -1;
            }
        }
    }
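
    // The whence argument mirrors fseek(): 0 = SEEK_SET (absolute), 1 = SEEK_CUR
    // (relative), 2 = SEEK_END (from the end), plus FFmpeg's AVSEEK_SIZE, which asks for
    // the total stream size rather than seeking. Because InputStream cannot rewind,
    // every absolute seek is emulated as reset() back to the mark followed by skip(),
    // which is why maximumSize (the mark limit) caps the seekable window.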

    static ReadCallback readCallback = new ReadCallback().retainReference();
    static SeekCallback seekCallback = new SeekCallback().retainReference();

    private InputStream     inputStream;
    private boolean         closeInputStream;
    private int             maximumSize;
    private AVIOContext     avio;
    private String          filename;
    private AVFormatContext oc;
    private AVStream        video_st, audio_st;
    private AVCodecContext  video_c, audio_c;
    private AVFrame         picture, picture_rgb;
    private BytePointer[]   image_ptr;
    private Buffer[]        image_buf;
    private AVFrame         samples_frame;
    private BytePointer[]   samples_ptr;
    private Buffer[]        samples_buf;
    private BytePointer[]   samples_ptr_out;
    private Buffer[]        samples_buf_out;
    private PointerPointer  plane_ptr, plane_ptr2;
    private AVPacket        pkt;
    private SwsContext      img_convert_ctx;
    private SwrContext      samples_convert_ctx;
    private int             samples_channels, samples_format, samples_rate;
    private boolean         frameGrabbed;
    private Frame           frame;
    private int[]           streams;
    private AVChannelLayout default_layout;

    private volatile boolean started = false;

    public boolean isCloseInputStream() {
        return closeInputStream;
    }
    public void setCloseInputStream(boolean closeInputStream) {
        this.closeInputStream = closeInputStream;
    }

    /**
     * Is there a video stream?
     * @return  {@code video_st!=null;}
     */
    public boolean hasVideo() {
        return video_st!=null;
    }

    /**
     * Is there an audio stream?
     * @return  {@code audio_st!=null;}
     */
    public boolean hasAudio() {
        return audio_st!=null;
    }

    @Override public double getGamma() {
        // default to a gamma of 2.2 for cheap Webcams, DV cameras, etc.
        if (gamma == 0.0) {
            return 2.2;
        } else {
            return gamma;
        }
    }

    @Override public String getFormat() {
        if (oc == null) {
            return super.getFormat();
        } else {
            return oc.iformat().name().getString();
        }
    }

    @Override public int getImageWidth() {
        return imageWidth > 0 || video_c == null ? super.getImageWidth() : video_c.width();
    }

    @Override public int getImageHeight() {
        return imageHeight > 0 || video_c == null ? super.getImageHeight() : video_c.height();
    }

    @Override public int getAudioChannels() {
        return audioChannels > 0 || audio_c == null ? super.getAudioChannels() : audio_c.ch_layout().nb_channels();
    }

    @Override public int getPixelFormat() {
        if (imageMode == ImageMode.COLOR || imageMode == ImageMode.GRAY) {
            if (pixelFormat == AV_PIX_FMT_NONE) {
                return imageMode == ImageMode.COLOR ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_GRAY8;
            } else {
                return pixelFormat;
            }
        } else if (video_c != null) { // RAW
            return video_c.pix_fmt();
        } else {
            return super.getPixelFormat();
        }
    }

    @Override public int getVideoCodec() {
        return video_c == null ? super.getVideoCodec() : video_c.codec_id();
    }

    @Override
    public String getVideoCodecName(){
        return  video_c == null ? super.getVideoCodecName() : video_c.codec().name().getString();
    }

    @Override public int getVideoBitrate() {
        return video_c == null ? super.getVideoBitrate() : (int)video_c.bit_rate();
    }

    @Override public double getAspectRatio() {
        if (video_st == null) {
            return super.getAspectRatio();
        } else {
            AVRational r = av_guess_sample_aspect_ratio(oc, video_st, picture);
            double a = (double)r.num() / r.den();
            return a == 0.0 ? 1.0 : a;
        }
    }

    /** Returns {@link #getVideoFrameRate()} */
    @Override public double getFrameRate() {
        return getVideoFrameRate();
    }

    /** Estimation of audio frames per second.
     *
     * Care must be taken, as this method may require an otherwise unnecessary call of
     * grabFrame(true, false, false, false, false), leaving frameGrabbed set to true.
     *
     * @return (double) getSampleRate()) / samples_frame.nb_samples()
     * if samples_frame.nb_samples() is not zero, otherwise return 0
     */
    public double getAudioFrameRate() {
        if (audio_st == null) {
            return 0.0;
        } else {
            if (samples_frame == null || samples_frame.nb_samples() == 0) {
                try {
                    grabFrame(true, false, false, false, false);
                    frameGrabbed = true;
                } catch (Exception e) {
                    return 0.0;
                }
            }
            if (samples_frame != null && samples_frame.nb_samples() != 0)
                return ((double) getSampleRate()) / samples_frame.nb_samples();
            else return 0.0;

        }
    }
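
    // Worked example: for a typical AAC stream at 44100 Hz with 1024 samples per decoded
    // frame, this returns 44100.0 / 1024 ≈ 43.07 audio frames per second. The
    // samples-per-frame figure is codec-dependent, which is why a frame may have to be
    // grabbed first to measure it.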

    public double getVideoFrameRate() {
        if (video_st == null) {
            return super.getFrameRate();
        } else {
            AVRational r = video_st.avg_frame_rate();
            if (r.num() == 0 && r.den() == 0) {
                r = video_st.r_frame_rate();
            }
            return (double)r.num() / r.den();
        }
    }

    @Override public int getAudioCodec() {
        return audio_c == null ? super.getAudioCodec() : audio_c.codec_id();
    }

    @Override public String getAudioCodecName() {
        return audio_c == null ? super.getAudioCodecName() : audio_c.codec().name().getString();
    }

    @Override public int getAudioBitrate() {
        return audio_c == null ? super.getAudioBitrate() : (int)audio_c.bit_rate();
    }

    @Override public int getSampleFormat() {
        if (sampleMode == SampleMode.SHORT || sampleMode == SampleMode.FLOAT) {
            if (sampleFormat == AV_SAMPLE_FMT_NONE) {
                return sampleMode == SampleMode.SHORT ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLT;
            } else {
                return sampleFormat;
            }
        } else if (audio_c != null) { // RAW
            return audio_c.sample_fmt();
        } else {
            return super.getSampleFormat();
        }
    }

    @Override public int getSampleRate() {
        return sampleRate > 0 || audio_c == null ? super.getSampleRate() : audio_c.sample_rate();
    }

    @Override public Map<String, String> getMetadata() {
        if (oc == null) {
            return super.getMetadata();
        }
        AVDictionaryEntry entry = null;
        Map<String, String> metadata = new HashMap<String, String>();
        while ((entry = av_dict_get(oc.metadata(), "", entry, AV_DICT_IGNORE_SUFFIX)) != null) {
            metadata.put(entry.key().getString(charset), entry.value().getString(charset));
        }
        return metadata;
    }

    @Override public Map<String, String> getVideoMetadata() {
        if (video_st == null) {
            return super.getVideoMetadata();
        }
        AVDictionaryEntry entry = null;
        Map<String, String> metadata = new HashMap<String, String>();
        while ((entry = av_dict_get(video_st.metadata(), "", entry, AV_DICT_IGNORE_SUFFIX)) != null) {
            metadata.put(entry.key().getString(charset), entry.value().getString(charset));
        }
        return metadata;
    }

    @Override public Map<String, String> getAudioMetadata() {
        if (audio_st == null) {
            return super.getAudioMetadata();
        }
        AVDictionaryEntry entry = null;
        Map<String, String> metadata = new HashMap<String, String>();
        while ((entry = av_dict_get(audio_st.metadata(), "", entry, AV_DICT_IGNORE_SUFFIX)) != null) {
            metadata.put(entry.key().getString(charset), entry.value().getString(charset));
        }
        return metadata;
    }

    @Override public String getMetadata(String key) {
        if (oc == null) {
            return super.getMetadata(key);
        }
        AVDictionaryEntry entry = av_dict_get(oc.metadata(), key, null, 0);
        return entry == null || entry.value() == null ? null : entry.value().getString(charset);
    }

    @Override public String getVideoMetadata(String key) {
        if (video_st == null) {
            return super.getVideoMetadata(key);
        }
        AVDictionaryEntry entry = av_dict_get(video_st.metadata(), key, null, 0);
        return entry == null || entry.value() == null ? null : entry.value().getString(charset);
    }

    @Override public String getAudioMetadata(String key) {
        if (audio_st == null) {
            return super.getAudioMetadata(key);
        }
        AVDictionaryEntry entry = av_dict_get(audio_st.metadata(), key, null, 0);
        return entry == null || entry.value() == null ? null : entry.value().getString(charset);
    }

    @Override public Map<String, Buffer> getVideoSideData() {
        if (video_st == null) {
            return super.getVideoSideData();
        }
        videoSideData = new HashMap<String, Buffer>();
        for (int i = 0; i < video_st.nb_side_data(); i++) {
            AVPacketSideData sd = video_st.side_data().position(i);
            String key = av_packet_side_data_name(sd.type()).getString();
            Buffer value = sd.data().capacity(sd.size()).asBuffer();
            videoSideData.put(key, value);
        }
        return videoSideData;
    }

    @Override public Buffer getVideoSideData(String key) {
        return getVideoSideData().get(key);
    }

    /** Returns the rotation in degrees from the side data of the video stream, or 0 if unknown. */
    public double getDisplayRotation() {
        ByteBuffer b = (ByteBuffer)getVideoSideData("Display Matrix");
        return b != null ? av_display_rotation_get(new IntPointer(new BytePointer(b))) : 0;
    }

    @Override public Map<String, Buffer> getAudioSideData() {
        if (audio_st == null) {
            return super.getAudioSideData();
        }
        audioSideData = new HashMap<String, Buffer>();
        for (int i = 0; i < audio_st.nb_side_data(); i++) {
            AVPacketSideData sd = audio_st.side_data().position(i);
            String key = av_packet_side_data_name(sd.type()).getString();
            Buffer value = sd.data().capacity(sd.size()).asBuffer();
            audioSideData.put(key, value);
        }
        return audioSideData;
    }

    @Override public Buffer getAudioSideData(String key) {
        return getAudioSideData().get(key);
    }

    /** the default override of super.setFrameNumber implies seeking
     *  to a frame close to the video frame having that number */
    @Override public void setFrameNumber(int frameNumber) throws Exception {
        if (hasVideo()) setTimestamp(Math.round((1000000L * frameNumber + 500000L)/ getFrameRate()));
        else super.frameNumber = frameNumber;
    }

    /** if there is a video stream, tries to seek to the video frame with the corresponding timestamp;
     *  otherwise only sets super.frameNumber, because frameRate==0 when there is no video stream */
    public void setVideoFrameNumber(int frameNumber) throws Exception {
        // best guess, AVSEEK_FLAG_FRAME has not been implemented in FFmpeg...
        if (hasVideo()) setVideoTimestamp(Math.round((1000000L * frameNumber + 500000L)/ getFrameRate()));
        else super.frameNumber = frameNumber;
    }

    /** if there is an audio stream, tries to seek to the audio frame with the corresponding timestamp;
     *  otherwise does nothing */
    public void setAudioFrameNumber(int frameNumber) throws Exception {
        // best guess, AVSEEK_FLAG_FRAME has not been implemented in FFmpeg...
        if (hasAudio()) setAudioTimestamp(Math.round((1000000L * frameNumber + 500000L)/ getAudioFrameRate()));
    }
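
    // Worked example: at 25 fps, setVideoFrameNumber(250) seeks to
    // Math.round((1000000L * 250 + 500000L) / 25.0) = 10_020_000 microseconds; the
    // extra 500000/frameRate aims half a frame past the frame boundary to absorb
    // rounding in the stream's timestamps.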

    /** setTimestamp without checking frame content (using old code used in JavaCV versions prior to 1.4.1) */
    @Override public void setTimestamp(long timestamp) throws Exception {
        setTimestamp(timestamp, false);
    }

    /** setTimestamp with possibility to select between old quick seek code or new code
     * doing check of frame content. The frame check can be useful with corrupted files, when seeking may
     * end up with an empty frame not containing video nor audio */
    public void setTimestamp(long timestamp, boolean checkFrame) throws Exception {
        setTimestamp(timestamp, checkFrame ? EnumSet.of(Frame.Type.VIDEO, Frame.Type.AUDIO) : null);
    }

    /** setTimestamp with resulting video frame type if there is a video stream.
     * This should provide precise seek to a video frame containing the requested timestamp
     * in most cases.
     * */
    public void setVideoTimestamp(long timestamp) throws Exception {
        setTimestamp(timestamp, EnumSet.of(Frame.Type.VIDEO));
    }

    /** setTimestamp with resulting audio frame type if there is an audio stream.
     * This should provide precise seek to an audio frame containing the requested timestamp
     * in most cases.
     * */
    public void setAudioTimestamp(long timestamp) throws Exception {
        setTimestamp(timestamp, EnumSet.of(Frame.Type.AUDIO));
    }
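
    // Seeking sketch: timestamps are in microseconds, so grabbing the video frame at
    // the 5-second mark looks like (hypothetical call site):
    //
    //     grabber.setVideoTimestamp(5_000_000L); // frame-checked seek (see below)
    //     Frame f = grabber.grabImage();         // returns the frame located by the seek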

    /** setTimestamp with a priority the resulting frame should be:
     *  video (frameTypesToSeek contains only Frame.Type.VIDEO),
     *  audio (frameTypesToSeek contains only Frame.Type.AUDIO),
     *  or any (frameTypesToSeek contains both)
     */
    private synchronized void setTimestamp(long timestamp, EnumSet<Frame.Type> frameTypesToSeek) throws Exception {
        int ret;
        if (oc == null) {
            super.timestamp = timestamp;
        } else {
            timestamp = timestamp * AV_TIME_BASE / 1000000L;

            /* the stream start time */
            long ts0 = oc.start_time() != AV_NOPTS_VALUE ? oc.start_time() : 0;

            if (frameTypesToSeek != null //new code providing check of frame content while seeking to the timestamp
                    && (frameTypesToSeek.contains(Frame.Type.VIDEO) || frameTypesToSeek.contains(Frame.Type.AUDIO))
                    && (hasVideo() || hasAudio())) {

                /*     After the call of ffmpeg's avformat_seek_file(...) with the flag set to AVSEEK_FLAG_BACKWARD
                 * the decoding position should be located before the requested timestamp in a closest position
                 * from which all the active streams can be decoded successfully.
                 * The following seeking consists of two stages:
                 * 1. Grab frames till the frame corresponding to that "closest" position
                 * (the first frame containing decoded data).
                 *
                 * 2. Grab frames till the desired timestamp is reached. The number of steps is restricted
                 * by doubled estimation of frames between that "closest" position and the desired position.
                 *
                 * frameTypesToSeek parameter sets the preferred type of frames to seek.
                 * It can be chosen from three possible types: VIDEO, AUDIO or any of them.
                 * The setting means only a preference in the type. That is, if VIDEO or AUDIO is
                 * specified but the file does not have video or audio stream - any type will be used instead.
                 */

                /* Check if file contains requested streams */
                if ((frameTypesToSeek.contains(Frame.Type.VIDEO) && !hasVideo() ) ||
                        (frameTypesToSeek.contains(Frame.Type.AUDIO) && !hasAudio() ))
                    frameTypesToSeek = EnumSet.of(Frame.Type.VIDEO, Frame.Type.AUDIO);

                /*  If frameTypesToSeek is set explicitly to VIDEO or AUDIO
                 *  we need to use start time of the corresponding stream
                 *  instead of the common start time
                 */
                if (frameTypesToSeek.size()==1) {
                    if (frameTypesToSeek.contains(Frame.Type.VIDEO)) {
                        if (video_st!=null && video_st.start_time() != AV_NOPTS_VALUE) {
                            AVRational time_base = video_st.time_base();
                            ts0 = 1000000L * video_st.start_time() * time_base.num() / time_base.den();
                        }
                    }
                    else if (frameTypesToSeek.contains(Frame.Type.AUDIO)) {
                        if (audio_st!=null && audio_st.start_time() != AV_NOPTS_VALUE) {
                            AVRational time_base = audio_st.time_base();
                            ts0 = 1000000L * audio_st.start_time() * time_base.num() / time_base.den();
                        }
                    }
                }

                /*  Sometimes FFmpeg's avformat_seek_file(...) function brings us not to a position before
                 *  the desired timestamp but a few frames after it. In case we need a frame-precise seek we may
                 *  try to request an earlier timestamp.
                 */
                long early_ts = timestamp;

                /* add the stream start time */
                timestamp += ts0;
                early_ts += ts0;

                long initialSeekPosition = Long.MIN_VALUE;
                long maxSeekSteps = 0;
                long count = 0;
                Frame seekFrame = null;
                do {
                    if ((ret = avformat_seek_file(oc, -1, 0L, early_ts, early_ts, AVSEEK_FLAG_BACKWARD)) < 0)
                        throw new Exception("avformat_seek_file() error " + ret + ": Could not seek file to timestamp " + timestamp + ".");
                    if (video_c != null) {
                        avcodec_flush_buffers(video_c);
                    }
                    if (audio_c != null) {
                        avcodec_flush_buffers(audio_c);
                    }
                    if (pkt.stream_index() != -1) {
                        av_packet_unref(pkt);
                        pkt.stream_index(-1);
                    }
                    seekFrame = grabFrame(frameTypesToSeek.contains(Frame.Type.AUDIO), frameTypesToSeek.contains(Frame.Type.VIDEO), false, false, false);
                    if (seekFrame == null) return;
                    initialSeekPosition = seekFrame.timestamp;
                    if(early_ts==0L) break;
                    early_ts-=500000L;
                    if(early_ts<0) early_ts=0L;
                } while (initialSeekPosition>timestamp);
                double frameDuration = 0.0;
                if (seekFrame.image != null && this.getFrameRate() > 0)
                    frameDuration =  AV_TIME_BASE / (double)getFrameRate();
                else if (seekFrame.samples != null && samples_frame != null && getSampleRate() > 0) {
                    frameDuration =  AV_TIME_BASE * samples_frame.nb_samples() / (double)getSampleRate();
                }

                if(frameDuration>0.0) {
                    maxSeekSteps = 0; //no more grab if the distance to the requested timestamp is smaller than frameDuration
                    if (timestamp - initialSeekPosition + 1 > frameDuration)  //allow for a rounding error
                              maxSeekSteps = (long)(10*(timestamp - initialSeekPosition)/frameDuration);
                }
                else if (initialSeekPosition < timestamp) maxSeekSteps = 1000;

                double delta = 0.0; //for the timestamp correction
                count = 0;
                while(count < maxSeekSteps) {
                    seekFrame = grabFrame(frameTypesToSeek.contains(Frame.Type.AUDIO), frameTypesToSeek.contains(Frame.Type.VIDEO), false, false, false);
                    if (seekFrame == null) return; //is it better to throw NullPointerException?

                    count++;
                    double ts=seekFrame.timestamp;
                    frameDuration = 0.0;
                    if (seekFrame.image != null && this.getFrameRate() > 0)
                        frameDuration =  AV_TIME_BASE / (double)getFrameRate();
                    else if (seekFrame.samples != null && samples_frame != null && getSampleRate() > 0)
                        frameDuration =  AV_TIME_BASE * samples_frame.nb_samples() / (double)getSampleRate();

                    delta = 0.0;
                    if (frameDuration>0.0) {
                        delta = (ts-ts0)/frameDuration - Math.round((ts-ts0)/frameDuration);
                        if (Math.abs(delta)>0.2) delta=0.0;
                    }
                    ts-=delta*frameDuration; // corrected timestamp
                    if (ts + frameDuration > timestamp) break;
                }
            } else { //old quick seeking code used in JavaCV versions prior to 1.4.1
                /* add the stream start time */
                timestamp += ts0;
                if ((ret = avformat_seek_file(oc, -1, Long.MIN_VALUE, timestamp, Long.MAX_VALUE, AVSEEK_FLAG_BACKWARD)) < 0) {
                    throw new Exception("avformat_seek_file() error " + ret + ": Could not seek file to timestamp " + timestamp + ".");
                }
                if (video_c != null) {
                    avcodec_flush_buffers(video_c);
                }
                if (audio_c != null) {
                    avcodec_flush_buffers(audio_c);
                }
                if (pkt.stream_index() != -1) {
                    av_packet_unref(pkt);
                    pkt.stream_index(-1);
                }
                /* comparing to timestamp +/- 1 avoids rounding issues for frame rates
                that are not proper divisors of 1000000, e.g. where
                av_frame_get_best_effort_timestamp in grabFrame sets this.timestamp
                to ...666 and the given timestamp has been rounded to ...667
                (or vice versa)
                 */
                int count = 0; // prevent infinite loops with corrupted files
                while (this.timestamp > timestamp + 1 && grabFrame(true, true, false, false) != null && count++ < 1000) {
                    // flush frames if seeking backwards
                }
                count = 0;
                while (this.timestamp < timestamp - 1 && grabFrame(true, true, false, false) != null && count++ < 1000) {
                    // decode up to the desired frame
                }
            }
            frameGrabbed = true;
        }
    }

    /** Returns {@link #getLengthInVideoFrames()} */
    @Override public int getLengthInFrames() {
        // best guess...
        return getLengthInVideoFrames();
    }

    @Override public long getLengthInTime() {
        return oc.duration() * 1000000L / AV_TIME_BASE;
    }

    /** Returns {@code (int) Math.round(getLengthInTime() * getFrameRate() / 1000000L)}, which is an approximation in general. */
    public int getLengthInVideoFrames() {
        // best guess...
        return (int) Math.round(getLengthInTime() * getFrameRate() / 1000000L);
    }

    public int getLengthInAudioFrames() {
        // best guess...
        double afr = getAudioFrameRate();
        if (afr > 0) return (int) (getLengthInTime() * afr / 1000000L);
        else return 0;
    }
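
    // Worked example: oc.duration() is in AV_TIME_BASE units (microseconds here), so a
    // 60-second file reports getLengthInTime() == 60_000_000, which at 25 fps rounds to
    // 1500 video frames. Both frame counts are estimates computed from duration and
    // rate, not exact counts read from the container.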

    public AVFormatContext getFormatContext() {
        return oc;
    }

    /** Calls {@code start(true)}. */
    @Override public void start() throws Exception {
        start(true);
    }
    /** Set findStreamInfo to false to minimize startup time, at the expense of robustness. */
    public void start(boolean findStreamInfo) throws Exception {
        synchronized (org.bytedeco.ffmpeg.global.avcodec.class) {
            startUnsafe(findStreamInfo);
        }
    }
    public void startUnsafe() throws Exception {
        startUnsafe(true);
    }
    public synchronized void startUnsafe(boolean findStreamInfo) throws Exception {
        try (PointerScope scope = new PointerScope()) {

        if (oc != null && !oc.isNull()) {
            throw new Exception("start() has already been called: Call stop() before calling start() again.");
        }

        int ret;
        img_convert_ctx = null;
        oc              = new AVFormatContext(null);
        video_c         = null;
        audio_c         = null;
        plane_ptr       = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS).retainReference();
        plane_ptr2      = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS).retainReference();
        pkt             = new AVPacket().retainReference();
        frameGrabbed    = false;
        frame           = new Frame();
        timestamp       = 0;
        frameNumber     = 0;
        default_layout  = new AVChannelLayout().retainReference();

        pkt.stream_index(-1);

        // Open video file
        AVInputFormat f = null;
        if (format != null && format.length() > 0) {
            if ((f = av_find_input_format(format)) == null) {
                throw new Exception("av_find_input_format() error: Could not find input format \"" + format + "\".");
            }
        }
        AVDictionary options = new AVDictionary(null);
        if (frameRate > 0) {
            AVRational r = av_d2q(frameRate, 1001000);
            av_dict_set(options, "framerate", r.num() + "/" + r.den(), 0);
        }
        if (pixelFormat >= 0) {
            av_dict_set(options, "pixel_format", av_get_pix_fmt_name(pixelFormat).getString(), 0);
        } else if (imageMode != ImageMode.RAW) {
            av_dict_set(options, "pixel_format", imageMode == ImageMode.COLOR ? "bgr24" : "gray8", 0);
        }
        if (imageWidth > 0 && imageHeight > 0) {
            av_dict_set(options, "video_size", imageWidth + "x" + imageHeight, 0);
        }
        if (sampleRate > 0) {
            av_dict_set(options, "sample_rate", "" + sampleRate, 0);
        }
        if (audioChannels > 0) {
            av_dict_set(options, "channels", "" + audioChannels, 0);
        }
        for (Entry<String, String> e : this.options.entrySet()) {
            av_dict_set(options, e.getKey(), e.getValue(), 0);
        }
        if (inputStream != null) {
            if (!inputStream.markSupported()) {
                inputStream = new BufferedInputStream(inputStream);
            }
            inputStream.mark(maximumSize);
            oc = avformat_alloc_context();
            avio = avio_alloc_context(new BytePointer(av_malloc(4096)), 4096, 0, oc, readCallback, null, maximumSize > 0 ? seekCallback : null);
            oc.pb(avio);

            filename = inputStream.toString();
            inputStreams.put(oc, inputStream);
        }
        if ((ret = avformat_open_input(oc, filename, f, options)) < 0) {
            av_dict_set(options, "pixel_format", null, 0);
            if ((ret = avformat_open_input(oc, filename, f, options)) < 0) {
                throw new Exception("avformat_open_input() error " + ret + ": Could not open input \"" + filename + "\". (Has setFormat() been called?)");
            }
        }
        FFmpegLogCallback.logRejectedOptions(options, "avformat_open_input");
        av_dict_free(options);

        oc.max_delay(maxDelay);

        // Retrieve stream information, if desired
        if (findStreamInfo && (ret = avformat_find_stream_info(oc, (PointerPointer)null)) < 0) {
            throw new Exception("avformat_find_stream_info() error " + ret + ": Could not find stream information.");
        }

        if (av_log_get_level() >= AV_LOG_INFO) {
            // Dump information about file onto standard error
            av_dump_format(oc, 0, filename, 0);
        }

        // Find the first stream with the user-specified disposition property
        int nb_streams = oc.nb_streams();
        for (int i = 0; i < nb_streams; i++) {
            AVStream st = oc.streams(i);
            AVCodecParameters par = st.codecpar();
            if (videoStream < 0 && par.codec_type() == AVMEDIA_TYPE_VIDEO && st.disposition() == videoDisposition) {
                videoStream = i;
            } else if (audioStream < 0 && par.codec_type() == AVMEDIA_TYPE_AUDIO && st.disposition() == audioDisposition) {
                audioStream = i;
            }
        }

        // Find the first video and audio stream, unless the user specified otherwise
        video_st = audio_st = null;
        AVCodecParameters video_par = null, audio_par = null;
        streams = new int[nb_streams];
        for (int i = 0; i < nb_streams; i++) {
            AVStream st = oc.streams(i);
            // Get a pointer to the codec context for the video or audio stream
            AVCodecParameters par = st.codecpar();
            streams[i] = par.codec_type();
            if (video_st == null && par.codec_type() == AVMEDIA_TYPE_VIDEO
                    && par.codec_id() != AV_CODEC_ID_NONE && (videoStream < 0 || videoStream == i)) {
                video_st = st;
                video_par = par;
                videoStream = i;
            } else if (audio_st == null && par.codec_type() == AVMEDIA_TYPE_AUDIO
                    && par.codec_id() != AV_CODEC_ID_NONE && (audioStream < 0 || audioStream == i)) {
                audio_st = st;
                audio_par = par;
                audioStream = i;
            }
        }
        if (video_st == null && audio_st == null) {
            throw new Exception("Did not find a video or audio stream inside \"" + filename
                    + "\" for videoStream == " + videoStream + " and audioStream == " + audioStream + ".");
        }

        if (video_st != null) {
            // Find the decoder for the video stream
            AVCodec codec = avcodec_find_decoder_by_name(videoCodecName);
            if (codec == null) {
                codec = avcodec_find_decoder(video_par.codec_id());
            }
            if (codec == null) {
                throw new Exception("avcodec_find_decoder() error: Unsupported video format or codec not found: " + video_par.codec_id() + ".");
            }

            /* Allocate a codec context for the decoder */
            if ((video_c = avcodec_alloc_context3(codec)) == null) {
                throw new Exception("avcodec_alloc_context3() error: Could not allocate video decoding context.");
            }

            /* copy the stream parameters from the muxer */
            if ((ret = avcodec_parameters_to_context(video_c, video_st.codecpar())) < 0) {
                releaseUnsafe();
                throw new Exception("avcodec_parameters_to_context() error " + ret + ": Could not copy the video stream parameters.");
            }

            options = new AVDictionary(null);
            for (Entry<String, String> e : videoOptions.entrySet()) {
                av_dict_set(options, e.getKey(), e.getValue(), 0);
            }

            // Enable multithreading when available
            video_c.thread_count(0);

            // Open video codec
            if ((ret = avcodec_open2(video_c, codec, options)) < 0) {
                throw new Exception("avcodec_open2() error " + ret + ": Could not open video codec.");
            }
            FFmpegLogCallback.logRejectedOptions(options, "avcodec_open2");
            av_dict_free(options);

            // Hack to correct wrong frame rates that seem to be generated by some codecs
            if (video_c.time_base().num() > 1000 && video_c.time_base().den() == 1) {
                video_c.time_base().den(1000);
            }

            // Allocate video frame and an AVFrame structure for the RGB image
            if ((picture = av_frame_alloc()) == null) {
                throw new Exception("av_frame_alloc() error: Could not allocate raw picture frame.");
            }
            if ((picture_rgb = av_frame_alloc()) == null) {
                throw new Exception("av_frame_alloc() error: Could not allocate RGB picture frame.");
            }

            initPictureRGB();
        }

        if (audio_st != null) {
            // Find the decoder for the audio stream
            AVCodec codec = avcodec_find_decoder_by_name(audioCodecName);
            if (codec == null) {
                codec = avcodec_find_decoder(audio_par.codec_id());
            }
            if (codec == null) {
                throw new Exception("avcodec_find_decoder() error: Unsupported audio format or codec not found: " + audio_par.codec_id() + ".");
            }

            /* Allocate a codec context for the decoder */
            if ((audio_c = avcodec_alloc_context3(codec)) == null) {
                throw new Exception("avcodec_alloc_context3() error: Could not allocate audio decoding context.");
            }

            /* copy the stream parameters from the muxer */
            if ((ret = avcodec_parameters_to_context(audio_c, audio_st.codecpar())) < 0) {
                releaseUnsafe();
                throw new Exception("avcodec_parameters_to_context() error " + ret + ": Could not copy the audio stream parameters.");
            }

            options = new AVDictionary(null);
            for (Entry<String, String> e : audioOptions.entrySet()) {
                av_dict_set(options, e.getKey(), e.getValue(), 0);
            }

            // Enable multithreading when available
            audio_c.thread_count(0);

            // Open audio codec
            if ((ret = avcodec_open2(audio_c, codec, options)) < 0) {
                throw new Exception("avcodec_open2() error " + ret + ": Could not open audio codec.");
            }
            FFmpegLogCallback.logRejectedOptions(options, "avcodec_open2");
            av_dict_free(options);

            // Allocate audio samples frame
            if ((samples_frame = av_frame_alloc()) == null) {
                throw new Exception("av_frame_alloc() error: Could not allocate audio frame.");
            }

            samples_ptr = new BytePointer[] { null };
            samples_buf = new Buffer[] { null };
        }
        started = true;

        }
    }

    private void initPictureRGB() {
        int width  = imageWidth  > 0 ? imageWidth  : video_c.width();
        int height = imageHeight > 0 ? imageHeight : video_c.height();

        switch (imageMode) {
            case COLOR:
            case GRAY:
                // If the size changed, a new allocation is needed -> free the old one.
                if (image_ptr != null) {
                    // First kill all references, then free it.
                    image_buf = null;
                    BytePointer[] temp = image_ptr;
                    image_ptr = null;
                    av_free(temp[0]);
                }
                int fmt = getPixelFormat();

                // work around bug in swscale: https://trac.ffmpeg.org/ticket/1031
                int align = 64;
                int stride = width;
                for (int i = 1; i <= align; i += i) {
                     stride = (width + (i - 1)) & ~(i - 1);
                     av_image_fill_linesizes(picture_rgb.linesize(), fmt, stride);
                     if ((picture_rgb.linesize(0) & (align - 1)) == 0) {
                        break;
                    }
                }

                // Determine required buffer size and allocate buffer
                int size = av_image_get_buffer_size(fmt, stride, height, 1);
                image_ptr = new BytePointer[] { new BytePointer(av_malloc(size)).capacity(size) };
                image_buf = new Buffer[] { image_ptr[0].asBuffer() };

                // Assign appropriate parts of buffer to image planes in picture_rgb
                // Note that picture_rgb is an AVFrame, but AVFrame is a superset of AVPicture
                av_image_fill_arrays(new PointerPointer(picture_rgb), picture_rgb.linesize(), image_ptr[0], fmt, stride, height, 1);
                picture_rgb.format(fmt);
                picture_rgb.width(width);
                picture_rgb.height(height);
                break;

            case RAW:
                image_ptr = new BytePointer[] { null };
                image_buf = new Buffer[] { null };
                break;

            default:
                assert false;
        }
    }
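
    // Alignment example: the loop above keeps rounding the stride up until the computed
    // linesize is 64-byte aligned. For BGR24 at width 100 the natural linesize is 300
    // bytes (not aligned), so the stride is padded to 128 pixels for a linesize of
    // 384 = 6 * 64; at width 1920 the natural linesize 5760 is already a multiple of 64
    // and no padding is needed.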

    @Override public void stop() throws Exception {
        release();
    }

    @Override public synchronized void trigger() throws Exception {
        if (oc == null || oc.isNull()) {
            throw new Exception("Could not trigger: No AVFormatContext. (Has start() been called?)");
        }
        if (pkt.stream_index() != -1) {
            av_packet_unref(pkt);
            pkt.stream_index(-1);
        }
        for (int i = 0; i < numBuffers+1; i++) {
            if (av_read_frame(oc, pkt) < 0) {
                return;
            }
            av_packet_unref(pkt);
        }
    }

    private void processImage() throws Exception {
        frame.imageWidth  = imageWidth  > 0 ? imageWidth  : video_c.width();
        frame.imageHeight = imageHeight > 0 ? imageHeight : video_c.height();
        frame.imageDepth = Frame.DEPTH_UBYTE;
        switch (imageMode) {
            case COLOR:
            case GRAY:
                // Deinterlace Picture
                if (deinterlace) {
                    throw new Exception("Cannot deinterlace: Functionality moved to FFmpegFrameFilter.");
                }

                // Has the size changed?
                if (frame.imageWidth != picture_rgb.width() || frame.imageHeight != picture_rgb.height()) {
                    initPictureRGB();
                }

                // Copy "metadata" fields
                av_frame_copy_props(picture_rgb, picture);

                // Convert the image into BGR or GRAY format that OpenCV uses
                img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                        video_c.width(), video_c.height(), video_c.pix_fmt(),
                        frame.imageWidth, frame.imageHeight, getPixelFormat(),
                        imageScalingFlags != 0 ? imageScalingFlags : SWS_BILINEAR,
                        null, null, (DoublePointer)null);
                if (img_convert_ctx == null) {
                    throw new Exception("sws_getCachedContext() error: Cannot initialize the conversion context.");
                }

                // Convert the image from its native format to RGB or GRAY
                sws_scale(img_convert_ctx, new PointerPointer(picture), picture.linesize(), 0,
                        video_c.height(), new PointerPointer(picture_rgb), picture_rgb.linesize());
                frame.imageStride = picture_rgb.linesize(0);
                frame.image = image_buf;
                frame.opaque = picture_rgb;
                break;

            case RAW:
                frame.imageStride = picture.linesize(0);
                BytePointer ptr = picture.data(0);
                if (ptr != null && !ptr.equals(image_ptr[0])) {
                    image_ptr[0] = ptr.capacity(frame.imageHeight * frame.imageStride);
                    image_buf[0] = ptr.asBuffer();
                }
                frame.image = image_buf;
                frame.opaque = picture;
                break;

            default:
                assert false;
        }
        frame.image[0].limit(frame.imageHeight * frame.imageStride);
        frame.imageChannels = frame.imageStride / frame.imageWidth;
    }
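
    // Channel count example: imageChannels is computed as imageStride / imageWidth, so a
    // 1920-pixel-wide BGR24 frame with linesize 5760 reports 3 channels. When swscale
    // pads rows (see initPictureRGB()), integer division can distort this value for very
    // small widths, so consumers should rely on imageStride for row addressing.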

    private void processSamples() throws Exception {
        int ret;

        int sample_format = samples_frame.format();
        int planes = av_sample_fmt_is_planar(sample_format) != 0 ? (int)samples_frame.ch_layout().nb_channels() : 1;
        int data_size = av_samples_get_buffer_size((IntPointer)null, audio_c.ch_layout().nb_channels(),
                samples_frame.nb_samples(), audio_c.sample_fmt(), 1) / planes;
        if (samples_buf == null || samples_buf.length != planes) {
            samples_ptr = new BytePointer[planes];
            samples_buf = new Buffer[planes];
        }
        frame.sampleRate = audio_c.sample_rate();
        frame.audioChannels = audio_c.ch_layout().nb_channels();
        frame.samples = samples_buf;
        frame.opaque = samples_frame;
        int sample_size = data_size / av_get_bytes_per_sample(sample_format);
        for (int i = 0; i < planes; i++) {
            BytePointer p = samples_frame.data(i);
            if (!p.equals(samples_ptr[i]) || samples_ptr[i].capacity() < data_size) {
                samples_ptr[i] = p.capacity(data_size);
                ByteBuffer b   = p.asBuffer();
                switch (sample_format) {
                    case AV_SAMPLE_FMT_U8:
                    case AV_SAMPLE_FMT_U8P:  samples_buf[i] = b; break;
                    case AV_SAMPLE_FMT_S16:
                    case AV_SAMPLE_FMT_S16P: samples_buf[i] = b.asShortBuffer();  break;
                    case AV_SAMPLE_FMT_S32:
                    case AV_SAMPLE_FMT_S32P: samples_buf[i] = b.asIntBuffer();    break;
                    case AV_SAMPLE_FMT_FLT:
                    case AV_SAMPLE_FMT_FLTP: samples_buf[i] = b.asFloatBuffer();  break;
                    case AV_SAMPLE_FMT_DBL:
                    case AV_SAMPLE_FMT_DBLP: samples_buf[i] = b.asDoubleBuffer(); break;
                    default: assert false;
                }
            }
            samples_buf[i].position(0).limit(sample_size);
        }

        if (audio_c.ch_layout().nb_channels() != getAudioChannels() || audio_c.sample_fmt() != getSampleFormat() || audio_c.sample_rate() != getSampleRate()) {
            if (samples_convert_ctx == null || samples_channels != getAudioChannels() || samples_format != getSampleFormat() || samples_rate != getSampleRate()) {
                if (samples_convert_ctx == null) {
                    samples_convert_ctx = new SwrContext().retainReference();
                }
                av_channel_layout_default(default_layout, getAudioChannels());
                if ((ret = swr_alloc_set_opts2(samples_convert_ctx, default_layout, getSampleFormat(), getSampleRate(),
                        audio_c.ch_layout(), audio_c.sample_fmt(), audio_c.sample_rate(), 0, null)) < 0) {
                    throw new Exception("swr_alloc_set_opts2() error " + ret + ": Cannot allocate the conversion context.");
                } else if ((ret = swr_init(samples_convert_ctx)) < 0) {
                    throw new Exception("swr_init() error " + ret + ": Cannot initialize the conversion context.");
                }
                samples_channels = getAudioChannels();
                samples_format = getSampleFormat();
                samples_rate = getSampleRate();
            }

            int sample_size_in = samples_frame.nb_samples();
            int planes_out = av_sample_fmt_is_planar(samples_format) != 0 ? (int)samples_frame.ch_layout().nb_channels() : 1;
            int sample_size_out = swr_get_out_samples(samples_convert_ctx, sample_size_in);
            int sample_bytes_out = av_get_bytes_per_sample(samples_format);
            int buffer_size_out = sample_size_out * sample_bytes_out * (planes_out > 1 ? 1 : samples_channels);
            if (samples_buf_out == null || samples_buf.length != planes_out || samples_ptr_out[0].capacity() < buffer_size_out) {
                for (int i = 0; samples_ptr_out != null && i < samples_ptr_out.length; i++) {
                    av_free(samples_ptr_out[i].position(0));
                }
                samples_ptr_out = new BytePointer[planes_out];
                samples_buf_out = new Buffer[planes_out];

                for (int i = 0; i < planes_out; i++) {
                    samples_ptr_out[i] = new BytePointer(av_malloc(buffer_size_out)).capacity(buffer_size_out);
                    ByteBuffer b = samples_ptr_out[i].asBuffer();
                    switch (samples_format) {
                        case AV_SAMPLE_FMT_U8:
                        case AV_SAMPLE_FMT_U8P:  samples_buf_out[i] = b; break;
                        case AV_SAMPLE_FMT_S16:
                        case AV_SAMPLE_FMT_S16P: samples_buf_out[i] = b.asShortBuffer();  break;
                        case AV_SAMPLE_FMT_S32:
                        case AV_SAMPLE_FMT_S32P: samples_buf_out[i] = b.asIntBuffer();    break;
                        case AV_SAMPLE_FMT_FLT:
                        case AV_SAMPLE_FMT_FLTP: samples_buf_out[i] = b.asFloatBuffer();  break;
                        case AV_SAMPLE_FMT_DBL:
                        case AV_SAMPLE_FMT_DBLP: samples_buf_out[i] = b.asDoubleBuffer(); break;
                        default: assert false;
                    }
                }
            }
            frame.sampleRate = samples_rate;
            frame.audioChannels = samples_channels;
            frame.samples = samples_buf_out;

            if ((ret = swr_convert(samples_convert_ctx, plane_ptr.put(samples_ptr_out), sample_size_out, plane_ptr2.put(samples_ptr), sample_size_in)) < 0) {
                throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
            }
            for (int i = 0; i < planes_out; i++) {
                samples_ptr_out[i].position(0).limit(ret * (planes_out > 1 ? 1 : samples_channels));
                samples_buf_out[i].position(0).limit(ret * (planes_out > 1 ? 1 : samples_channels));
            }
        }
    }
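
    // Summary: when the decoded audio already matches the requested channel count,
    // sample format, and rate, the decoder's own buffers are exposed directly;
    // otherwise swresample converts into samples_buf_out. With default settings that
    // means interleaved 16-bit signed PCM (AV_SAMPLE_FMT_S16, see getSampleFormat()).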

    public Frame grab() throws Exception {
        return grabFrame(true, true, true, false, true);
    }
    public Frame grabImage() throws Exception {
        return grabFrame(false, true, true, false, false);
    }
    public Frame grabSamples() throws Exception {
        return grabFrame(true, false, true, false, false);
    }
    public Frame grabKeyFrame() throws Exception {
        return grabFrame(false, true, true, true, false);
    }
    public Frame grabFrame(boolean doAudio, boolean doVideo, boolean doProcessing, boolean keyFrames) throws Exception {
        return grabFrame(doAudio, doVideo, doProcessing, keyFrames, true);
    }
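
    // Dispatch sketch: grab() returns video, audio, and data frames interleaved in
    // decoding order, so callers usually branch on which field is populated
    // (hypothetical call site):
    //
    //     Frame f = grabber.grab();
    //     if (f == null)              { /* end of stream */ }
    //     else if (f.image != null)   { /* video frame, e.g. via Java2DFrameConverter */ }
    //     else if (f.samples != null) { /* audio: array of PCM sample Buffers */ }
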
    public synchronized Frame grabFrame(boolean doAudio, boolean doVideo, boolean doProcessing, boolean keyFrames, boolean doData) throws Exception {
        try (PointerScope scope = new PointerScope()) {

        if (oc == null || oc.isNull()) {
            throw new Exception("Could not grab: No AVFormatContext. (Has start() been called?)");
        } else if ((!doVideo || video_st == null) && (!doAudio || audio_st == null) && !doData) {
            return null;
        }
        if (!started) {
            throw new Exception("start() was not called successfully!");
        }

        boolean videoFrameGrabbed = frameGrabbed && frame.image != null;
        boolean audioFrameGrabbed = frameGrabbed && frame.samples != null;
        boolean dataFrameGrabbed = frameGrabbed && frame.data != null;
        frameGrabbed = false;
        if (doVideo && videoFrameGrabbed) {
            if (doProcessing) {
                processImage();
            }
            frame.keyFrame = picture.key_frame() != 0;
            return frame;
        } else if (doAudio && audioFrameGrabbed) {
            if (doProcessing) {
                processSamples();
            }
            frame.keyFrame = samples_frame.key_frame() != 0;
            return frame;
        } else if (doData && dataFrameGrabbed) {
            return frame;
        }

        frame.keyFrame = false;
        frame.imageWidth = 0;
        frame.imageHeight = 0;
        frame.imageDepth = 0;
        frame.imageChannels = 0;
        frame.imageStride = 0;
        frame.image = null;
        frame.sampleRate = 0;
        frame.audioChannels = 0;
        frame.samples = null;
        frame.data = null;
        frame.opaque = null;
        frame.type = null;

        boolean done = false;
        boolean readPacket = pkt.stream_index() == -1;
        while (!done) {
            int ret = 0;
            if (readPacket) {
                if (pkt.stream_index() != -1) {
                    // Free the packet that was allocated by av_read_frame
                    av_packet_unref(pkt);
                    pkt.stream_index(-1);
                }
                if ((ret = av_read_frame(oc, pkt)) < 0) {
                    if (ret == AVERROR_EAGAIN()) {
                        try {
                            Thread.sleep(10);
                            continue;
                        } catch (InterruptedException ex) {
                            // reset interrupt to be nice
                            Thread.currentThread().interrupt();
                            return null;
                        }
                    }
                    if ((doVideo && video_st != null) || (doAudio && audio_st != null)) {
                        // The video or audio codec may have buffered some frames
                        pkt.stream_index(doVideo && video_st != null ? video_st.index() : audio_st.index());
                        pkt.flags(AV_PKT_FLAG_KEY);
                        pkt.data(null);
                        pkt.size(0);
                    } else {
                        pkt.stream_index(-1);
                        return null;
                    }
                }
            }

            frame.streamIndex = pkt.stream_index();

            // Is this a packet from the video stream?
            if (doVideo && video_st != null && frame.streamIndex == video_st.index()
                    && (!keyFrames || pkt.flags() == AV_PKT_FLAG_KEY)) {
                // Decode video frame
                if (readPacket) {
                    ret = avcodec_send_packet(video_c, pkt);
                    if (pkt.data() == null && pkt.size() == 0) {
                        pkt.stream_index(-1);
                    }
                    if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF()) {
                        // The video codec may have buffered some frames
                    } else if (ret < 0) {
                        // Ignore errors to emulate the behavior of the old API
                        // throw new Exception("avcodec_send_packet() error " + ret + ": Error sending a video packet for decoding.");
                    }
                }

                // Did we get a video frame?
                while (!done) {
                    ret = avcodec_receive_frame(video_c, picture);
                    if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF()) {
                        if (pkt.data() == null && pkt.size() == 0) {
                            pkt.stream_index(-1);
                            doVideo = false;
                            if (doAudio) {
                                readPacket = false;
                                break;
                            }
                            return null;
                        } else {
                            readPacket = true;
                            break;
                        }
                    } else if (ret < 0) {
                        // Ignore errors to emulate the behavior of the old API
                        // throw new Exception("avcodec_receive_frame() error " + ret + ": Error during video decoding.");
                        readPacket = true;
                        break;
                    }

                    if (!keyFrames || picture.pict_type() == AV_PICTURE_TYPE_I) {
                        long pts = picture.best_effort_timestamp();
                        AVRational time_base = video_st.time_base();
                        timestamp = 1000000L * pts * time_base.num() / time_base.den();
                        long ts0 = oc.start_time() != AV_NOPTS_VALUE ? oc.start_time() : 0;
                        // best guess, AVCodecContext.frame_number = number of decoded frames...
                        frameNumber = (int)Math.round((timestamp - ts0) * getFrameRate() / 1000000L);
                        frame.image = image_buf;
                        if (doProcessing) {
                            processImage();
                        }
                        /* the picture is allocated by the decoder. no need to
                           free it */
                        done = true;
                        frame.timestamp = timestamp;
                        frame.keyFrame = picture.key_frame() != 0;
                        frame.pictType = (char)av_get_picture_type_char(picture.pict_type());
                        frame.type = Frame.Type.VIDEO;
                    }
                }
            } else if (doAudio && audio_st != null && frame.streamIndex == audio_st.index()) {
                // Decode audio frame
                if (readPacket) {
                    ret = avcodec_send_packet(audio_c, pkt);
                    if (ret < 0) {
                        // Ignore errors to emulate the behavior of the old API
                        // throw new Exception("avcodec_send_packet() error " + ret + ": Error sending an audio packet for decoding.");
                    }
                }

                // Did we get an audio frame?
                while (!done) {
                    ret = avcodec_receive_frame(audio_c, samples_frame);
                    if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF()) {
                        if (pkt.data() == null && pkt.size() == 0) {
                            pkt.stream_index(-1);
                            doAudio = false;
                            return null;
                        } else {
                            readPacket = true;
                            break;
                        }
                    } else if (ret < 0) {
                        // Ignore errors to emulate the behavior of the old API
                        // throw new Exception("avcodec_receive_frame() error " + ret + ": Error during audio decoding.");
                        readPacket = true;
                        break;
                    }

                    long pts = samples_frame.best_effort_timestamp();
                    AVRational time_base = audio_st.time_base();
                    timestamp = 1000000L * pts * time_base.num() / time_base.den();
                    frame.samples = samples_buf;
                    /* if a frame has been decoded, output it */
                    if (doProcessing) {
                        processSamples();
                    }
                    done = true;
                    frame.timestamp = timestamp;
                    frame.keyFrame = samples_frame.key_frame() != 0;
                    frame.type = Frame.Type.AUDIO;
                }
            } else if (readPacket && doData
                    && frame.streamIndex > -1 && frame.streamIndex < streams.length
                    && streams[frame.streamIndex] != AVMEDIA_TYPE_VIDEO && streams[frame.streamIndex] != AVMEDIA_TYPE_AUDIO) {
                // Export the stream byte data for non audio / video frames
                frame.data = pkt.data().position(0).capacity(pkt.size()).asByteBuffer();
                frame.opaque = pkt;
                done = true;
                switch (streams[frame.streamIndex]) {
                    case AVMEDIA_TYPE_DATA: frame.type = Frame.Type.DATA; break;
                    case AVMEDIA_TYPE_SUBTITLE: frame.type = Frame.Type.SUBTITLE; break;
                    case AVMEDIA_TYPE_ATTACHMENT: frame.type = Frame.Type.ATTACHMENT; break;
                    default: frame.type = null;
                }
            } else {
                // Current packet is not needed (different stream index required)
                readPacket = true;
            }
        }
        return frame;

        }
    }

    public synchronized AVPacket grabPacket() throws Exception {
        if (oc == null || oc.isNull()) {
            throw new Exception("Could not grab: No AVFormatContext. (Has start() been called?)");
        }
        if (!started) {
            throw new Exception("start() was not called successfully!");
        }

        // Return the next frame of a stream.
        if (av_read_frame(oc, pkt) < 0) {
            return null;
        }

        return pkt;
    }
}
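
The grabPacket() method at the end of the listing returns the demuxed AVPacket without decoding it. Paired with FFmpegFrameRecorder.recordPacket() and the recorder's start(AVFormatContext) overload, this supports stream copying (remuxing) with no decode/encode cycle. Below is a minimal sketch of that pattern; the file names are placeholders and error handling is omitted:

java 复制代码
import org.bytedeco.ffmpeg.avcodec.AVPacket;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;

public class RemuxDemo {
    public static void main(String[] args) throws Exception {
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4"); // placeholder path
        grabber.start();

        FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("output.mp4",
                grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels());
        recorder.setFormat("mp4");
        // Mirror the input parameters so packets can be written unmodified
        recorder.setVideoCodec(grabber.getVideoCodec());
        recorder.setAudioCodec(grabber.getAudioCodec());
        recorder.setFrameRate(grabber.getFrameRate());
        recorder.setSampleRate(grabber.getSampleRate());
        // Passing the input AVFormatContext lets the recorder copy the stream setup
        recorder.start(grabber.getFormatContext());

        AVPacket packet;
        while ((packet = grabber.grabPacket()) != null) {
            recorder.recordPacket(packet); // write the compressed packet as-is
        }
        recorder.stop();
        grabber.stop();
    }
}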

1.1 Demo Usage

grabber.start() initializes the grabber's internal state (such as the AVFormatContext), and grabber.stop() releases those objects.
grabber.grab() fetches audio and video frames: frame.keyFrame flags key frames, frame.image carries video pixels, and frame.samples carries audio samples.
Frame is the generic class JavaCV uses to represent a media frame.

Simple usage: see the companion article on building an ffplay-style player with FFmpeg + javacpp.

java 复制代码
public class Demo {
    public static void main(String[] args) throws Exception {
        String url = "G:\\视频\\动漫\\长安三万里2023.mp4";
        FFmpegFrameGrabber grabber = FFmpegFrameGrabber.createDefault(url);
        grabber.start();
        Frame frame;
        int count = 0;
        while ((frame = grabber.grab()) != null) {
            XLog.d(count + "-Frame keyFrame=" + frame.keyFrame + " type: " + frame.type + ", getType:" + frame.getTypes());
            if (frame.image != null) {
                // video frame: frame.image holds the pixel buffers
            }
            if (frame.samples != null && frame.samples.length > 0) {
                // audio frame: frame.samples holds the sample buffers
            }
            count++;
            if (count > 200) break;
        }
        grabber.stop();
    }
}

1.2 Audio-Related Methods

| Method | Description |
| --- | --- |
| int getAudioBitrate() | Bit rate of the audio stream |
| int getAudioChannels() | Number of channels in the current audio stream |
| int getAudioCodec() | Codec ID of the current audio stream |
| String getAudioCodecName() | Name of the codec used by the current audio stream |
| double getAudioFrameRate() | Audio frame rate (audio frames per second) |
| Map<String,String> getAudioMetadata() | All metadata entries of the audio stream |
| String getAudioMetadata(String key) | A single audio metadata entry, looked up by key |
| Map<String,Buffer> getAudioSideData() | All side data attached to the audio stream |
| Buffer getAudioSideData(String key) | A single audio side-data entry, looked up by key |
| int getLengthInAudioFrames() | Total length of the audio stream, in audio frames |
| boolean hasAudio() | Whether the input has an audio stream |
| void setAudioFrameNumber(int frameNumber) | Seek to the given audio frame number |
| void setAudioTimestamp(long timestamp) | Seek to the given audio timestamp (microseconds) |
| int getSampleFormat() | Audio sample format |
| int getSampleRate() | Audio sample rate |
| Frame grabSamples() | Grab audio frames only (suited to audio-only processing) |
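
As a usage sketch for this audio API: requesting a fixed sample format before start() means the read loop does not have to branch on planar vs. interleaved layouts. The following plays the audio track through javax.sound.sampled, assuming a placeholder input path and AV_SAMPLE_FMT_S16 (interleaved signed 16-bit) output:

java 复制代码
import java.nio.ShortBuffer;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.SourceDataLine;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.Frame;

public class AudioPlayDemo {
    public static void main(String[] args) throws Exception {
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4"); // placeholder path
        grabber.setSampleFormat(avutil.AV_SAMPLE_FMT_S16); // one interleaved 16-bit plane
        grabber.start();

        AudioFormat format = new AudioFormat(grabber.getSampleRate(), 16,
                grabber.getAudioChannels(), true, true); // signed, big-endian
        SourceDataLine line = AudioSystem.getSourceDataLine(format);
        line.open(format);
        line.start();

        Frame frame;
        byte[] bytes = new byte[0];
        while ((frame = grabber.grabSamples()) != null) {
            ShortBuffer samples = (ShortBuffer) frame.samples[0];
            int n = samples.remaining();
            if (bytes.length < n * 2) {
                bytes = new byte[n * 2];
            }
            for (int i = 0; i < n; i++) { // serialize each short as big-endian
                short s = samples.get();
                bytes[2 * i] = (byte) (s >> 8);
                bytes[2 * i + 1] = (byte) s;
            }
            line.write(bytes, 0, n * 2);
        }
        line.drain();
        line.close();
        grabber.stop();
    }
}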

1.3 Video-Related Methods

| Method | Description |
| --- | --- |
| double getDisplayRotation() | Display rotation angle specified in the video stream |
| double getFrameRate() | Frame rate of the video stream; same as getVideoFrameRate() |
| int getLengthInFrames() | Total number of video frames in the file; same as getLengthInVideoFrames() |
| int getLengthInVideoFrames() | Total number of video frames in the file |
| int getVideoBitrate() | Bit rate of the video stream, i.e. video data transferred per unit time |
| int getVideoCodec() | Codec ID of the video stream |
| String getVideoCodecName() | Name of the codec used by the video stream |
| double getVideoFrameRate() | Frame rate of the video stream |
| Map<String,String> getVideoMetadata() | All metadata entries of the video stream |
| String getVideoMetadata(String key) | A single video metadata entry, looked up by key |
| Map<String,Buffer> getVideoSideData() | All side data attached to the video stream |
| Buffer getVideoSideData(String key) | A single video side-data entry, looked up by key |
| boolean hasVideo() | Whether the input has a video stream |
| void setVideoFrameNumber(int frameNumber) | Seek to the given video frame number |
| void setVideoTimestamp(long timestamp) | Seek to the given video timestamp (microseconds) |
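
Tying a few of these accessors together: the sketch below prints basic stream info, seeks to an arbitrary timestamp, grabs one video frame with grabImage(), and saves it as a thumbnail through JavaCV's Java2DFrameConverter. The input path and the 5-second offset are placeholders:

java 复制代码
import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.Java2DFrameConverter;

public class ThumbnailDemo {
    public static void main(String[] args) throws Exception {
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4"); // placeholder path
        grabber.start();
        System.out.println(grabber.getImageWidth() + "x" + grabber.getImageHeight()
                + " @ " + grabber.getVideoFrameRate() + " fps, codec=" + grabber.getVideoCodecName());

        grabber.setVideoTimestamp(5_000_000L); // seek to 5 s (timestamps are in microseconds)
        Frame frame = grabber.grabImage();     // video frames only
        if (frame != null) {
            BufferedImage image = new Java2DFrameConverter().convert(frame);
            ImageIO.write(image, "png", new File("thumb.png"));
        }
        grabber.stop();
    }
}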

2. Frame Properties

org/bytedeco/javacv/Frame.java

| Frame field | Description |
| --- | --- |
| int audioChannels | Information associated with the samples field. |
| ByteBuffer data | Buffer to hold a data stream associated with a frame. |
| static int DEPTH_BYTE | Constants to be used for imageDepth. |
| static int DEPTH_DOUBLE | Constants to be used for imageDepth. |
| static int DEPTH_FLOAT | Constants to be used for imageDepth. |
| static int DEPTH_INT | Constants to be used for imageDepth. |
| static int DEPTH_LONG | Constants to be used for imageDepth. |
| static int DEPTH_SHORT | Constants to be used for imageDepth. |
| static int DEPTH_UBYTE | Constants to be used for imageDepth. |
| static int DEPTH_USHORT | Constants to be used for imageDepth. |
| Buffer[] image | Buffers to hold image pixels from multiple channels for a video frame. |
| int imageChannels | Information associated with the image field. |
| int imageDepth | Information associated with the image field. |
| int imageHeight | Information associated with the image field. |
| int imageStride | Information associated with the image field. |
| int imageWidth | Information associated with the image field. |
| boolean keyFrame | A flag set by a FrameGrabber or a FrameRecorder to indicate a key frame. |
| Object opaque | The underlying data object, for example, Pointer, AVFrame, IplImage, or Mat. |
| char pictType | The type of the image frame ('I', 'P', 'B', etc). |
| int sampleRate | Information associated with the samples field. |
| Buffer[] samples | Buffers to hold audio samples from multiple channels for an audio frame. |
| int streamIndex | Stream number the audio/video/other data is associated with. |
| long timestamp | Timestamp in microseconds, used for audio/video synchronization. |
| Frame.Type type | The type of the stream. |
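
One behavior worth noting, visible in the grabFrame() source above: the grabber keeps reusing a single internal Frame, overwriting its buffers on every call. If a frame must outlive the next grab, deep-copy it with Frame.clone(). A small sketch (placeholder path):

java 复制代码
import java.util.ArrayList;
import java.util.List;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.Frame;

public class KeepFramesDemo {
    public static void main(String[] args) throws Exception {
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4"); // placeholder path
        grabber.start();
        List<Frame> kept = new ArrayList<>();
        Frame frame;
        while (kept.size() < 10 && (frame = grabber.grabImage()) != null) {
            kept.add(frame.clone()); // deep copy; the original buffers are overwritten next grab
        }
        grabber.stop();
        System.out.println("kept " + kept.size() + " independent frames");
    }
}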

2.1 Video Frame Properties

| Frame field | Description |
| --- | --- |
| Buffer[] image | Image pixel data; for the grabber's default packed BGR output this is a single Buffer, while planar formats use one Buffer per plane |
| int imageChannels | Number of image channels (e.g., RGB = 3, RGBA = 4) |
| int imageDepth | Pixel depth, e.g. Frame.DEPTH_UBYTE (8-bit unsigned, the most common), Frame.DEPTH_BYTE (8-bit signed), Frame.DEPTH_SHORT (16-bit), Frame.DEPTH_INT (32-bit integer), Frame.DEPTH_FLOAT (32-bit float) |
| int imageHeight | Image height in pixels |
| int imageStride | Row stride (elements per row, equal to bytes per row for 8-bit images); may include alignment padding |
| int imageWidth | Image width in pixels |
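
As a concrete reading of these fields, here is a sketch of raw pixel access. It assumes the grabber's default packed BGR output (8-bit, 3 channels, a single ByteBuffer in image[0]); note that a row is imageStride elements wide, which can exceed imageWidth * imageChannels because of alignment padding:

java 复制代码
import java.nio.ByteBuffer;
import org.bytedeco.javacv.Frame;

public class PixelAccess {
    /** Returns {blue, green, red} at (x, y), assuming an 8-bit packed BGR frame. */
    static int[] bgrAt(Frame frame, int x, int y) {
        ByteBuffer pixels = (ByteBuffer) frame.image[0];
        int index = y * frame.imageStride + x * frame.imageChannels;
        return new int[] {
            pixels.get(index)     & 0xFF, // B
            pixels.get(index + 1) & 0xFF, // G
            pixels.get(index + 2) & 0xFF  // R
        };
    }
}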

2.2 Audio Frame Properties

| Frame field | Description |
| --- | --- |
| int audioChannels | Number of audio channels (e.g., mono = 1, stereo = 2) |
| int sampleRate | Sample rate (e.g., 44100 Hz) |
| Buffer[] samples | Audio sample buffers; interleaved formats use a single Buffer, planar formats one Buffer per channel (e.g., planar stereo = 2 Buffers) |
| int streamIndex | Index of the audio stream |
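
On the audio side, a sketch of reading the sample buffers: computing a per-frame RMS level. With setSampleFormat(AV_SAMPLE_FMT_S16) the data arrives as one interleaved ShortBuffer in samples[0]; for planar formats, apply the same loop to each channel's buffer:

java 复制代码
import java.nio.ShortBuffer;

public class AudioLevel {
    /** RMS of one 16-bit audio frame, normalized to the range 0..1. */
    static double rms(ShortBuffer samples) {
        samples.rewind();
        int n = samples.remaining();
        double sum = 0;
        while (samples.hasRemaining()) {
            double s = samples.get() / 32768.0; // scale a short to -1..1
            sum += s * s;
        }
        return n == 0 ? 0 : Math.sqrt(sum / n);
    }
}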

2.3 Distinguishing Audio and Video Frames

grabber.grab() returns both audio and video frames. To handle audio only, use grabber.grabSamples(); for video only, use grabber.grabImage().

java 复制代码
if (frame.image != null) { /* video frame */ }
if (frame.samples != null && frame.samples.length > 0) { /* audio frame */ }

Judging from the demo above (output below), image and samples are never set on the same Frame:

  • For a video frame, image != null and samples == null.
  • For an audio frame, samples != null and image == null.
text 复制代码
2025/07/27 01:28:28.668 XhBruce : 0-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.679 XhBruce : 1-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.681 XhBruce : 2-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.683 XhBruce : 3-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.685 XhBruce : 4-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.689 XhBruce : 5-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.694 XhBruce : 6-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.702 XhBruce : 7-Frame keyFrame=true type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.702 XhBruce : 8-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.704 XhBruce : 9-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.705 XhBruce : 10-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.709 XhBruce : 11-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.709 XhBruce : 12-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.711 XhBruce : 13-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.711 XhBruce : 14-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.713 XhBruce : 15-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.713 XhBruce : 16-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.716 XhBruce : 17-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.716 XhBruce : 18-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.719 XhBruce : 19-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.719 XhBruce : 20-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.721 XhBruce : 21-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.722 XhBruce : 22-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.724 XhBruce : 23-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.724 XhBruce : 24-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.727 XhBruce : 25-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.727 XhBruce : 26-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.731 XhBruce : 27-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.732 XhBruce : 28-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.734 XhBruce : 29-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.734 XhBruce : 30-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.737 XhBruce : 31-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.737 XhBruce : 32-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.739 XhBruce : 33-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.740 XhBruce : 34-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.742 XhBruce : 35-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.745 XhBruce : 36-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.745 XhBruce : 37-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.748 XhBruce : 38-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.748 XhBruce : 39-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.751 XhBruce : 40-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.751 XhBruce : 41-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.754 XhBruce : 42-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.754 XhBruce : 43-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.756 XhBruce : 44-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.757 XhBruce : 45-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.759 XhBruce : 46-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.759 XhBruce : 47-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.761 XhBruce : 48-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.761 XhBruce : 49-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.764 XhBruce : 50-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.764 XhBruce : 51-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.766 XhBruce : 52-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.766 XhBruce : 53-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.769 XhBruce : 54-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.769 XhBruce : 55-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.772 XhBruce : 56-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.772 XhBruce : 57-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.775 XhBruce : 58-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.776 XhBruce : 59-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.779 XhBruce : 60-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.780 XhBruce : 61-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.782 XhBruce : 62-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.783 XhBruce : 63-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.786 XhBruce : 64-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.787 XhBruce : 65-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.790 XhBruce : 66-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.790 XhBruce : 67-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.793 XhBruce : 68-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.795 XhBruce : 69-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.797 XhBruce : 70-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.798 XhBruce : 71-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.799 XhBruce : 72-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.800 XhBruce : 73-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.803 XhBruce : 74-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.803 XhBruce : 75-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.805 XhBruce : 76-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.806 XhBruce : 77-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.809 XhBruce : 78-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.809 XhBruce : 79-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.811 XhBruce : 80-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.812 XhBruce : 81-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.814 XhBruce : 82-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.814 XhBruce : 83-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.817 XhBruce : 84-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.818 XhBruce : 85-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.821 XhBruce : 86-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.822 XhBruce : 87-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.824 XhBruce : 88-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.824 XhBruce : 89-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.827 XhBruce : 90-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.828 XhBruce : 91-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.830 XhBruce : 92-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.832 XhBruce : 93-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.835 XhBruce : 94-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.836 XhBruce : 95-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.838 XhBruce : 96-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.839 XhBruce : 97-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.842 XhBruce : 98-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.842 XhBruce : 99-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.845 XhBruce : 100-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.846 XhBruce : 101-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.850 XhBruce : 102-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.853 XhBruce : 103-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.856 XhBruce : 104-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.856 XhBruce : 105-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.859 XhBruce : 106-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.860 XhBruce : 107-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.863 XhBruce : 108-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.863 XhBruce : 109-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.866 XhBruce : 110-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.867 XhBruce : 111-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.869 XhBruce : 112-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.870 XhBruce : 113-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.873 XhBruce : 114-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.873 XhBruce : 115-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.876 XhBruce : 116-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.876 XhBruce : 117-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.881 XhBruce : 118-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.883 XhBruce : 119-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.884 XhBruce : 120-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.887 XhBruce : 121-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.888 XhBruce : 122-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.891 XhBruce : 123-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.891 XhBruce : 124-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.893 XhBruce : 125-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.895 XhBruce : 126-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.897 XhBruce : 127-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.898 XhBruce : 128-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.901 XhBruce : 129-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.901 XhBruce : 130-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.903 XhBruce : 131-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.904 XhBruce : 132-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.906 XhBruce : 133-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.907 XhBruce : 134-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.910 XhBruce : 135-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.910 XhBruce : 136-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.912 XhBruce : 137-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.913 XhBruce : 138-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.915 XhBruce : 139-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.916 XhBruce : 140-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.918 XhBruce : 141-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.919 XhBruce : 142-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.921 XhBruce : 143-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.922 XhBruce : 144-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.924 XhBruce : 145-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.924 XhBruce : 146-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.927 XhBruce : 147-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.928 XhBruce : 148-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.931 XhBruce : 149-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.931 XhBruce : 150-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.934 XhBruce : 151-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.934 XhBruce : 152-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.936 XhBruce : 153-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.937 XhBruce : 154-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.940 XhBruce : 155-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.941 XhBruce : 156-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.943 XhBruce : 157-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.944 XhBruce : 158-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.947 XhBruce : 159-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.947 XhBruce : 160-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.949 XhBruce : 161-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.950 XhBruce : 162-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.952 XhBruce : 163-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.953 XhBruce : 164-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.956 XhBruce : 165-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.957 XhBruce : 166-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.959 XhBruce : 167-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.960 XhBruce : 168-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.962 XhBruce : 169-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.963 XhBruce : 170-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.965 XhBruce : 171-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.965 XhBruce : 172-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.967 XhBruce : 173-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.968 XhBruce : 174-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.970 XhBruce : 175-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.970 XhBruce : 176-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.973 XhBruce : 177-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.973 XhBruce : 178-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.975 XhBruce : 179-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.976 XhBruce : 180-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.978 XhBruce : 181-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.978 XhBruce : 182-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.980 XhBruce : 183-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.981 XhBruce : 184-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.983 XhBruce : 185-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.984 XhBruce : 186-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.986 XhBruce : 187-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.987 XhBruce : 188-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.989 XhBruce : 189-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.989 XhBruce : 190-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.992 XhBruce : 191-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.993 XhBruce : 192-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.995 XhBruce : 193-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.995 XhBruce : 194-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.997 XhBruce : 195-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:28.997 XhBruce : 196-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:28.999 XhBruce : 197-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:29.000 XhBruce : 198-Frame keyFrame=true type: AUDIO, getType:[AUDIO]
2025/07/27 01:28:29.002 XhBruce : 199-Frame keyFrame=false type: VIDEO, getType:[VIDEO]
2025/07/27 01:28:29.002 XhBruce : 200-Frame keyFrame=true type: AUDIO, getType:[AUDIO]