Android端獲取攝像頭數(shù)據(jù)有好幾種。我使用的是onPreviewFrame(byte data[],Camera camera);這個回調(diào)函數(shù)
遇到的問題:
問題1、打印了下data[]的長度,是3110400。
手機攝像頭像素是1920*1080=2073600
3110400/2073600=1.5,這個1.5就懂了。
data[]里默認存放的是YUV420SP格式的幀,有Y、U、V三個分量。其中Y分量占整個數(shù)據(jù)的2/3,是控制整體的視頻內(nèi)容(亮度)的;而U、V占剩下的1/3,是控制顏色的。所以1920*1080=2073600算出來的是Y分量的大小,剩下的是U和V的分量。幀的width和height就是Y分量的width和height。
問題2、YUV格式有好幾種,有YUV420SP有YUV420P,等等,只看這兩種吧。

從這兩張圖里也能看出來,Y分量通常占整個幀長度的2/3。
第一張YUV420SP,下邊U和V是相互交錯著的,而YUV420P的U和V就很整齊。YUV420P這個性質很重要,我們要利用這個性質,使分離YUV分量變得容易。
問題3、改變onPreviewFrame()回調(diào)的data[]的大小,改變預覽幀data[]的格式。
直接上
- Camera.Parameters parameters = mCamera.getParameters();
通過parameters來設置預覽幀的大?。ň褪歉淖儙南袼兀?,和預覽幀的格式。
- parameters.setPreviewSize(864, 480);
- parameters.setPreviewFormat(ImageFormat.YV12);
如果你不設置setPreviewFormat的話,默認回調(diào)的data[]格式是YUV420SP。CbCr就是U和V。Cb控制藍色,Cr控制紅色。直接點進源碼里看就好。

所以一定要設置一下格式為ImageFormat.YV12,就是YUV420P。ImageFormat.YV12這個是我在parameters.setPreviewFormat()的文檔(下圖)里找到的。

最后,最重要一點:把parameters設置回去!我忘了設置,調(diào)了好久,發(fā)現(xiàn)data[]永遠是3110400大小,改了parameters.setPreviewFormat()也沒用。
- mCamera.setParameters(parameters);
Android端完了,剩下的就是底層NDK端FFMpeg的調(diào)試。
講講遇到的問題:
問題1、由于開始先VS上做的實驗,然后移植。結果導致把所有代碼都放到一個jni函數(shù)里,結果一直崩潰,原來是:因為每一幀都要傳進來壓縮,所以一直在執(zhí)行av_register_all();等等類似代碼。導致崩潰。所以要把這類初始化的過程,單獨放到一個native方法里。把AVCodecContext *pCodecCtx;這種聲明成全局變量。
問題2、把傳進來的jbyteArray轉換成jbyte *,忘了在結束的時候釋放掉,導致運行一秒鐘左右就會內(nèi)存溢出。
- jbyte *yuv420sp = (jbyte*) (*env)->GetByteArrayElements(env, yuvdata, 0);
最后一定要有這句,來把內(nèi)存釋放掉
- (*env)->ReleaseByteArrayElements(env, yuvdata, yuv420sp, 0);
問題3、在jni里寫本地文件,路徑為char filename_out[] = "/storage/emulated/0/yourname.h264";
JAVA
- public class MainActivity extends Activity implements Camera.PreviewCallback,
- SurfaceHolder.Callback {
- List<Size> list;
- SurfaceView mSurfaceView;
- SurfaceHolder mSurfaceHolder;
- Camera mCamera;
- TextView tv;
- Handler mHandler = new Handler() {
- public void handleMessage(android.os.Message msg) {
- switch (msg.what) {
- case 1:
- byte[] bytedata = msg.getData().getByteArray("messageyuvdata");
- if (bytedata != null) {
- // tv.setText(temp+"");
- int count = addVideoData(bytedata);
- tv.setText("length:"+length);
- }
- break;
- case 2 :
- String s = msg.getData().getString("supportFrameSize");
- tv.setText(s);
- break;
- }
- };
- };
-
- int temp = 0;
- int length = 0;
- @Override
- protected void onCreate(Bundle savedInstanceState) {
- // TODO Auto-generated method stub
- super.onCreate(savedInstanceState);
- setContentView(R.layout.activity_main);
- tv = (TextView) findViewById(R.id.tv);
- temp = FFMpegLib.getVersion();
- mSurfaceView = (SurfaceView) this.findViewById(R.id.surfaceview);
- mSurfaceHolder = mSurfaceView.getHolder();
- mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
- mSurfaceHolder.addCallback(this);
-
- }
-
- @Override
- public void onPreviewFrame(byte[] data, Camera camera) {
- length = data.length;
- Log.i("log", data + "");
- Message msg = new Message();
- Bundle bl = new Bundle();
- bl.putByteArray("messageyuvdata", data);
- msg.setData(bl);
- msg.what = 1;
- mHandler.sendMessage(msg);
- }
-
- @Override
- public void surfaceChanged(SurfaceHolder holder, int format, int width,
- int height) {
- // TODO Auto-generated method stub
- mCamera.startPreview();
- }
-
- @Override
- public void surfaceCreated(SurfaceHolder holder) {
- // TODO Auto-generated method stub
- // 打開前置攝像頭
- mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
-
- try {
- Camera.Parameters parameters = mCamera.getParameters();
- List<Integer> supportedPictureFormats = parameters
- .getSupportedPictureFormats();
- for (Integer integer : supportedPictureFormats) {
- Log.i("sun", integer + "");
- }
- list = parameters.getSupportedPreviewSizes();
- parameters.setPreviewSize(864, 480);
- parameters.setPreviewFormat(ImageFormat.YV12);
- parameters.setPreviewFpsRange(20, 20); // 每秒顯示20~30幀
- parameters.setPictureFormat(PixelFormat.JPEG); // 設置圖片格式
- //parameters.setPreviewFormat(PixelFormat.YCbCr_420_SP);
- //parameters.setFlashMode(Parameters.FLASH_MODE_TORCH);
-
- String supportFrameSize = null;
- for (int i = 0; i < list.size(); i++) {
- int width =list.get(i).width;
- int height =list.get(i).height;
- supportFrameSize = supportFrameSize+width+"-"+height+"||||||";
- }
-
-
- Message msg = new Message();
- Bundle bl = new Bundle();
- bl.putString("supportFrameSize", supportFrameSize);
- msg.setData(bl);
- msg.what = 2;
- mHandler.sendMessage(msg);
- mCamera.setParameters(parameters);
-
- } catch (Exception e) {
- e.printStackTrace();
- }
-
- // 開始預覽
- try {
- // 設置哪個surfaceView顯示圖片
- mCamera.setPreviewDisplay(mSurfaceHolder);
- } catch (IOException e) {
- e.printStackTrace();
- }
-
- // 設置預覽幀的接口,就是通過這個接口,我們來獲得預覽幀的數(shù)據(jù)的
-
- mCamera.setPreviewCallback(MainActivity.this);
- mCamera.startPreview();
- }
-
- @Override
- public void surfaceDestroyed(SurfaceHolder holder) {
- // TODO Auto-generated method stub
- mCamera.stopPreview();
- mCamera.release();
- }
-
- public synchronized int addVideoData(byte[] data) {
- int s = FFMpegLib.Encoding(data);
- return s;
- }
-
- @Override
- protected void onStart() {
- super.onStart();
- }
- @Override
- protected void onDestroy() {
- super.onDestroy();
- FFMpegLib.CloseVideo();
- }
- }
JNI
- #include <string.h>
- #include <stdio.h>
- #include <android/log.h>
- #include <stdlib.h>
- #include <jni.h>
- #include <ffmpeg/libavcodec/avcodec.h>
- #include "ffmpeg/libavformat/avformat.h"
- #include "ffmpeg/libavdevice/avdevice.h"
- #include "ffmpeg/libavutil/avutil.h"
- #include "ffmpeg/libavutil/opt.h"
- #include "ffmpeg/libavutil/imgutils.h"
- #include "ffmpeg/libavutil/log.h"
-
- #define TEST_H264 1
-
- #ifdef ANDROID
- #include <jni.h>
- #include <android/log.h>
- #define LOGE(format, ...) __android_log_print(ANDROID_LOG_ERROR, "(>_<)", format, ##__VA_ARGS__)
- #define LOGI(format, ...) __android_log_print(ANDROID_LOG_INFO, "(^_^)", format, ##__VA_ARGS__)
- #else
- #define LOGE(format, ...) printf("(>_<) " format "\n", ##__VA_ARGS__)
- #define LOGI(format, ...) printf("(^_^) " format "\n", ##__VA_ARGS__)
- #endif
-
/* Encoder state shared across the three JNI entry points: initialised in
 * getVersion(), used per-frame in Encoding(), torn down in CloseVideo().
 * Kept global because every preview frame re-enters the library and the
 * context must persist between calls. */
AVCodec *pCodec;                 /* H.264 encoder (x264) */
AVCodecContext *pCodecCtx = NULL;
int i, ret, got_output;
FILE *fp_out;                    /* raw Annex-B .h264 output file */
AVFrame *pFrame;
AVPacket pkt;
int y_size;                      /* width * height = bytes in the Y plane */
int framecnt = 0;                /* number of encoded packets written */
char filename_out[] = "/storage/emulated/0/yourname.h264";
int in_w = 864, in_h = 480;      /* must match Camera.Parameters.setPreviewSize() */
int count = 0;                   /* running PTS counter */
-
- JNIEXPORT jint JNICALL Java_com_cpi_ffmpeg_FFMpegLib_getVersion(JNIEnv *env,
- jclass jclass) {
- avcodec_register_all();
-
- pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
- if (!pCodec) {
- printf("Codec not found\n");
- return -1;
- }
- pCodecCtx = avcodec_alloc_context3(pCodec);
- if (!pCodecCtx) {
- printf("Could not allocate video codec context\n");
- return -1;
- }
- pCodecCtx->bit_rate = 400000;
- pCodecCtx->width = in_w;
- pCodecCtx->height = in_h;
- pCodecCtx->time_base.num = 1;
- pCodecCtx->time_base.den = 20;
- pCodecCtx->gop_size = 10;
- pCodecCtx->max_b_frames = 5;
- pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
-
- av_opt_set(pCodecCtx->priv_data, "preset", "superfast", 0);
- // av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);
- av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
-
-
-
- if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
- printf("Could not open codec\n");
- return -1;
- }
- if ((fp_out = fopen(filename_out, "wb")) == NULL) {
- return -1;
- }
- y_size = pCodecCtx->width * pCodecCtx->height;
-
- return 1;
- }
- JNIEXPORT jint JNICALL Java_com_cpi_ffmpeg_FFMpegLib_Encoding(JNIEnv *env,
- jclass jclass, jbyteArray yuvdata) {
-
- jbyte *yuv420sp = (jbyte*) (*env)->GetByteArrayElements(env, yuvdata, 0);
-
- // av_opt_set(pCodecCtx->priv_data, "preset", "superfast", 0);
- // av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
-
-
- pFrame = av_frame_alloc();
- if (!pFrame) {
- printf("Could not allocate video frame\n");
- return -1;
- }
- pFrame->format = pCodecCtx->pix_fmt;
- pFrame->width = pCodecCtx->width;
- pFrame->height = pCodecCtx->height;
-
- ret = av_image_alloc(pFrame->data, pFrame->linesize, pCodecCtx->width,
- pCodecCtx->height, pCodecCtx->pix_fmt, 16);
- if (ret < 0) {
- printf("Could not allocate raw picture buffer\n");
- return -1;
- }
-
- av_init_packet(&pkt);
- pkt.data = NULL; // packet data will be allocated by the encoder
- pkt.size = 0;
-
- //Read raw YUV data 這里出錯了,是按YUV_SP處理的 應該是YUV_P
- pFrame->data[0] = yuv420sp; //PCM Data
- pFrame->data[1] = yuv420sp + y_size * 5 / 4; // V
- pFrame->data[2] = yuv420sp + y_size; // U
- pFrame->pts = count;
- count++;
- /* encode the image */
- ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
- int sizee = pkt.size;
- if (ret < 0) {
- printf("Error encoding frame\n");
- return -1;
- }
- if (got_output) {
- printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt.size);
- framecnt++;
- fwrite(pkt.data, 1, pkt.size, fp_out);
- av_free_packet(&pkt);
- av_freep(&pFrame->data[0]);
- av_frame_free(&pFrame);
- //(*env)->ReleaseByteArrayElements(env, yuvdata, ydata, 0);
- // return framecnt;
- }
- //av_freep(&pFrame->data[0]);
- //av_frame_free(&pFrame);
- (*env)->ReleaseByteArrayElements(env, yuvdata, yuv420sp, 0);
- return 1;
- }
-
- JNIEXPORT jint JNICALL Java_com_cpi_ffmpeg_FFMpegLib_CloseVideo(JNIEnv *env,
- jclass jclass) {
-
- for (got_output = 1; got_output; i++) {
- ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
- if (ret < 0) {
- printf("Error encoding frame\n");
- return -1;
- }
- if (got_output) {
- printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n",
- pkt.size);
- fwrite(pkt.data, 1, pkt.size, fp_out);
- av_free_packet(&pkt);
- }
- }
-
- fclose(fp_out);
- avcodec_close(pCodecCtx);
- av_free(pCodecCtx);
- av_freep(&pFrame->data[0]);
- av_frame_free(&pFrame);
- return 0;
- }
|