Android: Streaming with librtmp [Audio/Video Transport]

1. Calling librtmp through JNI

Below are the native methods that the JNI layer exposes to Java:

public final class PublishJni {
    static {
        System.loadLibrary("publish");
    }

    static native long init(String url, int w, int h, int timeOut);

    static native int sendSpsAndPps(long cptr, byte[] sps, int spsLen, byte[] pps,
                                    int ppsLen, long timestamp);

    static native int sendVideoData(long cptr, byte[] data, int len, long timestamp);

    static native int sendAacSpec(long cptr, byte[] data, int len);

    static native int sendAacData(long cptr, byte[] data, int len, long timestamp);

    static native int stop(long cptr);

}


The entry points exported from publish.so:

#include <jni.h>
#include <string>
#include "Rtmp.h"

#ifdef __cplusplus
extern "C" {
#endif

JNIEXPORT jlong JNICALL
Java_com_blueberry_media_PublishJni_init(JNIEnv *env, jclass type, jstring url_, jint w, jint h,
                                         jint timeOut) {

    const char *url = env->GetStringUTFChars(url_, 0);
    Rtmp *rtmp = new Rtmp();
    rtmp->init(url, w, h, timeOut);
    env->ReleaseStringUTFChars(url_, url);
    return reinterpret_cast<jlong>(rtmp);
}


JNIEXPORT jint JNICALL
Java_com_blueberry_media_PublishJni_sendSpsAndPps(JNIEnv *env, jclass type, jlong cptr,
                                                  jbyteArray sps_, jint spsLen, jbyteArray pps_,
                                                  jint ppsLen, jlong timestamp) {
    jbyte *sps = env->GetByteArrayElements(sps_, NULL);
    jbyte *pps = env->GetByteArrayElements(pps_, NULL);
    Rtmp *rtmp = reinterpret_cast<Rtmp *>(cptr);
    int ret = rtmp->sendSpsAndPps((BYTE *) sps, spsLen, (BYTE *) pps, ppsLen, timestamp);

    env->ReleaseByteArrayElements(sps_, sps, 0);
    env->ReleaseByteArrayElements(pps_, pps, 0);
    return ret;
}

JNIEXPORT jint JNICALL
Java_com_blueberry_media_PublishJni_sendVideoData(JNIEnv *env, jclass type, jlong cptr,
                                                  jbyteArray data_, jint len, jlong timestamp) {
    jbyte *data = env->GetByteArrayElements(data_, NULL);
    Rtmp *rtmp = reinterpret_cast<Rtmp *>(cptr);
    int ret = rtmp->sendVideoData((BYTE *) data, len, timestamp);

    env->ReleaseByteArrayElements(data_, data, 0);

    return ret;
}


JNIEXPORT jint JNICALL
Java_com_blueberry_media_PublishJni_sendAacSpec(JNIEnv *env, jclass type, jlong cptr,
                                                jbyteArray data_, jint len) {
    jbyte *data = env->GetByteArrayElements(data_, NULL);

    Rtmp *rtmp = reinterpret_cast<Rtmp *>(cptr);
    int ret = rtmp->sendAacSpec((BYTE *) data, len);

    env->ReleaseByteArrayElements(data_, data, 0);
    return ret;
}

JNIEXPORT jint JNICALL
Java_com_blueberry_media_PublishJni_sendAacData(JNIEnv *env, jclass type, jlong cptr,
                                                jbyteArray data_, jint len, jlong timestamp) {
    jbyte *data = env->GetByteArrayElements(data_, NULL);

    Rtmp *rtmp = reinterpret_cast<Rtmp *>(cptr);
    int ret = rtmp->sendAacData((BYTE *) data, len, timestamp);

    env->ReleaseByteArrayElements(data_, data, 0);
    return ret;
}

JNIEXPORT jint JNICALL
Java_com_blueberry_media_PublishJni_stop(JNIEnv *env, jclass type, jlong cptr) {
    Rtmp *rtmp = reinterpret_cast<Rtmp *>(cptr);
    delete rtmp;
    return 0;
}

#ifdef __cplusplus
}
#endif
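
The code above relies on definitions from Rtmp.h, which the post never shows. Here is a minimal sketch of what it plausibly contains, with every name inferred from its usage in the code below (treat the exact constant values and includes as assumptions):

#ifndef RTMP_H
#define RTMP_H

#include <string>
#include <android/log.h>
#include "rtmp.h"   // librtmp

// Hypothetical log wrappers; any __android_log_print wrapper works.
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,  "Rtmp", __VA_ARGS__)
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, "Rtmp", __VA_ARGS__)

typedef unsigned char BYTE;

// Room reserved in front of the packet body for the RTMPPacket struct
// plus the largest possible RTMP header -- the usual librtmp idiom.
#define RTMP_HEAD_SIZE (sizeof(RTMPPacket) + RTMP_MAX_HEADER_SIZE)

#define STREAM_CHANNEL_AUDIO 0x05   // assumed audio chunk-stream id (video uses 0x04)
#define NAL_SLICE_IDR        5      // H.264 nal_unit_type for IDR slices

class Rtmp {
public:
    int init(std::string url, int w, int h, int timeOut);
    int sendSpsAndPps(BYTE *sps, int spsLen, BYTE *pps, int ppsLen, long timestamp);
    int sendVideoData(BYTE *buf, int len, long timestamp);
    int sendAacSpec(BYTE *data, int spec_len);
    int sendAacData(BYTE *data, int len, long timeOffset);
    int stop() const;
    ~Rtmp();

private:
    RTMP *rtmp = nullptr;
};

#endif //RTMP_H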

The Rtmp class implementation:

#include "Rtmp.h"

int Rtmp::init(std::string url, int w, int h, int timeOut) {

    RTMP_LogSetLevel(RTMP_LOGDEBUG);
    rtmp = RTMP_Alloc();
    RTMP_Init(rtmp);
    LOGI("time out = %d", timeOut);
    rtmp->Link.timeout = timeOut;
    if (!RTMP_SetupURL(rtmp, (char *) url.c_str())) {
        LOGI("RTMP_SetupURL error");
        return -1;
    }
    RTMP_EnableWrite(rtmp);

    if (!RTMP_Connect(rtmp, NULL)) {
        LOGI("RTMP_Connect error");
        return -1;
    }
    LOGI("RTMP_Connect success.");

    if (!RTMP_ConnectStream(rtmp, 0)) {
        LOGI("RTMP_ConnectStream error");
        return -1;
    }

    LOGI("RTMP_ConnectStream success.");
    return 0;
}

int Rtmp::sendSpsAndPps(BYTE *sps, int spsLen, BYTE *pps, int ppsLen, long timestamp) {

    int i;
    RTMPPacket *packet = (RTMPPacket *) malloc(RTMP_HEAD_SIZE + 1024);
    memset(packet, 0, RTMP_HEAD_SIZE);
    packet->m_body = (char *) packet + RTMP_HEAD_SIZE;
    BYTE *body = (BYTE *) packet->m_body;

    //VideoTagHeader[TagType==9]
    //Frame Type 4bits | CodecID 4bits | AVCPacketType 8bit | CompositionTime 24bits|
    //A)Frame Type, Type of video frame. The following values are defined:
    //1 = key frame (for AVC, a seekable frame)
    //2 = inter frame (for AVC, a non-seekable frame)
    //3 = disposable inter frame (H.263 only)
    //4 = generated key frame (reserved for server use only)
    //5 = video info/command frame

    //B)CodecID, Codec Identifier. The following values are defined:
    //2 = Sorenson H.263
    //3 = Screen video
    //4 = On2 VP6
    //5 = On2 VP6 with alpha channel
    //6 = Screen video version 2
    //7 = AVC

    //C)AVCPacketType, IF CodecID == 7:
    //0 = AVC sequence header
    //1 = AVC NALU
    //2 = AVC end of sequence (lower level NALU sequence ender is not required or supported)

    //D)CompositionTime, IF CodecID == 7:
    //IF AVCPacketType == 1
    //Composition time offset
    //ELSE
    //0
    //See ISO 14496-12, 8.15.3 for an explanation of composition
    //times. The offset in an FLV file is always in milliseconds.

    i = 0;
    body[i++] = 0x17; //FrameType=1 (key frame), CodecID=7 (AVC)
    body[i++] = 0x00; //AVCPacketType: 0 = AVC sequence header

    body[i++] = 0x00; //CompositionTime: 3 bytes, meaningless for a sequence header, all zero
    body[i++] = 0x00;
    body[i++] = 0x00;

    /*AVCDecoderConfigurationRecord*/
    body[i++] = 0x01;   //configurationVersion   1 byte   always 0x01
    body[i++] = sps[1]; //AVCProfileIndication   1 byte   sps[1]
    body[i++] = sps[2]; //profile_compatibility  1 byte   sps[2]
    body[i++] = sps[3]; //AVCLevelIndication     1 byte   sps[3]
    body[i++] = 0xff;   //lengthSizeMinusOne     1 byte   bytes used by each NALU length field; field size = (lengthSizeMinusOne & 3) + 1

    /*SPS*/
    body[i++] = 0xe1;                  //numOfSequenceParameterSets    1 byte   SPS count, usually 0xE1; count = numOfSequenceParameterSets & 0x1F
    body[i++] = (spsLen >> 8) & 0xff;  //sequenceParameterSetLength    2 bytes  SPS length
    body[i++] = spsLen & 0xff;
    /*sps data*/
    memcpy(&body[i], sps, spsLen);     //sequenceParameterSetNALUnits  spsLen bytes  SPS payload

    i += spsLen;

    /*PPS*/
    body[i++] = 0x01;                   //numOfPictureParameterSets    1 byte   PPS count, usually 0x01
    /*pps data length*/
    body[i++] = (ppsLen >> 8) & 0xff;   //pictureParameterSetLength    2 bytes  PPS length
    body[i++] = ppsLen & 0xff;
    memcpy(&body[i], pps, ppsLen);      //pictureParameterSetNALUnits  ppsLen bytes  PPS payload
    i += ppsLen;

    packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
    packet->m_nBodySize = i;
    packet->m_nChannel = 0x04;
    packet->m_nTimeStamp = 0;
    packet->m_hasAbsTimestamp = 0;
    packet->m_headerType = RTMP_PACKET_SIZE_MEDIUM;
    packet->m_nInfoField2 = rtmp->m_stream_id;

    /*send the packet*/
    if (RTMP_IsConnected(rtmp)) {
        RTMP_SendPacket(rtmp, packet, TRUE);
    }
    free(packet);
    return 0;
}
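
// Sketch (not in the original post): on Android, MediaCodec typically delivers
// SPS and PPS together in one BUFFER_FLAG_CODEC_CONFIG buffer as Annex-B NALUs
// (00 00 00 01 <SPS> 00 00 00 01 <PPS>). One way to split that buffer before
// calling sendSpsAndPps, assuming 4-byte start codes:
static int splitSpsPps(const BYTE *cfg, int len,
                       const BYTE **sps, int *spsLen,
                       const BYTE **pps, int *ppsLen) {
    // find the second start code; the bytes between the two start codes are the SPS
    for (int i = 4; i + 3 < len; i++) {
        if (cfg[i] == 0x00 && cfg[i + 1] == 0x00 &&
            cfg[i + 2] == 0x00 && cfg[i + 3] == 0x01) {
            *sps = cfg + 4;
            *spsLen = i - 4;
            *pps = cfg + i + 4;
            *ppsLen = len - i - 4;
            return 0;
        }
    }
    return -1; // no second start code found
}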

int Rtmp::sendVideoData(BYTE *buf, int len, long timestamp) {
    int type;

    //SPS and PPS are always delimited by 00 00 00 01; regular frames may use
    //either 00 00 00 01 or 00 00 01 as the start code.
    /*strip the start code*/
    if (buf[2] == 0x00) {       //start code 00 00 00 01
        buf += 4;
        len -= 4;
    } else if (buf[2] == 0x01) {//start code 00 00 01
        buf += 3;
        len -= 3;
    }

    type = buf[0] & 0x1f; //1 bit forbidden_zero_bit, 2 bits nal_ref_idc, 5 bits nal_unit_type

    RTMPPacket *packet = (RTMPPacket *) malloc(RTMP_HEAD_SIZE + len + 9);
    memset(packet, 0, RTMP_HEAD_SIZE);
    packet->m_body = (char *) packet + RTMP_HEAD_SIZE;
    packet->m_nBodySize = len + 9;


    /* send video packet*/
    BYTE *body = (BYTE *) packet->m_body;
    memset(body, 0, len + 9);

    /*frame type*/
    body[0] = 0x27;                 //FrameType=2 (inter frame, P), CodecID=7 (AVC)
    if (type == NAL_SLICE_IDR) {
        body[0] = 0x17;             //FrameType=1 (key frame, I), CodecID=7 (AVC)
    }

    body[1] = 0x01;    //AVCPacketType: 1 = AVC NALU
    body[2] = 0x00;    //CompositionTime: 3 bytes, left at 0 here
    body[3] = 0x00;
    body[4] = 0x00;

    body[5] = (len >> 24) & 0xff;    //NALU length, (lengthSizeMinusOne & 3) + 1 = 4 bytes, big-endian
    body[6] = (len >> 16) & 0xff;
    body[7] = (len >> 8) & 0xff;
    body[8] = (len) & 0xff;

    /*copy data*/
    memcpy(&body[9], buf, len);      //NALU Data

    packet->m_hasAbsTimestamp = 0;
    packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
    packet->m_nInfoField2 = rtmp->m_stream_id;
    packet->m_nChannel = 0x04;
    packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
    packet->m_nTimeStamp = timestamp;

    if (RTMP_IsConnected(rtmp)) {
        RTMP_SendPacket(rtmp, packet, TRUE);
    }
    free(packet);

    return 0;
}

int Rtmp::sendAacSpec(BYTE *data, int spec_len) {
    RTMPPacket *packet;
    BYTE *body;
    int len = spec_len; //spec_len is 2: the AAC sequence header carries the AudioSpecificConfig structure
    packet = (RTMPPacket *) malloc(RTMP_HEAD_SIZE + len + 2);
    memset(packet, 0, RTMP_HEAD_SIZE);
    packet->m_body = (char *) packet + RTMP_HEAD_SIZE;
    body = (BYTE *) packet->m_body;

    //AudioTagHeader[TagType==8]
    //SoundFormat 4bits | SoundRate 2bits | SoundSize 1bit | SoundType 1bits| AACPacketType 8bits|
    //A)SoundFormat, the audio format. The following values are defined:
    //0 = Linear PCM, platform endian
    //1 = ADPCM
    //2 = MP3
    //3 = Linear PCM, little endian
    //4 = Nellymoser 16 kHz mono
    //5 = Nellymoser 8 kHz mono
    //6 = Nellymoser
    //7 = G.711 A-law logarithmic PCM
    //8 = G.711 mu-law logarithmic PCM
    //9 = reserved
    //10 = AAC
    //11 = Speex
    //14 = MP3 8 kHz
    //15 = Device-specific sound
    //Formats 7, 8, 14, and 15 are reserved.
    //AAC is supported in Flash Player 9,0,115,0 and higher.
    //Speex is supported in Flash Player 10 and higher.

    //B)SoundRate, the sampling rate. The following values are defined:
    //0 = 5.5 kHz
    //1 = 11 kHz
    //2 = 22 kHz
    //3 = 44 kHz

    //C)SoundSize, the sample size. Size of each audio sample. This parameter only pertains to uncompressed formats. Compressed formats always decode to 16 bits internally.
    //0 = 8-bit samples
    //1 = 16-bit samples

    //D)SoundType, the channel count. Mono or stereo sound
    //0 = Mono sound
    //1 = Stereo sound

    //E)AACPacketType, the type of the AACAUDIODATA that follows. IF SoundFormat == 10
    //Only when SoundFormat is AAC (0x0A) does the AudioTagHeader carry this extra byte.
    //0 = AAC sequence header
    //1 = AAC raw

    //1) First byte 0xAF: the high nibble 0xA (10) means AAC.
    //The low nibble 0xF breaks down as:
    //first 2 bits, SoundRate = 11 (44 kHz);
    //next bit, SoundSize = 1 (16-bit samples);
    //last bit, SoundType = 1 (stereo).

    //2) Second byte 0x00 is the AACPacketType:
    //0 = AAC sequence header, 1 = AAC raw.
    //The first audio packet uses 0; every packet after it uses 1.
    /*AF 00 + AAC sequence header*/
    body[0] = 0xAF;
    body[1] = 0x00;
    memcpy(&body[2], data, len); /*data is the AAC sequence header (AudioSpecificConfig)*/

    //Layout of the 2-byte AudioSpecificConfig:
    //audioObjectType: 5 bits, the AAC profile; 1 = AAC Main, 2 = AAC LC (low complexity), 3 = SSR.
    //samplingFrequencyIndex: 4 bits, the sampling rate:
    //0x00   96000
    //0x01   88200
    //0x02   64000
    //0x03   48000
    //0x04   44100
    //0x05   32000
    //0x06   24000
    //0x07   22050
    //0x08   16000
    //0x09   12000
    //0x0A   11025
    //0x0B    8000
    //0x0C   reserved
    //0x0D   reserved
    //0x0E   reserved
    //0x0F   escape value
    //By this table, the usual AAC rate of 44100 Hz should map to index 4. The author
    //reports, however, that in his experiments index 3 worked for sample rates up to
    //44100, while 48000 required index 2.
    //channelConfiguration: 4 bits, the channel count.
    //frameLengthFlag: 1 bit
    //dependsOnCoreCoder: 1 bit
    //extensionFlag: 1 bit
    //(the last 3 bits are all 0)

    packet->m_packetType = RTMP_PACKET_TYPE_AUDIO;
    packet->m_nBodySize = len + 2;
    packet->m_nChannel = STREAM_CHANNEL_AUDIO;
    packet->m_nTimeStamp = 0;
    packet->m_hasAbsTimestamp = 0;
    packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
    packet->m_nInfoField2 = rtmp->m_stream_id;

    if (RTMP_IsConnected(rtmp)) {
        RTMP_SendPacket(rtmp, packet, TRUE);
    }
    free(packet);

    return 0;
}

int Rtmp::sendAacData(BYTE *data, int len, long timeOffset) {
//    data += 5;
//    len += 5;
    if (len > 0) {
        RTMPPacket *packet;
        BYTE *body;
        packet = (RTMPPacket *) malloc(RTMP_HEAD_SIZE + len + 2);
        memset(packet, 0, RTMP_HEAD_SIZE);
        packet->m_body = (char *) packet + RTMP_HEAD_SIZE;
        body = (BYTE *) packet->m_body;

        /*AF 01 + AAC raw data*/
        body[0] = 0xAF;
        body[1] = 0x01;
        memcpy(&body[2], data, len);

        packet->m_packetType = RTMP_PACKET_TYPE_AUDIO;
        packet->m_nBodySize = len + 2;
        packet->m_nChannel = STREAM_CHANNEL_AUDIO;
        packet->m_nTimeStamp = timeOffset;
        packet->m_hasAbsTimestamp = 0;
        packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
        packet->m_nInfoField2 = rtmp->m_stream_id;
        if (RTMP_IsConnected(rtmp)) {
            RTMP_SendPacket(rtmp, packet, TRUE);
        }
        LOGD("send packet body[0]=%x,body[1]=%x", body[0], body[1]);
        free(packet);

    }
    return 0;
}
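
// Sketch (not in the original post): RTMP expects raw AAC frames. If the encoder
// emits ADTS-framed AAC instead (MediaCodec does not by default), the ADTS header
// must be stripped before calling sendAacData. Assuming protection_absent == 1
// (no CRC, so a 7-byte header):
static int stripAdtsHeader(BYTE **data, int *len) {
    // the ADTS syncword is 12 set bits: FF Fx
    if (*len > 7 && (*data)[0] == 0xFF && ((*data)[1] & 0xF0) == 0xF0) {
        *data += 7;
        *len  -= 7;
        return 1; // header found and skipped
    }
    return 0;     // already raw AAC
}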

int Rtmp::stop() const {
    RTMP_Close(rtmp);
    RTMP_Free(rtmp);
    return 0;
}

Rtmp::~Rtmp() { stop(); }
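
If the AudioSpecificConfig is not taken verbatim from the encoder (MediaCodec exposes it as the csd-0 buffer), the two bytes can be packed by hand following the bit layout documented in sendAacSpec above. A minimal sketch:

// Pack a 2-byte AudioSpecificConfig:
// 5 bits audioObjectType | 4 bits samplingFrequencyIndex |
// 4 bits channelConfiguration | 3 zero bits.
static void makeAacSpec(int objectType, int freqIndex, int channels, BYTE out[2]) {
    out[0] = (BYTE) ((objectType << 3) | ((freqIndex >> 1) & 0x07));
    out[1] = (BYTE) (((freqIndex & 0x01) << 7) | ((channels & 0x0F) << 3));
}

// Example: AAC LC (objectType 2), 44100 Hz (index 4), stereo (2) yields 0x12 0x10.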
