http://blog.csdn.net/yujun411522/article/details/46418491
This article is from: [yujun411522's blog]
service servicemanager /system/bin/servicemanager
    class core          # belongs to the core class
    user system         # runs as the system user
    group system        # and the system group
    critical            # critical service: if it restarts too many times within a window, the system reboots
    onrestart restart zygote   # restarting servicemanager also restarts zygote and media
    onrestart restart media
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    // 1. Open the binder device and set up IPC
    bs = binder_open(128*1024);

    // 2. Register as the context manager
    if (binder_become_context_manager(bs)) {
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    // Enter an infinite loop, waiting for IPC data
    binder_loop(bs, svcmgr_handler);
    return 0;
}
struct binder_state *binder_open(unsigned mapsize)
{
    // mapsize = 128*1024, i.e. 128 KB
    // Allocate the binder_state structure
    struct binder_state *bs;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    // Open the binder device for reading and writing
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open device (%s)\n", strerror(errno));
        goto fail_open;
    }

    // Map the device into the current process's virtual address space,
    // i.e. into the ServiceManager process
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    /* TODO: check version */

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
struct binder_state
{
    int fd;            // file descriptor for /dev/binder
    void *mapped;      // start address of the mmap'ed region
    unsigned mapsize;  // size of the mmap'ed region
};
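For symmetry, servicemanager's binder.c also provides binder_close, which tears this state down by unmapping the region and closing the fd (shown here as a short sketch; it is not part of the startup path):

void binder_close(struct binder_state *bs)
{
    munmap(bs->mapped, bs->mapsize);  // release the mapped region
    close(bs->fd);                    // close /dev/binder
    free(bs);                         // free the binder_state itself
}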
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    // Called here with bs->fd, BINDER_SET_CONTEXT_MGR, 0
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    // Here cmd == BINDER_SET_CONTEXT_MGR
    switch (cmd) {
    case BINDER_WRITE_READ:
        ......
    case BINDER_SET_MAX_THREADS:
        ......
    case BINDER_SET_CONTEXT_MGR:
        // There can be only one context manager
        if (binder_context_mgr_node != NULL) {
            ret = -EBUSY;
            goto err;
        }
        // If a context-manager uid is already recorded, the caller must match it
        if (binder_context_mgr_uid != -1) {
            if (binder_context_mgr_uid != current->cred->euid) {
                ret = -EPERM;
                goto err;
            }
        } else
            // Not recorded yet: remember the caller's euid
            binder_context_mgr_uid = current->cred->euid;
        // Create the context-manager node
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        // Bump the reference counts
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
        break;
    case BINDER_THREAD_EXIT:
        ......
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
    ......
}
As you can see, binder_ioctl handles the various command types — BINDER_WRITE_READ, BINDER_SET_MAX_THREADS, BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT — all of which will come up again later.
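For reference, BINDER_WRITE_READ carries a struct binder_write_read, which describes both the outgoing (write) and incoming (read) buffers in a single ioctl. Its layout in the driver headers of this era looks roughly like this (a sketch for orientation):

struct binder_write_read {
    signed long   write_size;     /* bytes of BC_* commands to write */
    signed long   write_consumed; /* bytes consumed by the driver */
    unsigned long write_buffer;   /* user-space address of the write data */
    signed long   read_size;      /* bytes available for BR_* replies */
    signed long   read_consumed;  /* bytes the driver actually filled in */
    unsigned long read_buffer;    /* user-space address of the read buffer */
};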
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    // Call binder_write to send BC_ENTER_LOOPER to the driver
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {  // infinite processing loop
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        // Call ioctl again to read incoming IPC data
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        ...
        // Parse what was read with binder_parse
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        ....
    }
}
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    // bs as before; data points at readbuf, whose first word is BC_ENTER_LOOPER
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;  // this is a pure write, so read_size == 0
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    // Call ioctl with the BINDER_WRITE_READ command
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    ..
    return res;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ......
    case BINDER_WRITE_READ: {  // cmd == BINDER_WRITE_READ
        struct binder_write_read bwr;
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        // write_size was just set to a positive value, so this branch is taken
        if (bwr.write_size > 0) {
            // which calls binder_thread_write
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            ..
        }
        // read_size was set to 0, so this branch is skipped
        if (bwr.read_size > 0) {
            ...
        }
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
}

Since bwr.write_size > 0 and bwr.read_size == 0 at this point, only binder_thread_write is called:
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        ....
        case BC_ENTER_LOOPER:
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;  // mark an invalid state
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;  // mark the thread as having entered the loop
            break;
        ....
        }
    }
    return 0;
}
The BC_ENTER_LOOPER case marks the thread's looper state with BINDER_LOOPER_STATE_ENTERED, indicating that this thread has entered the binder loop.
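The looper field is a bitmask; the states the driver tracks in this version look roughly like this (a sketch for orientation, taken from the kernel binder.c of the same era):

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01,  /* thread registered via BC_REGISTER_LOOPER */
    BINDER_LOOPER_STATE_ENTERED     = 0x02,  /* thread entered the loop via BC_ENTER_LOOPER */
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10,
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};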
After binder_thread_write returns, control goes back to binder_write in user space; once binder_write completes, binder_loop enters its infinite loop:
for (;;) {  // infinite processing loop
    bwr.read_size = sizeof(readbuf);
    bwr.read_consumed = 0;
    bwr.read_buffer = (unsigned) readbuf;

    // Call ioctl again
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    ...
    res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
    ....
}
2. The infinite loop
case BINDER_WRITE_READ: {
    struct binder_write_read bwr;
    ..
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto err;
    }
    ...
    if (bwr.write_size > 0) {  // write_size == 0 this time, so this branch is skipped
        ....
    }
    if (bwr.read_size > 0) {
        // read_size > 0, so binder_thread_read is called
        ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto err;
        }
    }
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto err;
    }
    break;
}
This time binder_thread_read is called; its job is to read the incoming IPC data, blocking if nothing is pending. Once the data has been read, binder_parse is called to parse it:
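The BR_TRANSACTION payload that binder_parse walks over is a struct binder_txn, servicemanager's user-space view of a transaction. Its layout, roughly as declared in servicemanager's binder.h of this era (a reference sketch), is:

struct binder_txn
{
    void *target;        /* target binder; equals svcmgr_handle for ServiceManager */
    void *cookie;
    uint32_t code;       /* e.g. SVC_MGR_ADD_SERVICE, SVC_MGR_CHECK_SERVICE */
    uint32_t flags;

    uint32_t sender_pid;
    uint32_t sender_euid;

    uint32_t data_size;  /* size of the serialized arguments */
    uint32_t offs_size;  /* size of the binder-object offset array */
    void *data;          /* the serialized arguments themselves */
    void *offs;
};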
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
        switch(cmd) {
        .....
        case BR_TRANSACTION: {
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {  // func is the handler passed into binder_loop; here it is svcmgr_handler
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                // Initialize the msg and reply binder_io structures
                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                // Call the handler, i.e. svcmgr_handler, to process the transaction
                res = func(bs, txn, &msg, &reply);
                // Send the result back through the binder driver
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        .....
        }
    }
    return r;
}
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

//    LOGI("target=%p code=%d pid=%d uid=%d\n",
//         txn->target, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target != svcmgr_handle)
        return -1;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {  // the code the client wrote when adding or looking up a service
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        // Look the service up
        ptr = do_find_service(bs, s, len);
        if (!ptr)
            break;
        bio_put_ref(reply, ptr);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = bio_get_ref(msg);
        // Register the service
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        unsigned n = bio_get_uint32(msg);

        si = svclist;
        // Walk the service list
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        LOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid)
{
    struct svcinfo *si;
    ..
    // Permission check: is this uid allowed to register the service?
    if (!svc_can_register(uid, s)) {
        LOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
             str8(s), ptr, uid);
        return -1;
    }

    si = find_svc(s, len);
    if (si) {  // the service is already registered
        if (si->ptr) {
            svcinfo_death(bs, si);
        }
        si->ptr = ptr;
    } else {
        // Allocate memory for the newly registered service
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {  // out of memory
            LOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        // Link the new service into svclist
        si->next = svclist;
        svclist = si;
    }

    binder_acquire(bs, ptr);
    binder_link_to_death(bs, ptr, &si->death);
    return 0;
}
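do_add_service links each registered service into the global svclist as a struct svcinfo node. For reference, the node looks roughly like this (a sketch based on service_manager.c of the same era):

struct svcinfo
{
    struct svcinfo *next;       /* next entry in svclist */
    void *ptr;                  /* the binder reference of the service */
    struct binder_death death;  /* death-notification callback and cookie */
    unsigned len;               /* length of the service name */
    uint16_t name[0];           /* UTF-16 service name, stored inline */
};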
int svc_can_register(unsigned uid, uint16_t *name)
{
    unsigned n;

    if ((uid == 0) || (uid == AID_SYSTEM))
        return 1;

    for (n = 0; n < sizeof(allowed) / sizeof(allowed[0]); n++)
        if ((uid == allowed[n].uid) && str16eq(name, allowed[n].name))
            return 1;

    return 0;
}
static struct {
    unsigned uid;       // uid allowed to register
    const char *name;   // service name that uid may register
} allowed[] = {
#ifdef LVMX
    { AID_MEDIA, "com.lifevibes.mx.ipc" },
#endif
    { AID_MEDIA, "media.audio_flinger" },
    { AID_MEDIA, "media.player" },
    { AID_MEDIA, "media.camera" },
    { AID_MEDIA, "media.audio_policy" },
    { AID_DRM,   "drm.drmManager" },
    { AID_NFC,   "nfc" },
    { AID_RADIO, "radio.phone" },
    { AID_RADIO, "radio.sms" },
    { AID_RADIO, "radio.phonesubinfo" },
    { AID_RADIO, "radio.simphonebook" },
/* TODO: remove after phone services are updated: */
    { AID_RADIO, "phone" },
    { AID_RADIO, "sip" },
    { AID_RADIO, "isms" },
    { AID_RADIO, "iphonesubinfo" },
    { AID_RADIO, "simphonebook" },
};
void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len)
{
    struct svcinfo *si;
    si = find_svc(s, len);

    if (si && si->ptr) {
        return si->ptr;
    } else {
        return 0;
    }
}
struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return 0;
}
int main(int argc, char** argv)
{
    // Create the ProcessState object and store it in proc
    sp<ProcessState> proc(ProcessState::self());
    // Obtain the ServiceManager proxy object
    sp<IServiceManager> sm = defaultServiceManager();
    LOGI("ServiceManager: %p", sm.get());
    // Register and run the following four services
    AudioFlinger::instantiate();
    MediaPlayerService::instantiate();
    CameraService::instantiate();
    AudioPolicyService::instantiate();
    // Start the thread pool
    ProcessState::self()->startThreadPool();
    // Join the current (main) thread into the pool
    IPCThreadState::self()->joinThreadPool();
}
sp<ProcessState> ProcessState::self()
{
    // Singleton: each process has exactly one ProcessState
    if (gProcess != NULL) return gProcess;

    AutoMutex _l(gProcessMutex);
    if (gProcess == NULL) gProcess = new ProcessState;
    return gProcess;
}
ProcessState::ProcessState()
    : mDriverFD(open_driver())  // call open_driver and store the returned fd in mDriverFD
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        // This maps the binder device into the mediaserver process
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        ....
    }
}
static int open_driver()
{
    // Open the binder device for reading and writing
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        // Close the fd automatically if this process ever calls an exec function
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers;
        // Query the driver's version with the BINDER_VERSION command; the result is stored in vers
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            LOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        // Make sure the driver and user-space protocol versions match
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            LOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
        size_t maxThreads = 15;
        // Tell the binder driver that this process's thread pool holds at most 15 threads
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            LOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        LOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        // gDefaultServiceManager is also a singleton
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            // First call getContextObject, then pass the result to interface_cast
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
        }
    }

    return gDefaultServiceManager;
}
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    // handle == 0
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            // effectively b = new BpBinder(0)
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}
BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
    , mAlive(1)
    , mObitsSent(0)
    , mObituaries(NULL)
{
    LOGV("Creating BpBinder %p handle %d\n", this, mHandle);

    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    IPCThreadState::self()->incWeakHandle(handle);
}
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        // There is a get here; where is the matching set? In the constructor below.
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        // Create an IPCThreadState and store it in TLS, so each thread has exactly one
        return new IPCThreadState;
    }

    if (gShutdown) return NULL;

    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        if (pthread_key_create(&gTLS, threadDestructor) != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),  // keep a reference to the ProcessState in mProcess
      mMyThreadId(androidGetTid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);  // store this object in the gTLS slot
    clearCaller();
    mIn.setDataCapacity(256);   // mIn and mOut are both Parcels; reserve their capacity
    mOut.setDataCapacity(256);
}
void IPCThreadState::incWeakHandle(int32_t handle)  // handle == 0
{
    // Queue BC_INCREFS and the handle into mOut
    mOut.writeInt32(BC_INCREFS);
    mOut.writeInt32(handle);
}
sp<IInterface> IBinder::queryLocalInterface(const String16& descriptor)
{
    return NULL;
}
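Since BpBinder does not override queryLocalInterface, interface_cast<IServiceManager> ends up wrapping the BpBinder(0) in a BpServiceManager. interface_cast itself is only a thin forwarding template (a sketch of IInterface.h; the asInterface it calls is generated by the IMPLEMENT_META_INTERFACE macro and has the same shape as the IAudioFlinger::asInterface shown later):

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    // Forwards to the asInterface generated by IMPLEMENT_META_INTERFACE, which
    // returns the local implementation if there is one, or a Bp proxy otherwise.
    return INTERFACE::asInterface(obj);
}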
BpServiceManager(const sp<IBinder>& impl)
    : BpInterface<IServiceManager>(impl)  // impl = new BpBinder(0); calls the BpInterface base-class constructor
{
}
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)  // calls the BpRefBase base-class constructor
{
}
class BpRefBase : public virtual RefBase
{
    inline IBinder* remote() { return mRemote; }
    inline IBinder* remote() const { return mRemote; }

private:
    BpRefBase(const BpRefBase& o);
    BpRefBase& operator=(const BpRefBase& o);

    IBinder* const mRemote;  // ends up holding the BpBinder(0) handed down through the constructors
};
static status_t publish()
{
    sp<IServiceManager> sm(defaultServiceManager());
    return sm->addService(String16(SERVICE::getServiceName()), new SERVICE());
}
virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    // Here the parameters are "media.audio_flinger" and new AudioFlinger
    Parcel data, reply;
    // Write the service information into data
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    // Call transact on the BpBinder via remote(); the result comes back in reply
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
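The ADD_SERVICE_TRANSACTION code written here lines up with the SVC_MGR_ADD_SERVICE code that svcmgr_handler switches on: the proxy-side enum starts at IBinder::FIRST_CALL_TRANSACTION (1), and service_manager.c defines its codes with the same values. A sketch of the two enums as they appear in this era of the source:

// IServiceManager.h (proxy side)
enum {
    GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,  // 1
    CHECK_SERVICE_TRANSACTION,                                  // 2
    ADD_SERVICE_TRANSACTION,                                    // 3
    LIST_SERVICES_TRANSACTION,                                  // 4
};

// service_manager.c (ServiceManager side)
enum {
    SVC_MGR_GET_SERVICE = 1,
    SVC_MGR_CHECK_SERVICE,
    SVC_MGR_ADD_SERVICE,
    SVC_MGR_LIST_SERVICES,
};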
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();

    flags |= TF_ACCEPT_FDS;

    if (err == NO_ERROR) {
        // First package the data with writeTransactionData
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            // Wait for the reply
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    // Parameters here: BC_TRANSACTION, TF_ACCEPT_FDS, 0, ADD_SERVICE_TRANSACTION, data, NULL
    binder_transaction_data tr;  // the structure the binder driver expects

    tr.target.handle = handle;   // 0
    tr.code = code;              // ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;      // TF_ACCEPT_FDS
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();  // check for errors
    if (err == NO_ERROR) {
        // No error: fill in the payload from the Parcel
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        ...
    } else {
        return (mLastError = err);
    }

    // Write BC_TRANSACTION followed by tr into mOut
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
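binder_transaction_data is the kernel-facing description of one transaction. Roughly, in the driver headers of this era (a reference sketch), it looks like:

struct binder_transaction_data {
    union {
        size_t handle;  /* target handle for outgoing transactions (0 = ServiceManager) */
        void   *ptr;    /* target BBinder pointer for incoming transactions */
    } target;
    void *cookie;       /* target object cookie for incoming transactions */
    unsigned int code;  /* e.g. ADD_SERVICE_TRANSACTION */
    unsigned int flags; /* e.g. TF_ACCEPT_FDS, TF_ONE_WAY */

    pid_t sender_pid;
    uid_t sender_euid;
    size_t data_size;    /* bytes of serialized data */
    size_t offsets_size; /* bytes of flat_binder_object offsets */

    union {
        struct {
            const void *buffer;   /* the Parcel data */
            const void *offsets;  /* offsets of binder objects inside the data */
        } ptr;
        uint8_t buf[8];
    } data;
};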
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;  // first talk to the driver
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;          // bail out on error
        if (mIn.dataAvail() == 0) continue; // nothing returned yet, go around again

        cmd = mIn.readInt32();  // read the returned BR_* command; anything not listed below falls to default

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            ..
        case BR_DEAD_REPLY:
            ..
        case BR_FAILED_REPLY:
            ..
        case BR_ACQUIRE_RESULT:
            ..
        case BR_REPLY:
            ..
        default:
            err = executeCommand(cmd);  // e.g. BR_TRANSACTION is handled in executeCommand
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }

    return err;
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;  // the payload carried by the BINDER_WRITE_READ command

    // dataSize() is how many bytes are stored and dataPosition() is how far we have read;
    // if dataPosition() >= dataSize(), the previous batch of input has been fully consumed
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (long unsigned int)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();  // size and address of the read buffer
        bwr.read_buffer = (long unsigned int)mIn.data();
    } else {
        bwr.read_size = 0;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
#if defined(HAVE_ANDROID_OS)
        // Send the BINDER_WRITE_READ command to the binder driver
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
    } while (err == -EINTR);

    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < (ssize_t)mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);  // only partially consumed: drop what was written
            else
                mOut.setDataSize(0);                 // fully consumed: reset mOut
        }
        if (bwr.read_consumed > 0) {  // some data was read back
            mIn.setDataSize(bwr.read_consumed);  // how much is available to read
            mIn.setDataPosition(0);              // start reading from the beginning
        }
        return NO_ERROR;
    }

    return err;
}
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;

    switch (cmd) {
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            if (result != NO_ERROR) break;
            ..
            Parcel reply;
            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);
                // Dispatch to BBinder::transact
                const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
                if (error < NO_ERROR) reply.setError(error);
            } else {
                const status_t error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
                if (error < NO_ERROR) reply.setError(error);
            }

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
        }
        break;
    default:
        printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
        result = UNKNOWN_ERROR;
        break;
    }

    return result;
}
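The elided part of the BR_TRANSACTION case is where the buffer Parcel used above comes from: it is wrapped directly around the data the driver delivered, without copying. Roughly (a sketch based on IPCThreadState.cpp of this era):

Parcel buffer;
// Point the Parcel at the transaction's data and binder-object offsets in place;
// freeBuffer is the callback that later tells the driver the buffer can be released.
buffer.ipcSetDataReference(
    reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
    tr.data_size,
    reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
    tr.offsets_size / sizeof(size_t),
    freeBuffer, this);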
// Start the thread pool
ProcessState::self()->startThreadPool();
// Join the current (main) thread into the pool
IPCThreadState::self()->joinThreadPool();
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);  // spawn the first pooled thread
    }
}
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        int32_t s = android_atomic_add(1, &mThreadPoolSeq);
        char buf[32];
        sp<Thread> t = new PoolThread(isMain);
        t->run(buf);
    }
}
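PoolThread is a small helper defined inside ProcessState.cpp; its threadLoop simply calls joinThreadPool on the new thread, so every pooled thread ends up in the same command loop as the main thread. Roughly:

class PoolThread : public Thread
{
public:
    PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }

protected:
    virtual bool threadLoop()
    {
        // Each pooled thread registers itself with the driver and processes commands
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;  // run once; joinThreadPool only returns when the thread leaves the pool
    }

    const bool mIsMain;
};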
void IPCThreadState::joinThreadPool(bool isMain)
{
    // isMain == true here, so write BC_ENTER_LOOPER
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    androidSetThreadSchedulingGroup(mMyThreadId, ANDROID_TGROUP_DEFAULT);

    status_t result;
    do {
        int32_t cmd;

        // When we've cleared the incoming command queue, process any pending derefs
        if (mIn.dataPosition() >= mIn.dataSize()) {
            size_t numPending = mPendingWeakDerefs.size();
            if (numPending > 0) {
                for (size_t i = 0; i < numPending; i++) {
                    RefBase::weakref_type* refs = mPendingWeakDerefs[i];
                    refs->decWeak(mProcess.get());
                }
                mPendingWeakDerefs.clear();
            }

            numPending = mPendingStrongDerefs.size();
            if (numPending > 0) {
                for (size_t i = 0; i < numPending; i++) {
                    BBinder* obj = mPendingStrongDerefs[i];
                    obj->decStrong(mProcess.get());
                }
                mPendingStrongDerefs.clear();
            }
        }

        // now get the next command to be processed, waiting if necessary
        // (this is where the thread talks to the binder driver)
        result = talkWithDriver();
        if (result >= NO_ERROR) {
            size_t IN = mIn.dataAvail();
            if (IN < sizeof(int32_t)) continue;
            cmd = mIn.readInt32();
            // Execute the command that was just read
            result = executeCommand(cmd);
        }

        androidSetThreadSchedulingGroup(mMyThreadId, ANDROID_TGROUP_DEFAULT);

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    // On the way out, tell the driver we are leaving the loop: BC_EXIT_LOOPER
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
{
    Mutex::Autolock _l(gLock);
    if (gAudioFlinger.get() == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.audio_flinger"));
            if (binder != 0)
                break;
            usleep(500000); // 0.5 s
        } while(true);
        if (gAudioFlingerClient == NULL) {
            gAudioFlingerClient = new AudioFlingerClient();
        } else {
            if (gAudioErrorCallback) {
                gAudioErrorCallback(NO_ERROR);
            }
        }
        binder->linkToDeath(gAudioFlingerClient);
        gAudioFlinger = interface_cast<IAudioFlinger>(binder);
        gAudioFlinger->registerClient(gAudioFlingerClient);
    }
    return gAudioFlinger;
}
virtual sp<IBinder> getService(const String16& name) const
{
    unsigned n;
    for (n = 0; n < 5; n++) {
        // Try at most 5 times, calling checkService each time
        sp<IBinder> svc = checkService(name);
        if (svc != NULL) return svc;
        LOGI("Waiting for service %s...\n", String8(name).string());
        sleep(1);
    }
    return NULL;
}
virtual sp<IBinder> checkService( const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);  // "media.audio_flinger"
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    .....
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = do_find_service(bs, s, len);
        if (!ptr)
            break;
        bio_put_ref(reply, ptr);  // put a reference to the found service into reply
        return 0;
}
android::sp<IAudioFlinger> IAudioFlinger::asInterface(
        const android::sp<android::IBinder>& obj)
{
    android::sp<IAudioFlinger> intr;
    if (obj != NULL) {
        intr = static_cast<IAudioFlinger*>(
            obj->queryLocalInterface(            // BpBinder::queryLocalInterface returns NULL
                IAudioFlinger::descriptor).get());
        if (intr == NULL) {
            // So this branch runs: effectively new BpAudioFlinger(binder), where binder is
            // the BpBinder wrapping the handle that ServiceManager returned for media.audio_flinger
            intr = new BpAudioFlinger(obj);
        }
    }
    return intr;
}
virtual status_t setMasterVolume(float value)
{
    Parcel data, reply;
    data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
    data.writeFloat(value);
    remote()->transact(SET_MASTER_VOLUME, data, &reply);
    return reply.readInt32();
}
sp<BBinder> b((BBinder*)tr.cookie);
// dispatch to BBinder::transact
const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);

This calls BBinder::transact:
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            err = onTransact(code, data, reply, flags);
            break;
    }

    if (reply != NULL) {
        reply->setDataPosition(0);
    }

    return err;
}
status_t AudioFlinger::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioFlinger::onTransact(code, data, reply, flags);
}
status_t BnAudioFlinger::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case SET_MASTER_VOLUME: {
            CHECK_INTERFACE(IAudioFlinger, data, reply);
            reply->writeInt32( setMasterVolume(data.readFloat()) );
            return NO_ERROR;
        } break;
        case XXXX:
            break;
        ....
    }
}
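CHECK_INTERFACE on the Bn side is the counterpart of the writeInterfaceToken call made by the proxy (and of the interface-name check svcmgr_handler does by hand): it verifies that the RPC header names the expected interface before the arguments are read. Conceptually it does something like the following (an illustrative sketch, not the exact macro body):

// Illustrative only: verify the interface token at the front of the Parcel.
#define CHECK_INTERFACE(interface, data, reply)                             \
    do {                                                                    \
        if (!data.enforceInterface(interface::getInterfaceDescriptor())) { \
            LOGW("Call incorrectly routed to " #interface);                \
            return PERMISSION_DENIED;                                       \
        }                                                                   \
    } while (0)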