App Process: Initializing Binder and Obtaining the ServiceManager Service, in Detail

Data Structures

ProcessState: represents the process's binder state in user space; each process has exactly one ProcessState object. It is responsible for opening the binder driver and setting up the thread pool.

IPCThreadState: in user space, each thread exchanges commands with the binder driver through its own IPCThreadState.
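A minimal sketch of how the two fit together, following the pattern used by native service entry points (the call site shown here is illustrative, e.g. frameworks/av/media/mediaserver/main_mediaserver.cpp):

sp<ProcessState> proc = ProcessState::self(); // per process: opens /dev/binder and mmaps the receive buffer
proc->startThreadPool();                      // spawns the first (main) binder thread
IPCThreadState::self()->joinThreadPool();     // optionally parks the calling thread in the pool as well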

App Process Startup

The init process starts the zygote process, the ServiceManager process, the lmkd daemon, and other services (system_server is in turn forked by zygote).

zygote does not use binder for communication; it receives requests over a socket. After ZygoteInit.main() runs, it enters the runSelectLoop() loop; when a client connects, ZygoteConnection.runOnce() executes and, after several layers of calls, forks the new application process.

On the Java side, a process is started via Process.start(). In ZygoteConnection.runOnce():

// ZygoteConnection.runOnce
boolean runOnce(ZygoteServer zygoteServer) throws Zygote.MethodAndArgsCaller {
    pid = Zygote.forkAndSpecialize(parsedArgs.uid, parsedArgs.gid, parsedArgs.gids,
                parsedArgs.debugFlags, rlimits, parsedArgs.mountExternal, parsedArgs.seInfo,
                parsedArgs.niceName, fdsToClose, fdsToIgnore, parsedArgs.instructionSet,
                parsedArgs.appDataDir);
    ...
    if (pid == 0) {
        // after the fork, the child process sees pid == 0 and takes this branch
        zygoteServer.closeServerSocket(); // the child stops listening on zygote's socket
        IoUtils.closeQuietly(serverPipeFd);
        serverPipeFd = null;
        handleChildProc(parsedArgs, descriptors, childPipeFd, newStderr);
        return true;
    }
}
 
private void handleChildProc(Arguments parsedArgs,
        FileDescriptor[] descriptors, FileDescriptor pipeFd, PrintStream newStderr)
        throws Zygote.MethodAndArgsCaller {
    ...
    ZygoteInit.zygoteInit(parsedArgs.targetSdkVersion,
            parsedArgs.remainingArgs, null /* classLoader */);
}
 
// ZygoteInit.java
public static final void zygoteInit(int targetSdkVersion, String[] argv,
        ClassLoader classLoader) throws Zygote.MethodAndArgsCaller {
    RuntimeInit.commonInit();
    ZygoteInit.nativeZygoteInit();
    RuntimeInit.applicationInit(targetSdkVersion, argv, classLoader);
}

nativeZygoteInit

// frameworks/base/core/jni/AndroidRuntime.cpp
static void com_android_internal_os_ZygoteInit_nativeZygoteInit(JNIEnv* env, jobject clazz)
{
    gCurRuntime->onZygoteInit();
}
 
// frameworks/base/cmds/app_process/app_main.cpp
virtual void onZygoteInit()
{
    sp<ProcessState> proc = ProcessState::self();
    proc->startThreadPool();
}

A process has only one ProcessState instance, and the binder device is opened and the memory mapping established only when the ProcessState object is constructed.

// frameworks/native/libs/binder/ProcessState.cpp
// singleton
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }
    gProcess = new ProcessState("/dev/binder");
    return gProcess;
}

ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            close(mDriverFD);
            mDriverFD = -1;
            mDriverName.clear();
        }
    }
}

open_driver() and mmap() were analyzed earlier; see https://blog.csdn.net/qq_15893929/article/details/103965875
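For quick reference, a condensed sketch of what they do (from ProcessState.cpp of the same era; error handling removed):

// The mmap'ed receive buffer is 1 MB minus two guard pages.
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2)

static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);          // open /dev/binder
    if (fd >= 0) {
        int vers = 0;
        ioctl(fd, BINDER_VERSION, &vers);               // verify the protocol version matches
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS; // 15 by default
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads); // tell the driver the pool limit
    }
    return fd;
}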

startThreadPool() spawns the first binder thread, a PoolThread marked as the main thread, and registers it with the driver:

void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}

// spawn a new pooled thread
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}
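makeBinderThreadName() just numbers the threads; a sketch of it (the exact format string differs across releases, e.g. older builds used "Binder_%X"):

String8 ProcessState::makeBinderThreadName()
{
    int32_t s = android_atomic_add(1, &mThreadPoolSeq); // per-process sequence counter
    String8 name;
    name.appendFormat("Binder:%d_%X", getpid(), s);     // e.g. "Binder:1234_2"
    return name;
}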

PoolThread represents a binder thread. Note that threadLoop() returns false, so it runs only once: joinThreadPool() contains its own command loop and returns only when the thread leaves the pool.

class PoolThread : public Thread
{
public:
    explicit PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }

protected:
    virtual bool threadLoop()
    {
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }

    const bool mIsMain;
};

IPCThreadState talks to the binder driver:

void IPCThreadState::joinThreadPool(bool isMain)
{
    // the main thread announces itself with BC_ENTER_LOOPER; driver-requested threads use BC_REGISTER_LOOPER
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do {
        processPendingDerefs(); // flush pending reference decrements
        // now get the next command to be processed, waiting if necessary
        result = getAndExecuteCommand(); // block for the next command, then execute it

        if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
            ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
                  mProcess->mDriverFD, result);
            abort();
        }

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}

status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        ...
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
        result = executeCommand(cmd);
        pthread_mutex_lock(&mProcess->mThreadCountLock);
        ...
    }
    return result;
}

// ioctl with the binder driver: fill in bwr, then update mOut/mIn from what was consumed
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    ...

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR);

    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }

    return err;
}
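The elided setup at the top of talkWithDriver() fills bwr roughly as follows (condensed from the real implementation; mOut carries outgoing BC_* commands, mIn receives BR_* replies):

const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size   = outAvail;                 // bytes of pending BC_* commands
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) {
    bwr.read_size   = mIn.dataCapacity();    // room for the driver's replies/work items
    bwr.read_buffer = (uintptr_t)mIn.data();
} else {
    bwr.read_size   = 0;
    bwr.read_buffer = 0;
}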
 
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
    case BR_ACQUIRE:
        ...
        break;
    case BR_TRANSACTION:
        ...
        break;
    }
    return result;
}

Since several transactions may need to be handled concurrently, how are new binder threads added?

When an IPCThreadState thread issues ioctl(BINDER_WRITE_READ), the kernel enters binder_ioctl_write_read. The data format exchanged between a process and the driver is binder_write_read (the bwr above), while the payload carried between two processes is binder_transaction_data. If the bwr has data in its read buffer, more binder threads may be needed to process the incoming binder_transaction_data. The two structures are shown below, followed by binder_thread_read.
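For reference, the two structures as declared in the kernel UAPI header (uapi/linux/android/binder.h):

struct binder_write_read {
    binder_size_t    write_size;     // bytes of commands available in write_buffer
    binder_size_t    write_consumed; // bytes the driver actually consumed
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;      // capacity of read_buffer
    binder_size_t    read_consumed;  // bytes the driver filled in
    binder_uintptr_t read_buffer;
};

struct binder_transaction_data {
    union {
        __u32            handle;     // target: remote reference (proxy side)
        binder_uintptr_t ptr;        // target: local BBinder (entity side)
    } target;
    binder_uintptr_t cookie;         // extra data attached to the target
    __u32 code;                      // transaction code, e.g. PING_TRANSACTION
    __u32 flags;
    pid_t sender_pid;
    uid_t sender_euid;
    binder_size_t data_size;         // size of the payload
    binder_size_t offsets_size;      // size of the offsets array locating binder objects
    union {
        struct {
            binder_uintptr_t buffer;  // the payload itself
            binder_uintptr_t offsets; // offsets of flat_binder_objects within the payload
        } ptr;
        __u8 buf[8];
    } data;
};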

static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                              binder_uintptr_t binder_buffer, size_t size,
                              binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    int ret = 0;
    int wait_for_proc_work;

retry:
    binder_inner_proc_lock(proc);
    // is this thread idle, i.e. both its transaction stack and todo queue are empty?
    wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
    binder_inner_proc_unlock(proc);
    // the thread is about to sleep; mark it BINDER_LOOPER_STATE_WAITING
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (non_block) {
        if (!binder_has_work(thread, wait_for_proc_work))
            ret = -EAGAIN;
    } else {
        // no work item for this thread: block until there is one
        ret = binder_wait_for_work(thread, wait_for_proc_work);
    }
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
    if (ret)
        return ret;

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data_secctx tr;
        struct binder_transaction_data *trd = &tr.transaction_data;
        struct binder_work *w; // dequeued from the thread's or process's todo list (dequeue code elided)
        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            ...
        } break;
        case BINDER_WORK_RETURN_ERROR: {
            ...
        } break;
        }
        ... // copy the BR_* command and binder_transaction_data to user space, then break out
    }
done:
    *consumed = ptr - buffer;
    binder_inner_proc_lock(proc);

    // When all of these conditions hold, record a pending thread request and
    // return BR_SPAWN_LOOPER so user space creates one more binder thread.
    if (proc->requested_threads == 0 &&
        list_empty(&thread->proc->waiting_threads) &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                           BINDER_LOOPER_STATE_ENTERED))
        /* the user-space code fails to spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_inner_proc_unlock(proc);
        // hand BR_SPAWN_LOOPER back to user space
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    } else {
        binder_inner_proc_unlock(proc);
    }
    return 0;
}

Now look at how the user-space IPCThreadState handles BR_SPAWN_LOOPER: executeCommand() calls spawnPooledThread(false), and the new PoolThread then registers itself with the driver via BC_REGISTER_LOOPER in joinThreadPool().

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
    case BR_SPAWN_LOOPER:
        // spawn a non-main pooled thread, as requested by the driver
        mProcess->spawnPooledThread(false);
        break;
    }
    return result;
}

Obtaining the ServiceManager Service

Before that, a few data structures need introducing.

IBinder

class IBinder : public virtual RefBase
{
public:
    // query for a local interface; returns an IInterface if the object lives in this process
    virtual sp<IInterface>  queryLocalInterface(const String16& descriptor);
    virtual status_t        pingBinder() = 0;
    // send an IPC request
    virtual status_t        transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0) = 0;
    // register a death-notification callback
    virtual status_t        linkToDeath(const sp<DeathRecipient>& recipient, void* cookie = NULL,
                                        uint32_t flags = 0) = 0;
    // same process: returns a BBinder, i.e. the binder entity
    virtual BBinder*        localBinder();
    // different process: returns a BpBinder, i.e. the binder reference/proxy
    virtual BpBinder*       remoteBinder();
};

BBinder

In user space, represents the binder entity (the server side).

class BBinder : public IBinder
{
protected:
    // receive an IPC request
    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0);
private:
    std::atomic<Extras*> mExtras;
    void*               mReserved0;
};
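A condensed sketch of BBinder::transact() (Binder.cpp), showing how an incoming transaction is routed to the subclass's onTransact():

status_t BBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);
    status_t err = NO_ERROR;
    switch (code) {
    case PING_TRANSACTION:
        reply->writeInt32(pingBinder()); // answers the liveness check seen later
        break;
    default:
        err = onTransact(code, data, reply, flags); // subclass business logic
        break;
    }
    if (reply != NULL) reply->setDataPosition(0);
    return err;
}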

BpBinder

In user space, represents the binder proxy.

// in the user process, holds the handle of the binder_ref
class BpBinder : public IBinder
{
public:
    inline int32_t handle() const { return mHandle; }
    // send an IPC request
    virtual status_t    transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0);

private:
    const int32_t       mHandle;
    mutable Mutex       mLock;
    volatile int32_t    mAlive;
    volatile int32_t    mObitsSent;
    Vector<Obituary>*   mObituaries;
    ObjectManager       mObjects;  // nested ObjectManager, backed by a KeyedVector<const void*, entry_t>
    Parcel*             mConstantData;
    mutable String16    mDescriptorCache;
};
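A condensed sketch of BpBinder::transact() (BpBinder.cpp): the proxy simply forwards the call, tagged with its handle, to IPCThreadState:

status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        // mHandle identifies this proxy's binder_ref inside the driver
        status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}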

Every process obtains the ServiceManager through the static function defaultServiceManager() in IServiceManager.cpp.

For an App process, obtaining the ServiceManager means obtaining the ServiceManager's proxy: from the binder driver's point of view, the App's binder_proc obtains the corresponding binder_ref; from the App's user-space point of view, the App obtains the BpBinder for the ServiceManager.

// frameworks/native/libs/binder/IServiceManager.cpp
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }
    return gDefaultServiceManager;
}

ProcessState::self()->getContextObject(NULL) fetches the ServiceManager proxy; if it is NULL, the ServiceManager process has not started yet, so the loop sleeps for a second and retries until it succeeds.
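interface_cast itself is trivial: it is defined in IInterface.h and delegates to the asInterface() generated by the IMPLEMENT_META_INTERFACE macro, which wraps the IBinder in a BpInterface subclass (BpServiceManager here):

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj); // e.g. IServiceManager::asInterface -> new BpServiceManager(obj)
}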

ProcessState is a per-process singleton representing the current process's binder state:

// frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    // handle 0 is the ServiceManager; fetch its BpBinder
    return getStrongProxyForHandle(0);
}

struct handle_entry {
    IBinder* binder; // points to a BBinder or a BpBinder
    RefBase::weakref_type* refs;
};
 
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    // look up the BBinder or BpBinder for this handle; here it is a BpBinder
    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                // handle 0 is the ServiceManager; ping it first
                Parcel data;
                // bottoms out in binder_ioctl: check whether the binder behind this handle is dead
                status_t status = IPCThreadState::self()->transact(0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                    return NULL;
            }
            // create the BpBinder
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
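lookupHandleLocked() backs this with a simple vector indexed by handle, grown on demand; a condensed sketch:

ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N = mHandleToObject.size();
    if (N <= (size_t)handle) {
        // grow the vector with empty entries up to and including this handle
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        status_t err = mHandleToObject.insertAt(e, N, handle + 1 - N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}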

More reading: http://gityuan.com/2016/10/29/binder-thread-pool/

Via ProcessState::self()->getContextObject(NULL), the App process obtains the ServiceManager's BpBinder, whose handle is 0.

gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL)); casts it to IServiceManager. When the App process then queries the ServiceManager for the services it needs, that query is itself binder cross-process communication.
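A hypothetical usage sketch of such a query (the service name "activity" is illustrative; real callers usually go through higher-level wrappers):

sp<IServiceManager> sm = defaultServiceManager();
// getService() retries internally until the named service has been registered;
// under the hood this is a transaction through the handle-0 BpBinder, and the
// reply carries a new handle that becomes the target service's BpBinder
sp<IBinder> binder = sm->getService(String16("activity"));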

Original article by 奋斗. If republishing, please credit the source: https://blog.ytso.com/tech/app/6240.html
