文章目录
ServiceManager负责binder的查询和注册,由init进程通过解析init.rc文件而创建的
- do_add_service()函数:注册服务
- do_find_service()函数:查找服务
- binder_link_to_death()函数:注册服务的死亡通知回调
- binder_send_reply()函数:将注册结果返回给Binder驱动
ServiceManager 的初始化
// frameworks/native/cmds/servicemanager/service_manager.c
int main(int argc, char** argv)
{
    struct binder_state *bs;
    union selinux_callback cb;
    char *driver;

    /* Optional argv[1] selects the binder device node; default /dev/binder. */
    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";
    }

    /* Open the binder driver and mmap a 128 KB read buffer. */
    bs = binder_open(driver, 128*1024);
    if (!bs) {
        /* binder_open returns NULL on open/version/mmap failure. */
        ALOGE("failed to open binder driver %s\n", driver);
        return -1;
    }

    /* Register this process as the one-and-only binder context manager. */
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    /* Route SELinux audit and log output through local callbacks. */
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    /* Enter the command loop; svcmgr_handler services incoming transactions. */
    binder_loop(bs, svcmgr_handler);
    return 0;
}
binder_open
这里的binder_open 不是内核binder驱动的binder_open,而是在用户进程下。
// frameworks/native/cmds/servicemanager/binder.c
/*
 * Userspace binder_open (not the kernel driver's binder_open): opens the
 * binder device, verifies the protocol version, and maps `mapsize` bytes
 * of driver memory into this process.  Returns NULL on any failure.
 */
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    /* Opening the device invokes the driver's binder_open, which creates a
     * binder_proc for this process. */
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open %s (%s)\n",
                driver, strerror(errno));
        goto fail_open;
    }

    /* Query the driver's protocol version (binder_ioctl, BINDER_VERSION)
     * and make sure it matches what this binary was built against. */
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    /* Map `mapsize` bytes of driver buffer space read-only into this
     * process (invokes the driver's binder_mmap). */
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
流程:
- ServiceManager进程打开binder驱动,驱动调用了binder_open(),binder内核会创建一个对应的binder_proc,初始化之后加入全局的红黑树。
- ServiceManager进程查询binder版本,驱动调用了binder_ioctl(),type是BINDER_VERSION
- ServiceManager进程mmap申请内核内存,驱动调用了binder_mmap(),binder驱动只为进程分配了一个buffer,但还没将内核虚拟空间和进程虚拟空间、物理内存映射
// Per-process handle to the binder driver, filled in by binder_open().
struct binder_state {
int fd; // fd returned by open() on the binder device
void *mapped; // start of the region returned by mmap()
size_t mapsize; // size of the mapped region (128*1024 in this file)
};
binder驱动的BINDER_VERSION命令
// Kernel entry point for all binder ioctls (excerpt: only the
// BINDER_VERSION arm is shown here; other cases are elided).
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
binder_selftest_alloc(&proc->alloc);
// Block while binder_stop_on_user_error >= 2 (driver error throttle).
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
// Look up (or create) the binder_thread for the calling thread.
thread = binder_get_thread(proc);
switch (cmd) {
…...
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
// Copy BINDER_CURRENT_PROTOCOL_VERSION out to the caller's struct.
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
…...
}
return ret;
}
ioctl(bs->fd, BINDER_VERSION, &vers) 的第三个参数将用户空间binder_version结构体的地址传入到内核驱动中,内核用put_user将BINDER_CURRENT_PROTOCOL_VERSION拷贝到用户空间的arg->protocol_version下。
之后所有的ioctl也是基本也是这样的操作。
在内核空间和用户空间交换数据时,get_user和put_user是两个常用的函数。相对于copy_to_user和copy_from_user,这两个函数主要用于完成一些简单类型变量(char、int、long等)的拷贝任务;对于一些复合类型的变量,比如数据结构或者数组类型,get_user和put_user函数就无法胜任了
binder_become_context_manager
// servicemanager/binder.c
int binder_become_context_manager(struct binder_state *bs)
{
// Ask the driver to make this process the context manager; handled in the
// kernel by binder_ioctl -> binder_ioctl_set_ctx_mgr.
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
binder驱动的 BINDER_SET_CONTEXT_MGR命令
// Kernel dispatch (excerpt: only the BINDER_SET_CONTEXT_MGR arm is shown).
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
…...
case BINDER_SET_CONTEXT_MGR:
// Legacy path passes a NULL flat_binder_object.
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
…...
}
return ret;
}
// 内核注册ServiceManager的binder
// Kernel-side registration of the context manager (ServiceManager).
static int binder_ioctl_set_ctx_mgr(struct file *filp, struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data; // caller's binder_proc, via the file
struct binder_context *context = proc->context; // shared context recording the manager's binder_node and uid
struct binder_node *new_node; // binder_node that will represent ServiceManager
kuid_t curr_euid = current_euid(); // effective uid of the calling task
mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
// A context manager may only be registered once.
ret = -EBUSY;
goto out;
}
ret = security_binder_set_context_mgr(proc->tsk);
// Validate or record the manager's uid.
if (uid_valid(context->binder_context_mgr_uid)) {
..….
} else {
// First registration: remember the caller's euid in the context.
context->binder_context_mgr_uid = curr_euid;
}
// Create the binder_node (fbo may be NULL on the legacy path).
new_node = binder_new_node(proc, fbo);
binder_node_lock(new_node);
// Pin the node with initial strong/weak references.
new_node->local_weak_refs++;
new_node->local_strong_refs++;
new_node->has_strong_ref = 1;
new_node->has_weak_ref = 1;
// Publish the node as the global context manager.
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
// NOTE(review): binder_put_node presumably balances a reference taken by
// binder_new_node — verify against the driver source.
binder_put_node(new_node);
out:
mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
binder_loop
重点来了,ServiceManager进程怎么处理来自其他进程的请求:查询binder服务、注册binder服务
/*
 * Main command loop of ServiceManager: announce this thread to the driver
 * with BC_ENTER_LOOPER, then repeatedly block in BINDER_WRITE_READ and hand
 * each batch of returned commands to binder_parse / `func`.
 */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read io;
    uint32_t rdbuf[32];
    int status;

    /* Nothing to write on this thread; we only ever read below. */
    io.write_size = 0;
    io.write_consumed = 0;
    io.write_buffer = 0;

    /* Tell the driver this thread has entered its command loop. */
    rdbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, rdbuf, sizeof(uint32_t));

    for (;;) {
        io.read_size = sizeof(rdbuf);
        io.read_consumed = 0;
        io.read_buffer = (uintptr_t) rdbuf;

        /* Blocks in the driver until there is incoming work. */
        status = ioctl(bs->fd, BINDER_WRITE_READ, &io);
        if (status < 0)
            break;

        /* Dispatch the BR_* commands the driver wrote into rdbuf;
         * 0 or negative means stop looping. */
        status = binder_parse(bs, 0, (uintptr_t) rdbuf, io.read_consumed, func);
        if (status <= 0)
            break;
    }
}
binder_write比较简单,将BC_XXX命令封装成binder_write_read数据结构,再向ioctl驱动发送BINDER_WRITE_READ,带上binder_write_read数据结构
/*
 * Wrap a buffer of BC_* commands in a binder_write_read and submit it with
 * the BINDER_WRITE_READ ioctl.  The read half is left empty so the call
 * does not wait for incoming work.  Returns the ioctl result (<0 on error).
 */
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;

    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
ServiceManager loop流程
- 向binder驱动发送BC_ENTER_LOOPER命令
- 进入死循环
(1) ioctl读取binder_write_read数据,主要处理BC_TRANSACTION
(2) 解析binder_write_read数据,主要处理BR_TRANSACTION
binder驱动的BINDER_WRITE_READ命令
// binder驱动
// Kernel dispatch (excerpt: only the BINDER_WRITE_READ arm is shown).
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
....
switch (cmd) {
case BINDER_WRITE_READ:
// All read/write traffic funnels through binder_ioctl_write_read.
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
break;
}
….
}
binder_ioctl_write_read
// Kernel handler for BINDER_WRITE_READ: copies the caller's
// binder_write_read in, drains the write buffer, fills the read buffer,
// then copies the (updated consumed counters) back out.
static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
// Copy the userspace binder_write_read into the kernel copy.
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
// Outgoing commands from userspace (BC_*) to process.
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
}
// Room for incoming commands (BR_*) to deliver back to userspace.
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
}
…
// Copy the updated bwr (consumed counters) back to userspace.
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
binder_thread_write
来看下binder_thread_write怎么处理BC_ENTER_LOOPER命令
// Kernel consumer of userspace BC_* commands (excerpt: only the
// BC_ENTER_LOOPER arm is shown; ptr/end setup and cmd fetch are elided).
static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed)
{
uint32_t cmd;
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
switch (cmd) {
case BC_ENTER_LOOPER:
// A thread that already registered itself may not also "enter" the
// loop; flag the inconsistent state.
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
}
// Mark this thread as having entered its command loop.
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
}
}
}
binder_parse
最后剩下 binder_parse
binder_parse解析BR_XXX命令,从binder驱动返回来的数据
/*
 * Walk a buffer of BR_* commands returned by the driver and dispatch each
 * one (excerpt: the BR_REPLY / BR_DEAD_BINDER bodies are elided in this
 * article).  Returns 1 to keep looping, 0 to stop, -1 on error.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        /* Each entry starts with a 32-bit BR_* command code. */
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch(cmd) {
        case BR_REPLY:
            ...
        case BR_DEAD_BINDER:
            ...
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }
    return r;
}
ServiceManager 注册服务、查询服务
这里不讨论客户端怎么跟ServiceManager交互,后面文章会说明,只讨论ServiceManager提供的两个方法:do_add_service和do_find_service,对应注册binder和查询binder
注册binder服务
/*
 * Register a service named `s` (UTF-16, `len` chars) under binder handle
 * `handle`.  Re-registering an existing name releases the old binder and
 * overrides it.  Returns 0 on success, -1 on rejection or OOM.
 */
int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated, pid_t spid)
{
    struct svcinfo *si;

    /* Reject a null handle and names that are empty or longer than 127. */
    if (!handle || (len == 0) || (len > 127))
        return -1;

    /* Permission check on the registering caller. */
    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
              str8(s, len), handle, uid);
        return -1;
    }

    /* Is this name already registered? */
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            /* Release the old binder before overriding the entry. */
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                  str8(s, len), handle, uid);
            svcinfo_death(bs, si);
        }
        /* Point the entry at the new binder_ref handle. */
        si->handle = handle;
    } else {
        /* New entry: svcinfo plus the NUL-terminated UTF-16 name. */
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                  str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle; /* handle naming the binder_ref */
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        /* Death notification: svcinfo_death clears the handle when the
         * service's hosting process dies. */
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        /* Push onto the global singly linked service list. */
        si->next = svclist;
        svclist = si;
    }

    /* BC_ACQUIRE: take a strong reference on the binder. */
    binder_acquire(bs, handle);
    /* BC_REQUEST_DEATH_NOTIFICATION: register the death callback. */
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}
svcinfo 是每个客户端binder服务在ServiceManager中的存储形式
// One registered service as stored by ServiceManager.
struct svcinfo
{
struct svcinfo *next;// next entry in the svclist singly linked list
uint32_t handle; // handle naming the binder_ref (0 once released)
struct binder_death death; // death-notification callback + cookie
int allow_isolated; // whether isolated processes may look this service up
size_t len; // name length in UTF-16 code units
uint16_t name[0]; // service name (UTF-16), allocated inline after the struct
};
// Head of the singly linked list of every service currently registered
// with ServiceManager.
struct svcinfo *svclist = NULL;
查询binder服务也很简单,遍历svclist找出svcinfo
/*
 * Linear search of svclist for a service whose UTF-16 name matches
 * (s16, len) exactly.  Returns the entry, or NULL if not registered.
 */
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *node = svclist;
    while (node) {
        if (node->len == len &&
            memcmp(s16, node->name, len * sizeof(uint16_t)) == 0) {
            return node;
        }
        node = node->next;
    }
    return NULL;
}
ServiceManager注册binder的死亡通知,svcinfo_death回调释放binder节点
// 负责释放binder节点,清理无效句柄
/*
 * Death-notification callback: release the dead service's binder reference
 * and zero the stale handle so lookups no longer return it.
 */
void svcinfo_death(struct binder_state *bs, void *ptr)
{
    struct svcinfo *svc = (struct svcinfo* ) ptr;
    if (!svc->handle)
        return;
    binder_release(bs, svc->handle);
    svc->handle = 0;
}
向binder驱动ioctl发送BC_RELEASE命令,使得内核释放binder节点,这里不关注binder内核如何释放binder节点
//binder.c
/*
 * Send BC_RELEASE for `target` so the driver drops the strong reference
 * (how the kernel tears down the node is out of scope here).
 */
void binder_release(struct binder_state *bs, uint32_t target)
{
    uint32_t msg[2] = { BC_RELEASE, target };
    binder_write(bs, msg, sizeof(msg));
}
// 向binder驱动ioctl发送BC_ACQUIRE命令,增加Binder的引用计数
void binder_acquire(struct binder_state *bs, uint32_t target)
{
uint32_t cmd[2];
cmd[0] = BC_ACQUIRE;
cmd[1] = target;
binder_write(bs, cmd, sizeof(cmd));
}
// 向binder驱动ioctl发送BC_REQUEST_DEATH_NOTIFICATION命令,注册binder的死亡通知回调
/*
 * Send BC_REQUEST_DEATH_NOTIFICATION for `target`.  The driver remembers
 * the `death` cookie and delivers it back with BR_DEAD_BINDER when the
 * process hosting the binder dies.
 */
void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death)
{
    struct {
        uint32_t cmd;
        struct binder_handle_cookie payload;
    } __attribute__((packed)) request;

    request.cmd = BC_REQUEST_DEATH_NOTIFICATION;
    request.payload.handle = target;
    request.payload.cookie = (uintptr_t) death;
    binder_write(bs, &request, sizeof(request));
}
从上到下都只是写了ServiceManager如何添加、修改binder_ref句柄信息,但是binder_ref哪里来的,在后面文章介绍
查询binder服务
/*
 * Look up a registered service by name and return its binder_ref handle,
 * or 0 if it is absent, released, or the caller is not allowed to see it.
 */
uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    /* svclist also keeps released services around (handle == 0), so a
     * present-but-released entry is treated as not found. */
    struct svcinfo *svc = find_svc(s, len);
    if (!svc || !svc->handle)
        return 0;

    if (!svc->allow_isolated) {
        /* Service refuses isolated callers: reject uids in the
         * isolated app-id range. */
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END)
            return 0;
    }

    /* Permission check: may this caller look up this service? */
    if (!svc_can_find(s, len, spid, uid))
        return 0;

    return svc->handle;
}
原创文章,作者:奋斗,如若转载,请注明出处:https://blog.ytso.com/6242.html