All of the source code below comes from
https://github.com/aosp-mirror/kernel_common.git
Four basic operations on the Binder driver
When a process starts up, it interacts with the binder driver through four operations:
init -> open -> mmap -> ioctl
- init: initialize the driver
- open: open the driver
- mmap: allocate the binder_buffer in the kernel
- ioctl: command interaction between the process and the driver (a user-space sketch of the whole sequence follows this list)
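To make the sequence concrete, here is a minimal user-space sketch of it. The device path /dev/binder and the 1 MB mapping size are assumptions (they mirror what libbinder typically uses) and are not taken from the kernel code below.
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>   /* binder_version, BINDER_VERSION, ... */

int main(void)
{
	/* open: open the driver (triggers binder_open in the kernel) */
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	if (fd < 0) { perror("open /dev/binder"); return 1; }

	/* mmap: ask the driver to set up the binder_buffer (triggers binder_mmap).
	 * 1 MB is an assumed size similar to what libbinder uses; the kernel caps it at 4 MB. */
	void *vm = mmap(NULL, 1 * 1024 * 1024, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
	if (vm == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	/* ioctl: talk to the driver (triggers binder_ioctl) */
	struct binder_version vers;
	if (ioctl(fd, BINDER_VERSION, &vers) == 0)
		printf("binder protocol version: %d\n", vers.protocol_version);

	munmap(vm, 1 * 1024 * 1024);
	close(fd);
	return 0;
}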
Initializing the character device
static int __init binder_init(void)
{
int ret;
char *device_name, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
char *device_names = NULL;
// Initialize the shrinker for the binder buffer page LRU
ret = binder_alloc_shrinker_init();
// Create the debugfs directory /binder, and /binder/proc beneath it
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
if (binder_debugfs_dir_entry_root) {
// Create the /binder/state file
debugfs_create_file("state",
0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
// Create the /binder/stats file
debugfs_create_file("stats",
0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
// Create the /binder/transactions file
debugfs_create_file("transactions",
0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
// Create the /binder/transaction_log file
debugfs_create_file("transaction_log",
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
// Create the /binder/failed_transaction_log file
debugfs_create_file("failed_transaction_log",
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
strcmp(binder_devices_param, "") != 0) {
device_names = kstrdup(binder_devices_param, GFP_KERNEL);
// Create a binder device for every comma-separated name in binder_devices_param
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
}
}
ret = init_binderfs();
return ret;
}
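init_binder_device is not shown in the excerpt; it registers each name as a misc character device whose file_operations point at binder_open, binder_mmap and binder_ioctl. The following is an abbreviated reconstruction of that function, not a verbatim quote, so details may differ between kernel versions.
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	/* Register /dev/<name> as a misc char device backed by binder_fops
	 * (open -> binder_open, mmap -> binder_mmap, unlocked_ioctl -> binder_ioctl, ...) */
	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	/* Each device has its own context; the context manager (ServiceManager) is recorded here */
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);
	return ret;
}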
Opening the binder driver device
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
struct binderfs_info *info;
struct dentry *binder_binderfs_dir_entry_proc = NULL;
// Allocate the binder_proc structure
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
// Initialize the proc->todo queue
INIT_LIST_HEAD(&proc->todo);
// Record the default priority (the caller's nice value)
proc->default_priority = task_nice(current);
…
// Point at the device's binder_context, which records the ServiceManager (context manager) information and is shared by all processes using this device
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
INIT_LIST_HEAD(&proc->waiting_threads);
// Save proc in the file's private_data so later calls (mmap/ioctl) can retrieve it
filp->private_data = proc;
mutex_lock(&binder_procs_lock);
// Add proc to the global binder_procs hash list
hlist_add_head(&proc->proc_node, &binder_procs);
mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
// Create the file /sys/kernel/debug/binder/proc/<pid>
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
&proc_fops);
}
...
return 0;
}
When a process calls binder_open to open the driver device, the binder kernel creates a corresponding binder_proc and, after initialization, adds it to the global binder_procs hash list (hlist_add_head above).
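For orientation, the binder_proc fields touched in binder_open look roughly like this. This is a trimmed sketch of the structure in the driver sources, not the full definition.
struct binder_proc {
	struct hlist_node proc_node;      /* link in the global binder_procs hash list */
	struct rb_root threads;           /* binder_threads of this process */
	struct rb_root nodes;             /* binder_nodes (local binder objects) */
	struct rb_root refs_by_desc;      /* binder_refs keyed by handle */
	struct list_head todo;            /* pending work for the whole process */
	struct list_head delivered_death; /* death notifications delivered but not confirmed */
	struct list_head waiting_threads; /* idle binder threads waiting for work */
	struct task_struct *tsk;          /* group leader of the owning process */
	int pid;
	int max_threads;                  /* set by BINDER_SET_MAX_THREADS */
	int requested_threads;            /* BR_SPAWN_LOOPER requests currently outstanding */
	int requested_threads_started;
	long default_priority;            /* set from task_nice(current) in binder_open */
	struct binder_context *context;   /* the device's context (ServiceManager info) */
	struct binder_alloc alloc;        /* per-process buffer allocator set up in binder_mmap */
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *debugfs_entry;
};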
mmap: allocating the binder_buffer
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
// Retrieve the binder_proc from filp->private_data
struct binder_proc *proc = filp->private_data;
const char *failure_string;
// Map the area and set up the allocator proc->alloc
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
return 0;
}
vm_struct describes a kernel virtual address range, while vm_area_struct describes a user-space virtual address range.
// binder_alloc.c
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
int ret;
const char *failure_string;
struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock);
// Buffer size: the size of the mapping, capped at 4 MB
alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
// Point alloc->buffer at the start of the user VMA; vma->vm_start is the user-space start address
alloc->buffer = (void __user *)vma->vm_start;
// Allocate the array of page descriptors; no physical pages are allocated for them yet
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, sizeof(alloc->pages[0]), GFP_KERNEL);
// Allocate and create one binder_buffer
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
// Point the binder_buffer's user data address at alloc->buffer, i.e. the start of the user VMA
buffer->user_data = alloc->buffer;
// Add the buffer to this process's alloc->buffers list
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
// Each binder process keeps a red-black tree of free kernel buffers; when binder_mmap maps memory into the process, the newly created free buffer is inserted into that tree
binder_insert_free_buffer(alloc, buffer);
// Half of the total buffer size is reserved for asynchronous transactions
alloc->free_async_space = alloc->buffer_size / 2;
// Record the user-space VMA in the allocator
binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);
return 0;
}
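The state set up here lives in struct binder_alloc and struct binder_buffer. The sketch below is an abbreviated, reconstructed view of the fields the code touches (see binder_alloc.h in the same tree for the real definitions; names may vary slightly between kernel versions).
struct binder_buffer {
	struct list_head entry;        /* entry in alloc->buffers */
	struct rb_node rb_node;        /* entry in the free or allocated buffer rb-tree */
	unsigned free:1;               /* 1 while the buffer sits in the free tree */
	size_t data_size;
	size_t offsets_size;
	void __user *user_data;        /* user-space address of the payload */
};

struct binder_alloc {
	struct vm_area_struct *vma;    /* user VMA recorded by binder_alloc_set_vma() */
	struct mm_struct *vma_vm_mm;
	void __user *buffer;           /* start of the mapped user area (vma->vm_start) */
	struct list_head buffers;      /* all binder_buffers, in address order */
	struct rb_root free_buffers;   /* free buffers, keyed by size */
	struct rb_root allocated_buffers;
	size_t free_async_space;       /* half of buffer_size, reserved for async work */
	struct binder_lru_page *pages; /* one entry per page of the mapping */
	size_t buffer_size;            /* total mapped size, capped at 4 MB */
};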
kmalloc, kzalloc, vmalloc
- kmalloc: allocates kernel memory without zeroing it; the memory obtained is physically contiguous.
- kzalloc: kmalloc plus zero-initialization.
- vmalloc: allocates kernel memory from the virtual range between vmalloc_start and vmalloc_end; what you get is a virtually contiguous address, not necessarily physically contiguous (a short usage sketch follows this list).
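A short kernel-module-style sketch of the difference (illustrative only; the sizes are arbitrary):
#include <linux/slab.h>     /* kmalloc, kzalloc, kfree */
#include <linux/vmalloc.h>  /* vmalloc, vfree */

static void alloc_demo(void)
{
	/* Physically contiguous, contents NOT zeroed */
	char *a = kmalloc(128, GFP_KERNEL);
	/* Physically contiguous, zero-initialized */
	char *b = kzalloc(128, GFP_KERNEL);
	/* Virtually contiguous only; suited to larger allocations */
	char *c = vmalloc(4 * 1024 * 1024);

	kfree(a);
	kfree(b);
	vfree(c);
}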
binder_mmap does not map user space to kernel space
When binder_mmap() is first called, the binder driver only allocates a single buffer for the process, so alloc->buffer and buffer->user_data point to the same address; no user virtual addresses have been mapped to physical pages yet. The real page allocation and mapping only start in binder_transaction.
Reference:
https://segmentfault.com/a/1190000014643994?utm_source=channel-hottest#item-3-5
binder_update_page_range
binder_update_page_range manages the allocation and freeing of the physical pages behind alloc and maps them into the user address space.
// allocate == 1 means allocate and map pages; allocate == 0 means free them
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
void __user *start, void __user *end) {
void __user *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;
if (end <= start)
return 0;
trace_binder_update_page_range(alloc, allocate, start, end);
if (allocate == 0) // free pages
goto free_range;
// Check whether any page in the range still lacks a physical page; if so we will need the mm to map it
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
if (!page->page_ptr) {
need_mm = true;
break;
}
}
if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
mm = alloc->vma_vm_mm;
if (mm) {
down_read(&mm->mmap_sem);
vma = alloc->vma;
}
// Walk every page in the range
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
bool on_lru;
size_t index;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index];
if (page->page_ptr) {
// Already allocated and mapped; just take the page off the LRU
trace_binder_alloc_lru_start(alloc, index);
on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
continue;
}
trace_binder_alloc_page_start(alloc, index);
// Allocate one physical page and store the struct page pointer in page->page_ptr
page->page_ptr = alloc_page(GFP_KERNEL |__GFP_HIGHMEM | __GFP_ZERO);
page->alloc = alloc;
INIT_LIST_HEAD(&page->lru);
user_page_addr = (uintptr_t)page_addr;
// Map the physical page into the user virtual address space
ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (index + 1 > alloc->pages_high)
alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
up_read(&mm->mmap_sem);
mmput(mm);
}
return 0;
free_range: // free pages
for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
bool ret;
size_t index;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index];
trace_binder_free_lru_start(alloc, index);
// Pages are not returned to the system immediately; they go onto the LRU so the shrinker can reclaim them later
ret = list_lru_add(&binder_alloc_lru, &page->lru);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
if (page_addr == start)
break;
}
…
return vma ? -ENOMEM : -ESRCH;
}
Interaction between the process and the driver (ioctl)
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
// Retrieve the binder_proc from the file
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
binder_selftest_alloc(&proc->alloc);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
// Get (or create) the binder_thread for the calling thread
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ:
// Read/write data; everything is wrapped in a binder_write_read structure
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
break;
case BINDER_SET_MAX_THREADS: {
// Set the maximum number of binder threads
int max_threads;
// Copy the maximum thread count from user space
if (copy_from_user(&max_threads, ubuf,
sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
binder_inner_proc_lock(proc);
proc->max_threads = max_threads;
binder_inner_proc_unlock(proc);
break;
}
case BINDER_SET_CONTEXT_MGR_EXT: {
// Register the ServiceManager (context manager); the _EXT variant carries a flat_binder_object
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
break;
}
case BINDER_SET_CONTEXT_MGR:
// Register the ServiceManager (context manager)
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
break;
case BINDER_THREAD_EXIT:
// The current thread is exiting; release it
binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
// Return the binder version information
struct binder_version __user *ver = ubuf;
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
case BINDER_GET_NODE_INFO_FOR_REF: {
// Look up binder_node information for a reference
struct binder_node_info_for_ref info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_node_info_for_ref(proc, &info);
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_GET_NODE_DEBUG_INFO: {
// Get binder_node debug information
struct binder_node_debug_info info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_node_debug_info(proc, &info);
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
…
return ret;
}
Basic ioctl flow
- Get the binder_proc
- Get the binder_thread
- copy_from_user()
- Process the data
- copy_to_user()
ioctl commands
- BINDER_WRITE_READ: read data from and write data to the driver; a single call can both read and write, and the payload is a binder_write_read structure.
- BINDER_SET_MAX_THREADS: set the maximum number of threads in the thread pool; once the limit is reached, the driver no longer asks the application layer to start new threads (see the user-space sketch after this list).
- BINDER_SET_CONTEXT_MGR: set the current process as the manager of the binder system; only the servicemanager process may register.
- BINDER_THREAD_EXIT: tell the driver that the current thread is exiting so it can clean up the data associated with that thread.
- BINDER_VERSION: get the binder version number.
- BC_INCREFS, BC_ACQUIRE, BC_RELEASE, BC_DECREFS: increase or decrease a binder's reference count, used to implement strong and weak references. (Strictly speaking these are BC_* protocol commands carried inside the BINDER_WRITE_READ payload rather than ioctl commands.)
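As a rough user-space illustration of two of these commands, the sketch below assumes an already-opened /dev/binder descriptor as in the earlier sketch; the thread-pool limit of 15 mirrors the usual libbinder default but is only an assumption here.
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Configure the thread pool limit for this process. */
static int set_thread_pool_limit(int fd)
{
	uint32_t max_threads = 15;   /* assumed value; the driver stores it in proc->max_threads */
	return ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
}

/* Tell the driver the calling looper thread is leaving. */
static void leave_looper(int fd)
{
	/* Triggers binder_thread_release() for the calling thread. */
	ioctl(fd, BINDER_THREAD_EXIT, 0);
}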
The binder_write_read command
binder_ioctl_write_read is the most important path: it is how the driver exchanges data with the user process.
binder_write_read is the data structure the kernel driver uses to carry binder data; it is split into a read buffer and a write buffer.
static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
// Copy the user-space binder_write_read into the kernel binder_write_read structure
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
// The caller supplied data in the write buffer
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
}
// The caller wants data read into the read buffer
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
}
…
// Copy bwr back to user space
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
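Seen from user space, the bwr structure that binder_ioctl_write_read copies in and out is filled roughly as below. This is a sketch of the pattern libbinder's talkWithDriver follows; the buffer sizes and the choice of BC_ENTER_LOOPER are illustrative.
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* One round trip through BINDER_WRITE_READ: send BC_ENTER_LOOPER,
 * then let the driver fill read_buf with BR_* work items. */
static int binder_loop_once(int fd)
{
	uint32_t write_buf[1] = { BC_ENTER_LOOPER };
	uint8_t read_buf[256];
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (uintptr_t)write_buf;   /* commands consumed by binder_thread_write */
	bwr.write_size = sizeof(write_buf);
	bwr.write_consumed = 0;
	bwr.read_buffer = (uintptr_t)read_buf;     /* filled by binder_thread_read */
	bwr.read_size = sizeof(read_buf);
	bwr.read_consumed = 0;

	/* Blocks in binder_wait_for_work() until there is work,
	 * unless the fd was opened with O_NONBLOCK. */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}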
binder_thread_write
binder_thread_write handles the data in the write buffer; it represents the sending side of a binder call, the one issuing a request.
Here proc and thread are the process and thread making the call (the sender), not the target.
static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
// Read the next BC_* command code from the user buffer
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
switch (cmd) {
...
case BC_TRANSACTION:
case BC_REPLY: {
…
}
}
}
return 0;
}
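The write buffer that binder_thread_write walks is a stream of 32-bit BC_* codes, each followed by its payload; for BC_TRANSACTION the payload is a struct binder_transaction_data. A hedged sketch of how a sender could lay that out (the handle, transaction code and payload are placeholders):
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* Pack [BC_TRANSACTION][struct binder_transaction_data] into out_buf.
 * Returns the number of bytes written, or 0 if out_buf is too small. */
static size_t pack_transaction(uint8_t *out_buf, size_t out_len,
			       uint32_t handle, const void *data, size_t data_len)
{
	uint32_t cmd = BC_TRANSACTION;
	struct binder_transaction_data tr;

	if (out_len < sizeof(cmd) + sizeof(tr))
		return 0;

	memset(&tr, 0, sizeof(tr));
	tr.target.handle = handle;              /* which remote binder to call */
	tr.code = 1;                            /* placeholder transaction code */
	tr.flags = 0;                           /* 0 = synchronous call */
	tr.data_size = data_len;
	tr.data.ptr.buffer = (uintptr_t)data;   /* payload read by the driver via copy_from_user */

	memcpy(out_buf, &cmd, sizeof(cmd));
	memcpy(out_buf + sizeof(cmd), &tr, sizeof(tr));
	return sizeof(cmd) + sizeof(tr);
}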
binder_thread_read
binder_thread_read handles the data in the read buffer; it represents the receiving side of a binder call, which has received a request and may need to write a reply back.
static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size,binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
retry:
binder_inner_proc_lock(proc);
// Check whether the thread has nothing of its own to do (its transaction stack and todo queue are both empty) and can therefore wait for process-wide work
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
binder_inner_proc_unlock(proc);
// The thread is about to sleep and wait; mark its state with BINDER_LOOPER_STATE_WAITING
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (non_block) {
if (!binder_has_work(thread, wait_for_proc_work))
ret = -EAGAIN;
} else {
// No pending work for this thread; block until there is
ret = binder_wait_for_work(thread, wait_for_proc_work);
}
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
while (1) {
uint32_t cmd;
struct binder_transaction_data_secctx tr;
struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
// … w is dequeued from the thread's or the process's todo list (elided here)
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
...
} break;
case BINDER_WORK_RETURN_ERROR: {
...
} break;
…
}
done:
*consumed = ptr - buffer;
// When these conditions hold, the driver asks user space, via BR_SPAWN_LOOPER, to spawn one more binder thread
if (proc->requested_threads == 0 && list_empty(&thread->proc->waiting_threads) && proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) {
proc->requested_threads++;
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
}
}
return 0;
}
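On the receiving side, the read buffer that binder_thread_read fills is likewise a stream of 32-bit BR_* codes followed by their payloads. The sketch below shows how user space (in the spirit of IPCThreadState) might walk it; only a few return codes are handled and anything else stops the loop.
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <linux/android/binder.h>

/* Walk the BR_* stream the driver wrote into read_buf (read_consumed bytes). */
static void handle_read_buffer(const uint8_t *read_buf, size_t read_consumed)
{
	size_t pos = 0;

	while (pos + sizeof(uint32_t) <= read_consumed) {
		uint32_t cmd;
		memcpy(&cmd, read_buf + pos, sizeof(cmd));
		pos += sizeof(cmd);

		switch (cmd) {
		case BR_NOOP:
			break;                       /* nothing to do */
		case BR_SPAWN_LOOPER:
			/* The driver hit the condition shown above: spawn one more
			 * binder thread, which then registers with BC_REGISTER_LOOPER. */
			printf("driver asked for another looper thread\n");
			break;
		case BR_TRANSACTION: {
			struct binder_transaction_data tr;
			if (pos + sizeof(tr) > read_consumed)
				return;
			memcpy(&tr, read_buf + pos, sizeof(tr));
			pos += sizeof(tr);
			printf("incoming transaction, code=%u\n", tr.code);
			break;
		}
		default:
			return;                      /* unhandled command: stop parsing in this sketch */
		}
	}
}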
binder_wait_for_work
This is where a thread waits in kernel space for work and gets woken up; the corresponding user-space thread is blocked inside the ioctl call.
// do_proc_work: whether this thread is also allowed to wait for process-wide work (proc->todo)
static int binder_wait_for_work(struct binder_thread *thread, bool do_proc_work)
{
DEFINE_WAIT(wait);
struct binder_proc *proc = thread->proc;
int ret = 0;
freezer_do_not_count();
binder_inner_proc_lock(proc);
for (;;) {
prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
if (binder_has_work_ilocked(thread, do_proc_work))
break;
if (do_proc_work)
list_add(&thread->waiting_thread_node, &proc->waiting_threads);
binder_inner_proc_unlock(proc);
schedule();
binder_inner_proc_lock(proc);
list_del_init(&thread->waiting_thread_node);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
finish_wait(&thread->wait, &wait);
binder_inner_proc_unlock(proc);
freezer_count();
return ret;
}
How Linux puts a thread to sleep manually:
- Create and initialize a wait queue entry: DEFINE_WAIT(my_wait) <==> wait_queue_t my_wait; init_wait(&my_wait);
- Add the wait queue entry to a wait queue head and set the task state: prepare_to_wait(wait_queue_head_t *queue, wait_queue_t *wait, int state)
- Call schedule() to tell the kernel to run something else
- When schedule() returns, finish the cleanup with finish_wait() (a generic sketch of this pattern follows the list)
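A minimal, generic kernel-side sketch of that sleep pattern follows; the wait queue head and the condition callback are hypothetical, and newer kernels spell the entry type wait_queue_entry_t.
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>

/* Hypothetical example: sleep on my_wait_queue until my_condition() is true. */
static DECLARE_WAIT_QUEUE_HEAD(my_wait_queue);

static int wait_for_condition(bool (*my_condition)(void))
{
	DEFINE_WAIT(wait);                  /* 1. create and initialize a wait queue entry */
	int ret = 0;

	for (;;) {
		/* 2. add the entry to the queue head and mark the task interruptible */
		prepare_to_wait(&my_wait_queue, &wait, TASK_INTERRUPTIBLE);
		if (my_condition())
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();                 /* 3. let the kernel run something else */
	}
	finish_wait(&my_wait_queue, &wait); /* 4. clean up, restore TASK_RUNNING */
	return ret;
}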