Binder Mechanism - addService Service Registration (2): Handling the Request

Server side


From here on, we move over to the other end, ServiceManager (SM).

1 The ServiceManager side is woken up (kernel)

In the previous article on the client issuing the registration request, the flow ended with wake_up_interruptible(target_wait) waking the target process, ServiceManager. We pick up from the point where ServiceManager sits blocked inside binder_thread_read.
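
For reference, the wait it was parked on looks roughly like this (a simplified sketch of the classic single-lock binder.c read path; details vary across kernel versions):

C
// Simplified sketch of where binder_thread_read was blocked (not verbatim driver source)
if (wait_for_proc_work)
    // an idle looper thread with no thread-local work waits on the process-wide queue
    ret = wait_event_freezable_exclusive(proc->wait,
                binder_has_proc_work(proc, thread));
else
    // a thread with a pending transaction stack waits on its own queue
    ret = wait_event_freezable(thread->wait,
                binder_has_thread_work(thread));
// the client side's wake_up_interruptible(target_wait) makes this wait return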

binder_thread_read

C
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    ... // woken up here; execution continues below

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        // 1. fetch a pending work item
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            w = list_first_entry(&proc->todo, struct binder_work,
                         entry);
        } else {
            /* no data added */
            if (ptr - buffer == 4 &&
                !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
            case BINDER_WORK_TRANSACTION: {
                // 1. convert the binder_work into its containing binder_transaction
                t = container_of(w, struct binder_transaction, work);
            } break;
            ...
        }

        if (!t)
            continue;

        if (t->buffer->target_node) {
            // the target_node held by the transaction's buffer is the peer of the initiating end; in this case it is SM's own binder node
            struct binder_node *target_node = t->buffer->target_node;
            // 3. copy the transaction's contents into the binder_transaction_data struct
            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            // 2. set the thread priority
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            // the command: BR_TRANSACTION
            cmd = BR_TRANSACTION;
        } else {
            ...
        }

        // 3. fill in more of the binder_transaction_data struct
        tr.code = t->code;    // ADD_SERVICE_TRANSACTION
        tr.flags = t->flags;  // TF_ACCEPT_FDS
        // uid
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
        // pid
        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }

        // 3. the data buffer, the offsets array, and related fields
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (binder_uintptr_t)(
                    (uintptr_t)t->buffer->data +
                    proc->user_buffer_offset);
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        // 4. write the BR_TRANSACTION command to user space
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        // copy the packaged data to user space
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);

        binder_stat_br(proc, thread, cmd);
        ...
        // remove the work item from the todo list
        list_del(&t->work.entry);
        // 5. this flag lets the target thread free the kernel buffer from user space via BC_FREE_BUFFER
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            // 6. push t onto the SM thread's transaction stack
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            // if one-way (async), free the kernel memory occupied by t
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }

done:

    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        ...
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    }
    return 0;
}
  1. Take a pending work item binder_work w from the todo list of SM's binder thread and convert it into the containing binder_transaction t.
  2. Adjust the target thread's priority to track the initiating thread's (bounded by the node's min_priority); note that the original priority is saved first so it can be restored once the request has been handled.
  3. Copy the contents of transaction t into the binder_transaction_data struct tr, including:
    • the ADD_SERVICE_TRANSACTION code
    • the TF_ACCEPT_FDS flag
    • the sender's user id and thread-group pid, so the target thread (SM) can identify the initiator and perform security and validity checks
    • the addresses of tr's data buffer and offsets array, pointed at t's data buffer and offsets array; since they are handed to user space, the user/kernel offset (proc->user_buffer_offset) is added to obtain the corresponding user-space addresses.
  4. Write the BR_TRANSACTION command to user space (see the sketch after this list).
  5. Set allow_user_free = 1, meaning the target thread may release this kernel buffer from user space with BC_FREE_BUFFER.
  6. Push t onto the target (SM) thread's transaction stack.
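
To make step 4 concrete, here is a minimal sketch (the struct name is ours, not from the source) of what now sits in ServiceManager's read buffer, and which binder_parse below will walk through:

C
/* Illustrative only: the layout written by put_user() + copy_to_user() above */
struct br_transaction_packet {
    uint32_t cmd;                       /* BR_TRANSACTION */
    struct binder_transaction_data tr;  /* code, flags, sender ids, data pointers */
};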

With this done, the kernel-side code has finished and execution moves into user space, back to the wait-for-work loop from the earlier article on the SM startup flow:

2 Back in ServiceManager (user)

binder_loop

C++
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        // >>> we are here now...
        ...

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        ...
    }
}

When ioctl returns, binder_parse runs next.

2.1 binder_parse: reading the data passed from the kernel

binder_parse

C++
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        // read the binder command: BR_TRANSACTION
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);

        switch(cmd) {
            ...
            case BR_TRANSACTION: {
                // get the binder_transaction_data payload
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                ...
                if (func) {
                    unsigned rdata[256/4];
                    struct binder_io msg;
                    struct binder_io reply;
                    int res;
                    // initialize a binder_io reply to hold the response data
                    bio_init(&reply, rdata, sizeof(rdata), 4);
                    // fill msg from txn, i.e. msg receives txn's data
                    bio_init_from_txn(&msg, txn);
                    // func == svcmgr_handler
                    res = func(bs, txn, &msg, &reply);
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
                ptr += sizeof(*txn);
                break;
            }
            ...
    }

    return r;
}

The binder command read out is BR_TRANSACTION. Handling it means extracting the incoming data, then initializing a binder_io reply to hold the response and a binder_io msg to receive the incoming data.

Below are the binder_io struct and the functions used to initialize the two instances:

C
struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */

    char *data0;           /* start of data buffer */
    binder_size_t *offs0;  /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};

void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);

    ...
    bio->data = bio->data0 = (char *) data + n;
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}

void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;
}

When bio_init_from_txn wraps the incoming data in a binder_io, it sets the BIO_F_SHARED flag:

C
#define BIO_F_SHARED    0x01  /* needs to be buffer freed */
#define BIO_F_OVERFLOW  0x02  /* ran out of space */
#define BIO_F_IOERROR   0x04
#define BIO_F_MALLOCED  0x08  /* needs to be free()'d */

BIO_F_SHARED: the binder_io's internal data buffer is a kernel buffer allocated in kernel space, shared into the process and accessed through a user-space address. When the process is done with it, it must issue the BC_FREE_BUFFER command to tell the Binder driver to release the corresponding kernel buffer.

BIO_F_MALLOCED: the binder_io's internal data buffer was allocated with malloc, i.e. it points to a buffer in the process's own user space; when done, simply free() it.

BIO_F_OVERFLOW and BIO_F_IOERROR are error flags: BIO_F_OVERFLOW means overflow, i.e. more data was read out of the binder_io than its internal buffer holds; BIO_F_IOERROR means an I/O error occurred while reading from the binder_io.
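
As a concrete consumer of BIO_F_SHARED, servicemanager's binder.c has a helper along the following lines (reproduced from memory, so treat it as a sketch): when a binder_io's buffer is shared kernel memory, it is released with BC_FREE_BUFFER:

C
void binder_done(struct binder_state *bs,
                 struct binder_io *msg,
                 struct binder_io *reply)
{
    struct {
        uint32_t cmd;
        binder_uintptr_t buffer;
    } __attribute__((packed)) data;

    if (reply->flags & BIO_F_SHARED) {
        // the reply's buffer is a kernel buffer: ask the driver to free it
        data.cmd = BC_FREE_BUFFER;
        data.buffer = (uintptr_t) reply->data0;
        binder_write(bs, &data, sizeof(data));
        reply->flags = 0;
    }
}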

Continuing on: as the earlier article on SM startup showed, func is svcmgr_handler.

2.1.1 SVC_MGR_ADD_SERVICE

C
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    ...
    // 1. read out STRICT_MODE_PENALTY_GATHER
    strict_policy = bio_get_uint32(msg);
    // 2. read out "android.os.IServiceManager"
    s = bio_get_string16(msg, &len);
    ...
    // validity check: is it "android.os.IServiceManager"?
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }


    switch(txn->code) {
        ...
        case SVC_MGR_ADD_SERVICE:
            // 3. read out "media.player"
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            // 4. read out the handle of the binder reference the driver created for MediaPlayerService
            handle = bio_get_ref(msg);
            // 5. read out allow_isolated
            allow_isolated = bio_get_uint32(msg) ? 1 : 0;
            // 6
            if (do_add_service(bs, s, len, handle, txn->sender_euid,
                allow_isolated, txn->sender_pid))
                return -1;
            break;

        ...
    }

    bio_put_uint32(reply, 0);
    return 0;
}

As we can see, svcmgr_handler essentially reads out the entire buffer content and processes it:

1. Read out STRICT_MODE_PENALTY_GATHER.

2.取出 "android.os.IServiceManager" 检查请求的合法性:

  • The length check is len != (sizeof(svcmgr_id) / 2); svcmgr_id is a uint16_t array, so its byte size is divided by 2 before comparing against the character count (see the sketch after this list);
  • the content of svcmgr_id is android.os.IServiceManager

3. Read out the name of the service to register, "media.player".

4. Read out the handle of the binder reference the driver created for the Service component being registered.

5. Read out allowIsolated.

6. Call do_add_service to perform the actual service registration.
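
For reference, svcmgr_id is declared in service_manager.c roughly as follows (reproduced from memory as a sketch); it being a uint16_t array is exactly why the character count is sizeof(svcmgr_id) / 2:

C
// 16-bit characters: sizeof(svcmgr_id) / 2 == number of characters
uint16_t svcmgr_id[] = {
    'a','n','d','r','o','i','d','.','o','s','.',
    'I','S','e','r','v','i','c','e','M','a','n','a','g','e','r'
};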

do_add_service

C
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    if (!handle || (len == 0) || (len > 127))
        return -1;

    // 1. validity check: may this caller register?
    if (!svc_can_register(s, len, spid, uid)) {
        ...
    }

    // 2. look the service up by "media.player" to see whether it is already registered
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else { // 3. not registered yet
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        ...
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;
        svclist = si;
    }

    // 4. BC_ACQUIRE
    binder_acquire(bs, handle);
    // 5. register a death notification for the binder object, so SM can react when this service dies
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}

Parameters: s is the name of the Service being registered, "media.player"; uid is the user id of the requesting process.

1. Validity check.

2. Look the service up by name to determine whether it is already registered:

C
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}
  • If it was already registered, assign the new handle to the previously saved svcinfo struct.
  • If not, build an svcinfo struct, fill it in, and add it to the global svclist that stores all svcinfo entries.

3. Not registered yet: wrap the relevant data (most importantly the service name and the handle) in an svcinfo struct and store it in the svclist list (the struct is sketched below).
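
For reference, svcinfo is defined in service_manager.c roughly as follows (reproduced from memory; treat it as a sketch):

C
struct svcinfo
{
    struct svcinfo *next;       // linked into the global svclist
    uint32_t handle;            // handle of the service's binder reference
    struct binder_death death;  // death-notification callback and cookie
    int allow_isolated;         // whether isolated processes may get this service
    size_t len;                 // name length, in uint16_t characters
    uint16_t name[0];           // UTF-16 service name, e.g. "media.player"
};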

4. binder_acquire sends the acquire command for the handle:

C
void binder_acquire(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_ACQUIRE;
    cmd[1] = target;
    binder_write(bs, cmd, sizeof(cmd));
}
  • This issues the BC_ACQUIRE command to the driver, which in effect increments the reference count of the corresponding binder reference object inside the driver.

5. Register to receive a death notification for the binder object, so SM can react appropriately when that service dies (helper sketched below).
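
binder_link_to_death is another small write helper; sketched from memory, it packs a BC_REQUEST_DEATH_NOTIFICATION command carrying the handle plus the svcinfo's death record as a cookie:

C
void binder_link_to_death(struct binder_state *bs, uint32_t target,
                          struct binder_death *death)
{
    struct {
        uint32_t cmd;
        struct binder_handle_cookie payload;
    } __attribute__((packed)) data;

    data.cmd = BC_REQUEST_DEATH_NOTIFICATION;
    data.payload.handle = target;             // which reference to watch
    data.payload.cookie = (uintptr_t) death;  // handed back in the death notification
    binder_write(bs, &data, sizeof(data));
}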

The main job of SVC_MGR_ADD_SERVICE is to store an svcinfo struct describing the service into the svclist container; its key fields are the service name and the handle value.

Registration with SM is now complete. Back in svcmgr_handler, execution continues to bio_put_uint32(reply, 0), which begins assembling the response data in reply. Once svcmgr_handler returns, control is back in binder_parse, which proceeds to binder_send_reply.

2.1.2 binder_send_reply: sending the response

binder_send_reply

C
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) { // status is an error code; non-zero means SM's processing above failed, and this branch is taken
        ...
    } else { // pack the data into the data struct
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}

Parameters: reply (the 2nd) holds the IPC result data to return to the initiator; buffer_to_free (the 3rd) is a user-space address pointing to the kernel buffer that carried the incoming IPC data; status (the 4th) describes whether SM handled the request successfully.

1. Pack all of the data into data.

  • The function defines a packed anonymous-struct variable data describing the two commands BC_FREE_BUFFER and BC_REPLY, held in cmd_free and cmd_reply, along with the payload of each command.

2. Call binder_write to write it to the driver.

Initiating communication with the driver (kernel)

The binder_write function:

C
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

It wraps the data in a binder_write_read struct and calls ioctl.

The path ioctl --> binder_ioctl --> binder_ioctl_write_read --> binder_thread_write was covered in earlier articles (a simplified sketch of the dispatch follows), so we go straight to how binder_thread_write handles the two commands, BC_FREE_BUFFER and BC_REPLY:
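
As a refresher, the dispatch inside binder_ioctl_write_read boils down to the following (simplified sketch; locking, error handling and tracing elided):

C
// Simplified sketch of binder_ioctl_write_read (not verbatim kernel source)
struct binder_write_read bwr;

if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
    return -EFAULT;
if (bwr.write_size > 0)        // here: our packed BC_FREE_BUFFER + BC_REPLY data
    binder_thread_write(proc, thread, bwr.write_buffer,
                        bwr.write_size, &bwr.write_consumed);
if (bwr.read_size > 0)         // zero in this binder_write() call, so skipped
    binder_thread_read(proc, thread, bwr.read_buffer,
                       bwr.read_size, &bwr.read_consumed,
                       filp->f_flags & O_NONBLOCK);
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
    return -EFAULT;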

BC_FREE_BUFFER
C
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        // fetch the command: BC_FREE_BUFFER
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        ...
        switch (cmd) {
            ...

            case BC_FREE_BUFFER: {
                binder_uintptr_t data_ptr;
                struct binder_buffer *buffer;
                // fetch the payload: the user-space address of the kernel buffer to be freed
                if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(binder_uintptr_t);

                // find the kernel buffer corresponding to the user-space address
                buffer = binder_buffer_lookup(proc, data_ptr);
                ...
                // detach the transaction from the buffer
                if (buffer->transaction) {
                    buffer->transaction->buffer = NULL;
                    buffer->transaction = NULL;
                }
                if (buffer->async_transaction && buffer->target_node) { // async handling
                    if (list_empty(&buffer->target_node->async_todo))
                        buffer->target_node->has_async_transaction = 0;
                    else
                        list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
                }
                // decrement the refcounts of the binder nodes/references carried in the buffer
                binder_transaction_buffer_release(proc, buffer, NULL);
                // free the kernel buffer
                binder_free_buf(proc, buffer);
                break;

            }
            ...
        }
        *consumed = ptr - buffer;
    }
    return 0;
}

Handling BC_FREE_BUFFER: read the user-space address of the kernel buffer to be released, find the corresponding kernel buffer from that address, decrement the reference counts of the binder nodes and references carried inside it, and free the buffer.
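
On the user-space side, the matching minimal write is just the command followed by the address; later versions of servicemanager's binder.c carry a helper to this effect (sketch from memory):

C
void binder_free_buffer(struct binder_state *bs,
                        binder_uintptr_t buffer_to_free)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;  // user-space address of the kernel buffer
    binder_write(bs, &data, sizeof(data));
}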

binder_transaction_buffer_release decrements the refcounts of the binder objects inside the buffer:

C
static void binder_transaction_buffer_release(struct binder_proc *proc,
                          struct binder_buffer *buffer,
                          binder_size_t *failed_at)
{
    binder_size_t *offp, *off_start, *off_end;
    int debug_id = buffer->debug_id;

    // check whether this kernel buffer was allocated for a binder node, i.e. whether its target_node member is set
    if (buffer->target_node)
        // if so, decrement the node's refcount
        binder_dec_node(buffer->target_node, 1, 0);
    // locate the start of the offsets array
    off_start = (binder_size_t *)(buffer->data +
                      ALIGN(buffer->data_size, sizeof(void *)));
    if (failed_at)
        off_end = failed_at;
    else
        off_end = (void *)off_start + buffer->offsets_size;
    // iterate over the binder objects in the buffer and decrement their refcounts
    for (offp = off_start; offp < off_end; offp++) {
        struct binder_object_header *hdr;
        size_t object_size = binder_validate_object(buffer, *offp);

        ...
        hdr = (struct binder_object_header *)(buffer->data + *offp);
        switch (hdr->type) {
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
                struct flat_binder_object *fp;
                struct binder_node *node;

                fp = to_flat_binder_object(hdr);
                node = binder_get_node(proc, fp->binder);
                ...
                binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
                        0);
            } break;
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE: {
                struct flat_binder_object *fp;
                struct binder_ref *ref;

                fp = to_flat_binder_object(hdr);
                ref = binder_get_ref(proc, fp->handle,
                             hdr->type == BINDER_TYPE_HANDLE);

                ...
                binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
            } break;
            ...
        }
    }
}

That completes the BC_FREE_BUFFER command. Back in the binder_thread_write loop, the next command fetched is BC_REPLY:

BC_REPLY

binder_thread_write

C
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }

binder_transaction:

C
static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    binder_size_t *offp, *off_end;
    binder_size_t off_min;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error;

    ...
    // 1
    if (reply) {
        // find the peer (client) process via the transaction stack
        // in section 1, binder_thread_read pushed the received transaction onto thread->transaction_stack
        // that transaction is the one the client created for SM when it issued the request
        in_reply_to = thread->transaction_stack;
        ...
        // 2. restore the priority
        binder_set_nice(in_reply_to->saved_priority);
        ...
        thread->transaction_stack = in_reply_to->to_parent;

        // 3. get the target thread, i.e. the client that issued the request; in this article, MediaPlayerService
        // 'from' was assigned in binder_transaction() when MediaPlayerService issued the request
        target_thread = in_reply_to->from;
        ...
        // the client process that initiated the communication
        target_proc = target_thread->proc;
    } else {
        ...
    }
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        ...
    }
    e->to_proc = target_proc->pid;

    // 4
    // allocate a pending transaction t (destined for SM's peer: the client)
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    if (t == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_t_failed;
    }
    
     // allocate a pending-completion work item tcomplete (to be handled by SM itself)
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    ...
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    t->debug_id = ++binder_last_id;
    e->debug_id = t->debug_id;

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        // this branch is taken
        t->from = NULL;
    // the following assignments initialize transaction t
    t->sender_euid = proc->tsk->cred->euid;
    // the transaction will be handled by the target_proc process
    t->to_proc = target_proc;
    // the transaction will be handled by the target_thread thread
    t->to_thread = target_thread;
    // transaction code
    t->code = tr->code;
    // transaction flags
    t->flags = tr->flags;
    // transaction priority
    t->priority = task_nice(current);

    // allocate buffer space
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    if (t->buffer == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_binder_alloc_buf_failed;
    }
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    // record the owning transaction
    t->buffer->transaction = t;
    t->buffer->target_node = target_node; // null
    trace_binder_transaction_alloc_buf(t->buffer);
    if (target_node) // null
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (binder_size_t *)(t->buffer->data +
                 ALIGN(tr->data_size, sizeof(void *)));

    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size)) {
        ...
    }
    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) {
        ...
    }
    ...
    off_end = (void *)offp + tr->offsets_size;
    off_min = 0;
    for (; offp < off_end; offp++) {
        ...
    }
    if (reply) {
        BUG_ON(t->buffer->async_transaction != 0);
        // 5. pop in_reply_to off the transaction stack of SM's peer thread
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        ...
    } else {
        ...
    }
    // set the work type to BINDER_WORK_TRANSACTION
    t->work.type = BINDER_WORK_TRANSACTION;
    // add the transaction to target_list, i.e. the target's pending-work queue
    list_add_tail(&t->work.entry, target_list);
    // set the completion work's type to BINDER_WORK_TRANSACTION_COMPLETE
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    // add the completion work to thread->todo, the current thread's pending-work queue
    list_add_tail(&tcomplete->entry, &thread->todo);
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ...
}
  1. With reply true, the reply branch is taken. This branch does not assign target_node: after this communication no further reply is expected from the client, so the IPC result data needs no target binder node. Later, when the client receives the work item, it uses this very fact to recognize the item as a reply and handle it accordingly.

  2. Restore the thread priority: the request flow earlier raised the thread's priority after saving the original, and here it is restored to the saved value.

  3. Get the source binder thread: while sending the request, the client's binder_transaction was pushed onto the target thread's transaction stack, so SM's binder thread can now read its own stack to find the initiator's binder thread.

  4. Create the work items t and tcomplete, mark them as BINDER_WORK_TRANSACTION and BINDER_WORK_TRANSACTION_COMPLETE respectively, and queue them on the target's (the original initiator's) todo list and on SM's own todo list.

  5. From the initiating client's perspective, the transaction it issued has been completed by SM, and SM's reply is about to complete as well; the transaction no longer needs to exist and is popped via binder_pop_transaction:

C
static void binder_pop_transaction(struct binder_thread *target_thread,
                   struct binder_transaction *t)
{
    if (target_thread) {
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
            target_thread->transaction_stack->from_parent;
        t->from = NULL;
    }
    t->need_reply = 0;
    if (t->buffer)
        t->buffer->transaction = NULL;
    kfree(t);
    binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

Summary: binder_transaction created two work items: a BINDER_WORK_TRANSACTION_COMPLETE item for SM itself and a BINDER_WORK_TRANSACTION item for the client side (Media), then woke the client to process it.

2.1.3 BR_TRANSACTION_COMPLETE

SM's BINDER_WORK_TRANSACTION_COMPLETE item, just as in the earlier analysis, is how the binder driver tells SM that the previous command has been processed. SM reads it out as the BR_TRANSACTION_COMPLETE command, which requires essentially no action. SM returns to the loop in binder_loop, issues the next ioctl, re-enters binder_thread_read, and goes back to sleep.
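
Indeed, in servicemanager's binder_parse the handling amounts to an empty case (a sketch of the relevant cases):

C
switch(cmd) {
    case BR_NOOP:
        break;
    case BR_TRANSACTION_COMPLETE:
        // the driver acknowledged our previous BC_ command; nothing more to do
        break;
    ...
}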

The client side is woken up (kernel)

Over on the client side, the client wakes at the wait inside binder_thread_read and processes the BINDER_WORK_TRANSACTION item.

Waking up and handing the received data to user space

binder_thread_read

C
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    ...
    // >>> after waking up...
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        // 1. take a pending work item from the todo list
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            w = list_first_entry(&proc->todo, struct binder_work,
                         entry);
        } else {
            ...
        }
        ...
        switch (w->type) {
            case BINDER_WORK_TRANSACTION: {
                t = container_of(w, struct binder_transaction, work);
            } break;
            ...
        }

        if (!t)
            continue;
        // 2. as explained when the server built the reply: a reply does not set target_node, so the else branch is taken
        if (t->buffer->target_node) {
            ...
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY; // the resulting command: BR_REPLY
        }
        // 3. pack the work item's payload into tr, ready to hand to user space
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (binder_uintptr_t)(
                    (uintptr_t)t->buffer->data +
                    proc->user_buffer_offset);
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        // 4. write the command to user space
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        // 4. copy the payload to user space
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);

        ...
        // 5. remove the processed work item
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            ...
        } else { // BR_REPLY
            // 6. free t
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }

done:
    // update bwr.read_consumed
    *consumed = ptr - buffer;
    ...
    return 0;
}
  1. Take a work item from the todo queue and handle case BINDER_WORK_TRANSACTION.

  2. Determine the command, BR_REPLY: in the server-to-client reply flow described above, the binder_transaction function that wrote into the kernel does not assign t->buffer->target_node on the reply branch; that acts as a marker. Here, a NULL t->buffer->target_node is the condition for setting cmd = BR_REPLY.

  3. Pack the data carried by the binder_transaction into a binder_transaction_data, ready to pass to user space.

  4. Write the BR_REPLY command to user space and copy the binder_transaction_data to user space.

  5. Delete the processed transaction from the queue and set allow_user_free, meaning the receiving end may release the buffer with BC_FREE_BUFFER.

  6. Since the target finishes after handling BR_REPLY and no further result will come back, binder_transaction t can be freed now. Note that the kernel buffer itself is not freed here; the receiving end must send BC_FREE_BUFFER when done so the driver can release it.

Back in user space (user): handling BR_REPLY

waitForResponse

C++
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        // execution comes out here
        ...
        
        cmd = (uint32_t)mIn.readInt32();
        
        ...
        switch (cmd) {
        ...
        
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ...

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // fill the data into reply
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        ...
                    }
                } else {
                    ...
                }
            }
            goto finish;

        ...
    }

    ...    
    return err;
}

ipcSetDataReference puts the data into the Parcel reply:

C++
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    // free the memory held by this Parcel's current internal data buffer
    freeDataNoInit();
    mError = NO_ERROR;
    // then re-initialize the Parcel's internal data buffer
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    mDataPos = 0;
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    // the function pointer relFunc points to freeBuffer
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

Back in transact

The reply passed to transact(mHandle, code, data, reply, flags) has now been filled, and its contents can be read and processed. For the addService case in this article, no reply payload is needed and nothing more is done, so the flow ends here.
