Let's take ServiceManager, one of the core services of the system, as an example to see what the startup flow looks like.
First, what does ServiceManager's inheritance hierarchy look like?
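The class diagram is not reproduced here, but as a rough sketch (based on the Android 11 native servicemanager; details vary by release), ServiceManager is an ordinary Binder service class: it derives from the AIDL-generated BnServiceManager and from IBinder::DeathRecipient so it can notice when registered services die:

```cpp
// frameworks/native/cmds/servicemanager/ServiceManager.h (abridged sketch)
class ServiceManager : public os::BnServiceManager, public IBinder::DeathRecipient {
public:
    ServiceManager(std::unique_ptr<Access>&& access);

    // The IServiceManager interface generated from the AIDL file.
    binder::Status getService(const std::string& name, sp<IBinder>* outBinder) override;
    binder::Status checkService(const std::string& name, sp<IBinder>* outBinder) override;
    binder::Status addService(const std::string& name, const sp<IBinder>& binder,
                              bool allowIsolated, int32_t dumpPriority) override;

    // Called when a registered service dies, so it can be removed from the map.
    void binderDied(const wp<IBinder>& who) override;
};
```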
ServiceManager runs as a standalone process. It is created even earlier than Zygote, and it is started from its init rc script.
The rc file records the service entry related to ServiceManager:
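The original rc snippet is not reproduced above; roughly (the exact lines differ between Android versions), servicemanager.rc looks like this:

```
service servicemanager /system/bin/servicemanager
    class core animation
    user system
    group system readproc
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart audioserver
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm
    writepid /dev/cpuset/system-background/tasks
    shutdown critical
```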
The file above shows that ServiceManager is a core system service, and that if ServiceManager restarts, system services such as healthd and zygote are restarted along with it.
The overall processing flow of ServiceManager
ServiceManager's initialization flow:
```cpp
int main(int argc, char** argv) {
    // Per the rc file above, argc == 1 and argv[0] == "/system/bin/servicemanager"
    if (argc > 2) {
        LOG(FATAL) << "usage: " << argv[0] << " [binder driver]";
    }
    // So the binder driver to use here is /dev/binder
    const char* driver = argc == 2 ? argv[1] : "/dev/binder";

    // (1) Initialize the Binder driver
    sp<ProcessState> ps = ProcessState::initWithDriver(driver);
    ps->setThreadPoolMaxThreadCount(0);
    ps->setCallRestriction(ProcessState::CallRestriction::FATAL_IF_NOT_ONEWAY);

    // Instantiate ServiceManager
    sp<ServiceManager> manager = new ServiceManager(std::make_unique<Access>());
    // Register itself as a service
    if (!manager->addService("manager", manager, false /*allowIsolated*/, IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk()) {
        LOG(ERROR) << "Could not self register servicemanager";
    }

    // (2) Store the server-side BBinder object
    IPCThreadState::self()->setTheContextObject(manager);
    // Become the binder driver's context manager, i.e. the manager of the binder context
    ps->becomeContextManager(nullptr, nullptr);

    // Handle binder transactions through the Looper epoll mechanism
    sp<Looper> looper = Looper::prepare(false /*allowNonCallbacks*/);
    BinderCallback::setupTo(looper);
    // (3) Listen for data changes from the binder driver
    ClientCallbackCallback::setupTo(looper, manager);

    while(true) {
        looper->pollAll(-1);
    }

    // should not be reached
    return EXIT_FAILURE;
}
```
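Step (1), ProcessState::initWithDriver(), is where the process actually opens and maps the Binder driver. A simplified sketch of what the ProcessState constructor does (based on frameworks/native/libs/binder/ProcessState.cpp; error handling is omitted and the exact code varies by release):

```cpp
// Simplified sketch of ProcessState's driver setup.
static int open_driver(const char* driver) {
    // Open the binder device, e.g. /dev/binder.
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        // Check that kernel and userspace agree on the binder protocol version.
        int vers = 0;
        ioctl(fd, BINDER_VERSION, &vers);
        // Tell the driver how many binder threads this process may spawn.
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
    }
    return fd;
}

ProcessState::ProcessState(const char* driver)
    : mDriverName(String8(driver)), mDriverFD(open_driver(driver)) /* ... */ {
    if (mDriverFD >= 0) {
        // Map a chunk of address space (~1MB) into which the driver copies
        // incoming transaction data for this process.
        mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ,
                        MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
    }
}
```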
In fact, every process initializes the Binder driver through ProcessState in exactly this way. As for IPCThreadState, its internal initialization code does the following:
```cpp
// IPCThreadState is a per-thread singleton.
// TLS stands for Thread Local Storage, the per-thread storage area --
// conceptually the same thing as ThreadLocal in Java.
IPCThreadState* IPCThreadState::self()
{
    // Fast path: the TLS key has already been created.
    if (gHaveTLS.load(std::memory_order_acquire)) {
restart:
        const pthread_key_t k = gTLS;
        // Try to fetch the instance stored in this thread's local storage.
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        // Nothing stored for this thread yet: create an instance
        // (the constructor registers it via pthread_setspecific).
        return new IPCThreadState;
    }

    // After IPCThreadState has shut down, self() must not be used any more.
    // Racey, heuristic test for simultaneous shutdown.
    if (gShutdown.load(std::memory_order_relaxed)) {
        ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
        return nullptr;
    }

    // On the very first call gHaveTLS is false, so we end up here and create the key.
    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS.load(std::memory_order_relaxed)) {
        // Create the key under which each thread stores its own IPCThreadState.
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
                    strerror(key_create_value));
            return nullptr;
        }
        // Key created: mark gHaveTLS as true.
        gHaveTLS.store(true, std::memory_order_release);
    }
    pthread_mutex_unlock(&gTLSMutex);
    // Jump back to the gHaveTLS == true case above.
    goto restart;
}
```
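As a side note, step (2) in main(), setTheContextObject(), is essentially a one-liner: it stores the ServiceManager instance in a file-level global sp<BBinder> inside IPCThreadState.cpp, which executeCommand() later falls back to when a transaction targets handle 0 (sketch):

```cpp
// IPCThreadState.cpp (sketch)
sp<BBinder> the_context_object;

void IPCThreadState::setTheContextObject(sp<BBinder> obj)
{
    the_context_object = obj;
}
```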
BinderCallback is what receives and handles the messages; the specific flow is described in the comments:
```cpp
class BinderCallback : public LooperCallback {
public:
    static sp<BinderCallback> setupTo(const sp<Looper>& looper) {
        sp<BinderCallback> cb = new BinderCallback;
        int binder_fd = -1;

        // Send the BC_ENTER_LOOPER command to the binder driver and obtain
        // the file descriptor of the binder device.
        IPCThreadState::self()->setupPolling(&binder_fd);
        LOG_ALWAYS_FATAL_IF(binder_fd < 0, "Failed to setupPolling: %d", binder_fd);

        // Flush after setupPolling(), to make sure the binder driver
        // knows about this thread handling commands.
        // If there is pending data in the write buffer, send it to the driver now.
        IPCThreadState::self()->flushCommands();

        // Watch the binder file descriptor on the Looper.
        int ret = looper->addFd(binder_fd,
                                Looper::POLL_CALLBACK,
                                Looper::EVENT_INPUT,
                                cb,
                                nullptr /*data*/);
        LOG_ALWAYS_FATAL_IF(ret != 1, "Failed to add binder FD to Looper");

        return cb;
    }

    // When the binder driver delivers data, Looper invokes handleEvent() to receive and process it.
    int handleEvent(int /* fd */, int /* events */, void* /* data */) override {
        // Read the commands from the binder driver and handle them.
        IPCThreadState::self()->handlePolledCommands();
        return 1;  // Continue receiving callbacks.
    }
};
```
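The setupPolling() call above is what hands the binder fd over to the Looper. Roughly (based on IPCThreadState.cpp; whether it flushes immediately differs between releases), it queues a BC_ENTER_LOOPER command for the driver and returns the fd that ProcessState opened earlier:

```cpp
// IPCThreadState.cpp (abridged sketch)
status_t IPCThreadState::setupPolling(int* fd)
{
    if (mProcess->mDriverFD < 0) {
        return -EBADF;
    }
    // Tell the driver this thread enters the looper; the command sits in the
    // write buffer (mOut) until flushCommands()/talkWithDriver() sends it.
    mOut.writeInt32(BC_ENTER_LOOPER);
    *fd = mProcess->mDriverFD;
    return 0;
}
```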
Inside IPCThreadState:
```cpp
status_t IPCThreadState::handlePolledCommands()
{
    status_t result;

    // Keep looping while there is unconsumed data in the read buffer (mIn).
    do {
        result = getAndExecuteCommand();
    } while (mIn.dataPosition() < mIn.dataSize());

    // Once all commands have been executed, handle any pending
    // BR_DECREFS / BR_RELEASE and flush the write buffer.
    processPendingDerefs();
    flushCommands();
    return result;
}

status_t IPCThreadState::getAndExecuteCommand()
{
    // ... irrelevant code omitted (talk to the driver, read the next cmd from mIn) ...
    // This is where each command actually gets executed.
    result = executeCommand(cmd);
    return result;
}

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    // ... irrelevant code omitted ...
    if (tr.target.ptr) {
        // We only have a weak reference on the target object, so we must first try to
        // safely acquire a strong reference before doing anything else with it.
        // For ordinary binder servers, tr.cookie is a pointer to the local BBinder object.
        if (reinterpret_cast<RefBase::weakref_type*>(
                tr.target.ptr)->attemptIncStrong(this)) {
            error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                    &reply, tr.flags);
            reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
        } else {
            error = UNKNOWN_TRANSACTION;
        }
    } else {
        // For servicemanager the target handle is 0, so the_context_object is used --
        // and the_context_object is exactly the ServiceManager instance set in main().
        // ServiceManager does not override transact(); that function lives in BBinder.
        error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
    }
    // ... irrelevant code omitted ...
}
```
As you can see, IPCThreadState does not handle these transactions itself; it hands them over to ServiceManager. BBinder contains no service-specific logic either: the real handling happens in its subclass, i.e. the concrete server-side implementation class.
```cpp
// BBinder::transact turns the transaction code into the actual function to call.
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            err = pingBinder();
            break;
        case EXTENSION_TRANSACTION:
            err = reply->writeStrongBinder(getExtension());
            break;
        case DEBUG_PID_TRANSACTION:
            err = reply->writeInt32(getDebugPid());
            break;
        default:
            // Everything else goes to the subclass, e.g. BnServiceManager::onTransact().
            err = onTransact(code, data, reply, flags);
            break;
    }

    // In case this is being transacted on in the same process.
    if (reply != nullptr) {
        reply->setDataPosition(0);
    }

    return err;
}
```
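For servicemanager, the default branch lands in onTransact() of the AIDL-generated BnServiceManager, which unmarshals the Parcel and dispatches to the matching virtual method on the ServiceManager subclass. The generated code is verbose; a trimmed sketch of just the getService branch looks roughly like this (names follow the usual AIDL C++ codegen and may differ slightly by release):

```cpp
// Generated BnServiceManager::onTransact (trimmed sketch)
status_t BnServiceManager::onTransact(uint32_t code, const Parcel& data,
                                      Parcel* reply, uint32_t flags) {
    switch (code) {
        case TRANSACTION_getService: {
            std::string name;
            sp<IBinder> result;
            data.readUtf8FromUtf16(&name);                       // unmarshal the argument
            binder::Status status = getService(name, &result);   // virtual call into ServiceManager
            status.writeToParcel(reply);                         // write the status header
            reply->writeStrongBinder(result);                    // write the returned binder
            return NO_ERROR;
        }
        // TRANSACTION_addService, TRANSACTION_checkService, ... are handled the same way.
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
```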
Summary of the server-side flow
driver ioctl -> Looper callback (BinderCallback::handleEvent) -> IPCThreadState::self()->handlePolledCommands() (parses the commands handed over by the driver) -> BBinder::transact() -> BnServiceManager::onTransact() -> ServiceManager::getService()
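The last link in that chain, ServiceManager::getService(), is quite small in the native servicemanager; roughly (Android 11 sources), it just looks the name up in the service map and always returns OK, with a null binder meaning "not found":

```cpp
// frameworks/native/cmds/servicemanager/ServiceManager.cpp (abridged sketch)
Status ServiceManager::getService(const std::string& name, sp<IBinder>* outBinder) {
    // tryGetService() checks the caller's permission via Access, looks the name
    // up in the service map and, if allowed, returns the stored binder.
    *outBinder = tryGetService(name, true);
    // Note: the status is OK even when the service is missing; callers check for null.
    return Status::ok();
}
```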