libuv 介绍
源码分析发现内部使用了 libuv,需要先简单了解下 libuv 的原理和基本的使用方法。
Libuv 是一个跨平台的基于事件驱动的异步 IO 库,最初是为 Node.js 编写的。但是它提供的功能不仅仅是 IO,还包括进程、线程、信号、定时器、进程间通信等。
线程间通信
Demo 实现
cpp
#include <hilog/log.h>
#include <thread>
#include <unistd.h>
#include <uv.h>
// Async handle shared between the worker thread and the loop thread;
// closed in after() once the download finishes.
uv_async_t async;
// Latest progress value; written by fake_download (worker thread) and
// read by print_progress (loop thread) via async.data.
double percentage;
// Runs on the loop thread each time uv_async_send wakes the handle:
// reads the progress value published through handle->data and logs it.
// Note: successive uv_async_send calls may coalesce into one callback.
void print_progress(uv_async_t *handle) {
    const double pct = *(double *)handle->data;
    OH_LOG_Print(LOG_APP, LOG_WARN, 0xAAABB, "abtest", "<%{public}s %{public}f>", "print_progress", pct);
}
// Completion callback of uv_queue_work: runs on the loop thread after
// fake_download returns. Closing the async handle removes the last active
// handle so uv_run(UV_RUN_DEFAULT) can exit.
void after(uv_work_t *req, int status) {
OH_LOG_Print(LOG_APP, LOG_WARN, 0xAAABB, "abtest", "<%{public}s>", "Download complete");
uv_close((uv_handle_t *)&async, NULL);
}
// Thread-pool work callback (runs off the loop thread): simulates a
// download of `size` bytes, publishing progress to the loop thread via
// uv_async_send. Fix: removed the stray full-width '。' after the loop
// brace and the trailing ',,' after the function brace, which made the
// original snippet fail to compile.
void fake_download(uv_work_t *req) {
    int size = *((int *)req->data);
    int downloaded = 0;
    while (downloaded < size) {
        percentage = downloaded * 100.0 / size;
        // Publish the address of the shared progress value, then wake the loop.
        async.data = (void *)&percentage;
        uv_async_send(&async);
        sleep(1);
        downloaded += (200 + random()) % 1000; // can only download max 1000bytes/sec,
                                               // but at least a 200;
    }
}
void test_threads(){
uv_loop_t *loop = uv_default_loop();
uv_work_t req;
int size = 10240;
req.data = (void *)&size;
uv_async_init(loop, &async, print_progress);
uv_queue_work(loop, &req, fake_download, after);
uv_run(loop, UV_RUN_DEFAULT);
}
-
uv_default_loop
-
uv_run
-
核心实现:epoll ,非 io 事件使用eventfd
epoll_wait
阻塞避免了事件循环一直空转导致占用 CPU 的问题 -
和android handler比较,核心实现是一致的。以下 Android实现方法 system/core/libutils/Looper.cpp
scss//wait int Looper::pollInner(int timeoutMillis) { ... int eventCount = epoll_wait(mEpollFd.get(), eventItems, EPOLL_MAX_EVENTS, timeoutMillis); ... } //ctl int result = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mWakeEventFd.get(), &wakeEvent); //wake void Looper::wake() { // ssize_t nWrite = TEMP_FAILURE_RETRY(write(mWakeEventFd.get(), &inc, sizeof(uint64_t))); }
-
下图显示了libuv事件循环的所有阶段:
-
-
uv_async_init
- int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb)
- 和uv_async_send方法在线程通信中配对使用, handle和async_cb加入到loop的async_handles队列
- int uv_async_send(uv_async_t* handle) 方法将handle的pending设置为1,并向loop->async_wfd写入事件唤醒主线程。主线程检查handle的pending为1后执行async_cb方法。
-
uv_queue_work
libuv中的threadpool
UV_EXTERN int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb, uv_after_work_cb after_work_cb)
第一次提交会创建一个线程池。uv_thread_create 中用 pthread 创建子线程,子线程在无任务时用 uv_cond_wait(&cond, &mutex) 阻塞等待;主线程通过 uv__work_submit 提交 work 到队列,方法中会用 uv_cond_signal(&cond) 唤醒子线程,子线程从队列中取出 work 执行 work_cb,执行完后通过 uv_async_send 方法通知到主线程,主线程执行 after_work_cb。
源码分析
taskpool模块注册
javascript
import taskpool from '@ohos.taskpool';
/commonlibrary/ets_utils/js_concurrent_module/taskpool/native_module_taskpool.cpp
cpp
// NAPI module descriptor for "taskpool": nm_modname is the name resolved by
// `import taskpool from '@ohos.taskpool'`; nm_register_func populates the
// module's exports (see TaskPool::InitTaskPool).
static napi_module g_taskPoolModule = {
.nm_version = 1,
.nm_flags = 0,
.nm_filename = nullptr,
.nm_register_func = Commonlibrary::Concurrent::TaskPoolModule::TaskPool::InitTaskPool,
.nm_modname = "taskpool",
.nm_priv = reinterpret_cast<void*>(0),
.reserved = { 0 },
};
/*
* module register
*/
// Runs automatically when the shared library is loaded (constructor
// attribute), registering the module before any JS import resolves it.
extern "C" __attribute__((constructor)) void TaskPoolRegister()
{
napi_module_register(&g_taskPoolModule);
}
js导出类方法创建
cpp
// Module registration callback: defines the Task / TaskGroup JS classes,
// the Priority constant object, and the module-level functions (execute,
// cancel, getTaskPoolInfo), then initializes the TaskManager singleton.
// Returns the populated exports object.
napi_value TaskPool::InitTaskPool(napi_env env, napi_value exports){
HILOG_INFO("taskpool:: Import taskpool");
HITRACE_HELPER_METER_NAME(__PRETTY_FUNCTION__);
napi_value taskClass = nullptr;
napi_define_class(env, "Task", NAPI_AUTO_LENGTH, Task::TaskConstructor, nullptr, 0, nullptr, &taskClass);
napi_value taskGroupClass = nullptr;
napi_define_class(env, "TaskGroup", NAPI_AUTO_LENGTH, TaskGroup::TaskGroupConstructor, nullptr, 0, nullptr,
&taskGroupClass);
// isCanceled is attached as a static property of the Task class.
napi_value isCanceledFunc;
napi_create_function(env, "isCanceled", NAPI_AUTO_LENGTH, TaskManager::IsCanceled, NULL, &isCanceledFunc);
napi_set_named_property(env, taskClass, "isCanceled", isCanceledFunc);
// define priority
napi_value priorityObj = NapiHelper::CreateObject(env);
napi_value highPriority = NapiHelper::CreateUint32(env, Priority::HIGH);
napi_value mediumPriority = NapiHelper::CreateUint32(env, Priority::MEDIUM);
napi_value lowPriority = NapiHelper::CreateUint32(env, Priority::LOW);
napi_property_descriptor exportPriority[] = {
DECLARE_NAPI_PROPERTY("HIGH", highPriority),
DECLARE_NAPI_PROPERTY("MEDIUM", mediumPriority),
DECLARE_NAPI_PROPERTY("LOW", lowPriority),
};
napi_define_properties(env, priorityObj, sizeof(exportPriority) / sizeof(exportPriority[0]), exportPriority);
// Assemble the module's export surface.
napi_property_descriptor properties[] = {
DECLARE_NAPI_PROPERTY("Task", taskClass),
DECLARE_NAPI_PROPERTY("TaskGroup", taskGroupClass),
DECLARE_NAPI_PROPERTY("Priority", priorityObj),
DECLARE_NAPI_FUNCTION("execute", Execute),
DECLARE_NAPI_FUNCTION("cancel", Cancel),
DECLARE_NAPI_FUNCTION("getTaskPoolInfo", GetTaskPoolInfo),
};
napi_define_properties(env, exports, sizeof(properties) / sizeof(properties[0]), properties);
TaskManager::GetInstance().InitTaskManager(env);
return exports;
}
线程池初始化
- 为线程池添加一个预留线程,通过libuv库的uv_thread_create方法创建一个线程。
cpp
// Creates `num` worker threads. expandingCount_ is bumped BEFORE each
// construction so TriggerLoadBalance can see an expansion is in flight
// and skip rebalancing.
void TaskManager::CreateWorkers(napi_env env, uint32_t num)
{
for (uint32_t i = 0; i < num; i++) {
expandingCount_++;
auto worker = Worker::WorkerConstructor(env);
NotifyWorkerAdded(worker);
}
}
// Factory: allocates a Worker bound to the host env and immediately starts
// its dedicated thread. Ownership note: the Worker deletes itself at the
// end of ExecuteInThread, so no caller frees the returned pointer.
Worker* Worker::WorkerConstructor(napi_env env)
{
HITRACE_HELPER_METER_NAME("WorkerConstructor: [Add Thread]");
Worker* worker = new Worker(env);
worker->StartExecuteInThread();
return worker;
}
// Lazily creates the TaskRunner (entry point ExecuteInThread with `this`
// as its argument) and launches it on a fresh thread.
void Worker::StartExecuteInThread()
{
if (!runner_) {
runner_ = std::make_unique<TaskRunner>(TaskStartCallback(ExecuteInThread, this));
}
if (runner_) {
runner_->Execute(); // start a new thread
} else {
HILOG_ERROR("taskpool:: runner_ is nullptr");
}
}
// Spawns the inner runner thread. NOTE(review): taskInnerRunner_ is a raw
// owning `new` with no visible delete in this excerpt — presumably released
// elsewhere; verify against the full class.
bool TaskRunner::Execute()
{
taskInnerRunner_ = new TaskInnerRunner(this);
return taskInnerRunner_->Start();
}
// Creates the OS thread via libuv; the lambda names the new thread
// "TaskWorkThread" and enters Thread::Run on it.
// NOTE(review): uv_thread_create returns 0 on success, so `ret != 0`
// evaluates to true on FAILURE — confirm callers expect this polarity.
bool Thread::Start()
{
int ret = uv_thread_create(&tid_, [](void* arg) {
#if defined IOS_PLATFORM || defined MAC_PLATFORM
pthread_setname_np("TaskWorkThread");
#else
pthread_setname_np(pthread_self(), "TaskWorkThread");
#endif
Thread* thread = reinterpret_cast<Thread*>(arg);
thread->Run();
}, this);
return ret != 0;
}
- 开启预留线程的loop功能
上面线程开启后执行ExecuteInThread 方法。
- 标记worker env为taskpool Thread,与ark runtime交互,用于检测js中的异步方法和回调
- 注册几个uv_async_init用于接受其他线程事件, worker->RunLoop()利用uv_run开启loop
关注几个uv_async_init
uv_async_init(loop, worker->performTaskSignal_, reinterpret_cast<uv_async_cb>(Worker::PerformTask)) uv_async_init(loop, worker->clearWorkerSignal_, reinterpret_cast<uv_async_cb>(Worker::ReleaseWorkerHandles));
- work加入到idleWorkers_
cpp
// Thread body: forwards to the owning TaskRunner's Run().
void TaskRunner::TaskInnerRunner::Run()
{
if (runner_ != nullptr) {
runner_->Run();
}
}
// Holds a non-owning pointer back to the TaskRunner that spawned it.
TaskRunner::TaskInnerRunner::TaskInnerRunner(const TaskRunner* runner) : runner_(runner) {}
// Invokes the start callback captured at construction (ExecuteInThread
// with the Worker* as data).
void TaskRunner::Run() const
{
if (callback_.callback != nullptr) {
callback_.callback(callback_.data);
}
}
cpp
// Worker thread main: creates the worker's JS runtime, marks it as a
// taskpool thread, installs the uv_async handles used for cross-thread
// signaling, then blocks in RunLoop() until ReleaseWorkerHandles stops the
// loop. Finally unregisters and destroys the Worker (it owns itself).
void Worker::ExecuteInThread(const void* data)
{
HITRACE_HELPER_START_TRACE(__PRETTY_FUNCTION__);
auto worker = reinterpret_cast<Worker*>(const_cast<void*>(data));
{
// Create a dedicated JS runtime (env) for this worker thread.
napi_create_runtime(worker->hostEnv_, &worker->workerEnv_);
if (worker->workerEnv_ == nullptr) {
HILOG_ERROR("taskpool:: workerEnv is nullptr");
return;
}
auto workerEngine = reinterpret_cast<NativeEngine*>(worker->workerEnv_);
// mark worker env is taskpoolThread
workerEngine->MarkTaskPoolThread();
// TaskResultCallback is how the engine reports task results back (also
// covers async/concurrent function completion detection in the runtime).
workerEngine->InitTaskPoolThread(workerEngine, Worker::TaskResultCallback);
}
uv_loop_t* loop = worker->GetWorkerLoop();
if (loop == nullptr) {
HILOG_ERROR("taskpool:: loop is nullptr");
return;
}
// save the worker tid
worker->tid_ = GetThreadId();
// Init worker task execute signal
// performTaskSignal_: "a task is available, dequeue and run it".
worker->performTaskSignal_ = new uv_async_t;
worker->performTaskSignal_->data = worker;
uv_async_init(loop, worker->performTaskSignal_, reinterpret_cast<uv_async_cb>(Worker::PerformTask));
// clearWorkerSignal_: "tear down this worker" (see ReleaseWorkerHandles).
worker->clearWorkerSignal_ = new uv_async_t;
worker->clearWorkerSignal_->data = worker;
uv_async_init(loop, worker->clearWorkerSignal_, reinterpret_cast<uv_async_cb>(Worker::ReleaseWorkerHandles));
HITRACE_HELPER_FINISH_TRACE;
#if !defined(WINDOWS_PLATFORM) && !defined(MAC_PLATFORM)
// Init debugger task post signal
worker->debuggerOnPostTaskSignal_ = new uv_async_t;
worker->debuggerOnPostTaskSignal_->data = worker;
uv_async_init(loop, worker->debuggerOnPostTaskSignal_, reinterpret_cast<uv_async_cb>(Worker::HandleDebuggerTask));
#endif
if (worker->PrepareForWorkerInstance()) {
// Call after uv_async_init
worker->NotifyWorkerCreated();
worker->RunLoop(); // blocks here until the loop is stopped
} else {
HILOG_ERROR("taskpool:: Worker PrepareForWorkerInstance fail");
}
// Loop exited: remove from the manager and self-destruct.
TaskManager::GetInstance().RemoveWorker(worker);
worker->ReleaseWorkerThreadContent();
delete worker;
worker = nullptr;
}
- 同时创建一个定时器来调整线程数量
cpp
// Manager thread main: starts a 1s repeating timer driving
// TriggerLoadBalance, plus an async handle used to restart the timer after
// it has been suspended (see EnqueueExecuteId), then runs the loop.
void TaskManager::RunTaskManager()
{
loop_ = uv_default_loop();
timer_ = new uv_timer_t;
uv_timer_init(loop_, timer_);
notifyRestartTimer_ = new uv_async_t;
uv_timer_start(timer_, reinterpret_cast<uv_timer_cb>(TaskManager::TriggerLoadBalance), 0, 1000); // 1000: 1s
uv_async_init(loop_, notifyRestartTimer_, reinterpret_cast<uv_async_cb>(TaskManager::RestartTimer));
#if defined IOS_PLATFORM || defined MAC_PLATFORM
pthread_setname_np("TaskMgrThread");
#else
pthread_setname_np(pthread_self(), "TaskMgrThread");
#endif
uv_run(loop_, UV_RUN_DEFAULT); // blocks while the timer/async are active
uv_loop_close(loop_);
}
创建任务
javascript
let task = new taskpool.Task(test);
主要用于 生成taskid 保存异步方法 和入参
cpp
// JS constructor for `new taskpool.Task(func, ...args)`: validates that at
// least one argument is given and the first is a function, then delegates
// to CreateTaskByFunc to attach taskId/function/arguments to `this`.
napi_value Task::TaskConstructor(napi_env env, napi_callback_info cbinfo)
{
// check argv count
size_t argc = NapiHelper::GetCallbackInfoArgc(env, cbinfo);
if (argc < 1) {
ErrorHelper::ThrowError(env, ErrorHelper::TYPE_ERROR, "taskpool:: create task need more than one param");
return nullptr;
}
// check 1st param is func
napi_value* args = new napi_value[argc];
ObjectScope<napi_value> scope(args, true); // frees args[] on scope exit
napi_value thisVar;
napi_get_cb_info(env, cbinfo, &argc, args, &thisVar, nullptr);
if (!NapiHelper::IsFunction(args[0])) {
ErrorHelper::ThrowError(env, ErrorHelper::TYPE_ERROR, "taskpool:: the first param of task must be function");
return nullptr;
}
CreateTaskByFunc(env, thisVar, args[0], args, argc);
return thisVar;
}
void Task::CreateTaskByFunc(napi_env env, napi_value task, napi_value func, napi_value* args, size_t argc)
{
napi_value argsArray;
napi_create_array_with_length(env, argc - 1, &argsArray);
for (size_t i = 0; i < argc - 1; i++) {
napi_set_element(env, argsArray, i, args[i + 1]);
}
napi_value taskId = NapiHelper::CreateUint32(env, TaskManager::GetInstance().GenerateTaskId());
napi_value setTransferListFunc;
napi_create_function(env, SETTRANSFERLIST_STR, NAPI_AUTO_LENGTH, SetTransferList, NULL, &setTransferListFunc);
napi_property_descriptor properties[] = {
DECLARE_NAPI_PROPERTY(FUNCTION_STR, args[0]),
DECLARE_NAPI_PROPERTY(ARGUMENTS_STR, argsArray),
DECLARE_NAPI_PROPERTY(TASKID_STR, taskId),
DECLARE_NAPI_FUNCTION(SETTRANSFERLIST_STR, SetTransferList),
};
napi_define_properties(env, task, sizeof(properties) / sizeof(properties[0]), properties);
}
任务提交
javascript
taskpool.execute(task);
-
通过入参是否为 napi_object 确定上层的调用方式。
-
这个例子中 task 是 napi_object
- 生成ExecuteId和TaskInfo
- 创建 Promise,deferred 保存在 taskInfo 中
-
提交执行taskInfo,通知执行NotifyExecuteTask();
cpp
// taskpool.execute(...) entry point. Two calling conventions:
//  - execute(task[, priority]): first arg is an object; group objects are
//    routed to ExecuteGroup, otherwise a TaskInfo is built from the task.
//  - execute(func, ...args): first arg is a function; taskId is fixed at 0.
// In both cases a Promise is returned and the execution is queued.
napi_value TaskPool::Execute(napi_env env, napi_callback_info cbinfo)
{
HITRACE_HELPER_METER_NAME(__PRETTY_FUNCTION__);
// check the argc
size_t argc = NapiHelper::GetCallbackInfoArgc(env, cbinfo);
if (argc < 1) {
ErrorHelper::ThrowError(env, ErrorHelper::TYPE_ERROR, "taskpool:: the number of params must be at least one");
return nullptr;
}
// check the first param is object or func
napi_value* args = new napi_value[argc];
ObjectScope<napi_value> scope(args, true); // frees args[] on scope exit
napi_get_cb_info(env, cbinfo, &argc, args, nullptr, nullptr);
napi_valuetype type;
napi_typeof(env, args[0], &type);
uint32_t priority = Priority::DEFAULT; // DEFAULT priority is MEDIUM
if (type == napi_object) {
// Get execution priority
if (argc > 1) {
if (!NapiHelper::IsNumber(args[1])) {
ErrorHelper::ThrowError(env, ErrorHelper::TYPE_ERROR, "taskpool:: priority type is error");
return nullptr;
}
priority = NapiHelper::GetUint32Value(env, args[1]);
if (priority >= Priority::NUMBER) {
ErrorHelper::ThrowError(env, ErrorHelper::TYPE_ERROR, "taskpool:: priority value is error");
return nullptr;
}
}
// A groupId property marks a TaskGroup rather than a plain Task.
if (NapiHelper::HasNameProperty(env, args[0], GROUP_ID_STR)) {
return ExecuteGroup(env, args[0], Priority(priority));
}
uint32_t executeId = TaskManager::GetInstance().GenerateExecuteId();
TaskInfo* taskInfo = TaskManager::GetInstance().GenerateTaskInfoFromTask(env, args[0], executeId);
if (taskInfo == nullptr) {
HILOG_ERROR("taskpool::ExecuteTask taskInfo is nullptr");
return nullptr;
}
// The deferred stored in taskInfo is resolved/rejected in HandleTaskResult.
napi_value promise = NapiHelper::CreatePromise(env, &taskInfo->deferred);
TaskManager::GetInstance().StoreRunningInfo(taskInfo->taskId, executeId);
ExecuteFunction(env, taskInfo, Priority(priority));
return promise;
}
if (type != napi_function) {
ErrorHelper::ThrowError(env, ErrorHelper::TYPE_ERROR, "taskpool:: first param must be object or function");
return nullptr;
}
// Type is napi_function, execute from func directly
napi_value argsArray;
napi_create_array_with_length(env, argc - 1, &argsArray);
for (size_t i = 0; i < argc - 1; i++) {
napi_set_element(env, argsArray, i, args[i + 1]);
}
uint32_t executeId = TaskManager::GetInstance().GenerateExecuteId();
// Set task id to 0 when execute from func directly
TaskInfo* taskInfo = TaskManager::GetInstance().GenerateTaskInfo(env, args[0], argsArray, 0, executeId);
if (taskInfo == nullptr) {
HILOG_ERROR("taskpool::ExecuteFunction taskInfo is nullptr");
return nullptr;
}
napi_value promise = NapiHelper::CreatePromise(env, &taskInfo->deferred);
TaskManager::GetInstance().StoreRunningInfo(0, executeId);
ExecuteFunction(env, taskInfo);
return promise;
}
cpp
// Queues a prepared TaskInfo: records its WAITING state, enqueues the
// executeId at the given priority (which notifies an idle worker), and
// kicks the load balancer in case more threads are needed.
void TaskPool::ExecuteFunction(napi_env env, TaskInfo* taskInfo, Priority priority)
{
uint32_t executeId = taskInfo->executeId;
taskInfo->priority = priority;
// tag for trace parse: Task Allocation
std::string strTrace = "Task Allocation: taskId : " + std::to_string(taskInfo->taskId)
+ ", executeId : " + std::to_string(executeId)
+ ", priority : " + std::to_string(priority)
+ ", executeState : " + std::to_string(ExecuteState::WAITING);
HITRACE_HELPER_METER_NAME(strTrace);
TaskManager::GetInstance().AddExecuteState(executeId);
TaskManager::GetInstance().EnqueueExecuteId(executeId, priority);
TaskManager::GetInstance().TryTriggerLoadBalance();
}
-
添加任务状态为waiting
-
executeId提交到task_queue中 调用NotifyExecuteTask
cpp
// Records a new execution as WAITING; exclusive lock because we mutate the
// shared state map.
void TaskManager::AddExecuteState(uint32_t executeId)
{
std::unique_lock<std::shared_mutex> lock(executeStatesMutex_);
executeStates_.emplace(executeId, ExecuteState::WAITING);
}
cpp
// Pushes an executeId into the priority queue and wakes a worker.
// Also restarts the load-balance timer if it had been suspended by
// CreateOrDeleteWorkers after a period with no tasks.
void TaskManager::EnqueueExecuteId(uint32_t executeId, Priority priority)
{
// once enqueued, reset the counter to make threads released at given time
// if timer is stopped and then new tasks enqueue, restart it
retryCount_ = 0;
if (suspend_) {
suspend_ = false;
uv_async_send(notifyRestartTimer_); // handled by TaskManager::RestartTimer
}
{
std::lock_guard<std::mutex> lock(taskQueuesMutex_);
taskQueues_[priority]->EnqueueExecuteId(executeId);
}
// Signal outside the lock: wakes a worker's performTaskSignal_.
NotifyExecuteTask();
}
cpp
// Timer callback (1s period) and enqueue-time hook: evicts blocked workers,
// computes a target thread count from queue depth and running workers, and
// grows/shrinks the pool accordingly. Skipped while an expansion is already
// in flight (expandingCount_ != 0).
void TaskManager::TriggerLoadBalance(const uv_timer_t* req)
{
// Now, we will call triggerLoadBalance when enqueue or by monitor,
// and taking the time used to create worker threads into consideration,
// so we should ensure the process is atomic.
TaskManager& taskManager = TaskManager::GetInstance();
HITRACE_HELPER_COUNT_TRACE("threadNum", static_cast<int64_t>(taskManager.GetThreadNum()));
HITRACE_HELPER_COUNT_TRACE("runningThreadNum", static_cast<int64_t>(taskManager.GetRunningWorkers()));
HITRACE_HELPER_COUNT_TRACE("idleThreadNum", static_cast<int64_t>(taskManager.GetIdleWorkers()));
HITRACE_HELPER_COUNT_TRACE("timeoutThreadNum", static_cast<int64_t>(taskManager.GetTimeoutWorkers()));
if (taskManager.expandingCount_ != 0) {
return;
}
taskManager.CheckForBlockedWorkers();
uint32_t targetNum = taskManager.ComputeSuitableThreadNum();
if (targetNum != 0) {
// We have tasks in the queue, and all workers may be running.
// Therefore the target runnable threads should be the sum of running workers and the calculated result.
targetNum = std::min(targetNum, taskManager.GetTaskNum());
targetNum += taskManager.GetRunningWorkers();
} else {
// We have no task in the queue. Therefore we do not need extra threads.
// But, tasks may still be executed in workers or microtask queue,
// so we should return the num of running workers.
targetNum = taskManager.GetRunningWorkers();
}
taskManager.CreateOrDeleteWorkers(targetNum);
}
任务处理
- uv_async_init performTaskSignal_ 处收到回调。
- 通过executeId获取到taskinfo
- 更新任务状态到RUNNING
- workerEngine->InitTaskPoolFunc。将taskinfo存储在函数中 这边会到ark runtime中检测函数的concurrent状态
- 调用task中的func函数。就是我们需要执行的逻辑
cpp
// Wakes this worker's loop thread so PerformTask runs; only signals while
// the async handle is still active (i.e. not being torn down).
void Worker::NotifyExecuteTask()
{
if (uv_is_active(reinterpret_cast<uv_handle_t*>(performTaskSignal_))) {
uv_async_send(performTaskSignal_);
}
}
cpp
// Worker-loop callback for performTaskSignal_: dequeues one executeId,
// resolves its TaskInfo, marks it RUNNING, deserializes the task function
// and arguments into this worker's env, binds the TaskInfo to the function
// (InitTaskPoolFunc, which also verifies the function is @Concurrent), and
// finally calls it. Results flow back through TaskResultCallback /
// NotifyTaskResult; every failure path reports via NotifyTaskResult.
void Worker::PerformTask(const uv_async_t* req)
{
auto worker = static_cast<Worker*>(req->data);
napi_env env = worker->workerEnv_;
napi_status status = napi_ok;
RunningScope runningScope(worker, status); // flips worker state RUNNING/IDLE via RAII
NAPI_CALL_RETURN_VOID(env, status);
auto executeIdAndPriority = TaskManager::GetInstance().DequeueExecuteId();
// executeId 0 means the queue was empty (signals can coalesce).
if (executeIdAndPriority.first == 0) {
worker->NotifyTaskFinished();
return;
}
PriorityScope priorityScope(worker, executeIdAndPriority.second);
TaskInfo* taskInfo = TaskManager::GetInstance().GetTaskInfo(executeIdAndPriority.first);
if (taskInfo == nullptr) { // task may have been canceled
worker->NotifyTaskFinished();
HILOG_DEBUG("taskpool::PerformTask taskInfo is null");
return;
}
{
std::lock_guard<std::mutex> lock(worker->currentTaskIdMutex_);
worker->currentTaskId_.emplace_back(taskInfo->taskId);
}
// tag for trace parse: Task Perform
std::string strTrace = "Task Perform: taskId : " + std::to_string(taskInfo->taskId) + ", executeId : " +
std::to_string(taskInfo->executeId);
HITRACE_HELPER_METER_NAME(strTrace);
taskInfo->worker = worker;
TaskManager::GetInstance().UpdateExecuteState(taskInfo->executeId, ExecuteState::RUNNING);
// Rehydrate the serialized task function into this worker's runtime.
napi_value func;
status = napi_deserialize(env, taskInfo->serializationFunction, &func);
if (status != napi_ok || func == nullptr) {
HILOG_ERROR("taskpool:: PerformTask deserialize function fail");
napi_value err = ErrorHelper::NewError(env, ErrorHelper::ERR_WORKER_SERIALIZATION,
"taskpool: failed to deserialize function.");
taskInfo->success = false;
NotifyTaskResult(env, taskInfo, err);
return;
}
napi_value args;
status = napi_deserialize(env, taskInfo->serializationArguments, &args);
if (status != napi_ok || args == nullptr) {
HILOG_ERROR("taskpool:: PerformTask deserialize arguments fail");
napi_value err = ErrorHelper::NewError(env, ErrorHelper::ERR_WORKER_SERIALIZATION,
"taskpool: failed to deserialize arguments.");
taskInfo->success = false;
NotifyTaskResult(env, taskInfo, err);
return;
}
auto funcVal = reinterpret_cast<NativeValue*>(func);
auto workerEngine = reinterpret_cast<NativeEngine*>(env);
// Store taskinfo in function
bool success = workerEngine->InitTaskPoolFunc(workerEngine, funcVal, taskInfo);
napi_value exception;
napi_get_and_clear_last_exception(env, &exception);
if (exception != nullptr) {
HILOG_ERROR("taskpool:: InitTaskPoolFunc occur exception");
taskInfo->success = false;
napi_value errorEvent = ErrorHelper::TranslateErrorEvent(env, exception);
NotifyTaskResult(env, taskInfo, errorEvent);
return;
}
if (!success) {
HILOG_ERROR("taskpool:: InitTaskPoolFunc fail");
napi_value err = ErrorHelper::NewError(env, ErrorHelper::TYPE_ERROR,
"taskpool: function may not be concurrent.");
taskInfo->success = false;
NotifyTaskResult(env, taskInfo, err);
return;
}
// Unpack the arguments array and invoke the user function.
uint32_t argsNum = NapiHelper::GetArrayLength(env, args);
napi_value argsArray[argsNum];
napi_value val;
for (size_t i = 0; i < argsNum; i++) {
napi_get_element(env, args, i, &val);
argsArray[i] = val;
}
napi_value result;
napi_value undefined = NapiHelper::GetUndefinedValue(env);
napi_call_function(env, undefined, func, argsNum, argsArray, &result);
{
std::lock_guard<std::mutex> lock(worker->stateMutex_);
if (LIKELY(worker->state_ == WorkerState::RUNNING)) {
uint64_t duration = ConcurrentHelper::GetMilliseconds() - worker->startTime_;
TaskManager::GetInstance().UpdateExecutedInfo(duration);
}
}
// Exceptions thrown synchronously by the task surface here.
napi_get_and_clear_last_exception(env, &exception);
if (exception != nullptr) {
HILOG_ERROR("taskpool::PerformTask occur exception");
taskInfo->success = false;
napi_value errorEvent = ErrorHelper::TranslateErrorEvent(env, exception);
NotifyTaskResult(env, taskInfo, errorEvent);
}
}
任务回调
- 移除executeId 执行信息
- uv_async_init onResultSignal处 收到回调TaskPool::HandleTaskResult
- js层的promise收到回调
cpp
// Engine callback (registered in InitTaskPoolThread) invoked when a task's
// result is ready; `data` is the TaskInfo bound via InitTaskPoolFunc.
// Forwards to NotifyTaskResult with the success flag.
void Worker::TaskResultCallback(NativeEngine* engine, NativeValue* result, bool success, void* data)
{
HITRACE_HELPER_METER_NAME(__PRETTY_FUNCTION__);
if (engine == nullptr) {
HILOG_FATAL("taskpool::TaskResultCallback engine is null");
return;
}
if (data == nullptr) {
HILOG_FATAL("taskpool:: taskInfo is nullptr");
return;
}
TaskInfo* taskInfo = static_cast<TaskInfo*>(data);
auto env = reinterpret_cast<napi_env>(engine);
taskInfo->success = success;
NotifyTaskResult(env, taskInfo, reinterpret_cast<napi_value>(result));
}
cpp
// Runs on the worker thread: serializes the result (falling back to a
// serialization error, reported recursively, if that fails), clears the
// execution's bookkeeping, then signals the host thread's onResultSignal
// (handled by TaskPool::HandleTaskResult) and marks this worker finished.
void Worker::NotifyTaskResult(napi_env env, TaskInfo* taskInfo, napi_value result)
{
HITRACE_HELPER_METER_NAME(__PRETTY_FUNCTION__);
napi_value undefined = NapiHelper::GetUndefinedValue(env);
napi_value resultData;
napi_status status = napi_serialize(env, result, undefined, &resultData);
if ((status != napi_ok || resultData == nullptr) && taskInfo->success) {
taskInfo->success = false;
napi_value err = ErrorHelper::NewError(env, ErrorHelper::ERR_WORKER_SERIALIZATION,
"taskpool: failed to serialize result.");
NotifyTaskResult(env, taskInfo, err); // recurse once with the error value
return;
}
taskInfo->result = resultData;
TaskManager::GetInstance().RemoveExecuteState(taskInfo->executeId);
if (taskInfo->groupExecuteId == 0) {
TaskManager::GetInstance().PopRunningInfo(taskInfo->taskId, taskInfo->executeId);
}
TaskManager::GetInstance().PopTaskInfo(taskInfo->executeId);
Worker* worker = reinterpret_cast<Worker*>(taskInfo->worker);
{
std::lock_guard<std::mutex> lock(worker->currentTaskIdMutex_);
worker->currentTaskId_.erase(std::find(worker->currentTaskId_.begin(),
worker->currentTaskId_.end(), taskInfo->taskId));
}
// Wake the host-side loop to resolve/reject the JS promise.
uv_async_send(taskInfo->onResultSignal);
worker->NotifyTaskFinished();
}
cpp
// Runs on the host (JS) thread via onResultSignal: deserializes the task's
// result into the host env and resolves or rejects the stored deferred
// (or updates group state), then releases the TaskInfo.
void TaskPool::HandleTaskResult(const uv_async_t* req)
{
HITRACE_HELPER_METER_NAME(__PRETTY_FUNCTION__);
auto taskInfo = static_cast<TaskInfo*>(req->data);
if (taskInfo == nullptr) {
HILOG_FATAL("taskpool::HandleTaskResult taskInfo is null");
return;
}
napi_handle_scope scope = nullptr;
NAPI_CALL_RETURN_VOID(taskInfo->env, napi_open_handle_scope(taskInfo->env, &scope));
napi_value taskData = nullptr;
napi_status status = napi_deserialize(taskInfo->env, taskInfo->result, &taskData);
// tag for trace parse: Task PerformTask End
std::string strTrace = "Task PerformTask End: taskId : " + std::to_string(taskInfo->taskId);
strTrace += ", executeId : " + std::to_string(taskInfo->executeId);
if (taskInfo->isCanceled) {
strTrace += ", performResult : IsCanceled";
} else if (status != napi_ok) {
strTrace += ", performResult : DeserializeFailed";
} else if (taskInfo->success) {
strTrace += ", performResult : Successful";
} else {
strTrace += ", performResult : Unsuccessful";
}
HITRACE_HELPER_METER_NAME(strTrace);
// Success requires: clean deserialize, not canceled, and the task itself succeeded.
bool success = status == napi_ok && !taskInfo->isCanceled && taskInfo->success;
if (taskData == nullptr) {
napi_get_undefined(taskInfo->env, &taskData);
}
// groupExecuteId == 0 means a standalone task (not part of a TaskGroup).
if (taskInfo->groupExecuteId == 0) {
if (success) {
napi_resolve_deferred(taskInfo->env, taskInfo->deferred, taskData);
} else {
napi_reject_deferred(taskInfo->env, taskInfo->deferred, taskData);
}
} else {
UpdateGroupInfoByResult(taskInfo->env, taskInfo, taskData, success);
}
NAPI_CALL_RETURN_VOID(taskInfo->env, napi_close_handle_scope(taskInfo->env, scope));
TaskManager::GetInstance().ReleaseTaskContent(taskInfo);
}
线程池线程数保持逻辑
-
TriggerLoadBalance触发时机
- 任务提交时
- 创建了定时器,每隔 1 秒调用
-
CheckForBlockedWorkers 当任务来不及执行 nextCheckTime_ < now 检测超时work并移除
-
CreateOrDeleteWorkers
- 当没有任务时暂停定时器
- 满足workerCount < maxThreads && workerCount < targetNum 优先创建线程。
- 空闲线程过多时清理线程
cpp
// (Duplicate excerpt of the load-balance callback shown earlier.)
// Evicts blocked workers, derives the target thread count from queue depth
// plus running workers, then grows or shrinks the pool; no-op while an
// expansion is in flight.
void TaskManager::TriggerLoadBalance(const uv_timer_t* req)
{
// Now, we will call triggerLoadBalance when enqueue or by monitor,
// and taking the time used to create worker threads into consideration,
// so we should ensure the process is atomic.
TaskManager& taskManager = TaskManager::GetInstance();
HITRACE_HELPER_COUNT_TRACE("threadNum", static_cast<int64_t>(taskManager.GetThreadNum()));
HITRACE_HELPER_COUNT_TRACE("runningThreadNum", static_cast<int64_t>(taskManager.GetRunningWorkers()));
HITRACE_HELPER_COUNT_TRACE("idleThreadNum", static_cast<int64_t>(taskManager.GetIdleWorkers()));
HITRACE_HELPER_COUNT_TRACE("timeoutThreadNum", static_cast<int64_t>(taskManager.GetTimeoutWorkers()));
if (taskManager.expandingCount_ != 0) {
return;
}
taskManager.CheckForBlockedWorkers();
uint32_t targetNum = taskManager.ComputeSuitableThreadNum();
if (targetNum != 0) {
// We have tasks in the queue, and all workers may be running.
// Therefore the target runnable threads should be the sum of running workers and the calculated result.
targetNum = std::min(targetNum, taskManager.GetTaskNum());
targetNum += taskManager.GetRunningWorkers();
} else {
// We have no task in the queue. Therefore we do not need extra threads.
// But, tasks may still be executed in workers or microtask queue,
// so we should return the num of running workers.
targetNum = taskManager.GetRunningWorkers();
}
taskManager.CreateOrDeleteWorkers(targetNum);
}
cpp
// Periodic watchdog: every CHECK_INTERVAL, any non-idle worker whose task
// has been running longer than the threshold is marked BLOCKED and moved
// from workers_/idleWorkers_ to timeoutWorkers_ (reaped later in
// CreateOrDeleteWorkers). The threshold shrinks when the pool is saturated.
void TaskManager::CheckForBlockedWorkers()
{
// monitor the running state
uint64_t now = ConcurrentHelper::GetMilliseconds();
if (UNLIKELY(nextCheckTime_ < now)) {
// the threshold will be dynamically modified to provide more flexibility in detecting exceptions
// if the thread num has reached the limit and the idle worker is not available, a short time will be used,
// else we will choose the longer one
std::lock_guard<std::recursive_mutex> lock(workersMutex_);
bool state = GetThreadNum() == ConcurrentHelper::GetActiveCpus() - 1 && GetIdleWorkers() == 0;
uint64_t threshold = state ? MIN_TIMEOUT_TIME : MAX_TIMEOUT_TIME;
for (auto iter = workers_.begin(); iter != workers_.end();) {
auto worker = *iter;
std::lock_guard<std::mutex> stateLock(worker->stateMutex_);
// if the worker thread is idle, just skip it
if (worker->state_ == WorkerState::IDLE) {
iter++;
continue;
}
if (now - worker->startTime_ >= threshold) {
HILOG_DEBUG("taskpool:: The worker is marked for timeout.");
worker->state_ = WorkerState::BLOCKED;
timeoutWorkers_.insert(worker);
idleWorkers_.erase(worker);
workers_.erase(iter++);
} else {
iter++;
}
}
nextCheckTime_ = now + CHECK_INTERVAL;
}
}
cpp
// Resizes the pool toward targetNum:
//  - after MAX_RETRY_COUNT consecutive idle ticks, suspends the timer;
//  - grows (bounded by cpu-derived maxThreads) when under target;
//  - shrinks by signaling long-idle workers' clearWorkerSignal_ (at most
//    STEP_SIZE per tick), keeping at least MIN_THREADS;
//  - reaps quiesced BLOCKED workers from timeoutWorkers_.
void TaskManager::CreateOrDeleteWorkers(uint32_t targetNum)
{
// uv_timer_start should not run on the background frequently when there is no task
if (targetNum == 0 && retryCount_ >= MAX_RETRY_COUNT) {
uv_timer_stop(timer_);
suspend_ = true; // restarted by EnqueueExecuteId on the next task
return;
} else if (GetTimeoutWorkers() == 0 && targetNum == 0) {
retryCount_++;
} else {
retryCount_ = 0;
}
uint32_t workerCount = GetThreadNum();
const uint32_t maxThreads = std::max(ConcurrentHelper::GetActiveCpus() - 1, DEFAULT_THREADS);
targetNum |= 1; // keep the target odd (guarantees at least one thread)
if (workerCount < maxThreads && workerCount < targetNum) {
uint32_t step = std::min(maxThreads, targetNum) - workerCount;
CreateWorkers(hostEnv_, step);
} else if (workerCount > MIN_THREADS && workerCount > targetNum) {
std::lock_guard<std::recursive_mutex> lock(workersMutex_);
uint32_t maxNum = std::max(MIN_THREADS, targetNum);
uint32_t step = std::min(workerCount - maxNum, STEP_SIZE);
for (uint32_t i = 0; i < step; i++) {
// Only release workers that are fully idle and hold no env state.
auto iter = std::find_if(idleWorkers_.begin(), idleWorkers_.end(), [this](Worker *worker) {
auto idleTime = ConcurrentHelper::GetMilliseconds() - worker->idlePoint_;
return idleTime > MAX_IDLE_TIME && worker->runningCount_ == 0 &&
!Timer::HasTimer(worker->workerEnv_) && !HasTaskEnvInfo(worker->workerEnv_);
});
if (iter != idleWorkers_.end()) {
workers_.erase(*iter);
uv_async_send((*iter)->clearWorkerSignal_); // triggers ReleaseWorkerHandles
idleWorkers_.erase(iter);
}
}
}
if (UNLIKELY(!timeoutWorkers_.empty())) {
for (auto iter = timeoutWorkers_.begin(); iter != timeoutWorkers_.end();) {
auto worker = *iter;
if (worker->runningCount_ == 0 && worker->state_ == WorkerState::BLOCKED &&
!Timer::HasTimer(worker->workerEnv_) && !HasTaskEnvInfo(worker->workerEnv_)) {
uv_async_send(worker->clearWorkerSignal_);
timeoutWorkers_.erase(iter++);
} else {
iter++;
}
}
}
}
cpp
// Worker-loop callback for clearWorkerSignal_: closes every async handle
// owned by this worker (freeing each uv_async_t in the close callback) and
// stops the loop, which lets ExecuteInThread finish and delete the Worker.
void Worker::ReleaseWorkerHandles(const uv_async_t* req)
{
HITRACE_HELPER_METER_NAME("ReleaseWorkerHandles: [Release Thread]");
auto worker = static_cast<Worker*>(req->data);
// when there is no active handle, worker loop will stop automatically.
uv_close(reinterpret_cast<uv_handle_t*>(worker->performTaskSignal_), [](uv_handle_t* handle) {
if (handle != nullptr) {
delete reinterpret_cast<uv_async_t*>(handle);
handle = nullptr;
}
});
#if !defined(WINDOWS_PLATFORM) && !defined(MAC_PLATFORM)
uv_close(reinterpret_cast<uv_handle_t*>(worker->debuggerOnPostTaskSignal_), [](uv_handle_t* handle) {
if (handle != nullptr) {
delete reinterpret_cast<uv_async_t*>(handle);
handle = nullptr;
}
});
#endif
uv_close(reinterpret_cast<uv_handle_t*>(worker->clearWorkerSignal_), [](uv_handle_t* handle) {
if (handle != nullptr) {
delete reinterpret_cast<uv_async_t*>(handle);
handle = nullptr;
}
});
uv_loop_t* loop = worker->GetWorkerLoop();
if (loop != nullptr) {
uv_stop(loop);
}
}
总结
鸿蒙中的 taskpool 底层利用 libuv 进行线程创建和线程通信。不过没有直接使用 libuv 自带的线程池(uv_queue_work),而是自己维护了一个可伸缩的线程池,并维护了任务状态。除了通用的线程池能力,这里的 taskpool 还封装了与上层 js 通信的功能,涉及到与 ark runtime thread 打交道。ark runtime 中的 Thread 这次并没有去仔细探究。