//===--------------------- TaskPool.cpp -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Host/TaskPool.h"
#include "lldb/Host/ThreadLauncher.h"
#include "lldb/Utility/Log.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

namespace lldb_private {

namespace {
class TaskPoolImpl {
public:
  static TaskPoolImpl &GetInstance();

  void AddTask(std::function<void()> &&task_fn);

private:
  TaskPoolImpl();

  static lldb::thread_result_t WorkerPtr(void *pool);

  static void Worker(TaskPoolImpl *pool);

  std::queue<std::function<void()>> m_tasks;
  std::mutex m_tasks_mutex;
  uint32_t m_thread_count;
};
} // end of anonymous namespace

TaskPoolImpl &TaskPoolImpl::GetInstance() {
  static TaskPoolImpl g_task_pool_impl;
  return g_task_pool_impl;
}

void TaskPool::AddTaskImpl(std::function<void()> &&task_fn) {
  TaskPoolImpl::GetInstance().AddTask(std::move(task_fn));
}

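// The std::future-returning TaskPool::AddTask entry point lives in the
// header, not in this file. A minimal sketch of how it presumably forwards to
// AddTaskImpl (an illustrative assumption, not a copy of TaskPool.h): the
// callable is wrapped in a shared std::packaged_task so a std::future can be
// handed back, and a type-erased void() thunk is enqueued through AddTaskImpl.
//
//   template <typename F, typename... Args>
//   static auto AddTask(F &&f, Args &&...args) {
//     using R = std::invoke_result_t<F, Args...>;
//     auto task = std::make_shared<std::packaged_task<R()>>(
//         std::bind(std::forward<F>(f), std::forward<Args>(args)...));
//     AddTaskImpl([task]() { (*task)(); });
//     return task->get_future();
//   }
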
TaskPoolImpl::TaskPoolImpl() : m_thread_count(0) {}

unsigned GetHardwareConcurrencyHint() {
  // std::thread::hardware_concurrency may return 0 if the value is not well
  // defined or not computable.
  static const unsigned g_hardware_concurrency =
      std::max(1u, std::thread::hardware_concurrency());
  return g_hardware_concurrency;
}

void TaskPoolImpl::AddTask(std::function<void()> &&task_fn) {
  const size_t min_stack_size = 8 * 1024 * 1024;

  std::unique_lock<std::mutex> lock(m_tasks_mutex);
  m_tasks.emplace(std::move(task_fn));
  if (m_thread_count < GetHardwareConcurrencyHint()) {
    m_thread_count++;
    // Note that the Release() (detach) call below needs to happen with the
    // m_tasks_mutex held. This prevents the thread from exiting prematurely
    // and triggering a Linux glibc bug
    // (https://sourceware.org/bugzilla/show_bug.cgi?id=19951).
    llvm::Expected<HostThread> host_thread =
        lldb_private::ThreadLauncher::LaunchThread(
            "task-pool.worker", WorkerPtr, this, min_stack_size);
    if (host_thread) {
      host_thread->Release();
    } else {
      LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_HOST),
               "failed to launch host thread: {}",
               llvm::toString(host_thread.takeError()));
    }
  }
}

lldb::thread_result_t TaskPoolImpl::WorkerPtr(void *pool) {
  Worker((TaskPoolImpl *)pool);
  return {};
}

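// Workers drain the shared queue until it is empty; the exiting thread
// decrements m_thread_count under the lock, so the pool shrinks to zero
// threads when idle and is re-grown lazily by AddTask.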
void TaskPoolImpl::Worker(TaskPoolImpl *pool) {
  while (true) {
    std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
    if (pool->m_tasks.empty()) {
      pool->m_thread_count--;
      break;
    }

    std::function<void()> f = std::move(pool->m_tasks.front());
    pool->m_tasks.pop();
    lock.unlock();

    f();
  }
}

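// Distributes the half-open range [begin, end) over the pool: each worker
// repeatedly claims the next index with an atomic fetch_add until the range
// is exhausted, which load-balances uneven per-index costs.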
void TaskMapOverInt(size_t begin, size_t end,
                    const llvm::function_ref<void(size_t)> &func) {
  const size_t num_workers =
      std::min<size_t>(end, GetHardwareConcurrencyHint());
  std::atomic<size_t> idx{begin};
  auto wrapper = [&idx, end, &func]() {
    while (true) {
      size_t i = idx.fetch_add(1);
      if (i >= end)
        break;
      func(i);
    }
  };

  std::vector<std::future<void>> futures;
  futures.reserve(num_workers);
  for (size_t i = 0; i < num_workers; i++)
    futures.push_back(TaskPool::AddTask(wrapper));
  for (size_t i = 0; i < num_workers; i++)
    futures[i].wait();
}

} // namespace lldb_private
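
// Example usage (an illustrative sketch; `modules` and `ComputeSymbols` are
// hypothetical names, not part of this file):
//
//   using namespace lldb_private;
//
//   // Run one task asynchronously and wait for its result.
//   std::future<int> f = TaskPool::AddTask([] { return 42; });
//   int answer = f.get();
//
//   // Process indices [0, modules.size()) in parallel.
//   TaskMapOverInt(0, modules.size(),
//                  [&](size_t i) { ComputeSymbols(modules[i]); });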