//===--- Threading.h - Abstractions for multithreading -----------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_SUPPORT_THREADING_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_SUPPORT_THREADING_H
#include "support/Context.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/Twine.h"
#include <cassert>
#include <condition_variable>
#include <future>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>
namespace clang {
namespace clangd {
/// A threadsafe flag that is initially clear.
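///
/// Example (an illustrative sketch; `initialize` is a hypothetical function):
///   Notification Ready;
///   std::thread Worker([&] { initialize(); Ready.notify(); });
///   Ready.wait(); // Blocks until the worker has called notify().
///   Worker.join();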
class Notification {
public:
// Sets the flag. No-op if already set.
void notify();
// Blocks until flag is set.
void wait() const;
private:
bool Notified = false;
mutable std::condition_variable CV;
mutable std::mutex Mu;
};

/// Limits the number of threads that can acquire the lock at the same time.
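///
/// Example (an illustrative sketch; `process` and `Item` are hypothetical):
///   Semaphore Slots(/*MaxLocks=*/4); // At most 4 threads proceed at once.
///   std::lock_guard<Semaphore> Guard(Slots);
///   process(Item);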
class Semaphore {
public:
Semaphore(std::size_t MaxLocks);
bool try_lock();
void lock();
void unlock();
private:
std::mutex Mutex;
std::condition_variable SlotsChanged;
std::size_t FreeSlots;
};

/// A point in time we can wait for.
/// Can be zero (don't wait) or infinity (wait forever).
/// (Not time_point::max(), because many std::chrono implementations overflow).
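///
/// Example (an illustrative sketch, using timeoutSeconds() declared below):
///   Deadline Soon = timeoutSeconds(5);       // Wait at most 5 seconds.
///   Deadline Now = Deadline::zero();         // Don't wait at all.
///   Deadline Forever = Deadline::infinity(); // Wait until done.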
class Deadline {
public:
Deadline(std::chrono::steady_clock::time_point Time)
: Type(Finite), Time(Time) {}
static Deadline zero() { return Deadline(Zero); }
static Deadline infinity() { return Deadline(Infinite); }
std::chrono::steady_clock::time_point time() const {
assert(Type == Finite);
return Time;
}
bool expired() const {
return (Type == Zero) ||
(Type == Finite && Time < std::chrono::steady_clock::now());
}
bool operator==(const Deadline &Other) const {
return (Type == Other.Type) && (Type != Finite || Time == Other.Time);
}
private:
enum Type { Zero, Infinite, Finite };
Deadline(enum Type Type) : Type(Type) {}
enum Type Type;
std::chrono::steady_clock::time_point Time;
};

/// Makes a deadline from a timeout in seconds. None means wait forever.
Deadline timeoutSeconds(llvm::Optional<double> Seconds);
/// Wait once on CV for the specified duration.
void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
Deadline D);
/// Waits on a condition variable until F() is true or D expires.
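///
/// Example (an illustrative sketch; Mu, CV and Queue are hypothetical members):
///   std::unique_lock<std::mutex> Lock(Mu);
///   if (!clangd::wait(Lock, CV, timeoutSeconds(10),
///                     [&] { return !Queue.empty(); }))
///     return; // Timed out before an item arrived.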
template <typename Func>
LLVM_NODISCARD bool wait(std::unique_lock<std::mutex> &Lock,
std::condition_variable &CV, Deadline D, Func F) {
while (!F()) {
if (D.expired())
return false;
wait(Lock, CV, D);
}
return true;
}

/// Runs tasks on separate (detached) threads and waits for all tasks to finish.
/// Objects that need to spawn threads can own an AsyncTaskRunner to ensure they
/// all complete on destruction.
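///
/// Example (an illustrative sketch; `Files` and `index` are hypothetical):
///   AsyncTaskRunner Tasks;
///   for (const auto &File : Files)
///     Tasks.runAsync("index:" + File, [&File] { index(File); });
///   Tasks.wait(); // Or rely on ~AsyncTaskRunner() to wait.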
class AsyncTaskRunner {
public:
/// Destructor waits for all pending tasks to finish.
~AsyncTaskRunner();
void wait() const { (void)wait(Deadline::infinity()); }
LLVM_NODISCARD bool wait(Deadline D) const;
// The name is used for tracing and debugging (e.g. to name a spawned thread).
void runAsync(const llvm::Twine &Name, llvm::unique_function<void()> Action);
private:
mutable std::mutex Mutex;
mutable std::condition_variable TasksReachedZero;
std::size_t InFlightTasks = 0;
};

/// Runs \p Action asynchronously with a new std::thread. The context will be
/// propagated.
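///
/// Example (an illustrative sketch; `expensiveComputation` is a hypothetical
/// function returning int):
///   std::future<int> F = runAsync<int>([] { return expensiveComputation(); });
///   // ... do other work ...
///   int Result = F.get(); // Blocks until the spawned thread finishes.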
template <typename T>
std::future<T> runAsync(llvm::unique_function<T()> Action) {
return std::async(
std::launch::async,
[](llvm::unique_function<T()> &&Action, Context Ctx) {
WithContext WithCtx(std::move(Ctx));
return Action();
},
std::move(Action), Context::current().clone());
}

/// Memoize is a cache to store and reuse computation results based on a key.
///
///   Memoize<DenseMap<int, bool>> PrimeCache;
///   for (int I : RepetitiveNumbers)
///     if (PrimeCache.get(I, [&] { return expensiveIsPrime(I); }))
///       llvm::errs() << "Prime: " << I << "\n";
///
/// This class is threadsafe. The computation normally runs once per key, but
/// concurrent calls for the same key may run it more than once; every call
/// returns the same (first-cached) result.
template <typename Container> class Memoize {
mutable Container Cache;
std::unique_ptr<std::mutex> Mu;
public:
Memoize() : Mu(std::make_unique<std::mutex>()) {}
template <typename T, typename Func>
typename Container::mapped_type get(T &&Key, Func Compute) const {
{
std::lock_guard<std::mutex> Lock(*Mu);
auto It = Cache.find(Key);
if (It != Cache.end())
return It->second;
}
// Don't hold the mutex while computing.
auto V = Compute();
{
std::lock_guard<std::mutex> Lock(*Mu);
auto R = Cache.try_emplace(std::forward<T>(Key), V);
// Insert into cache may fail if we raced with another thread.
if (!R.second)
return R.first->second; // Canonical value, from other thread.
}
return V;
}
};
} // namespace clangd
} // namespace clang
#endif