-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy paththreadpool.h
215 lines (184 loc) · 6.03 KB
/
threadpool.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
#ifndef THREAD_POOL_H
#define THREAD_POOL_H
#include <pthread.h>
#include <sched.h>

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <future>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <thread>
#include <vector>
using std::map;
using std::vector;
// A scratch allocation owned by the thread pool: a raw byte pointer plus
// the capacity of that allocation in bytes. This is a passive record —
// allocation, growth, and freeing are all managed by ThreadPool, which
// hands these out from its per-worker and global buffer tables.
class Worker_buffer {
public:
    // Wraps an existing allocation; defaults describe "no buffer yet".
    Worker_buffer(unsigned char* data = 0, size_t size = 0)
        : data(data),
          size(size)
    {
    }

    unsigned char* data;  // start of the allocation, null/0 when empty
    size_t size;          // capacity of `data` in bytes
};
// Fixed-size pool of worker threads with per-worker and global scratch
// buffers. Buffers grow on demand and are reclaimed by a low-priority
// "sleeper" thread after a period with no buffer activity. Callers must
// bracket any use of a pointer returned by get_buffer()/get_global_buffer()
// with lock_buffers()/unlock_buffers(): that is what keeps the sleeper from
// freeing memory that is still in use.
class ThreadPool {
public:
    ThreadPool(size_t);
    // Schedule f(args...) on the pool; returns a future for its result.
    template<class F, class... Args>
    auto enqueue(F&& f, Args&&... args)
        -> std::future<typename std::result_of<F(Args...)>::type>;
    ~ThreadPool();
    // Round a byte count up to a multiple of 16. aligned_alloc requires
    // size to be a multiple of the alignment (C11 7.22.3.1, kept in C++17);
    // passing the raw request through, as the code previously did, was
    // undefined behavior for sizes that are not multiples of 16.
    static inline size_t round_up_16(size_t size) {
        return (size + 15) & ~(size_t)15;
    }
    // Return a scratch buffer of at least `size` bytes private to the
    // calling worker thread, growing (free + realloc) it if needed.
    // NOTE(review): must be called from a pool worker — the map lookup uses
    // operator[], so an unknown thread id is default-inserted as index 0
    // and would silently alias worker 0's buffers. Not internally locked;
    // callers hold lock_buffers() per the class contract.
    inline unsigned char* get_buffer(int buffer_id, size_t size) {
        int ti = tid[std::this_thread::get_id()];
        if (size > buffers[ti][buffer_id].size) {
            if (buffers[ti][buffer_id].data) {
                free(buffers[ti][buffer_id].data);
            }
            size_t rounded = round_up_16(size);
            buffers[ti][buffer_id] =
                Worker_buffer((unsigned char*)aligned_alloc(16, rounded), rounded);
        }
        return buffers[ti][buffer_id].data;
    }
    // Return a scratch buffer of at least `size` bytes shared across
    // workers, keyed by buffer_id. gb_mutex serializes concurrent callers;
    // the lock_buffers() contract protects the returned pointer from the
    // sleeper's reclamation pass.
    inline unsigned char* get_global_buffer(int buffer_id, size_t size) {
        std::lock_guard<std::mutex> lock(gb_mutex);
        size_t rounded = round_up_16(size);
        auto it = global_buffers.find(buffer_id);
        if (it == global_buffers.end()) {
            global_buffers[buffer_id] =
                Worker_buffer((unsigned char*)aligned_alloc(16, rounded), rounded);
            return global_buffers[buffer_id].data;
        } else {
            if (size > it->second.size) {
                free(it->second.data);
                it->second =
                    Worker_buffer((unsigned char*)aligned_alloc(16, rounded), rounded);
            }
            return it->second.data;
        }
    }
    // Block the sleeper's reclamation pass while buffers are in use.
    // buffers_toggled is set at both lock and unlock so the sleeper's idle
    // timer is reset whether it samples during or after the critical section.
    inline void lock_buffers(void) {
        sleeper_mutex.lock();
        buffers_toggled = true;
    }
    inline void unlock_buffers(void) {
        buffers_toggled = true;
        sleeper_mutex.unlock();
        sleeper_condition.notify_one();
    }
    int get_threads() const {
        return workers.size();
    }
private:
    // need to keep track of threads so we can join them
    std::vector< std::thread > workers;
    // the task queue
    std::queue< std::function<void()> > tasks;
    // synchronization for the task queue
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
    // worker thread id -> index into `buffers`
    map< std::thread::id, int > tid;
    // per-worker scratch buffers, indexed [worker][buffer_id]
    vector < vector<Worker_buffer> > buffers;
    // shared scratch buffers, keyed by caller-chosen id; guarded by gb_mutex
    map< int, Worker_buffer > global_buffers;
    std::mutex gb_mutex;
    // background thread that frees idle buffers; see the constructor
    std::thread sleeper;
    std::mutex sleeper_mutex;
    std::condition_variable sleeper_condition;
    // set by lock_buffers()/unlock_buffers(); cleared by the sleeper each
    // cycle to detect whether any buffer activity happened
    bool buffers_toggled;
};
extern ThreadPool* filter_pool;
// the constructor just launches some amount of workers
// (plus one low-priority "sleeper" that reclaims idle scratch buffers)
inline ThreadPool::ThreadPool(size_t threads)
    : stop(false), buffers_toggled(false)
{
    // One private set of scratch buffers per worker thread.
    buffers = vector< vector<Worker_buffer> >(threads);
    for(size_t i = 0;i<threads;++i) {
        workers.emplace_back(
            // Worker loop: sleep until a task is queued (or shutdown begins),
            // pop one task while holding the queue lock, run it outside the lock.
            [this]
            {
                for(;;)
                {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        this->condition.wait(lock,
                            [this]{ return this->stop || !this->tasks.empty(); });
                        // On shutdown, drain any remaining tasks before exiting.
                        if(this->stop && this->tasks.empty())
                            return;
                        task = std::move(this->tasks.front());
                        this->tasks.pop();
                    }
                    task();
                }
            }
        );
        // Map this worker's thread id to its slot in `buffers`.
        tid[workers[i].get_id()] = i;
        // Two scratch buffer slots per worker.
        buffers[i] = vector<Worker_buffer>(2);
    }
    // Sleeper: every ~100 ms, if no lock_buffers()/unlock_buffers() activity
    // was observed during the wait, free all per-thread and global buffers.
    sleeper = std::thread( [this] {
        while (true) {
            std::unique_lock<std::mutex> lock(sleeper_mutex);
            // Clear the activity flag; any lock/unlock during the wait below
            // re-sets it (and the unlock notifies this condition variable).
            buffers_toggled = false;
            sleeper_condition.wait_for(lock, std::chrono::milliseconds(100), [this] {
                return this->stop;
            });
            // NOTE(review): this read of `stop` must synchronize with whoever
            // writes it — verify the writer holds sleeper_mutex when setting
            // it; otherwise this is a data race and shutdown of this thread
            // can lag by up to one timeout period.
            if (stop) return;
            // Buffer activity happened during the wait: not idle, try again.
            if (buffers_toggled) continue;
            // NOTE(review): per-thread buffers are only reclaimed when at
            // least one global buffer exists — confirm this gating is
            // intentional and not a leftover.
            if (global_buffers.size()) {
                // Free every per-worker buffer and reset it to empty. Safe
                // only because callers hold sleeper_mutex (via lock_buffers)
                // while using these pointers.
                for (auto& bo: buffers) {
                    for (auto& bi: bo) {
                        if (bi.data) {
                            free(bi.data);
                            bi.size = 0;
                            bi.data = 0;
                        }
                    }
                }
                // Free and drop all global buffers as well.
                for (auto& b: global_buffers) {
                    if (b.second.data) {
                        free(b.second.data);
                        b.second.data = 0;
                    }
                }
                global_buffers.clear();
            }
        }
    });
    // Demote the sleeper to SCHED_IDLE so periodic cleanup never competes
    // with real work (Linux/pthreads-specific; failure is only logged).
    sched_param sch_params;
    sch_params.sched_priority = sched_get_priority_min(SCHED_IDLE);
    if (pthread_setschedparam(sleeper.native_handle(), SCHED_IDLE, &sch_params)) {
        printf("Error while trying to demote sleeper thread\n");
    }
}
// add new work item to the pool
template<class F, class... Args>
auto ThreadPool::enqueue(F&& f, Args&&... args)
-> std::future<typename std::result_of<F(Args...)>::type>
{
using return_type = typename std::result_of<F(Args...)>::type;
auto task = std::make_shared< std::packaged_task<return_type()> >(
std::bind(std::forward<F>(f), std::forward<Args>(args)...)
);
std::future<return_type> res = task->get_future();
{
std::unique_lock<std::mutex> lock(queue_mutex);
// don't allow enqueueing after stopping the pool
if(stop)
throw std::runtime_error("enqueue on stopped ThreadPool");
tasks.emplace([task](){ (*task)(); });
}
condition.notify_one();
return res;
}
// the destructor joins all threads
inline ThreadPool::~ThreadPool()
{
    {
        // `stop` is read by workers under queue_mutex and by the sleeper
        // under sleeper_mutex, so it must be written while holding BOTH.
        // Writing it under queue_mutex alone (as before) raced with the
        // sleeper's predicate check and could delay its exit by a full
        // timeout period. std::lock acquires the pair deadlock-free
        // regardless of the order other code takes these mutexes in.
        std::lock(queue_mutex, sleeper_mutex);
        std::lock_guard<std::mutex> queue_guard(queue_mutex, std::adopt_lock);
        std::lock_guard<std::mutex> sleeper_guard(sleeper_mutex, std::adopt_lock);
        stop = true;
    }
    // Wake every waiter so each thread observes `stop` and returns.
    condition.notify_all();
    sleeper_condition.notify_all();
    // Workers drain the remaining queue before exiting; join them all,
    // then the sleeper.
    for(std::thread &worker: workers) {
        worker.join();
    }
    sleeper.join();
}
#endif