1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
|
package worker_watcher //nolint:golint,stylecheck
import (
"context"
"runtime"
"sync"
"time"
"github.com/spiral/errors"
"github.com/spiral/roadrunner/v2/interfaces/events"
"github.com/spiral/roadrunner/v2/interfaces/worker"
"github.com/spiral/roadrunner/v2/internal"
)
// Stack is a mutex-guarded LIFO collection of free workers.
type Stack struct {
	// workers holds the currently free workers; the last element is popped first.
	workers []worker.BaseProcess
	// mutex guards all fields of the struct.
	mutex sync.RWMutex
	// destroy is set once Destroy has started; Pop then refuses to hand out workers.
	destroy bool
	// actualNumOfWorkers mirrors len(workers); Destroy compares the two to
	// detect workers that are currently checked out of the stack.
	actualNumOfWorkers int64
}
// NewWorkersStack allocates an empty stack whose backing slice is
// pre-sized to the number of logical CPUs.
func NewWorkersStack() *Stack {
	capacity := runtime.NumCPU()
	return &Stack{
		actualNumOfWorkers: 0,
		workers:            make([]worker.BaseProcess, 0, capacity),
	}
}
// Reset drops every worker reference and zeroes the worker counter.
func (stack *Stack) Reset() {
	stack.mutex.Lock()
	stack.workers = nil
	stack.actualNumOfWorkers = 0
	stack.mutex.Unlock()
}
// Push puts a worker back onto the stack.
// Push keeps working while the stack is being destroyed; Destroy polls on a
// 100ms ticker, which gives in-flight workers a window to be returned.
func (stack *Stack) Push(w worker.BaseProcess) {
	stack.mutex.Lock()
	stack.workers = append(stack.workers, w)
	stack.actualNumOfWorkers++
	stack.mutex.Unlock()
}
// IsEmpty reports whether no workers are currently available in the stack.
func (stack *Stack) IsEmpty() bool {
	// read-only access: a shared (read) lock is sufficient; the original
	// took the exclusive lock, needlessly serializing readers.
	stack.mutex.RLock()
	defer stack.mutex.RUnlock()
	return len(stack.workers) == 0
}
// Pop removes and returns the most recently pushed worker.
// The second return value is true when the stack is being destroyed and no
// workers may be released; (nil, false) means the stack is simply empty.
func (stack *Stack) Pop() (worker.BaseProcess, bool) {
	stack.mutex.Lock()
	defer stack.mutex.Unlock()
	// once Destroy has started, refuse to hand out workers
	if stack.destroy {
		return nil, true
	}
	last := len(stack.workers) - 1
	if last < 0 {
		return nil, false
	}
	// detach the top of the stack
	w := stack.workers[last]
	stack.workers = stack.workers[:last]
	stack.actualNumOfWorkers--
	return w, false
}
// FindAndRemoveByPid removes the worker with the given pid from the stack,
// reporting whether such a worker was present.
func (stack *Stack) FindAndRemoveByPid(pid int64) bool {
	stack.mutex.Lock()
	defer stack.mutex.Unlock()
	for i := range stack.workers {
		if stack.workers[i].Pid() != pid {
			continue
		}
		// splice the worker out of the slice and return immediately
		stack.workers = append(stack.workers[:i], stack.workers[i+1:]...)
		stack.actualNumOfWorkers--
		return true
	}
	// no worker with such pid
	return false
}
// Workers returns a snapshot copy of the workers currently in the stack.
// Warning: this is an O(n) operation.
func (stack *Stack) Workers() []worker.BaseProcess {
	// read-only access: a shared lock is sufficient
	stack.mutex.RLock()
	defer stack.mutex.RUnlock()
	// pre-size the copy to the actual length; the original used a capacity
	// of 1, forcing repeated slice growth during the copy
	workersCopy := make([]worker.BaseProcess, len(stack.workers))
	copy(workersCopy, stack.workers)
	return workersCopy
}
// isDestroying reports whether Destroy has been initiated on the stack.
func (stack *Stack) isDestroying() bool {
	// read-only access: a shared lock is sufficient; the original took the
	// exclusive lock for a pure read.
	stack.mutex.RLock()
	defer stack.mutex.RUnlock()
	return stack.destroy
}
// Destroy marks the stack as destroyed (blocking Pop) and polls every 100ms
// until all checked-out workers have been pushed back, then kills every
// worker and clears the stack. The polling window gives the pool a chance to
// Push (return) workers that are still processing a task.
// If ctx is canceled before all workers return, Destroy gives up and
// returns without killing the outstanding workers (the original ignored ctx
// entirely and could block forever).
func (stack *Stack) Destroy(ctx context.Context) {
	stack.mutex.Lock()
	stack.destroy = true
	stack.mutex.Unlock()

	tt := time.NewTicker(time.Millisecond * 100)
	defer tt.Stop()
	for {
		select {
		case <-ctx.Done():
			// the caller gave up waiting for workers to come back
			return
		case <-tt.C:
			stack.mutex.Lock()
			// some workers are still checked out and may yet be returned via Push
			if len(stack.workers) != int(stack.actualNumOfWorkers) {
				stack.mutex.Unlock()
				continue
			}
			// All workers are back. Pop is blocked by the destroy flag, so no
			// worker can leave between the length check above and the kill
			// loop below. (The original released and re-acquired the mutex
			// between the two, leaving a race window.)
			for i := 0; i < len(stack.workers); i++ {
				// mark as destroyed (state is unused at the moment)
				stack.workers[i].State().Set(internal.StateDestroyed)
				// kill the worker; the error is deliberately ignored during teardown
				_ = stack.workers[i].Kill()
			}
			stack.mutex.Unlock()
			// clear the stack
			stack.Reset()
			return
		}
	}
}
// NewWorkerWatcher constructs a watcher over numWorkers workers.
// The allocator may be nil, but in that case dead workers will not be replaced.
func NewWorkerWatcher(allocator worker.Allocator, numWorkers int64, events events.Handler) worker.Watcher {
	return &workerWatcher{
		stack:             NewWorkersStack(),
		allocator:         allocator,
		initialNumWorkers: numWorkers,
		actualNumWorkers:  numWorkers,
		events:            events,
	}
}
// workerWatcher supervises a pool of workers: it hands them out, takes them
// back, and replaces workers whose processes exit unexpectedly.
type workerWatcher struct {
	// mutex guards watcher-level operations (Push/Remove).
	mutex sync.RWMutex
	// stack holds the free workers.
	stack *Stack
	// allocator creates replacement workers; may be nil (see NewWorkerWatcher).
	allocator worker.Allocator
	// initialNumWorkers is the pool size requested at construction.
	initialNumWorkers int64
	// actualNumWorkers is set at construction; not updated in the visible code.
	actualNumWorkers int64
	// events receives worker/pool lifecycle and error events.
	events events.Handler
}
// AddToWatch pushes the given workers onto the stack and starts a
// supervision goroutine for each of them.
func (ww *workerWatcher) AddToWatch(workers []worker.BaseProcess) error {
	for _, w := range workers {
		ww.stack.Push(w)
		// pass the worker as an argument so each goroutine gets its own copy
		go func(swc worker.BaseProcess) {
			ww.wait(swc)
		}(w)
	}
	return nil
}
// GetFreeWorker pops a free worker from the stack. When the stack is empty
// it polls until a worker is returned or ctx expires. Workers flagged
// StateRemove by the supervisor are destroyed and skipped.
func (ww *workerWatcher) GetFreeWorker(ctx context.Context) (worker.BaseProcess, error) {
	const op = errors.Op("GetFreeWorker")
	// thread safe operation
	w, stop := ww.stack.Pop()
	if stop {
		return nil, errors.E(op, errors.WatcherStopped)
	}
	// handle worker remove state
	// in this state worker is destroyed by supervisor
	if w != nil && w.State().Value() == internal.StateRemove {
		if err := ww.RemoveWorker(w); err != nil {
			return nil, err
		}
		// try to get next
		return ww.GetFreeWorker(ctx)
	}
	if w != nil {
		return w, nil
	}
	// no free workers: poll until one is returned or the context expires
	for {
		select {
		case <-ctx.Done():
			return nil, errors.E(op, errors.NoFreeWorkers, errors.Str("no free workers in the stack, timeout exceed"))
		default:
			w, stop = ww.stack.Pop()
			if stop {
				return nil, errors.E(op, errors.WatcherStopped)
			}
			if w != nil {
				return w, nil
			}
			// yield the processor instead of spinning hot on the stack mutex
			// (the original busy-waited, pegging a core while idle)
			runtime.Gosched()
		}
	}
}
// AllocateNew creates a fresh worker via the allocator, registers it for
// supervision, and pushes it onto the stack.
func (ww *workerWatcher) AllocateNew() error {
	const op = errors.Op("allocate new worker")
	ww.stack.mutex.Lock()
	sw, err := ww.allocator()
	if err != nil {
		// BUG FIX: the original returned here while still holding
		// stack.mutex, deadlocking every subsequent stack operation.
		ww.stack.mutex.Unlock()
		return errors.E(op, errors.WorkerAllocate, err)
	}
	ww.addToWatch(sw)
	// unlock before PushWorker, which re-acquires the same mutex via stack.Push
	ww.stack.mutex.Unlock()
	ww.PushWorker(sw)
	return nil
}
// RemoveWorker takes the given worker out of the stack, marks it removed,
// and kills its process. It is a no-op (nil error) when the worker is not
// currently in the stack.
func (ww *workerWatcher) RemoveWorker(wb worker.BaseProcess) error {
	ww.mutex.Lock()
	defer ww.mutex.Unlock()
	const op = errors.Op("remove worker")
	if !ww.stack.FindAndRemoveByPid(wb.Pid()) {
		// worker is not in the stack; nothing to tear down
		return nil
	}
	wb.State().Set(internal.StateRemove)
	if err := wb.Kill(); err != nil {
		return errors.E(op, err)
	}
	return nil
}
// PushWorker returns a worker to the stack. O(1) operation.
func (ww *workerWatcher) PushWorker(w worker.BaseProcess) {
	ww.mutex.Lock()
	ww.stack.Push(w)
	ww.mutex.Unlock()
}
// Destroy tears down all underlying workers, letting them complete their
// current task first. The watcher mutex is deliberately not taken here:
// workers must remain returnable via Push while the stack drains.
func (ww *workerWatcher) Destroy(ctx context.Context) {
	ww.stack.Destroy(ctx)
}
// WorkersList returns a copy of the currently watched workers.
// Warning: this is an O(n) operation.
func (ww *workerWatcher) WorkersList() []worker.BaseProcess {
	return ww.stack.Workers()
}
// wait blocks until the worker's process exits, reports any exit error, and
// — unless the worker was deliberately destroyed — drops it from the stack
// and allocates a replacement.
func (ww *workerWatcher) wait(w worker.BaseProcess) {
	const op = errors.Op("process wait")
	if err := w.Wait(); err != nil {
		ww.events.Push(events.WorkerEvent{
			Event:   events.EventWorkerError,
			Worker:  w,
			Payload: errors.E(op, err),
		})
	}
	if w.State().Value() == internal.StateDestroyed {
		// worker was manually destroyed; no replacement needed
		ww.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w})
		return
	}
	// remove the dead worker (if still present) and spawn a replacement
	_ = ww.stack.FindAndRemoveByPid(w.Pid())
	if err := ww.AllocateNew(); err != nil {
		ww.events.Push(events.PoolEvent{
			Event:   events.EventPoolError,
			Payload: errors.E(op, err),
		})
	}
}
// addToWatch starts the supervision goroutine for a single worker.
func (ww *workerWatcher) addToWatch(wb worker.BaseProcess) {
	// method value form; receiver and argument are evaluated at the go statement
	go ww.wait(wb)
}
|