-
Notifications
You must be signed in to change notification settings - Fork 350
Expand file tree
/
Copy pathzephyr_dp_schedule.c
More file actions
441 lines (386 loc) · 13.2 KB
/
zephyr_dp_schedule.c
File metadata and controls
441 lines (386 loc) · 13.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright(c) 2023 Intel Corporation. All rights reserved.
*
* Author: Marcin Szkudlinski
*/
#include <sof/audio/component.h>
#include <sof/audio/module_adapter/module/generic.h>
#include <sof/llext_manager.h>
#include <rtos/task.h>
#include <rtos/userspace_helper.h>
#include <stdint.h>
#include <sof/schedule/dp_schedule.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/ll_schedule_domain.h>
#include <sof/trace/trace.h>
#include <rtos/wait.h>
#include <rtos/interrupt.h>
#include <zephyr/kernel.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/sem.h>
#include <zephyr/sys/mutex.h>
#include <sof/lib/notifier.h>
#include <ipc4/base_fw.h>
#include "zephyr_dp_schedule.h"
#include <zephyr/kernel/thread.h>
LOG_MODULE_REGISTER(dp_schedule, CONFIG_SOF_LOG_LEVEL);
SOF_DEFINE_REG_UUID(dp_sched);
DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
#if CONFIG_SOF_USERSPACE_APPLICATION
/* Per-core DP scheduler locks for userspace builds.
 * The semaphore array is padded out to a full page and page-aligned so that
 * the whole object can be added to a user memory domain as one RW partition
 * (see scheduler_dp_add_domain()) without mapping unrelated kernel data that
 * would otherwise share the page.
 */
struct dp_sem_buf {
	struct sys_sem sem[CONFIG_CORE_COUNT];
	uint8_t reserved[CONFIG_MM_DRV_PAGE_SIZE - sizeof(struct sys_sem) * CONFIG_CORE_COUNT];
};
/* NOTE(review): 4096 is presumably CONFIG_MM_DRV_PAGE_SIZE — confirm they match */
static struct dp_sem_buf __aligned(4096) dp_sched_sem;
#else
/* Per-core DP scheduler locks for kernel-only builds, statically initialized
 * to binary-semaphore semantics (count 1, limit 1).
 */
#define DP_LOCK_INIT(i, _) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
#define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
/* User threads don't need access to this array. Access is performed from
 * the kernel space via a syscall. Array must be placed in special section
 * to be qualified as initialized by the gen_kobject_list.py script.
 */
static
STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOCK_INIT_LIST };
#endif
/* Each per-core instance of DP scheduler has separate structures; hence, locks are per-core.
 *
 * TODO: consider using cpu_get_id() instead of supplying core as a parameter.
 */
/**
 * Take the DP scheduler lock of the given core, blocking until it is free.
 *
 * @param core index of the core whose DP scheduler instance is being locked
 * @return a key to be passed to scheduler_dp_unlock() (the core index itself)
 */
unsigned int scheduler_dp_lock(uint16_t core)
{
#if CONFIG_SOF_USERSPACE_APPLICATION
	/* sys_sem is usable directly from user mode, no syscall needed */
	sys_sem_take(&dp_sched_sem.sem[core], K_FOREVER);
#else
	k_sem_take(&dp_lock[core], K_FOREVER);
#endif
	return core;
}
/**
 * Release the DP scheduler lock previously taken with scheduler_dp_lock().
 *
 * @param key value returned by scheduler_dp_lock() (the locked core's index)
 */
void scheduler_dp_unlock(unsigned int key)
{
#if CONFIG_SOF_USERSPACE_APPLICATION
	sys_sem_give(&dp_sched_sem.sem[key]);
#else
	k_sem_give(&dp_lock[key]);
#endif
}
#if CONFIG_SOF_USERSPACE_APPLICATION
/* Build the memory partition that covers the page-aligned DP scheduler
 * semaphore buffer. Shared by the add/remove helpers below so the two
 * partition descriptions cannot drift apart.
 */
static struct k_mem_partition scheduler_dp_sem_partition(void)
{
	return (struct k_mem_partition) {
		.start = (uintptr_t)&dp_sched_sem,
		.size = sizeof(dp_sched_sem),
		.attr = K_MEM_PARTITION_P_RW_U_RW,
	};
}

/**
 * Map the DP scheduler semaphores read-write into a user memory domain,
 * allowing user threads on that domain to take/give the per-core locks.
 *
 * @param domain memory domain to extend
 * @return 0 on success, negative error code from k_mem_domain_add_partition()
 */
int scheduler_dp_add_domain(struct k_mem_domain *domain)
{
	struct k_mem_partition part = scheduler_dp_sem_partition();

	return k_mem_domain_add_partition(domain, &part);
}

/**
 * Remove the DP scheduler semaphore mapping from a user memory domain.
 *
 * @param domain memory domain to shrink
 * @return 0 on success, negative error code from k_mem_domain_remove_partition()
 */
int scheduler_dp_rm_domain(struct k_mem_domain *domain)
{
	struct k_mem_partition part = scheduler_dp_sem_partition();

	return k_mem_domain_remove_partition(domain, &part);
}
#endif
/* dummy LL task - to start LL on secondary cores */
static enum task_state scheduler_dp_ll_tick_dummy(void *data)
{
	/* Does no work itself; it exists only so the LL scheduler keeps
	 * ticking while DP tasks are queued, which in turn drives
	 * scheduler_dp_ll_tick() through the NOTIFIER_ID_LL_POST_RUN event.
	 * Always reschedules so the tick source stays alive until canceled.
	 */
	return SOF_TASK_STATE_RESCHEDULE;
}
/*
* function called after every LL tick
*
 * This function checks if the queued DP tasks are ready for processing (meaning
* the module run by the task has enough data at all sources and enough free space
* on all sinks)
*
* if the task becomes ready, a deadline is set allowing Zephyr to schedule threads
* in right order
*
* TODO: currently there's a limitation - DP module must be surrounded by LL modules.
* it simplifies algorithm - there's no need to browse through DP chains calculating
* deadlines for each module in function of all modules execution status.
* Now is simple - modules deadline is its start + tick time.
*
* example:
* Lets assume we do have a pipeline:
*
* LL1 -> DP1 -> LL2 -> DP2 -> LL3 -> DP3 -> LL4
*
* all LLs starts in 1ms tick
*
* for simplification lets assume
* - all LLs are on primary core, all DPs on secondary (100% CPU is for DP)
* - context switching requires 0 cycles
*
* DP1 - starts every 1ms, needs 0.5ms to finish processing
* DP2 - starts every 2ms, needs 0.6ms to finish processing
* DP3 - starts every 10ms, needs 0.3ms to finish processing
*
* TICK0
* only LL1 is ready to run
* LL1 processing (producing data chunk for DP1)
*
* TICK1
* LL1 is ready to run
 * DP1 is ready to run (has data from LL1), set deadline to TICK2
* LL1 processing (producing second data chunk for DP1)
* DP1 processing for 0.5ms (consuming first data chunk, producing data chunk for LL2)
* CPU is idle for 0.5ms
*
* TICK2
* LL1 is ready to run
 * DP1 is ready to run, set deadline to TICK3
* LL2 is ready to run
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing 50% data chunk for DP2)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* CPU is idle for 0.5ms
*
* TICK3
* LL1 is ready to run
 * DP1 is ready to run, set deadline to TICK4
* LL2 is ready to run
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing rest of data chunk for DP2)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* CPU is idle for 0.5ms
*
* TICK4
* LL1 is ready to run
 * DP1 is ready to run, set deadline to TICK5
* LL2 is ready to run
* DP2 is ready to run set deadline to TICK6
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing 50% of second data chunk for DP2)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* DP2 processing for 0.5ms (no data produced as DP2 has 0.1ms to go)
* 100% CPU used
*
* !!!!!! Note here - DP1 must do before DP2 as it MUST finish in this tick. DP2 can wait
 * >>>>>>> this is what we call EDF - EARLIEST DEADLINE FIRST <<<<<<
*
* TICK5
* LL1 is ready to run
 * DP1 is ready to run, set deadline to TICK6
* LL2 is ready to run
* DP2 is in progress, deadline is set to TICK6
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing rest of second data chunk for DP2)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* DP2 processing for 0.1ms (producing TWO data chunks for LL3)
* CPU is idle for 0.4ms (60% used)
*
* TICK6
* LL1 is ready to run
 * DP1 is ready to run, set deadline to TICK7
* LL2 is ready to run
* DP2 is ready to run set deadline to TICK8
* LL3 is ready to run
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing 50% of second data chunk for DP2)
* LL3 processing (producing 10% of first data chunk for DP3)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* DP2 processing for 0.5ms (no data produced as DP2 has 0.1ms to go)
* 100% CPU used
*
*
*
 * (........ 9 more cycles - LL3 produces 100% of data for DP3......)
*
*
* TICK15
* LL1 is ready to run
 * DP1 is ready to run, set deadline to TICK16
* LL2 is ready to run
* DP2 is ready to run set deadline to TICK17
* LL3 is ready to run
* DP3 is ready to run set deadline to TICK25
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing 50% of data chunk for DP2)
* LL3 processing (producing 10% of second data chunk for DP3)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* DP2 processing for 0.5ms (no data produced as DP2 has 0.1ms to go)
* 100% CPU used -
* !!! note that DP3 is ready but has no chance to get CPU in this cycle
*
* TICK16
* LL1 is ready to run set deadline to TICK17
 * DP1 is ready to run
* LL2 is ready to run
* DP2 is in progress, deadline is set to TICK17
* LL3 is ready to run
* DP3 is in progress, deadline is set to TICK25
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing rest of data chunk for DP2)
* LL3 processing (producing 10% of second data chunk for DP3)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* DP2 processing for 0.1ms (producing data)
* DP3 processing for 0.2ms (producing 10 data chunks for LL4)
* 90% CPU used
*
* TICK17
* LL1 is ready to run
 * DP1 is ready to run
* LL2 is ready to run
* DP2 is ready to run
* LL3 is ready to run
* LL4 is ready to run
* !! NOTE that DP3 is not ready - it will be ready again in TICK25
* LL1 processing (producing data chunk for DP1)
* LL2 processing (producing rest of data chunk for DP2)
* LL3 processing (producing next 10% of second data chunk for DP3)
* LL4 processing (consuming 10% of data prepared by DP3)
* DP1 processing for 0.5ms (producing data chunk for LL2)
* DP2 processing for 0.5ms (no data produced as DP2 has 0.1ms to go)
* 100% CPU used
*
*
* Now - pipeline is in stable state, CPU used almost in 100% (it would be 100% if DP3
* needed 1.2ms for processing - but the example would be too complicated)
*/
void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *caller_data)
{
(void)receiver_data;
(void)event_type;
(void)caller_data;
unsigned int lock_key;
struct scheduler_dp_data *dp_sch = scheduler_get_data(SOF_SCHEDULE_DP);
/* remember current timestamp as "NOW" */
dp_sch->last_ll_tick_timestamp = k_cycle_get_32();
lock_key = scheduler_dp_lock(cpu_get_id());
scheduler_dp_recalculate(dp_sch, event_type == NOTIFIER_ID_LL_POST_RUN);
scheduler_dp_unlock(lock_key);
}
#if CONFIG_SOF_USERSPACE_APPLICATION
/* Cancel stub installed in the ops table for userspace builds (see
 * schedule_dp_ops). Reaching it indicates a logic error, so it panics.
 */
static int scheduler_dp_task_cancel(void *data, struct task *task)
{
	/* Should never be called */
	k_panic();
	return -EOPNOTSUPP;
}
#endif
/**
 * Stop a DP task: mark it canceled, unlink it from the scheduler list,
 * wake it if it is blocked, and wait for its worker thread to exit.
 *
 * @param data scheduler private data (struct scheduler_dp_data *)
 * @param task task to stop
 * @return always 0
 */
static int scheduler_dp_task_stop(void *data, struct task *task)
{
	unsigned int lock_key;
	struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
	struct task_dp_pdata *pdata = task->priv_data;

	/* this is async cancel - mark the task as canceled and remove it from scheduling */
	lock_key = scheduler_dp_lock(cpu_get_id());

	task->state = SOF_TASK_STATE_CANCEL;
	list_item_del(&task->list);

	/* if there're no more DP tasks, stop LL tick source */
	if (list_is_empty(&dp_sch->tasks))
		schedule_task_cancel(&dp_sch->ll_tick_src);

	/* if the task is waiting - let it run and self-terminate */
#if CONFIG_SOF_USERSPACE_APPLICATION
	k_sem_give(pdata->sem);
#else
	k_event_set(pdata->event, DP_TASK_EVENT_CANCEL);
#endif
	scheduler_dp_unlock(lock_key);

	/* wait till the task has finished, if there was any task created.
	 * must be done outside the lock - the exiting thread may need it
	 */
	if (pdata->thread_id)
		k_thread_join(pdata->thread_id, K_FOREVER);

	return 0;
}
/**
 * Free all resources owned by a DP task: its worker thread, kernel objects,
 * stack and memory-domain entries.
 *
 * @param data scheduler private data (struct scheduler_dp_data *)
 * @param task task to release; its priv_data itself is freed by the caller
 * @return result of user_stack_free()
 */
static int scheduler_dp_task_free(void *data, struct task *task)
{
	struct task_dp_pdata *pdata = task->priv_data;
	int ret;

	/* stop first: unlinks the task and joins its thread (always returns 0) */
	scheduler_dp_task_stop(data, task);

	/* the thread should be terminated at this moment,
	 * abort is safe and will ensure no use after free
	 */
	if (pdata->thread_id) {
		k_thread_abort(pdata->thread_id);
		pdata->thread_id = NULL;
	}

#ifdef CONFIG_USERSPACE
	/* only dynamically allocated kernel objects are freed here; ones
	 * embedded in pdata (pointer equals the embedded struct's address)
	 * must not be passed to k_object_free()
	 */
#if CONFIG_SOF_USERSPACE_PROXY
	if (pdata->event != &pdata->event_struct)
		k_object_free(pdata->event);
#else
	if (pdata->sem != &pdata->sem_struct)
		k_object_free(pdata->sem);
#endif
	if (pdata->thread != &pdata->thread_struct)
		k_object_free(pdata->thread);
#endif

	/* free task stack */
	ret = user_stack_free(pdata->p_stack);
	pdata->p_stack = NULL;

	scheduler_dp_domain_free(pdata->mod);

	/* all other memory has been allocated as a single malloc, will be freed later by caller */
	return ret;
}
/* Queue a DP task for scheduling and make sure the LL tick source that
 * drives readiness checks is running. (Function name spelling is kept as-is:
 * it is referenced by the ops table below.)
 */
static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,
				     uint64_t period)
{
	struct scheduler_dp_data *sched = (struct scheduler_dp_data *)data;
	struct task_dp_pdata *priv = task->priv_data;
	unsigned int key = scheduler_dp_lock(cpu_get_id());
	bool schedulable;

	/* only tasks that are not currently queued/running may be scheduled */
	switch (task->state) {
	case SOF_TASK_STATE_INIT:
	case SOF_TASK_STATE_CANCEL:
	case SOF_TASK_STATE_COMPLETED:
		schedulable = true;
		break;
	default:
		schedulable = false;
		break;
	}

	if (!schedulable) {
		scheduler_dp_unlock(key);
		return -EINVAL;
	}

	/* the first DP task starts the LL tick source */
	if (list_is_empty(&sched->tasks))
		schedule_task(&sched->ll_tick_src, 0, 0);

	/* add a task to DP scheduler list */
	task->state = SOF_TASK_STATE_QUEUED;
	list_item_prepend(&task->list, &sched->tasks);
	priv->mod->dp_startup_delay = true;

	scheduler_dp_unlock(key);

	tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
	return 0;
}
/* DP scheduler operations registered with the generic scheduler core.
 * In userspace builds the cancel hook is a panicking stub (see
 * scheduler_dp_task_cancel); otherwise stop doubles as cancel.
 */
static struct scheduler_ops schedule_dp_ops = {
	.schedule_task = scheduler_dp_task_shedule,
#if CONFIG_SOF_USERSPACE_APPLICATION
	.schedule_task_cancel = scheduler_dp_task_cancel,
#else
	.schedule_task_cancel = scheduler_dp_task_stop,
#endif
	.schedule_task_free = scheduler_dp_task_free,
};
/**
 * Initialize the per-core DP scheduler instance: allocate its data,
 * init the per-core locks (userspace builds), start the LL tick source
 * and hook the post-LL-run notifier.
 *
 * @return 0 on success, -ENOMEM on allocation failure, or the error
 *         returned by schedule_task_init_ll()
 */
int scheduler_dp_init(void)
{
	struct scheduler_dp_data *dp_sch = rzalloc(SOF_MEM_FLAG_KERNEL,
						   sizeof(struct scheduler_dp_data));
	int ret;

	if (!dp_sch)
		return -ENOMEM;

	list_init(&dp_sch->tasks);

#if CONFIG_SOF_USERSPACE_APPLICATION
	/* binary-semaphore semantics: count 1, limit 1 */
	for (unsigned int i = 0; i < ARRAY_SIZE(dp_sched_sem.sem); i++)
		sys_sem_init(dp_sched_sem.sem + i, 1, 1);
#endif

	/* init src of DP tick.
	 * done BEFORE publishing dp_sch via scheduler_init() so that a
	 * failure here can still free dp_sch without leaving the scheduler
	 * core holding a dangling pointer (the previous ordering leaked
	 * dp_sch on this error path)
	 */
	ret = schedule_task_init_ll(&dp_sch->ll_tick_src,
				    SOF_UUID(dp_sched_uuid),
				    SOF_SCHEDULE_LL_TIMER,
				    0, scheduler_dp_ll_tick_dummy, dp_sch,
				    cpu_get_id(), 0);
	if (ret) {
		rfree(dp_sch);
		return ret;
	}

	scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
	notifier_register(NULL, NULL, NOTIFIER_ID_LL_POST_RUN, scheduler_dp_ll_tick, 0);
	scheduler_dp_domain_init();

	return 0;
}
/* Fill IPC scheduler properties with this core's DP task information,
 * holding the per-core DP lock while the task list is walked.
 */
void scheduler_get_task_info_dp(struct scheduler_props *scheduler_props, uint32_t *data_off_size)
{
	struct scheduler_dp_data *sched =
		(struct scheduler_dp_data *)scheduler_get_data(SOF_SCHEDULE_DP);
	unsigned int key;

	scheduler_props->processing_domain = COMP_PROCESSING_DOMAIN_DP;

	key = scheduler_dp_lock(cpu_get_id());
	scheduler_get_task_info(scheduler_props, data_off_size, &sched->tasks);
	scheduler_dp_unlock(key);
}