-
Notifications
You must be signed in to change notification settings - Fork 350
Expand file tree
/
Copy pathzephyr_dp_schedule_thread.c
More file actions
309 lines (261 loc) · 8.77 KB
/
zephyr_dp_schedule_thread.c
File metadata and controls
309 lines (261 loc) · 8.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright(c) 2025 Intel Corporation. All rights reserved.
*
* Author: Marcin Szkudlinski
*/
#include <rtos/task.h>
#include <sof/audio/module_adapter/module/generic.h>
#include <sof/common.h>
#include <sof/list.h>
#include <sof/schedule/ll_schedule_domain.h>
#include <sof/schedule/dp_schedule.h>
#include <sof/trace/trace.h>
#include <zephyr/kernel.h>
#include <stdbool.h>
#include <stdint.h>
#include "zephyr_dp_schedule.h"
LOG_MODULE_DECLARE(dp_schedule, CONFIG_SOF_LOG_LEVEL);
extern struct tr_ctx dp_tr;
/* Go through all DP tasks and recalculate their readiness and deadlines.
 *
 * @param dp_sch DP scheduler data holding the list of DP tasks to walk
 * @param is_ll_post_run true when invoked right after an LL tick has run;
 *                       only then the per-task startup-delay counters tick down
 *
 * NOT REENTRANT, should be called with scheduler_dp_lock()
 */
void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_run)
{
	struct list_item *tlist;
	struct task *curr_task;
	struct task_dp_pdata *pdata;

	list_for_item(tlist, &dp_sch->tasks) {
		curr_task = container_of(tlist, struct task, list);
		pdata = curr_task->priv_data;
		struct processing_module *mod = pdata->mod;
		/* set when this pass transitions the task QUEUED -> RUNNING */
		bool trigger_task = false;

		/* decrease number of LL ticks/cycles left till the module reaches its deadline */
		if (mod->dp_startup_delay && is_ll_post_run && pdata->ll_cycles_to_start) {
			pdata->ll_cycles_to_start--;
			if (!pdata->ll_cycles_to_start)
				/* delayed start complete, clear startup delay flag.
				 * see dp_startup_delay comment for details
				 */
				mod->dp_startup_delay = false;
		}

		if (curr_task->state == SOF_TASK_STATE_QUEUED) {
			bool mod_ready;

			/* readiness is evaluated from the module's sources and sinks */
			mod_ready = module_is_ready_to_process(mod, mod->sources,
							       mod->num_of_sources,
							       mod->sinks,
							       mod->num_of_sinks);
			if (mod_ready) {
				/* trigger the task */
				curr_task->state = SOF_TASK_STATE_RUNNING;
				if (mod->dp_startup_delay && !pdata->ll_cycles_to_start) {
					/* first time run - use delayed start:
					 * convert the module's LPT (us) into a count
					 * of LL timer periods
					 */
					pdata->ll_cycles_to_start =
						module_get_lpt(pdata->mod) / LL_TIMER_PERIOD_US;
					/* in case LPT < LL cycle - delay at least cycle */
					if (!pdata->ll_cycles_to_start)
						pdata->ll_cycles_to_start = 1;
				}
				trigger_task = true;
				/* wake the task thread blocked in dp_thread_fn() */
				k_event_post(pdata->event, DP_TASK_EVENT_PROCESS);
			}
		}

		if (curr_task->state == SOF_TASK_STATE_RUNNING) {
			/* (re) calculate deadline for all running tasks */
			/* get module deadline in us*/
			uint32_t deadline = module_get_deadline(mod);

			/* if a deadline cannot be calculated, use a fixed value relative to its
			 * first start
			 */
			if (deadline >= UINT32_MAX / 2 && trigger_task)
				deadline = module_get_lpt(mod);

			if (deadline < UINT32_MAX) {
				/* round down to 1ms */
				deadline = deadline / 1000;
				/* calculate number of ticks */
				deadline = deadline * (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000);
				/* add to "NOW", overflows are OK */
				deadline = dp_sch->last_ll_tick_timestamp + deadline;
				/* set in Zephyr. Note that it may be in past, it does not matter,
				 * Zephyr still will schedule the thread with earlier deadline
				 * first
				 */
				k_thread_absolute_deadline_set(pdata->thread_id, deadline);
			}
		}
	}
}
/* Thread function called in component context, on target core.
 *
 * Per-task loop: block on the task's event until woken with
 * DP_TASK_EVENT_PROCESS (posted by scheduler_dp_recalculate()) or
 * DP_TASK_EVENT_CANCEL, run the task, then commit its new state under the
 * DP scheduler lock. The loop exits and the thread terminates once the task
 * reaches COMPLETED or CANCEL state.
 *
 * @param p1 the task (struct task *) this thread serves
 * @param p2 unused
 * @param p3 unused
 */
void dp_thread_fn(void *p1, void *p2, void *p3)
{
	struct task *task = p1;

	(void)p2;
	(void)p3;
	struct task_dp_pdata *task_pdata = task->priv_data;
	struct scheduler_dp_data *dp_sch = NULL;
	unsigned int lock_key;
	enum task_state state;
	bool task_stop;

	/* NOTE(review): scheduler data is fetched for kernel threads only, so
	 * the recalculate step below is skipped for K_USER threads
	 */
	if (!(task->flags & K_USER))
		dp_sch = scheduler_get_data(SOF_SCHEDULE_DP);

	do {
		/*
		 * the thread is started immediately after creation, it will stop on event.
		 * Event will be signalled once the task is ready to process.
		 */
		k_event_wait_safe(task_pdata->event, DP_TASK_EVENT_PROCESS | DP_TASK_EVENT_CANCEL,
				  false, K_FOREVER);

		/* run the task payload (outside the lock) only if still RUNNING */
		if (task->state == SOF_TASK_STATE_RUNNING)
			state = task_run(task);
		else
			state = task->state; /* to avoid undefined variable warning */

		lock_key = scheduler_dp_lock(task->core);
		/*
		 * check if task is still running, may have been canceled by external call
		 * if not, set the state returned by run procedure
		 */
		if (task->state == SOF_TASK_STATE_RUNNING) {
			task->state = state;
			switch (state) {
			case SOF_TASK_STATE_RESCHEDULE:
				/* mark to reschedule, schedule time is already calculated */
				task->state = SOF_TASK_STATE_QUEUED;
				break;
			case SOF_TASK_STATE_CANCEL:
			case SOF_TASK_STATE_COMPLETED:
				/* remove from scheduling */
				list_item_del(&task->list);
				break;
			default:
				/* illegal state, serious defect, won't happen */
				k_panic();
			}
		}
		/* if true exit the while loop, terminate the thread */
		task_stop = task->state == SOF_TASK_STATE_COMPLETED ||
			    task->state == SOF_TASK_STATE_CANCEL;

		/* recalculate all DP tasks readiness and deadlines
		 * TODO: it should be for all tasks, for all cores
		 * currently its limited to current core only
		 */
		if (dp_sch)
			scheduler_dp_recalculate(dp_sch, false);

		scheduler_dp_unlock(lock_key);
	} while (!task_stop);

	/* call task_complete */
	if (task->state == SOF_TASK_STATE_COMPLETED)
		task_complete(task);
}
/* Create and register a DP task together with its Zephyr thread.
 *
 * Allocates the task + private-data pair as one coherent-memory block, a
 * separate aligned stack, creates the thread (suspended, K_FOREVER), pins it
 * to @core and finally starts it; the thread immediately blocks on its event
 * in dp_thread_fn().
 *
 * @param task       output: the newly created task
 * @param uid        task UUID
 * @param ops        task operations (run/complete/get_deadline)
 * @param mod        processing module the task serves
 * @param core       core the task is bound to; must equal the calling core
 * @param stack_size thread stack size in bytes
 * @param options    thread options (e.g. K_USER)
 *
 * @return 0 on success, negative error code on failure
 */
int scheduler_dp_task_init(struct task **task,
			   const struct sof_uuid_entry *uid,
			   const struct task_ops *ops,
			   struct processing_module *mod,
			   uint16_t core,
			   size_t stack_size,
			   uint32_t options)
{
	/* one allocation carries both the SOF task and its DP private data;
	 * it embeds Zephyr kernel objects, so it must live in shared,
	 * non-cached (coherent) memory
	 */
	struct {
		struct task task;
		struct task_dp_pdata pdata;
	} *mem;
	struct k_heap *const user_heap = mod->dev->drv->user_heap;
	void __sparse_cache *stack = NULL;
	struct task_dp_pdata *pdata;
	int ret;

	/* must be called on the same core the task will be binded to */
	assert(cpu_get_id() == core);

	mem = sof_heap_alloc(user_heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
			     sizeof(*mem), 0);
	if (!mem) {
		tr_err(&dp_tr, "memory alloc failed");
		return -ENOMEM;
	}
	memset(mem, 0, sizeof(*mem));
	pdata = &mem->pdata;

	/* the stack must be aligned and cached, so it gets its own allocation */
	stack = user_stack_allocate(stack_size, options);
	if (!stack) {
		tr_err(&dp_tr, "stack alloc failed");
		ret = -ENOMEM;
		goto err;
	}

	/* internal SOF task init */
	ret = schedule_task_init(&mem->task, uid, SOF_SCHEDULE_DP, 0, ops->run,
				 mod, core, options);
	if (ret < 0) {
		tr_err(&dp_tr, "schedule_task_init failed");
		goto err;
	}

	/* default to the embedded kernel objects; for K_USER threads these
	 * are replaced below with dynamically allocated ones
	 */
	pdata->event = &pdata->event_struct;
	pdata->thread = &pdata->thread_struct;
#ifdef CONFIG_USERSPACE
	if (options & K_USER) {
		pdata->event = k_object_alloc(K_OBJ_EVENT);
		if (!pdata->event) {
			tr_err(&dp_tr, "Event object allocation failed");
			ret = -ENOMEM;
			goto err;
		}
		pdata->thread = k_object_alloc(K_OBJ_THREAD);
		if (!pdata->thread) {
			tr_err(&dp_tr, "Thread object allocation failed");
			ret = -ENOMEM;
			goto err;
		}
	}
#endif /* CONFIG_USERSPACE */

	/* fill in the remaining task and private-data fields */
	mem->task.ops.complete = ops->complete;
	mem->task.ops.get_deadline = ops->get_deadline;
	mem->task.state = SOF_TASK_STATE_INIT;
	mem->task.core = core;
	mem->task.priv_data = pdata;
	pdata->p_stack = stack;
	pdata->mod = mod;
	*task = &mem->task;

	/* create the zephyr thread suspended (K_FOREVER); started explicitly below */
	pdata->thread_id = k_thread_create(pdata->thread, (__sparse_force void *)stack,
					   stack_size, dp_thread_fn, *task, NULL, NULL,
					   CONFIG_DP_THREAD_PRIORITY, (*task)->flags, K_FOREVER);
	k_thread_access_grant(pdata->thread_id, pdata->event);

	/* pin the thread to specific core */
	ret = k_thread_cpu_pin(pdata->thread_id, core);
	if (ret < 0) {
		tr_err(&dp_tr, "zephyr task pin to core failed");
		goto e_thread;
	}

#ifdef CONFIG_USERSPACE
	if ((*task)->flags & K_USER) {
		ret = user_memory_init_shared(pdata->thread_id, pdata->mod);
		if (ret < 0) {
			tr_err(&dp_tr, "user_memory_init_shared() failed");
			goto e_thread;
		}
	}
#endif /* CONFIG_USERSPACE */

	/* start the thread, it should immediately stop at an event */
	k_event_init(pdata->event);
	k_thread_start(pdata->thread_id);

	return 0;

e_thread:
	k_thread_abort(pdata->thread_id);
err:
	/* cleanup - free all allocated resources */
	if (user_stack_free((__sparse_force void *)stack))
		tr_err(&dp_tr, "user_stack_free failed!");
	/* k_object_free looks for a pointer in the list, any invalid value can be passed */
	k_object_free(mem->pdata.event);
	k_object_free(mem->pdata.thread);
	sof_heap_free(user_heap, mem);
	return ret;
}