diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c
index c0306f0f599e..c3f9f6e30211 100644
--- a/src/schedule/zephyr_dp_schedule.c
+++ b/src/schedule/zephyr_dp_schedule.c
@@ -300,8 +300,7 @@ static int scheduler_dp_task_free(void *data, struct task *task)
 	if (pdata->event != &pdata->event_struct)
 		k_object_free(pdata->event);
 #else
-	if (pdata->sem != &pdata->sem_struct)
-		k_object_free(pdata->sem);
+	k_object_free(pdata->sem);
 #endif
 	if (pdata->thread != &pdata->thread_struct)
 		k_object_free(pdata->thread);
diff --git a/src/schedule/zephyr_dp_schedule.h b/src/schedule/zephyr_dp_schedule.h
index 2ef6bde805c3..303403565506 100644
--- a/src/schedule/zephyr_dp_schedule.h
+++ b/src/schedule/zephyr_dp_schedule.h
@@ -40,7 +40,6 @@ struct task_dp_pdata {
 	uint32_t ll_cycles_to_start;	/* current number of LL cycles till delayed start */
 #if CONFIG_SOF_USERSPACE_APPLICATION
 	struct k_sem *sem;		/* pointer to semaphore for task scheduling */
-	struct k_sem sem_struct;	/* semaphore for task scheduling for kernel threads */
 	struct ipc4_flat *flat;
 	unsigned char pend_ipc;
 	unsigned char pend_proc;
diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index 65f4c1ee9de1..dc0e0fe1da3e 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -27,9 +27,7 @@ LOG_MODULE_DECLARE(dp_schedule, CONFIG_SOF_LOG_LEVEL);
 
 extern struct tr_ctx dp_tr;
 
-#if CONFIG_USERSPACE
 static struct k_mem_domain dp_mdom[CONFIG_CORE_COUNT];
-#endif
 
 /* Synchronization semaphore for the scheduler thread to wait for DP startup */
 #define DP_SYNC_INIT(i, _) Z_SEM_INITIALIZER(dp_sync[i], 0, 1)
@@ -49,7 +47,8 @@ struct ipc4_flat {
 			enum ipc4_pipeline_state state;
 			int n_sources;
 			int n_sinks;
-			void *source_sink[2 * CONFIG_MODULE_MAX_CONNECTIONS];
+			struct sof_source *source[CONFIG_MODULE_MAX_CONNECTIONS];
+			struct sof_sink *sink[CONFIG_MODULE_MAX_CONNECTIONS];
 		} pipeline_state;
 	};
 };
@@ -84,13 +83,12 @@ static int ipc_thread_flatten(unsigned int cmd, const union scheduler_dp_thread_
 		flat->pipeline_state.n_sources = param->pipeline_state.n_sources;
 		flat->pipeline_state.n_sinks = param->pipeline_state.n_sinks;
-		/* Up to 2 * CONFIG_MODULE_MAX_CONNECTIONS */
-		memcpy(flat->pipeline_state.source_sink, param->pipeline_state.sources,
+		/* Up to CONFIG_MODULE_MAX_CONNECTIONS pointers in each array */
+		memcpy(flat->pipeline_state.source, param->pipeline_state.sources,
 		       flat->pipeline_state.n_sources *
-		       sizeof(flat->pipeline_state.source_sink[0]));
-		memcpy(flat->pipeline_state.source_sink + flat->pipeline_state.n_sources,
-		       param->pipeline_state.sinks,
+		       sizeof(flat->pipeline_state.source[0]));
+		memcpy(flat->pipeline_state.sink, param->pipeline_state.sinks,
 		       flat->pipeline_state.n_sinks *
-		       sizeof(flat->pipeline_state.source_sink[0]));
+		       sizeof(flat->pipeline_state.sink[0]));
 
 	}
 }
@@ -140,11 +138,10 @@ static void ipc_thread_unflatten_run(struct processing_module *pmod, struct ipc4
 			break;
 		case COMP_TRIGGER_PREPARE:
 			flat->ret = ops->prepare(pmod,
-						 (struct sof_source **)flat->pipeline_state.source_sink,
-						 flat->pipeline_state.n_sources,
-						 (struct sof_sink **)(flat->pipeline_state.source_sink +
-								      flat->pipeline_state.n_sources),
-						 flat->pipeline_state.n_sinks);
+						 flat->pipeline_state.source,
+						 flat->pipeline_state.n_sources,
+						 flat->pipeline_state.sink,
+						 flat->pipeline_state.n_sinks);
 		}
 	}
 }
@@ -198,7 +195,7 @@ int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd,
 }
 
 /* Go through all DP tasks and recalculate their readiness and deadlines
- * NOT REENTRANT, should be called with scheduler_dp_lock()
+ * NOT REENTRANT, called with scheduler_dp_lock() held
  */
 void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_run)
 {
@@ -387,7 +384,6 @@ void dp_thread_fn(void *p1, void *p2, void *p3)
  */
 void scheduler_dp_domain_free(struct processing_module *pmod)
 {
-#if CONFIG_USERSPACE
 	unsigned int core = pmod->dev->task->core;
 	llext_manager_rm_domain(pmod->dev->ipc_config.id, dp_mdom +
 				core);
@@ -396,9 +392,9 @@ void scheduler_dp_domain_free(struct processing_module *pmod)
 
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
-#endif
 }
 
+/* Called only in IPC context */
 int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 			   const struct task_ops *ops, struct processing_module *mod,
 			   uint16_t core, size_t stack_size, uint32_t options)
@@ -455,30 +451,22 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 	struct task_dp_pdata *pdata = &task_memory->pdata;
 
-	/* Point to event_struct event for kernel threads synchronization */
-	/* It will be overwritten for K_USER threads to dynamic ones. */
-	pdata->sem = &pdata->sem_struct;
-	pdata->thread = &pdata->thread_struct;
 	pdata->flat = &task_memory->flat;
 
-#ifdef CONFIG_USERSPACE
-	if (options & K_USER) {
-		pdata->sem = k_object_alloc(K_OBJ_SEM);
-		if (!pdata->sem) {
-			tr_err(&dp_tr, "Event object allocation failed");
-			ret = -ENOMEM;
-			goto e_stack;
-		}
+	pdata->sem = k_object_alloc(K_OBJ_SEM);
+	if (!pdata->sem) {
+		tr_err(&dp_tr, "Event object allocation failed");
+		ret = -ENOMEM;
+		goto e_stack;
+	}
 
-		pdata->thread = k_object_alloc(K_OBJ_THREAD);
-		if (!pdata->thread) {
-			tr_err(&dp_tr, "Thread object allocation failed");
-			ret = -ENOMEM;
-			goto e_kobj;
-		}
-		memset(&pdata->thread->arch, 0, sizeof(pdata->thread->arch));
+	pdata->thread = k_object_alloc(K_OBJ_THREAD);
+	if (!pdata->thread) {
+		tr_err(&dp_tr, "Thread object allocation failed");
+		ret = -ENOMEM;
+		goto e_kobj;
 	}
-#endif /* CONFIG_USERSPACE */
+	memset(&pdata->thread->arch, 0, sizeof(pdata->thread->arch));
 
 	/* success, fill the structures */
 	pdata->p_stack = p_stack;
@@ -503,7 +491,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		goto e_thread;
 	}
 
-#if CONFIG_USERSPACE
 	k_thread_access_grant(pdata->thread_id, pdata->sem, &dp_sync[core]);
 	scheduler_dp_grant(pdata->thread_id, core);
 
@@ -546,7 +533,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		tr_err(&dp_tr, "failed to add thread to domain %d", ret);
 		goto e_dom;
 	}
-#endif /* CONFIG_USERSPACE */
 
 	/* start the thread, it should immediately stop at the semaphore */
 	k_sem_init(pdata->sem, 0, 1);
@@ -554,18 +540,14 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 	return 0;
 
-#ifdef CONFIG_USERSPACE
 e_dom:
 	scheduler_dp_domain_free(mod);
-#endif
 e_thread:
 	k_thread_abort(pdata->thread_id);
-#ifdef CONFIG_USERSPACE
 e_kobj:
 	/* k_object_free looks for a pointer in the list, any invalid value can be passed */
 	k_object_free(pdata->thread);
 	k_object_free(pdata->sem);
-#endif
 e_stack:
 	user_stack_free(p_stack);
 e_tmem: