/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright 2025 Linaro Limited
 */

#include <linux/list.h>
#include <linux/types.h>
#include <setjmp.h>

#ifndef _UTHREAD_H_
#define _UTHREAD_H_

/**
 * DOC: Overview
 *
 * The uthread framework is a basic task scheduler that allows functions to run
 * "in parallel" on a single CPU core. The scheduling is cooperative, not
 * preemptive -- meaning that context switches from one task to another are
 * voluntary, via a call to uthread_schedule(). This characteristic makes thread
 * synchronization much easier, because a thread cannot be interrupted in the
 * middle of a critical section (reading from or writing to shared state, for
 * instance).
 *
 * CONFIG_UTHREAD in lib/Kconfig enables the uthread framework. When disabled,
 * the uthread_create() and uthread_schedule() functions may still be used, so
 * that differences between code written for the enabled and disabled cases can
 * be kept to a minimum.
 */
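
/*
 * A minimal usage sketch (illustrative only; the worker() entry point and the
 * 16 KiB stack size are made up for the example, and CONFIG_UTHREAD=y is
 * assumed):
 *
 *	static void worker(void *arg)
 *	{
 *		for (int i = 0; i < 100; i++) {
 *			// ... do one slice of work ...
 *			uthread_schedule();	// voluntarily yield the CPU
 *		}
 *	}
 *
 *	// Main thread: create two workers and drive them to completion
 *	uthread_create(NULL, worker, NULL, 16384, 0);
 *	uthread_create(NULL, worker, NULL, 16384, 0);
 *	while (uthread_schedule())
 *		;
 */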

/**
 * struct uthread - a thread object
 *
 * @fn: thread entry point
 * @arg: argument passed to the entry point when the thread is started
 * @ctx: context to resume execution of this thread (via longjmp())
 * @stack: initial stack pointer for the thread
 * @done: true once @fn has returned, false otherwise
 * @grp_id: user-supplied identifier for this thread and possibly others. A
 * thread can belong to zero or one group (not more), and a group may contain
 * any number of threads.
 * @list: link in the global scheduler list
 */
struct uthread {
        void (*fn)(void *arg);
        void *arg;
        jmp_buf ctx;
        void *stack;
        bool done;
        unsigned int grp_id;
        struct list_head list;
};

/**
 * Internal state of a struct uthread_mutex
 */
enum uthread_mutex_state {
        UTHREAD_MUTEX_UNLOCKED = 0,
        UTHREAD_MUTEX_LOCKED = 1
};

/**
 * Uthread mutex
 */
struct uthread_mutex {
        enum uthread_mutex_state state;
};

#define UTHREAD_MUTEX_INITIALIZER { .state = UTHREAD_MUTEX_UNLOCKED }

#ifdef CONFIG_UTHREAD

/**
 * uthread_create() - Create a uthread object and make it ready for execution
 *
 * Threads are automatically deleted when they return from their entry point.
 *
 * @uthr: a pointer to a user-allocated uthread structure to store information
 * about the new thread, or NULL to let the framework allocate and manage its
 * own structure.
 * @fn: the thread's entry point
 * @arg: argument passed to the thread's entry point
 * @stack_sz: stack size for the new thread (in bytes). The stack is allocated
 * on the heap.
 * @grp_id: an optional thread group ID that the new thread should belong to
 * (zero for no group)
 */
int uthread_create(struct uthread *uthr, void (*fn)(void *), void *arg,
                   size_t stack_sz, unsigned int grp_id);
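/*
 * For instance (an illustrative sketch; my_entry() and my_arg are placeholder
 * names, and the 8 KiB stack size is arbitrary):
 *
 *	static struct uthread uthr;
 *
 *	// Caller-provided struct uthread, 8 KiB stack, no thread group
 *	uthread_create(&uthr, my_entry, &my_arg, 8192, 0);
 *
 *	// Framework-allocated struct uthread, member of a new group
 *	unsigned int grp = uthread_grp_new_id();
 *	uthread_create(NULL, my_entry, &my_arg, 8192, grp);
 */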
/**
 * uthread_schedule() - yield the CPU to the next runnable thread
 *
 * This function is called either by the main thread or any secondary thread
 * (that is, any thread created via uthread_create()) to switch execution to
 * the next runnable thread.
 *
 * Return: true if a thread was scheduled, false if no runnable thread was found
 */
bool uthread_schedule(void);
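/*
 * A common pattern (sketch): the main thread repeatedly yields until no
 * runnable thread is left.
 *
 *	while (uthread_schedule())
 *		;
 */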
/**
 * uthread_grp_new_id() - return a new ID for a thread group
 *
 * Return: the new thread group ID
 */
unsigned int uthread_grp_new_id(void);
/**
 * uthread_grp_done() - test if all threads in a group are done
 *
 * @grp_id: the ID of the thread group that should be considered
 * Return: false if the group contains at least one runnable thread (i.e., one
 * thread whose entry point has not returned yet), true otherwise
 */
bool uthread_grp_done(unsigned int grp_id);
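/*
 * Thread groups make it possible to wait for a subset of threads. One possible
 * pattern (sketch; job() is a placeholder entry point and the stack size is
 * arbitrary):
 *
 *	unsigned int grp = uthread_grp_new_id();
 *
 *	uthread_create(NULL, job, NULL, 16384, grp);
 *	uthread_create(NULL, job, NULL, 16384, grp);
 *
 *	// Yield until every thread in the group has returned
 *	while (!uthread_grp_done(grp))
 *		uthread_schedule();
 */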

/**
 * uthread_mutex_lock() - lock a mutex
 *
 * If the mutex is available (i.e., not owned by any other thread), then it is
 * locked for use by the current thread. Otherwise the current thread blocks:
 * it enters a wait loop, scheduling other threads until the mutex becomes
 * unlocked.
 *
 * @mutex: pointer to the mutex to lock
 * Return: 0 on success, in which case the lock is owned by the calling thread.
 * != 0 otherwise (the lock is not owned by the calling thread).
 */
int uthread_mutex_lock(struct uthread_mutex *mutex);

/**
 * uthread_mutex_trylock() - lock a mutex if not currently locked
 *
 * Similar to uthread_mutex_lock(), except that it returns immediately if the
 * mutex is already locked.
 *
 * @mutex: pointer to the mutex to lock
 * Return: 0 on success, in which case the lock is owned by the calling thread.
 * EBUSY if the mutex is already locked by another thread. Any other non-zero
 * value on error.
 */
int uthread_mutex_trylock(struct uthread_mutex *mutex);

/**
 * uthread_mutex_unlock() - unlock a mutex
 *
 * The mutex is assumed to be owned by the calling thread on entry. On exit, it
 * is unlocked.
 *
 * @mutex: pointer to the mutex to unlock
 * Return: 0 on success, != 0 on error
 */
int uthread_mutex_unlock(struct uthread_mutex *mutex);
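/*
 * Putting the mutex API together (sketch; counter stands for some shared
 * state, and bump() is a placeholder):
 *
 *	static struct uthread_mutex lock = UTHREAD_MUTEX_INITIALIZER;
 *	static unsigned long counter;
 *
 *	static void bump(void)
 *	{
 *		uthread_mutex_lock(&lock);
 *		// Critical section; the mutex matters when this section may
 *		// call uthread_schedule() and thus let other threads run
 *		counter++;
 *		uthread_mutex_unlock(&lock);
 *	}
 *
 *	// Non-blocking variant: EBUSY means another thread holds the mutex
 *	if (uthread_mutex_trylock(&lock) == 0) {
 *		counter++;
 *		uthread_mutex_unlock(&lock);
 *	}
 */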

#else

static inline int uthread_create(struct uthread *uthr, void (*fn)(void *),
                                 void *arg, size_t stack_sz,
                                 unsigned int grp_id)
{
        fn(arg);
        return 0;
}

static inline bool uthread_schedule(void)
{
        return false;
}

static inline unsigned int uthread_grp_new_id(void)
{
        return 0;
}

static inline bool uthread_grp_done(unsigned int grp_id)
{
        return true;
}

/* These are macros for convenience on the caller side */
#define uthread_mutex_lock(_mutex) ({ 0; })
#define uthread_mutex_trylock(_mutex) ({ 0; })
#define uthread_mutex_unlock(_mutex) ({ 0; })

#endif /* CONFIG_UTHREAD */
#endif /* _UTHREAD_H_ */