#ifndef _LINUX_COMPAT_H_
#define _LINUX_COMPAT_H_

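/*
 * Compatibility shims for code imported from the Linux kernel: most
 * kernel facilities are mapped onto U-Boot equivalents (malloc(),
 * get_timer(), ...) or reduced to no-ops, since U-Boot runs
 * single-threaded without a scheduler.
 */
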
#include <console.h>
#include <cyclic.h>
#include <log.h>
#include <malloc.h>
#include <time.h>

#include <asm/processor.h>

#include <linux/types.h>
#include <linux/err.h>
#include <linux/kernel.h>

#ifdef CONFIG_XEN
#include <xen/events.h>
#endif

struct unused {};
typedef struct unused unused_t;

struct p_current {
	int pid;
};

extern struct p_current *current;

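/*
 * GFP allocation flags are accepted for source compatibility only;
 * apart from __GFP_ZERO (see kzalloc()) they have no effect here.
 */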
#define GFP_ATOMIC ((gfp_t) 0)
#define GFP_KERNEL ((gfp_t) 0)
#define GFP_NOFS ((gfp_t) 0)
#define GFP_USER ((gfp_t) 0)
#define __GFP_NOWARN ((gfp_t) 0)
#define __GFP_ZERO ((__force gfp_t)0x8000u)	/* Return zeroed page on success */

void *kmalloc(size_t size, int flags);

static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags | __GFP_ZERO);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
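
/*
 * Minimal usage sketch (hypothetical caller, not part of this API):
 * the helpers above mirror the Linux interfaces, so imported code can
 * allocate and free exactly as it would in the kernel.
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	u32 *tab = kcalloc(16, sizeof(*tab), GFP_KERNEL);
 *
 *	if (!f || !tab) {
 *		kfree(tab);
 *		kfree(f);
 *		return -ENOMEM;
 *	}
 *	...
 *	kfree(tab);
 *	kfree(f);
 */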

#define vmalloc(size) kmalloc(size, 0)
#define __vmalloc(size, flags, pgsz) kmalloc(size, flags)
static inline void *vzalloc(unsigned long size)
{
	return kzalloc(size, 0);
}
static inline void kfree(const void *block)
{
	free((void *)block);
}
static inline void vfree(const void *addr)
{
	free((void *)addr);
}

struct kmem_cache { int sz; };

struct kmem_cache *get_mem(int element_sz);
#define kmem_cache_create(a, sz, c, d, e) get_mem(sz)
void *kmem_cache_alloc(struct kmem_cache *obj, int flag);
static inline void kmem_cache_free(struct kmem_cache *cachep, void *obj)
{
	free(obj);
}
static inline void kmem_cache_destroy(struct kmem_cache *cachep)
{
	free(cachep);
}

#define DECLARE_WAITQUEUE(...)	do { } while (0)
#define add_wait_queue(...)	do { } while (0)
#define remove_wait_queue(...)	do { } while (0)

#ifndef CONFIG_XEN
#define eventchn_poll()
#endif

#define __wait_event_timeout(condition, timeout, ret)			\
({									\
	ulong __ret = ret; /* explicit shadow */			\
	ulong start = get_timer(0);					\
	for (;;) {							\
		eventchn_poll();					\
		if (condition) {					\
			__ret = 1;					\
			break;						\
		}							\
		if ((get_timer(start) > timeout) || ctrlc()) {		\
			__ret = 0;					\
			break;						\
		}							\
		cpu_relax();						\
	}								\
	__ret;								\
})

/**
 * wait_event_timeout() - Wait until the event occurs or the timeout elapses.
 * @wq_head: The wait queue to wait on (unused in this implementation).
 * @condition: Expression for the event to wait for.
 * @timeout: Maximum waiting time, in milliseconds.
 *
 * Wait until @condition evaluates to %true (success) or @timeout
 * elapses while it is still %false.
 *
 * Return:
 * 0 - if @condition still evaluated to %false after @timeout elapsed
 * 1 - if @condition evaluated to %true
 */
#define wait_event_timeout(wq_head, condition, timeout)			\
({									\
	ulong __ret;							\
	if (condition)							\
		__ret = 1;						\
	else								\
		__ret = __wait_event_timeout(condition, timeout, __ret);\
	__ret;								\
})

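/*
 * Usage sketch (hypothetical driver code): the wait-queue argument is
 * never touched, the timeout is in milliseconds and the wait also
 * aborts on Ctrl-C.
 *
 *	int dummy_wq;
 *
 *	if (!wait_event_timeout(dummy_wq, readl(&regs->status) & 1, 100))
 *		return -ETIMEDOUT;
 */
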
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))

/* This is also defined in ARMv8's mmu.h */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

/* drivers/char/random.c */
#define get_random_bytes(...)

/* include/linux/leds.h */
struct led_trigger {};

#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x;
enum led_brightness {
	LED_OFF = 0,
	LED_HALF = 127,
	LED_FULL = 255,
};

static inline void led_trigger_register_simple(const char *name,
					       struct led_trigger **trigger) {}
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
				     enum led_brightness event) {}

/* uapi/linux/limits.h */
#define XATTR_LIST_MAX 65536	/* size of extended attribute namelist (64k) */

/**
 * The type used for indexing onto a disc or disc partition.
 *
 * Linux always considers sectors to be 512 bytes long, independent
 * of the device's real block size.
 *
 * blkcnt_t is the type of the inode's block count.
 */
#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif

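/*
 * Example (hypothetical): given the fixed 512-byte sector size noted
 * above, a byte offset follows from a simple shift:
 *
 *	loff_t byte_off = (loff_t)sect << 9;
 */
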
/* module */
#define THIS_MODULE 0
#define try_module_get(...) 1
#define module_put(...) do { } while (0)
#define module_init(...)
#define module_exit(...)
#define EXPORT_SYMBOL(...)
#define EXPORT_SYMBOL_GPL(...)
#define module_param(...)
#define module_param_call(...)
#define MODULE_PARM_DESC(...)
#define MODULE_VERSION(...)
#define MODULE_DESCRIPTION(...)
#define MODULE_AUTHOR(...)
#define MODULE_LICENSE(...)
#define MODULE_ALIAS(...)
#define __module_get(...)

/* character device */
#define MKDEV(...) 0
#define MAJOR(dev) 0
#define MINOR(dev) 0

#define alloc_chrdev_region(...) 0
#define unregister_chrdev_region(...)

#define class_create(...) __builtin_return_address(0)
#define class_create_file(...) 0
#define class_register(...) 0
#define class_unregister(...)
#define class_remove_file(...)
#define class_destroy(...)
#define misc_register(...) 0
#define misc_deregister(...)

#define blocking_notifier_call_chain(...) 0

#define __initdata
#define late_initcall(...)

#define dev_set_name(...) do { } while (0)
#define device_register(...) 0
#define device_unregister(...)
#define volume_sysfs_init(...) 0
#define volume_sysfs_close(...) do { } while (0)

#define init_waitqueue_head(...) do { } while (0)
#define wait_event_interruptible(...) 0
#define wake_up_interruptible(...) do { } while (0)
#define dump_stack(...) do { } while (0)

#define task_pid_nr(x) 0
#define set_freezable(...) do { } while (0)
#define try_to_freeze(...) 0
#define set_current_state(...) do { } while (0)
#define kthread_should_stop(...) 0

#define setup_timer(timer, func, data) do {} while (0)
#define del_timer_sync(timer) do {} while (0)
#define schedule_work(work) do {} while (0)
#define INIT_WORK(work, fun) do {} while (0)

struct work_struct {};

unsigned long copy_from_user(void *dest, const void *src,
			     unsigned long count);

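/*
 * The locking, mutex and rw-semaphore APIs below compile to no-ops:
 * U-Boot runs single-threaded, so no synchronisation is needed.
 */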
typedef unused_t spinlock_t;
typedef int wait_queue_head_t;

#define spin_lock_init(lock) do {} while (0)
#define spin_lock(lock) do {} while (0)
#define spin_unlock(lock) do {} while (0)
#define spin_lock_irqsave(lock, flags) do {} while (0)
#define spin_unlock_irqrestore(lock, flags) do { flags = 0; } while (0)

#define DEFINE_MUTEX(...)
#define mutex_init(...)
#define mutex_lock(...)
#define mutex_unlock(...)

#define init_rwsem(...) do { } while (0)
#define down_read(...) do { } while (0)
#define down_write(...) do { } while (0)
#define down_write_trylock(...) 1
#define up_read(...) do { } while (0)
#define up_write(...) do { } while (0)

#define cond_resched() do { } while (0)
#define yield() do { } while (0)

#define __init
#define __exit
#define __devinit
#define __devinitdata
#define __devinitconst
#define __initconst
#define __initdata

#define kthread_create(...) __builtin_return_address(0)
#define kthread_stop(...) do { } while (0)
#define wake_up_process(...) do { } while (0)

struct rw_semaphore { int i; };
#define down_write(...) do { } while (0)
#define up_write(...) do { } while (0)
#define down_read(...) do { } while (0)
#define up_read(...) do { } while (0)
struct device {
	struct device *parent;
	struct class *class;
	dev_t devt;	/* dev_t, creates the sysfs "dev" */
	void (*release)(struct device *dev);
	/* This is used from drivers/usb/musb-new subsystem only */
	void *driver_data;	/* data private to the driver */
	void *device_data;	/* data private to the device */
};
struct mutex { int i; };
struct kernel_param { int i; };

struct cdev {
	int owner;
	dev_t dev;
};
#define cdev_init(...) do { } while (0)
#define cdev_add(...) 0
#define cdev_del(...) do { } while (0)

#define prandom_u32(...) 0

typedef struct {
	uid_t val;
} kuid_t;

typedef struct {
	gid_t val;
} kgid_t;

/* from include/linux/types.h */

/**
 * struct callback_head - callback structure for use with RCU and task_work
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};
#define rcu_head callback_head
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/* from include/linux/writeback.h */
/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;	/* Write this many pages, and decrement
				   this for each page written */
	long pages_skipped;	/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange. The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
};

void *kmemdup(const void *src, size_t len, gfp_t gfp);

typedef int irqreturn_t;

struct timer_list {};
struct notifier_block {};

typedef unsigned long dmaaddr_t;

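/*
 * Runtime power management and IRQ registration are not supported;
 * the calls below are accepted and ignored so imported drivers still
 * build.
 */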
#define pm_runtime_get_sync(dev) do {} while (0)
#define pm_runtime_put(dev) do {} while (0)
#define pm_runtime_put_sync(dev) do {} while (0)
#define pm_runtime_use_autosuspend(dev) do {} while (0)
#define pm_runtime_set_autosuspend_delay(dev, delay) do {} while (0)
#define pm_runtime_enable(dev) do {} while (0)

#define IRQ_NONE 0
#define IRQ_HANDLED 1
#define IRQ_WAKE_THREAD 2

#define dev_set_drvdata(dev, data) do {} while (0)

#define enable_irq(...)
#define disable_irq(...)
#define disable_irq_wake(irq) do {} while (0)
#define enable_irq_wake(irq) -EINVAL
#define free_irq(irq, data) do {} while (0)
#define request_irq(nr, f, flags, nm, data) 0

#endif