/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * Interface to the hardware Fetch and Add Unit.
 */

#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__

extern u8 *cvmx_fau_regs_ptr;

/**
 * Initializes the FAU. On devices with FAU hardware this is a no-op.
 */
int cvmx_fau_init(void);

/**
 * Return the location of the emulated FAU register.
 */
static inline u8 *__cvmx_fau_sw_addr(int reg)
{
	if (cvmx_unlikely(!cvmx_fau_regs_ptr))
		cvmx_fau_init();
	return (cvmx_fau_regs_ptr + reg);
}

/**
 * Perform an atomic 64 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg64_t reg,
					       int64_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_fetch_and_add64(reg, value);

	return __atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
				  value, __ATOMIC_SEQ_CST);
}
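
/*
 * Usage sketch (illustrative, not part of the original header): keeping a
 * shared packet counter in an FAU register.  The register offset 0 and the
 * count_packet() helper are assumptions made for this example; real code
 * would normally obtain the offset from cvmx_fau64_alloc().
 *
 *	static const cvmx_fau_reg64_t packet_counter = 0;
 *
 *	static int64_t count_packet(void)
 *	{
 *		// Add 1 atomically across cores; the return value is the
 *		// counter contents before this update, so 0 means "first".
 *		return cvmx_fau_fetch_and_add64(packet_counter, 1);
 *	}
 */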

/**
 * Perform an atomic 32 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg32_t reg,
					       int32_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_fetch_and_add32(reg, value);

	reg ^= SWIZZLE_32;
	return __atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
				  value, __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 16 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Value of the register before the update
 */
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg16_t reg,
					       int16_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_fetch_and_add16(reg, value);

	reg ^= SWIZZLE_16;
	return __atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
				  value, __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 8 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Value of the register before the update
 */
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_fetch_and_add8(reg, value);

	reg ^= SWIZZLE_8;
	return __atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
				  value, __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 64 bit add after the current tag switch
 * completes
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg, int64_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_tagwait_fetch_and_add64(reg, value);

	/* The software fallback is not implemented yet; report an error. */
	return (cvmx_fau_tagwait64_t){ 1, 0 };
}
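
/*
 * Usage sketch (illustrative): consuming the tagwait variant.  The field
 * names .error and .value are assumed from the companion
 * cvmx_fau_tagwait64_t definition and may differ; counter_reg stands for a
 * previously allocated register.
 *
 *	cvmx_fau_tagwait64_t res;
 *
 *	res = cvmx_fau_tagwait_fetch_and_add64(counter_reg, 1);
 *	if (res.error)
 *		;	// timed out, the add did not complete
 *	else
 *		;	// res.value holds the pre-update register contents
 */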

/**
 * Perform an atomic 32 bit add after the current tag switch
 * completes
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg, int32_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_tagwait_fetch_and_add32(reg, value);

	/* The software fallback is not implemented yet; report an error. */
	return (cvmx_fau_tagwait32_t){ 1, 0 };
}

/**
 * Perform an atomic 16 bit add after the current tag switch
 * completes
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg, int16_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_tagwait_fetch_and_add16(reg, value);

	/* The software fallback is not implemented yet; report an error. */
	return (cvmx_fau_tagwait16_t){ 1, 0 };
}

/**
 * Perform an atomic 8 bit add after the current tag switch
 * completes
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU))
		return cvmx_hwfau_tagwait_fetch_and_add8(reg, value);

	/* The software fallback is not implemented yet; report an error. */
	return (cvmx_fau_tagwait8_t){ 1, 0 };
}

/**
 * Perform an async atomic 64 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void
cvmx_fau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, int64_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_fetch_and_add64(scraddr, reg, value);
		return;
	}
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}
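
/*
 * Usage sketch (illustrative): the async variants queue the add and return
 * immediately, so the update can overlap other work; the pre-update value is
 * read back from the scratchpad.  The scratch offset 0, the CVMX_SYNCIOBDMA
 * barrier and cvmx_scratch_read64() are assumptions taken from the companion
 * cvmx headers; counter_reg stands for a previously allocated register.
 *
 *	int64_t old;
 *
 *	cvmx_fau_async_fetch_and_add64(0, counter_reg, 1);
 *	// ... overlap unrelated work while the request is in flight ...
 *	CVMX_SYNCIOBDMA;		// wait for the scratchpad response
 *	old = (int64_t)cvmx_scratch_read64(0);
 */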

/**
 * Perform an async atomic 32 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void
cvmx_fau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, int32_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_fetch_and_add32(scraddr, reg, value);
		return;
	}
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an async atomic 16 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void
cvmx_fau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, int16_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_fetch_and_add16(scraddr, reg, value);
		return;
	}
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an async atomic 8 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void
cvmx_fau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_fetch_and_add8(scraddr, reg, value);
		return;
	}
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an async atomic 64 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set.
 *                Otherwise the value of the register before the update
 *                will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add64(u64 scraddr,
							  cvmx_fau_reg64_t reg,
							  int64_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_tagwait_fetch_and_add64(scraddr, reg, value);
		return;
	}

	/* Broken: the software fallback does not wait for the tag switch. */
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an async atomic 32 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set.
 *                Otherwise the value of the register before the update
 *                will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add32(u64 scraddr,
							  cvmx_fau_reg32_t reg,
							  int32_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_tagwait_fetch_and_add32(scraddr, reg, value);
		return;
	}
	/* Broken: the software fallback does not wait for the tag switch. */
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an async atomic 16 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set.
 *                Otherwise the value of the register before the update
 *                will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add16(u64 scraddr,
							  cvmx_fau_reg16_t reg,
							  int16_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_tagwait_fetch_and_add16(scraddr, reg, value);
		return;
	}
	/* Broken: the software fallback does not wait for the tag switch. */
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an async atomic 8 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set.
 *                Otherwise the value of the register before the update
 *                will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add8(u64 scraddr,
							 cvmx_fau_reg8_t reg,
							 int8_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_async_tagwait_fetch_and_add8(scraddr, reg, value);
		return;
	}
	/* Broken: the software fallback does not wait for the tag switch. */
	cvmx_scratch_write64(
		scraddr,
		__atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
				   value, __ATOMIC_SEQ_CST));
}

/**
 * Perform an atomic 64 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg64_t reg, int64_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_add64(reg, value);
		return;
	}
	/* Ignored fetch values should be optimized away */
	__atomic_add_fetch(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
			   __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 32 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg32_t reg, int32_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_add32(reg, value);
		return;
	}
	reg ^= SWIZZLE_32;
	/* Ignored fetch values should be optimized away */
	__atomic_add_fetch(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
			   __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 16 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg16_t reg, int16_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_add16(reg, value);
		return;
	}
	reg ^= SWIZZLE_16;
	/* Ignored fetch values should be optimized away */
	__atomic_add_fetch(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
			   __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 8 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_add8(reg, value);
		return;
	}
	reg ^= SWIZZLE_8;
	/* Ignored fetch values should be optimized away */
	__atomic_add_fetch(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
			   __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 64 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg64_t reg, int64_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_write64(reg, value);
		return;
	}
	__atomic_store_n(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
			 __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 32 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg32_t reg, int32_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_write32(reg, value);
		return;
	}
	reg ^= SWIZZLE_32;
	__atomic_store_n(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
			 __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 16 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg16_t reg, int16_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_write16(reg, value);
		return;
	}
	reg ^= SWIZZLE_16;
	__atomic_store_n(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
			 __ATOMIC_SEQ_CST);
}

/**
 * Perform an atomic 8 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)
{
	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		cvmx_hwfau_atomic_write8(reg, value);
		return;
	}
	reg ^= SWIZZLE_8;
	__atomic_store_n(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
			 __ATOMIC_SEQ_CST);
}
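
/*
 * Usage sketch (illustrative): the write variants are the usual way to
 * (re)initialize a register before the add variants update it; counter_reg
 * stands for a previously allocated register.
 *
 *	cvmx_fau_atomic_write64(counter_reg, 0);	// reset the counter
 *	cvmx_fau_atomic_add64(counter_reg, 1);		// bump it, result ignored
 */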

/** Allocates a 64-bit FAU register.
 * @param reserve base address to reserve
 * @return base address of the allocated FAU register
 */
int cvmx_fau64_alloc(int reserve);

/** Allocates a 32-bit FAU register.
 * @param reserve base address to reserve
 * @return base address of the allocated FAU register
 */
int cvmx_fau32_alloc(int reserve);

/** Allocates a 16-bit FAU register.
 * @param reserve base address to reserve
 * @return base address of the allocated FAU register
 */
int cvmx_fau16_alloc(int reserve);

/** Allocates an 8-bit FAU register.
 * @param reserve base address to reserve
 * @return base address of the allocated FAU register
 */
int cvmx_fau8_alloc(int reserve);

/** Frees the specified FAU register.
 * @param address base address of register to release.
 * @return 0 on success; -1 on failure
 */
int cvmx_fau_free(int address);
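
/*
 * Usage sketch (illustrative): a full allocate/use/free cycle.  The meaning
 * of the reserve argument (0 here, i.e. no specific base address requested)
 * and the negative-return-on-failure convention are assumptions; check the
 * allocator implementation before relying on them.
 *
 *	int reg = cvmx_fau64_alloc(0);
 *
 *	if (reg < 0)
 *		return;				// no FAU register available
 *	cvmx_fau_atomic_write64(reg, 0);	// start the counter at zero
 *	cvmx_fau_atomic_add64(reg, 1);
 *	cvmx_fau_free(reg);			// release it when done
 */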

/** Display the FAU register array.
 */
void cvmx_fau_show(void);

#endif /* __CVMX_FAU_H__ */