/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2020 Stefan Roese <sr@denx.de>
 */

#ifndef __CVMX_REGS_H__
#define __CVMX_REGS_H__

#include <log.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <mach/cvmx-address.h>

/* General defines */
#define CVMX_MAX_CORES		48
/* Maximum # of bits to define core in node */
#define CVMX_NODE_NO_SHIFT	7
#define CVMX_NODE_BITS		2	/* Number of bits to define a node */
#define CVMX_MAX_NODES		(1 << CVMX_NODE_BITS)
#define CVMX_NODE_MASK		(CVMX_MAX_NODES - 1)
#define CVMX_NODE_IO_SHIFT	36
#define CVMX_NODE_MEM_SHIFT	40
#define CVMX_NODE_IO_MASK	((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)

#define CVMX_MIPS_MAX_CORE_BITS	10	/* Maximum # of bits to define cores */
#define CVMX_MIPS_MAX_CORES	(1 << CVMX_MIPS_MAX_CORE_BITS)

#define MAX_CORE_TADS		8

#define CASTPTR(type, v)	((type *)(long)(v))
#define CAST64(v)		((long long)(long)(v))

/* Regs */
#define CVMX_CIU3_NMI		0x0001010000000160ULL
#define CVMX_CIU3_ISCX_W1C(x)	(0x0001010090000000ull + ((x) & 1048575) * 8)

#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE	GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN	BIT_ULL(31)

#define CVMX_MIO_BOOT_LOC_ADR	0x0001180000000090ULL
#define MIO_BOOT_LOC_ADR_ADR	GENMASK_ULL(7, 3)

#define CVMX_MIO_BOOT_LOC_DAT	0x0001180000000098ULL

#define CVMX_MIO_FUS_DAT2	0x0001180000001410ULL
#define MIO_FUS_DAT2_NOCRYPTO	BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL	BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO	BIT_ULL(34)

#define CVMX_MIO_FUS_RCMD	0x0001180000001500ULL
#define MIO_FUS_RCMD_ADDR	GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND	BIT_ULL(12)
#define MIO_FUS_RCMD_DAT	GENMASK_ULL(23, 16)

#define CVMX_RNM_CTL_STATUS	0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL	BIT_ULL(9)

/* IOBDMA/LMTDMA IO addresses */
#define CVMX_LMTDMA_ORDERED_IO_ADDR	0xffffffffffffa400ull
#define CVMX_IOBDMA_ORDERED_IO_ADDR	0xffffffffffffa200ull

/* Turn the variable name into a string */
#define CVMX_TMP_STR(x)		CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x)	#x
#define VASTR(...)		#__VA_ARGS__

#define CVMX_PKO_LMTLINE	2ull
#define CVMX_SCRATCH_BASE	(-32768l)	/* 0xffffffffffff8000 */

#define COP0_CVMMEMCTL		$11,7	/* Cavium memory control */

#define CVMX_RDHWR(result, regstr) \
	asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_RDHWRNV(result, regstr) \
	asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
	asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))
#define CVMX_MF_COP0(val, cop0) \
	asm("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d"(val))
#define CVMX_MT_COP0(val, cop0) \
	asm("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d"(val))

#define CVMX_MF_CVM_MEM_CTL(val)	CVMX_MF_COP0(val, COP0_CVMMEMCTL)
#define CVMX_MT_CVM_MEM_CTL(val)	CVMX_MT_COP0(val, COP0_CVMMEMCTL)

#define CVMX_SYNC	asm volatile("sync\n" : : : "memory")
#define CVMX_SYNCW	asm volatile("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNCS	asm volatile("syncs\n" : : : "memory")
#define CVMX_SYNCWS	asm volatile("syncws\n" : : : "memory")

#define CVMX_CACHE_LINE_SIZE	128	// In bytes
#define CVMX_CACHE_LINE_MASK	(CVMX_CACHE_LINE_SIZE - 1)	// In bytes
#define CVMX_CACHE_LINE_ALIGNED	__aligned(CVMX_CACHE_LINE_SIZE)

#define CVMX_SYNCIOBDMA		asm volatile("synciobdma" : : : "memory")

#define CVMX_MF_CHORD(dest)	CVMX_RDHWR(dest, 30)

#define CVMX_PREFETCH0(address)		CVMX_PREFETCH(address, 0)
#define CVMX_PREFETCH128(address)	CVMX_PREFETCH(address, 128)

/** A normal prefetch */
#define CVMX_PREFETCH(address, offset)	CVMX_PREFETCH_PREF0(address, offset)

/** Normal prefetches that use the pref instruction */
#define CVMX_PREFETCH_PREFX(X, address, offset) \
	asm volatile("pref %[type], %[off](%[rbase])" : : [rbase] "d"(address), [off] "I"(offset), [type] "n"(X))
#define CVMX_PREFETCH_PREF0(address, offset) \
	CVMX_PREFETCH_PREFX(0, address, offset)

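/*
 * Example (illustrative): prefetch the first two cache lines of a buffer
 * before walking it:
 *
 *	CVMX_PREFETCH0(buf);
 *	CVMX_PREFETCH128(buf);
 */
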
/*
 * The macros cvmx_likely and cvmx_unlikely use the GCC builtin
 * __builtin_expect() to provide branch probability hints for a
 * conditional. For example, an "if" statement whose body will almost
 * always be executed should be written as "if (cvmx_likely(...))".
 * If the "else" branch is the more probable one, use
 * "if (cvmx_unlikely(...))".
 */
#define cvmx_likely(x)		__builtin_expect(!!(x), 1)
#define cvmx_unlikely(x)	__builtin_expect(!!(x), 0)

#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, to_us)	\
	({								\
		int result;						\
		do {							\
			u64 done = get_timer(0);			\
			type c;						\
			while (1) {					\
				c.u64 = csr_rd(address);		\
				if ((c.s.field)op(value)) {		\
					result = 0;			\
					break;				\
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1;			\
					break;				\
				} else					\
					udelay(100);			\
			}						\
		} while (0);						\
		result;							\
	})

#define CVMX_WAIT_FOR_FIELD64_NODE(node, address, type, field, op, value, to_us) \
	({								\
		int result;						\
		do {							\
			u64 done = get_timer(0);			\
			type c;						\
			while (1) {					\
				c.u64 = csr_rd(address);		\
				if ((c.s.field)op(value)) {		\
					result = 0;			\
					break;				\
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1;			\
					break;				\
				} else					\
					udelay(100);			\
			}						\
		} while (0);						\
		result;							\
	})
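
/*
 * Example (illustrative): poll a CSR until a field reaches a value or the
 * timeout (in microseconds) expires. The union type and field name below
 * are assumptions made for the sake of the example; any CSR union with a
 * .u64 member and an .s.<field> bitfield can be used:
 *
 *	if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_FUS_RCMD, union cvmx_mio_fus_rcmd,
 *				  pend, ==, 0, 10000))
 *		debug("fuse read did not complete\n");
 */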

/* ToDo: Currently only node = 0 supported */
#define cvmx_get_node_num()	0

static inline u64 csr_rd_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread64(base);
}

static inline u32 csr_rd32_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread32(base);
}

static inline u64 csr_rd(u64 addr)
{
	return csr_rd_node(0, addr);
}

static inline u32 csr_rd32(u64 addr)
{
	return csr_rd32_node(0, addr);
}

static inline void csr_wr_node(int node, u64 addr, u64 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite64(val, base);
}

static inline void csr_wr32_node(int node, u64 addr, u32 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite32(val, base);
}

static inline void csr_wr(u64 addr, u64 val)
{
	csr_wr_node(0, addr, val);
}

static inline void csr_wr32(u64 addr, u32 val)
{
	csr_wr32_node(0, addr, val);
}

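/*
 * Example (illustrative): read a fuse CSR defined above and test one of its
 * fields using the bit masks from this header:
 *
 *	u64 dat2 = csr_rd(CVMX_MIO_FUS_DAT2);
 *	bool no_crypto = !!(dat2 & MIO_FUS_DAT2_NOCRYPTO);
 */
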
/*
 * We need to use the volatile access here, otherwise the IO accessor
 * functions might swap the bytes
 */
static inline u64 cvmx_read64_uint64(u64 addr)
{
	return *(volatile u64 *)addr;
}

static inline s64 cvmx_read64_int64(u64 addr)
{
	return *(volatile s64 *)addr;
}

static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	*(volatile u64 *)addr = val;
}

static inline void cvmx_write64_int64(u64 addr, s64 val)
{
	*(volatile s64 *)addr = val;
}

static inline u32 cvmx_read64_uint32(u64 addr)
{
	return *(volatile u32 *)addr;
}

static inline s32 cvmx_read64_int32(u64 addr)
{
	return *(volatile s32 *)addr;
}

static inline void cvmx_write64_uint32(u64 addr, u32 val)
{
	*(volatile u32 *)addr = val;
}

static inline void cvmx_write64_int32(u64 addr, s32 val)
{
	*(volatile s32 *)addr = val;
}

static inline void cvmx_write64_int16(u64 addr, s16 val)
{
	*(volatile s16 *)addr = val;
}

static inline void cvmx_write64_uint16(u64 addr, u16 val)
{
	*(volatile u16 *)addr = val;
}

static inline void cvmx_write64_int8(u64 addr, int8_t val)
{
	*(volatile int8_t *)addr = val;
}

static inline void cvmx_write64_uint8(u64 addr, u8 val)
{
	*(volatile u8 *)addr = val;
}

static inline s16 cvmx_read64_int16(u64 addr)
{
	return *(volatile s16 *)addr;
}

static inline u16 cvmx_read64_uint16(u64 addr)
{
	return *(volatile u16 *)addr;
}

static inline int8_t cvmx_read64_int8(u64 addr)
{
	return *(volatile int8_t *)addr;
}

static inline u8 cvmx_read64_uint8(u64 addr)
{
	return *(volatile u8 *)addr;
}

static inline void cvmx_send_single(u64 data)
{
	cvmx_write64_uint64(CVMX_IOBDMA_ORDERED_IO_ADDR, data);
}

/**
 * Perform a 64-bit write to an IO address
 *
 * @param io_addr	I/O address to write to
 * @param val		64-bit value to write
 */
static inline void cvmx_write_io(u64 io_addr, u64 val)
{
	cvmx_write64_uint64(io_addr, val);
}

/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @param major_did	5 bit major did
 * @param sub_did	3 bit sub did
 * Return: I/O base address
 */
static inline u64 cvmx_build_io_address(u64 major_did, u64 sub_did)
{
	return ((0x1ull << 48) | (major_did << 43) | (sub_did << 40));
}
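
/*
 * Worked example (illustrative): cvmx_build_io_address(1, 2) evaluates to
 * (1ull << 48) | (1ull << 43) | (2ull << 40) = 0x00010a0000000000.
 */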

/**
 * Builds a bit mask given the required size in bits.
 *
 * @param bits	Number of bits in the mask
 * Return: The mask
 */
static inline u64 cvmx_build_mask(u64 bits)
{
	if (bits == 64)
		return -1;

	return ~((~0x0ull) << bits);
}
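
/*
 * Worked examples: cvmx_build_mask(8) == 0xff, and cvmx_build_mask(64) is
 * special-cased above to return all-ones, avoiding an undefined 64-bit shift.
 */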

/**
 * Extract bits out of a number
 *
 * @param input	Number to extract from
 * @param lsb	Starting bit, least significant (0-63)
 * @param width	Width in bits (1-64)
 *
 * Return: Extracted number
 */
static inline u64 cvmx_bit_extract(u64 input, int lsb, int width)
{
	u64 result = input >> lsb;

	result &= cvmx_build_mask(width);

	return result;
}
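
/*
 * Worked example: cvmx_bit_extract(0x12345678, 8, 16) == 0x3456
 * (shift right by 8 bits, then mask the result down to 16 bits).
 */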

/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39,24,value)
 * <pre>
 * 6       5       4       3       3       2       1
 * 3       5       7       9       1       3       5       7      0
 * +-------+-------+-------+-------+-------+-------+-------+------+
 * 000000000000000000000000___________value000000000000000000000000
 * </pre>
 *
 * @param high_bit	Highest bit value can occupy (inclusive) 0-63
 * @param low_bit	Lowest bit value can occupy inclusive 0-high_bit
 * @param value		Value to use
 * Return: Value masked and shifted
 */
static inline u64 cvmx_build_bits(u64 high_bit, u64 low_bit, u64 value)
{
	return ((value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit);
}
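
/*
 * Worked example: cvmx_build_bits(39, 24, 0xab) == 0x00000000ab000000
 * (the value is masked to the 16-bit field width and shifted up to bit 24).
 */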

static inline u64 cvmx_mask_to_localaddr(u64 addr)
{
	return (addr & 0xffffffffff);
}

static inline u64 cvmx_addr_on_node(u64 node, u64 addr)
{
	return (node << 40) | cvmx_mask_to_localaddr(addr);
}

static inline void *cvmx_phys_to_ptr(u64 addr)
{
	return (void *)CKSEG0ADDR(addr);
}

static inline u64 cvmx_ptr_to_phys(void *ptr)
{
	return virt_to_phys(ptr);
}

/**
 * Number of the Core on which the program is currently running.
 *
 * Return: core number
 */
static inline unsigned int cvmx_get_core_num(void)
{
	unsigned int core_num;

	CVMX_RDHWRNV(core_num, 0);
	return core_num;
}

/**
 * Node-local number of the core on which the program is currently running.
 *
 * Return: core number on local node
 */
static inline unsigned int cvmx_get_local_core_num(void)
{
	unsigned int core_num, core_mask;

	CVMX_RDHWRNV(core_num, 0);
	/* note that MAX_CORES may not be power of 2 */
	core_mask = (1 << CVMX_NODE_NO_SHIFT) - 1;

	return core_num & core_mask;
}

/**
 * Given a CSR address return the node number of that address
 *
 * @param addr	Address to extract node number from
 *
 * @return node number
 */
static inline u8 cvmx_csr_addr_to_node(u64 addr)
{
	return (addr >> CVMX_NODE_IO_SHIFT) & CVMX_NODE_MASK;
}

/**
 * Strip the node address bits from a CSR address
 *
 * @param addr	CSR address to strip the node bits from
 *
 * @return CSR address with the node bits set to zero
 */
static inline u64 cvmx_csr_addr_strip_node(u64 addr)
{
	return addr & ~((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT);
}

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @param val	32 bit value to count set bits in
 *
 * Return: Number of bits set
 */
static inline u32 cvmx_pop(u32 val)
{
	u32 pop;

	CVMX_POP(pop, val);

	return pop;
}
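
/*
 * Worked example: cvmx_pop(0xf0) == 4, cvmx_pop(0) == 0.
 */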

#define cvmx_read_csr_node(node, addr)		csr_rd(addr)
#define cvmx_write_csr_node(node, addr, val)	csr_wr(addr, val)

#define cvmx_printf	printf
#define cvmx_vprintf	vprintf

/* Use common debug macros */
#define cvmx_warn	debug
#define cvmx_warn_if	debug_cond

/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location,
 * and returns previous value.
 *
 * Memory access ordering is enforced before/after the atomic operation,
 * so no additional 'sync' instructions are required.
 *
 * @param ptr	address in memory to add incr to
 * @param incr	amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline int32_t cvmx_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
{
	int32_t val;

	val = *ptr;
	*ptr += incr;
	return val;
}

/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location.
 *
 * This version does not perform 'sync' operations to enforce memory
 * operation ordering. This should only be used when there are no memory
 * ordering constraints. (This should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr	address in memory to add incr to
 * @param incr	amount to increment memory location by (signed)
 */
static inline void cvmx_atomic_add32_nosync(int32_t *ptr, int32_t incr)
{
	*ptr += incr;
}
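
/*
 * Note: as implemented above, both "atomic" helpers are plain read-modify-
 * write sequences without LL/SC or 'sync' barriers, so they are only safe
 * as long as no other core or DMA agent updates the location concurrently.
 */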

#endif /* __CVMX_REGS_H__ */