/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2020 Stefan Roese <sr@denx.de>
 */

#ifndef __CVMX_REGS_H__
#define __CVMX_REGS_H__

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <mach/cvmx-address.h>

/* General defines */
#define CVMX_MAX_CORES 48
/* Maximum # of bits to define core in node */
#define CVMX_NODE_NO_SHIFT 7
#define CVMX_NODE_BITS 2 /* Number of bits to define a node */
#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS)
#define CVMX_NODE_MASK (CVMX_MAX_NODES - 1)
#define CVMX_NODE_IO_SHIFT 36
#define CVMX_NODE_MEM_SHIFT 40
#define CVMX_NODE_IO_MASK ((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)

#define CVMX_MIPS_MAX_CORE_BITS 10 /* Maximum # of bits to define cores */
#define CVMX_MIPS_MAX_CORES (1 << CVMX_MIPS_MAX_CORE_BITS)

#define MAX_CORE_TADS 8

#define CASTPTR(type, v) ((type *)(long)(v))
#define CAST64(v) ((long long)(long)(v))

/* Regs */
#define CVMX_CIU3_NMI 0x0001010000000160ULL

#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)

#define CVMX_MIO_BOOT_LOC_ADR 0x0001180000000090ULL
#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)

#define CVMX_MIO_BOOT_LOC_DAT 0x0001180000000098ULL

#define CVMX_MIO_FUS_DAT2 0x0001180000001410ULL
#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)

#define CVMX_MIO_FUS_RCMD 0x0001180000001500ULL
#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND BIT_ULL(12)
#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)

#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)

#define CVMX_IOBDMA_ORDERED_IO_ADDR 0xffffffffffffa200ull

/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
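
/*
 * Example (illustrative only): CVMX_TMP_STR(30) expands to the string "30",
 * which is how CVMX_RDHWR below builds the "rdhwr %[rt],$30" instruction
 * used by CVMX_MF_CHORD.
 */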

#define CVMX_RDHWR(result, regstr) \
	asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_RDHWRNV(result, regstr) \
	asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
	asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))

#define CVMX_SYNCW asm volatile("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNCS asm volatile("syncs\n" : : : "memory")
#define CVMX_SYNCWS asm volatile("syncws\n" : : : "memory")

#define CVMX_CACHE_LINE_SIZE 128 // In bytes
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) // In bytes
#define CVMX_CACHE_LINE_ALIGNED __aligned(CVMX_CACHE_LINE_SIZE)

#define CVMX_SYNCIOBDMA asm volatile("synciobdma" : : : "memory")

#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)

/*
 * The macros cvmx_likely and cvmx_unlikely use the
 * __builtin_expect GCC operation to control branch
 * probabilities for a conditional. For example, an "if"
 * statement in the code that will almost always be
 * executed should be written as "if (cvmx_likely(...))".
 * If the "else" section of an if statement is more
 * probable, use "if (cvmx_unlikely(...))".
 */
#define cvmx_likely(x) __builtin_expect(!!(x), 1)
#define cvmx_unlikely(x) __builtin_expect(!!(x), 0)

#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, to_us) \
	({ \
		int result; \
		do { \
			u64 done = get_timer(0); \
			type c; \
			while (1) { \
				c.u64 = csr_rd(address); \
				if ((c.s.field)op(value)) { \
					result = 0; \
					break; \
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1; \
					break; \
				} else \
					udelay(100); \
			} \
		} while (0); \
		result; \
	})
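
/*
 * Illustrative use only (the union type and field names below are
 * hypothetical and not defined in this header): poll a CSR bit-field
 * until it reaches a value or the timeout "to_us" (in microseconds)
 * expires. The macro evaluates to 0 on success and -1 on timeout.
 *
 *	if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_FUS_RCMD, cvmx_mio_fus_rcmd_t,
 *				  pend, ==, 0, 10000))
 *		printf("fuse read did not complete\n");
 *
 * "type" must be a union with a u64 member named u64 and a bit-field
 * reachable as c.s.<field>.
 */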

#define CVMX_WAIT_FOR_FIELD64_NODE(node, address, type, field, op, value, to_us) \
	({ \
		int result; \
		do { \
			u64 done = get_timer(0); \
			type c; \
			while (1) { \
				c.u64 = csr_rd(address); \
				if ((c.s.field)op(value)) { \
					result = 0; \
					break; \
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1; \
					break; \
				} else \
					udelay(100); \
			} \
		} while (0); \
		result; \
	})

/* ToDo: Currently only node = 0 supported */
#define cvmx_get_node_num() 0

static inline u64 csr_rd_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread64(base);
}

static inline u32 csr_rd32_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread32(base);
}

static inline u64 csr_rd(u64 addr)
{
	return csr_rd_node(0, addr);
}

static inline u32 csr_rd32(u64 addr)
{
	return csr_rd32_node(0, addr);
}

static inline void csr_wr_node(int node, u64 addr, u64 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite64(val, base);
}

static inline void csr_wr32_node(int node, u64 addr, u32 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite32(val, base);
}

static inline void csr_wr(u64 addr, u64 val)
{
	csr_wr_node(0, addr, val);
}

static inline void csr_wr32(u64 addr, u32 val)
{
	csr_wr32_node(0, addr, val);
}
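
/*
 * Example (illustrative only): read a CSR defined above and test one of
 * its bit-fields, using only names declared in this header:
 *
 *	u64 dat2 = csr_rd(CVMX_MIO_FUS_DAT2);
 *
 *	if (dat2 & MIO_FUS_DAT2_NOCRYPTO)
 *		printf("crypto unit is fuse-disabled\n");
 */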

/*
 * We need to use volatile accesses here, otherwise the I/O accessor
 * functions might swap the bytes
 */
static inline u64 cvmx_read64_uint64(u64 addr)
{
	return *(volatile u64 *)addr;
}

static inline s64 cvmx_read64_int64(u64 addr)
{
	return *(volatile s64 *)addr;
}

static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	*(volatile u64 *)addr = val;
}

static inline void cvmx_write64_int64(u64 addr, s64 val)
{
	*(volatile s64 *)addr = val;
}

static inline u32 cvmx_read64_uint32(u64 addr)
{
	return *(volatile u32 *)addr;
}

static inline s32 cvmx_read64_int32(u64 addr)
{
	return *(volatile s32 *)addr;
}

static inline void cvmx_write64_uint32(u64 addr, u32 val)
{
	*(volatile u32 *)addr = val;
}

static inline void cvmx_write64_int32(u64 addr, s32 val)
{
	*(volatile s32 *)addr = val;
}

static inline void cvmx_write64_int16(u64 addr, s16 val)
{
	*(volatile s16 *)addr = val;
}

static inline void cvmx_write64_uint16(u64 addr, u16 val)
{
	*(volatile u16 *)addr = val;
}

static inline void cvmx_write64_int8(u64 addr, int8_t val)
{
	*(volatile int8_t *)addr = val;
}

static inline void cvmx_write64_uint8(u64 addr, u8 val)
{
	*(volatile u8 *)addr = val;
}

static inline s16 cvmx_read64_int16(u64 addr)
{
	return *(volatile s16 *)addr;
}

static inline u16 cvmx_read64_uint16(u64 addr)
{
	return *(volatile u16 *)addr;
}

static inline int8_t cvmx_read64_int8(u64 addr)
{
	return *(volatile int8_t *)addr;
}

static inline u8 cvmx_read64_uint8(u64 addr)
{
	return *(volatile u8 *)addr;
}

static inline void cvmx_send_single(u64 data)
{
	cvmx_write64_uint64(CVMX_IOBDMA_ORDERED_IO_ADDR, data);
}

/**
 * Perform a 64-bit write to an IO address
 *
 * @param io_addr	I/O address to write to
 * @param val		64-bit value to write
 */
static inline void cvmx_write_io(u64 io_addr, u64 val)
{
	cvmx_write64_uint64(io_addr, val);
}

/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @param major_did	5 bit major did
 * @param sub_did	3 bit sub did
 * @return I/O base address
 */
static inline u64 cvmx_build_io_address(u64 major_did, u64 sub_did)
{
	return ((0x1ull << 48) | (major_did << 43) | (sub_did << 40));
}
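
/*
 * Example (illustrative DID values only): major DID 0x1f with sub DID 0
 * yields cvmx_build_io_address(0x1f, 0) == 0x0001f80000000000ULL.
 */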

/**
 * Builds a bit mask given the required size in bits.
 *
 * @param bits	Number of bits in the mask
 * @return The mask
 */
static inline u64 cvmx_build_mask(u64 bits)
{
	if (bits == 64)
		return -1;

	return ~((~0x0ull) << bits);
}
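
/* Example: cvmx_build_mask(8) == 0xff and cvmx_build_mask(64) == ~0ull */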

/**
 * Extract bits out of a number
 *
 * @param input	Number to extract from
 * @param lsb	Starting bit, least significant (0-63)
 * @param width	Width in bits (1-64)
 *
 * @return Extracted number
 */
static inline u64 cvmx_bit_extract(u64 input, int lsb, int width)
{
	u64 result = input >> lsb;

	result &= cvmx_build_mask(width);

	return result;
}
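
/* Example: cvmx_bit_extract(0x0001180000001500ull, 40, 8) == 0x18 */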

/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39,24,value)
 * <pre>
 * 6       5       4       3       3       2       1
 * 3       5       7       9       1       3       5       7      0
 * +-------+-------+-------+-------+-------+-------+-------+------+
 * 000000000000000000000000___________value000000000000000000000000
 * </pre>
 *
 * @param high_bit	Highest bit value can occupy (inclusive) 0-63
 * @param low_bit	Lowest bit value can occupy inclusive 0-high_bit
 * @param value		Value to use
 * @return Value masked and shifted
 */
static inline u64 cvmx_build_bits(u64 high_bit, u64 low_bit, u64 value)
{
	return ((value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit);
}
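
/* Example: cvmx_build_bits(39, 24, 0xab) == 0x00000000ab000000ull */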

static inline u64 cvmx_mask_to_localaddr(u64 addr)
{
	return (addr & 0xffffffffff);
}

static inline u64 cvmx_addr_on_node(u64 node, u64 addr)
{
	return (node << 40) | cvmx_mask_to_localaddr(addr);
}

static inline void *cvmx_phys_to_ptr(u64 addr)
{
	return (void *)CKSEG0ADDR(addr);
}

static inline u64 cvmx_ptr_to_phys(void *ptr)
{
	return virt_to_phys(ptr);
}

/**
 * Number of the Core on which the program is currently running.
 *
 * @return core number
 */
static inline unsigned int cvmx_get_core_num(void)
{
	unsigned int core_num;

	CVMX_RDHWRNV(core_num, 0);
	return core_num;
}

/**
 * Node-local number of the core on which the program is currently running.
 *
 * @return core number on local node
 */
static inline unsigned int cvmx_get_local_core_num(void)
{
	unsigned int core_num, core_mask;

	CVMX_RDHWRNV(core_num, 0);
	/* Note that MAX_CORES may not be a power of 2 */
	core_mask = (1 << CVMX_NODE_NO_SHIFT) - 1;

	return core_num & core_mask;
}

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @param val	32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline u32 cvmx_pop(u32 val)
{
	u32 pop;

	CVMX_POP(pop, val);

	return pop;
}
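
/* Example: cvmx_pop(0xff) == 8 and cvmx_pop(0) == 0 */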

#define cvmx_read_csr_node(node, addr) csr_rd(addr)
#define cvmx_write_csr_node(node, addr, val) csr_wr(addr, val)

#define cvmx_printf printf
#define cvmx_vprintf vprintf

#if defined(DEBUG)
void cvmx_warn(const char *format, ...) __printf(1, 2);
#else
void cvmx_warn(const char *format, ...);
#endif

#define cvmx_warn_if(expression, format, ...) \
	if (expression) \
		cvmx_warn(format, ##__VA_ARGS__)

#endif /* __CVMX_REGS_H__ */