Stefan Roese96c19042016-02-10 07:22:10 +01001/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * U-Boot version:
9 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <common.h>
17#include <dm.h>
18#include <dm/device-internal.h>
19#include <dm/lists.h>
20#include <net.h>
21#include <netdev.h>
22#include <config.h>
23#include <malloc.h>
24#include <asm/io.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090025#include <linux/errno.h>
Stefan Roese96c19042016-02-10 07:22:10 +010026#include <phy.h>
27#include <miiphy.h>
28#include <watchdog.h>
29#include <asm/arch/cpu.h>
30#include <asm/arch/soc.h>
31#include <linux/compat.h>
32#include <linux/mbus.h>
33
34DECLARE_GLOBAL_DATA_PTR;
35
36/* Some linux -> U-Boot compatibility stuff */
37#define netdev_err(dev, fmt, args...) \
38 printf(fmt, ##args)
39#define netdev_warn(dev, fmt, args...) \
40 printf(fmt, ##args)
41#define netdev_info(dev, fmt, args...) \
42 printf(fmt, ##args)
43#define netdev_dbg(dev, fmt, args...) \
44 printf(fmt, ##args)
45
46#define ETH_ALEN 6 /* Octets in one ethernet addr */
47
48#define __verify_pcpu_ptr(ptr) \
49do { \
50 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
51 (void)__vpp_verify; \
52} while (0)
53
54#define VERIFY_PERCPU_PTR(__p) \
55({ \
56 __verify_pcpu_ptr(__p); \
57 (typeof(*(__p)) __kernel __force *)(__p); \
58})
59
60#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
61#define smp_processor_id() 0
62#define num_present_cpus() 1
63#define for_each_present_cpu(cpu) \
64 for ((cpu) = 0; (cpu) < 1; (cpu)++)
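/*
 * Illustrative note (assumption: single-CPU U-Boot context): the shims
 * above collapse to trivial forms, e.g. per_cpu_ptr(port->stats,
 * smp_processor_id()) evaluates the cpu argument as 0 and simply returns
 * port->stats, and for_each_present_cpu() runs its body exactly once.
 */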
65
66#define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)
67
68#define CONFIG_NR_CPUS 1
69#define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */
70
71/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
72#define WRAP (2 + ETH_HLEN + 4 + 32)
73#define MTU 1500
74#define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
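/*
 * Illustrative sizing, assuming ARCH_DMA_MINALIGN is 64 (typical for
 * ARMv8 platforms): WRAP = 2 + 14 + 4 + 32 = 52, so MTU + WRAP = 1552,
 * which ALIGN() rounds up to RX_BUFFER_SIZE = 1600 bytes per buffer.
 */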
75
76#define MVPP2_SMI_TIMEOUT 10000
77
78/* RX Fifo Registers */
79#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
80#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
81#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
82#define MVPP2_RX_FIFO_INIT_REG 0x64
83
84/* RX DMA Top Registers */
85#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
86#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
87#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
88#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
89#define MVPP2_POOL_BUF_SIZE_OFFSET 5
90#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
91#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
92#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
93#define MVPP2_RXQ_POOL_SHORT_OFFS 20
Thomas Petazzoni2321c922017-02-16 06:53:51 +010094#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
95#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
Stefan Roese96c19042016-02-10 07:22:10 +010096#define MVPP2_RXQ_POOL_LONG_OFFS 24
Thomas Petazzoni2321c922017-02-16 06:53:51 +010097#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
98#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
Stefan Roese96c19042016-02-10 07:22:10 +010099#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
100#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
101#define MVPP2_RXQ_DISABLE_MASK BIT(31)
102
103/* Parser Registers */
104#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
105#define MVPP2_PRS_PORT_LU_MAX 0xf
106#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
107#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
108#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
109#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
110#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
111#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
112#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
113#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
114#define MVPP2_PRS_TCAM_IDX_REG 0x1100
115#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
116#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
117#define MVPP2_PRS_SRAM_IDX_REG 0x1200
118#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
119#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
120#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
121
122/* Classifier Registers */
123#define MVPP2_CLS_MODE_REG 0x1800
124#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
125#define MVPP2_CLS_PORT_WAY_REG 0x1810
126#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
127#define MVPP2_CLS_LKP_INDEX_REG 0x1814
128#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
129#define MVPP2_CLS_LKP_TBL_REG 0x1818
130#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
131#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
132#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
133#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
134#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
135#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
136#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
137#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
138#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
139#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
140#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
141#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
142
143/* Descriptor Manager Top Registers */
144#define MVPP2_RXQ_NUM_REG 0x2040
145#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
Thomas Petazzoni7f215c72017-02-20 11:36:57 +0100146#define MVPP22_DESC_ADDR_OFFS 8
Stefan Roese96c19042016-02-10 07:22:10 +0100147#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
148#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
149#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
150#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
151#define MVPP2_RXQ_NUM_NEW_OFFSET 16
152#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
153#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
154#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
155#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
156#define MVPP2_RXQ_THRESH_REG 0x204c
157#define MVPP2_OCCUPIED_THRESH_OFFSET 0
158#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
159#define MVPP2_RXQ_INDEX_REG 0x2050
160#define MVPP2_TXQ_NUM_REG 0x2080
161#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
162#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
163#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
164#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
165#define MVPP2_TXQ_THRESH_REG 0x2094
166#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
167#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
168#define MVPP2_TXQ_INDEX_REG 0x2098
169#define MVPP2_TXQ_PREF_BUF_REG 0x209c
170#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
171#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
172#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
173#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
174#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
175#define MVPP2_TXQ_PENDING_REG 0x20a0
176#define MVPP2_TXQ_PENDING_MASK 0x3fff
177#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
178#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
179#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
180#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
181#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
182#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
183#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
184#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
185#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
186#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
187#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
Thomas Petazzoni7f215c72017-02-20 11:36:57 +0100188#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
Stefan Roese96c19042016-02-10 07:22:10 +0100189#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
190#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
191#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
192#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
193#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
194
195/* MBUS bridge registers */
196#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
197#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
198#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
199#define MVPP2_BASE_ADDR_ENABLE 0x4060
200
Thomas Petazzonica560ab2017-02-16 08:41:07 +0100201/* AXI Bridge Registers */
202#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
203#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
204#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
205#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
206#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
207#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
208#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
209#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
210#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
211#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
212#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
213#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
214
215/* Values for AXI Bridge registers */
216#define MVPP22_AXI_ATTR_CACHE_OFFS 0
217#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
218
219#define MVPP22_AXI_CODE_CACHE_OFFS 0
220#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
221
222#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
223#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
224#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
225
226#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
227#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
228
Stefan Roese96c19042016-02-10 07:22:10 +0100229/* Interrupt Cause and Mask registers */
230#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
231#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
232#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
233#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
234#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
235#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
236#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
237#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
238#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
239#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
240#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
241#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
242#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
243#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
244#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
245#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
246#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
247#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
248#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
249#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
250
251/* Buffer Manager registers */
252#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
253#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
254#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
255#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
256#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
257#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
258#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
259#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
260#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
261#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
262#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
263#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
264#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
265#define MVPP2_BM_START_MASK BIT(0)
266#define MVPP2_BM_STOP_MASK BIT(1)
267#define MVPP2_BM_STATE_MASK BIT(4)
268#define MVPP2_BM_LOW_THRESH_OFFS 8
269#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
270#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
271 MVPP2_BM_LOW_THRESH_OFFS)
272#define MVPP2_BM_HIGH_THRESH_OFFS 16
273#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
274#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
275 MVPP2_BM_HIGH_THRESH_OFFS)
276#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
277#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
278#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
279#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
280#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
281#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
282#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
283#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
284#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
285#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
Thomas Petazzoni3520a332017-02-20 11:29:16 +0100286#define MVPP2_BM_ADDR_HIGH_ALLOC 0x6444
287#define MVPP2_BM_ADDR_HIGH_PHYS_MASK 0xff
288#define MVPP2_BM_ADDR_HIGH_VIRT_MASK 0xff00
289#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT 8
Stefan Roese96c19042016-02-10 07:22:10 +0100290#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
291#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
292#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
293#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
294#define MVPP2_BM_VIRT_RLS_REG 0x64c0
Thomas Petazzoni3520a332017-02-20 11:29:16 +0100295#define MVPP21_BM_MC_RLS_REG 0x64c4
Stefan Roese96c19042016-02-10 07:22:10 +0100296#define MVPP2_BM_MC_ID_MASK 0xfff
297#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
Thomas Petazzoni3520a332017-02-20 11:29:16 +0100298#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
299#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
300#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
301#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
302#define MVPP22_BM_MC_RLS_REG 0x64d4
Stefan Roese96c19042016-02-10 07:22:10 +0100303
304/* TX Scheduler registers */
305#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
306#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
307#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
308#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
309#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
310#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
311#define MVPP2_TXP_SCHED_MTU_REG 0x801c
312#define MVPP2_TXP_MTU_MAX 0x7FFFF
313#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
314#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
315#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
316#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
317#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
318#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
319#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
320#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
321#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
322#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
323#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
324#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
325#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
326#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
327
328/* TX general registers */
329#define MVPP2_TX_SNOOP_REG 0x8800
330#define MVPP2_TX_PORT_FLUSH_REG 0x8810
331#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
332
333/* LMS registers */
334#define MVPP2_SRC_ADDR_MIDDLE 0x24
335#define MVPP2_SRC_ADDR_HIGH 0x28
336#define MVPP2_PHY_AN_CFG0_REG 0x34
337#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
Stefan Roese96c19042016-02-10 07:22:10 +0100338#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
Thomas Petazzoniebbe76f2017-02-15 12:16:23 +0100339#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
Stefan Roese96c19042016-02-10 07:22:10 +0100340
341/* Per-port registers */
342#define MVPP2_GMAC_CTRL_0_REG 0x0
343#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
344#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
345#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
346#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
347#define MVPP2_GMAC_CTRL_1_REG 0x4
348#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
349#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
350#define MVPP2_GMAC_PCS_LB_EN_BIT 6
351#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
352#define MVPP2_GMAC_SA_LOW_OFFS 7
353#define MVPP2_GMAC_CTRL_2_REG 0x8
354#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
355#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
356#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
357#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
358#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
359#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
360#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
361#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
362#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
363#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
364#define MVPP2_GMAC_FC_ADV_EN BIT(9)
365#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
366#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
367#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
368#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
369#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
370#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
371 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
372
Thomas Petazzonicc2445f2017-02-20 11:42:51 +0100373#define MVPP22_SMI_MISC_CFG_REG 0x1204
374#define MVPP22_SMI_POLLING_EN BIT(10)
375
Thomas Petazzoni5555f072017-02-16 08:03:37 +0100376#define MVPP22_PORT_BASE 0x30e00
377#define MVPP22_PORT_OFFSET 0x1000
378
Stefan Roese96c19042016-02-10 07:22:10 +0100379#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
380
381/* Descriptor ring Macros */
382#define MVPP2_QUEUE_NEXT_DESC(q, index) \
383 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
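/*
 * Illustrative example, assuming last_desc is set to size - 1 at queue
 * init: with a 16-entry ring, MVPP2_QUEUE_NEXT_DESC(q, 14) yields 15 and
 * MVPP2_QUEUE_NEXT_DESC(q, 15) wraps back to index 0.
 */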
384
385/* SMI: 0xc0054 -> offset 0x54 to lms_base */
386#define MVPP2_SMI 0x0054
387#define MVPP2_PHY_REG_MASK 0x1f
388/* SMI register fields */
389#define MVPP2_SMI_DATA_OFFS 0 /* Data */
390#define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS)
391#define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
392#define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/
393#define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
394#define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS)
395#define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */
396#define MVPP2_SMI_BUSY (1 << 28) /* Busy */
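/*
 * Illustrative use of the fields above (a sketch, not lifted verbatim from
 * this driver): a read of register 'reg' on PHY address 'addr' is started
 * by writing MVPP2_SMI_OPCODE_READ | (addr << MVPP2_SMI_DEV_ADDR_OFFS) |
 * (reg << MVPP2_SMI_REG_ADDR_OFFS) to the SMI register, then polling until
 * MVPP2_SMI_BUSY clears (and MVPP2_SMI_READ_VALID is set) before reading
 * the result from the data field in bits [15:0].
 */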
397
398#define MVPP2_PHY_ADDR_MASK 0x1f
399#define MVPP2_PHY_REG_MASK 0x1f
400
401/* Various constants */
402
403/* Coalescing */
404#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
405#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
406#define MVPP2_RX_COAL_PKTS 32
407#define MVPP2_RX_COAL_USEC 100
408
 409/* The two-byte Marvell header. It either contains a special value used
 410 * by Marvell switches when a specific hardware mode is enabled (not
 411 * supported by this driver) or is filled with zeroes automatically on
 412 * the RX side. Since those two bytes sit at the front of the Ethernet
 413 * header, they automatically align the IP header on a 4-byte
 414 * boundary: the hardware skips those two bytes on its
 415 * own.
416 */
417#define MVPP2_MH_SIZE 2
418#define MVPP2_ETH_TYPE_LEN 2
419#define MVPP2_PPPOE_HDR_SIZE 8
420#define MVPP2_VLAN_TAG_LEN 4
421
422/* Lbtd 802.3 type */
423#define MVPP2_IP_LBDT_TYPE 0xfffa
424
425#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
426#define MVPP2_TX_CSUM_MAX_SIZE 9800
427
428/* Timeout constants */
429#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
430#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
431
432#define MVPP2_TX_MTU_MAX 0x7ffff
433
434/* Maximum number of T-CONTs of PON port */
435#define MVPP2_MAX_TCONT 16
436
437/* Maximum number of supported ports */
438#define MVPP2_MAX_PORTS 4
439
440/* Maximum number of TXQs used by single port */
441#define MVPP2_MAX_TXQ 8
442
443/* Maximum number of RXQs used by single port */
444#define MVPP2_MAX_RXQ 8
445
446/* Default number of TXQs in use */
447#define MVPP2_DEFAULT_TXQ 1
448
 449/* Default number of RXQs in use */
450#define MVPP2_DEFAULT_RXQ 1
451#define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */
452
453/* Total number of RXQs available to all ports */
454#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
455
456/* Max number of Rx descriptors */
457#define MVPP2_MAX_RXD 16
458
459/* Max number of Tx descriptors */
460#define MVPP2_MAX_TXD 16
461
462/* Amount of Tx descriptors that can be reserved at once by CPU */
463#define MVPP2_CPU_DESC_CHUNK 64
464
465/* Max number of Tx descriptors in each aggregated queue */
466#define MVPP2_AGGR_TXQ_SIZE 256
467
468/* Descriptor aligned size */
469#define MVPP2_DESC_ALIGNED_SIZE 32
470
471/* Descriptor alignment mask */
472#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
473
474/* RX FIFO constants */
475#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
476#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
477#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
478
479/* RX buffer constants */
480#define MVPP2_SKB_SHINFO_SIZE \
481 0
482
483#define MVPP2_RX_PKT_SIZE(mtu) \
484 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
485 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
486
487#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
488#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
489#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
490 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
491
492#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
493
494/* IPv6 max L3 address size */
495#define MVPP2_MAX_L3_ADDR_SIZE 16
496
497/* Port flags */
498#define MVPP2_F_LOOPBACK BIT(0)
499
500/* Marvell tag types */
501enum mvpp2_tag_type {
502 MVPP2_TAG_TYPE_NONE = 0,
503 MVPP2_TAG_TYPE_MH = 1,
504 MVPP2_TAG_TYPE_DSA = 2,
505 MVPP2_TAG_TYPE_EDSA = 3,
506 MVPP2_TAG_TYPE_VLAN = 4,
507 MVPP2_TAG_TYPE_LAST = 5
508};
509
510/* Parser constants */
511#define MVPP2_PRS_TCAM_SRAM_SIZE 256
512#define MVPP2_PRS_TCAM_WORDS 6
513#define MVPP2_PRS_SRAM_WORDS 4
514#define MVPP2_PRS_FLOW_ID_SIZE 64
515#define MVPP2_PRS_FLOW_ID_MASK 0x3f
516#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
517#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
518#define MVPP2_PRS_IPV4_HEAD 0x40
519#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
520#define MVPP2_PRS_IPV4_MC 0xe0
521#define MVPP2_PRS_IPV4_MC_MASK 0xf0
522#define MVPP2_PRS_IPV4_BC_MASK 0xff
523#define MVPP2_PRS_IPV4_IHL 0x5
524#define MVPP2_PRS_IPV4_IHL_MASK 0xf
525#define MVPP2_PRS_IPV6_MC 0xff
526#define MVPP2_PRS_IPV6_MC_MASK 0xff
527#define MVPP2_PRS_IPV6_HOP_MASK 0xff
528#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
529#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
530#define MVPP2_PRS_DBL_VLANS_MAX 100
531
532/* Tcam structure:
533 * - lookup ID - 4 bits
534 * - port ID - 1 byte
535 * - additional information - 1 byte
536 * - header data - 8 bytes
537 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
538 */
539#define MVPP2_PRS_AI_BITS 8
540#define MVPP2_PRS_PORT_MASK 0xff
541#define MVPP2_PRS_LU_MASK 0xf
542#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
543 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
544#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
545 (((offs) * 2) - ((offs) % 2) + 2)
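/*
 * Worked example of the byte swizzling above: header byte offsets
 * 0, 1, 2, 3 map to TCAM data bytes 0, 1, 4, 5, while their enable masks
 * land in bytes 2, 3, 6, 7 - i.e. every 16 bits of header data is
 * followed by its 16-bit enable mask within the TCAM entry.
 */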
546#define MVPP2_PRS_TCAM_AI_BYTE 16
547#define MVPP2_PRS_TCAM_PORT_BYTE 17
548#define MVPP2_PRS_TCAM_LU_BYTE 20
549#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
550#define MVPP2_PRS_TCAM_INV_WORD 5
551/* Tcam entries ID */
552#define MVPP2_PE_DROP_ALL 0
553#define MVPP2_PE_FIRST_FREE_TID 1
554#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
555#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
556#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
557#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
558#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
559#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
560#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
561#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
562#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
563#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
564#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
565#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
566#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
567#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
568#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
569#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
570#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
571#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
572#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
573#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
574#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
575#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
576#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
577#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
578#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
579
580/* Sram structure
581 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
582 */
583#define MVPP2_PRS_SRAM_RI_OFFS 0
584#define MVPP2_PRS_SRAM_RI_WORD 0
585#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
586#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
587#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
588#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
589#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
590#define MVPP2_PRS_SRAM_UDF_OFFS 73
591#define MVPP2_PRS_SRAM_UDF_BITS 8
592#define MVPP2_PRS_SRAM_UDF_MASK 0xff
593#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
594#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
595#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
596#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
597#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
598#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
599#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
600#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
601#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
602#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
603#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
604#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
605#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
606#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
607#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
608#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
609#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
610#define MVPP2_PRS_SRAM_AI_OFFS 90
611#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
612#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
613#define MVPP2_PRS_SRAM_AI_MASK 0xff
614#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
615#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
616#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
617#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
618
619/* Sram result info bits assignment */
620#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
621#define MVPP2_PRS_RI_DSA_MASK 0x2
Thomas Petazzoni265b3c62017-02-15 12:19:36 +0100622#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
623#define MVPP2_PRS_RI_VLAN_NONE 0x0
Stefan Roese96c19042016-02-10 07:22:10 +0100624#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
625#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
626#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
627#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
628#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
Thomas Petazzoni265b3c62017-02-15 12:19:36 +0100629#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
630#define MVPP2_PRS_RI_L2_UCAST 0x0
Stefan Roese96c19042016-02-10 07:22:10 +0100631#define MVPP2_PRS_RI_L2_MCAST BIT(9)
632#define MVPP2_PRS_RI_L2_BCAST BIT(10)
633#define MVPP2_PRS_RI_PPPOE_MASK 0x800
Thomas Petazzoni265b3c62017-02-15 12:19:36 +0100634#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
635#define MVPP2_PRS_RI_L3_UN 0x0
Stefan Roese96c19042016-02-10 07:22:10 +0100636#define MVPP2_PRS_RI_L3_IP4 BIT(12)
637#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
638#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
639#define MVPP2_PRS_RI_L3_IP6 BIT(14)
640#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
641#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
Thomas Petazzoni265b3c62017-02-15 12:19:36 +0100642#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
643#define MVPP2_PRS_RI_L3_UCAST 0x0
Stefan Roese96c19042016-02-10 07:22:10 +0100644#define MVPP2_PRS_RI_L3_MCAST BIT(15)
645#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
646#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
647#define MVPP2_PRS_RI_UDF3_MASK 0x300000
648#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
649#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
650#define MVPP2_PRS_RI_L4_TCP BIT(22)
651#define MVPP2_PRS_RI_L4_UDP BIT(23)
652#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
653#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
654#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
655#define MVPP2_PRS_RI_DROP_MASK 0x80000000
656
657/* Sram additional info bits assignment */
658#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
659#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
660#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
661#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
662#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
663#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
664#define MVPP2_PRS_SINGLE_VLAN_AI 0
665#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
666
667/* DSA/EDSA type */
668#define MVPP2_PRS_TAGGED true
669#define MVPP2_PRS_UNTAGGED false
670#define MVPP2_PRS_EDSA true
671#define MVPP2_PRS_DSA false
672
673/* MAC entries, shadow udf */
674enum mvpp2_prs_udf {
675 MVPP2_PRS_UDF_MAC_DEF,
676 MVPP2_PRS_UDF_MAC_RANGE,
677 MVPP2_PRS_UDF_L2_DEF,
678 MVPP2_PRS_UDF_L2_DEF_COPY,
679 MVPP2_PRS_UDF_L2_USER,
680};
681
682/* Lookup ID */
683enum mvpp2_prs_lookup {
684 MVPP2_PRS_LU_MH,
685 MVPP2_PRS_LU_MAC,
686 MVPP2_PRS_LU_DSA,
687 MVPP2_PRS_LU_VLAN,
688 MVPP2_PRS_LU_L2,
689 MVPP2_PRS_LU_PPPOE,
690 MVPP2_PRS_LU_IP4,
691 MVPP2_PRS_LU_IP6,
692 MVPP2_PRS_LU_FLOWS,
693 MVPP2_PRS_LU_LAST,
694};
695
696/* L3 cast enum */
697enum mvpp2_prs_l3_cast {
698 MVPP2_PRS_L3_UNI_CAST,
699 MVPP2_PRS_L3_MULTI_CAST,
700 MVPP2_PRS_L3_BROAD_CAST
701};
702
703/* Classifier constants */
704#define MVPP2_CLS_FLOWS_TBL_SIZE 512
705#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
706#define MVPP2_CLS_LKP_TBL_SIZE 64
707
708/* BM constants */
709#define MVPP2_BM_POOLS_NUM 1
710#define MVPP2_BM_LONG_BUF_NUM 16
711#define MVPP2_BM_SHORT_BUF_NUM 16
712#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
713#define MVPP2_BM_POOL_PTR_ALIGN 128
714#define MVPP2_BM_SWF_LONG_POOL(port) 0
715
716/* BM cookie (32 bits) definition */
717#define MVPP2_BM_COOKIE_POOL_OFFS 8
718#define MVPP2_BM_COOKIE_CPU_OFFS 24
719
720/* BM short pool packet size
 721 * These values ensure that for SWF the total number
722 * of bytes allocated for each buffer will be 512
723 */
724#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
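/*
 * Illustrative arithmetic: with NET_SKB_PAD = 32 and
 * MVPP2_SKB_SHINFO_SIZE = 0 in this U-Boot port,
 * MVPP2_BM_SHORT_PKT_SIZE = MVPP2_RX_MAX_PKT_SIZE(512) = 512 - 32 = 480
 * bytes of packet data per short-pool buffer.
 */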
725
726enum mvpp2_bm_type {
727 MVPP2_BM_FREE,
728 MVPP2_BM_SWF_LONG,
729 MVPP2_BM_SWF_SHORT
730};
731
732/* Definitions */
733
734/* Shared Packet Processor resources */
735struct mvpp2 {
736 /* Shared registers' base addresses */
737 void __iomem *base;
738 void __iomem *lms_base;
Thomas Petazzoni5555f072017-02-16 08:03:37 +0100739 void __iomem *iface_base;
Stefan Roese96c19042016-02-10 07:22:10 +0100740
741 /* List of pointers to port structures */
742 struct mvpp2_port **port_list;
743
744 /* Aggregated TXQs */
745 struct mvpp2_tx_queue *aggr_txqs;
746
747 /* BM pools */
748 struct mvpp2_bm_pool *bm_pools;
749
750 /* PRS shadow table */
751 struct mvpp2_prs_shadow *prs_shadow;
752 /* PRS auxiliary table for double vlan entries control */
753 bool *prs_double_vlans;
754
755 /* Tclk value */
756 u32 tclk;
757
Thomas Petazzoni51ccb412017-02-15 14:08:59 +0100758 /* HW version */
759 enum { MVPP21, MVPP22 } hw_version;
760
Stefan Roese96c19042016-02-10 07:22:10 +0100761 struct mii_dev *bus;
762};
763
764struct mvpp2_pcpu_stats {
765 u64 rx_packets;
766 u64 rx_bytes;
767 u64 tx_packets;
768 u64 tx_bytes;
769};
770
771struct mvpp2_port {
772 u8 id;
773
Thomas Petazzoni5555f072017-02-16 08:03:37 +0100774 /* Index of the port from the "group of ports" complex point
775 * of view
776 */
777 int gop_id;
778
Stefan Roese96c19042016-02-10 07:22:10 +0100779 int irq;
780
781 struct mvpp2 *priv;
782
783 /* Per-port registers' base address */
784 void __iomem *base;
785
786 struct mvpp2_rx_queue **rxqs;
787 struct mvpp2_tx_queue **txqs;
788
789 int pkt_size;
790
791 u32 pending_cause_rx;
792
793 /* Per-CPU port control */
794 struct mvpp2_port_pcpu __percpu *pcpu;
795
796 /* Flags */
797 unsigned long flags;
798
799 u16 tx_ring_size;
800 u16 rx_ring_size;
801 struct mvpp2_pcpu_stats __percpu *stats;
802
803 struct phy_device *phy_dev;
804 phy_interface_t phy_interface;
805 int phy_node;
806 int phyaddr;
807 int init;
808 unsigned int link;
809 unsigned int duplex;
810 unsigned int speed;
811
812 struct mvpp2_bm_pool *pool_long;
813 struct mvpp2_bm_pool *pool_short;
814
815 /* Index of first port's physical RXQ */
816 u8 first_rxq;
817
818 u8 dev_addr[ETH_ALEN];
819};
820
821/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 822 * layout of the transmit and receive DMA descriptors, and their
823 * layout is therefore defined by the hardware design
824 */
825
826#define MVPP2_TXD_L3_OFF_SHIFT 0
827#define MVPP2_TXD_IP_HLEN_SHIFT 8
828#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
829#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
830#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
831#define MVPP2_TXD_PADDING_DISABLE BIT(23)
832#define MVPP2_TXD_L4_UDP BIT(24)
833#define MVPP2_TXD_L3_IP6 BIT(26)
834#define MVPP2_TXD_L_DESC BIT(28)
835#define MVPP2_TXD_F_DESC BIT(29)
836
837#define MVPP2_RXD_ERR_SUMMARY BIT(15)
838#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
839#define MVPP2_RXD_ERR_CRC 0x0
840#define MVPP2_RXD_ERR_OVERRUN BIT(13)
841#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
842#define MVPP2_RXD_BM_POOL_ID_OFFS 16
843#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
844#define MVPP2_RXD_HWF_SYNC BIT(21)
845#define MVPP2_RXD_L4_CSUM_OK BIT(22)
846#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
847#define MVPP2_RXD_L4_TCP BIT(25)
848#define MVPP2_RXD_L4_UDP BIT(26)
849#define MVPP2_RXD_L3_IP4 BIT(28)
850#define MVPP2_RXD_L3_IP6 BIT(30)
851#define MVPP2_RXD_BUF_HDR BIT(31)
852
Thomas Petazzonie3645a02017-02-15 16:25:53 +0100853/* HW TX descriptor for PPv2.1 */
854struct mvpp21_tx_desc {
Stefan Roese96c19042016-02-10 07:22:10 +0100855	u32 command;		/* Options used by HW for packet transmission. */
856 u8 packet_offset; /* the offset from the buffer beginning */
857 u8 phys_txq; /* destination queue ID */
858 u16 data_size; /* data size of transmitted packet in bytes */
Thomas Petazzonic49aff22017-02-20 10:27:51 +0100859 u32 buf_dma_addr; /* physical addr of transmitted buffer */
Stefan Roese96c19042016-02-10 07:22:10 +0100860 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
861 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
862 u32 reserved2; /* reserved (for future use) */
863};
864
Thomas Petazzonie3645a02017-02-15 16:25:53 +0100865/* HW RX descriptor for PPv2.1 */
866struct mvpp21_rx_desc {
Stefan Roese96c19042016-02-10 07:22:10 +0100867 u32 status; /* info about received packet */
868 u16 reserved1; /* parser_info (for future use, PnC) */
869 u16 data_size; /* size of received packet in bytes */
Thomas Petazzonic49aff22017-02-20 10:27:51 +0100870 u32 buf_dma_addr; /* physical address of the buffer */
Stefan Roese96c19042016-02-10 07:22:10 +0100871 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
872 u16 reserved2; /* gem_port_id (for future use, PON) */
873 u16 reserved3; /* csum_l4 (for future use, PnC) */
874 u8 reserved4; /* bm_qset (for future use, BM) */
875 u8 reserved5;
876 u16 reserved6; /* classify_info (for future use, PnC) */
877 u32 reserved7; /* flow_id (for future use, PnC) */
878 u32 reserved8;
879};
880
Thomas Petazzoni56563ad2017-02-20 11:08:46 +0100881/* HW TX descriptor for PPv2.2 */
882struct mvpp22_tx_desc {
883 u32 command;
884 u8 packet_offset;
885 u8 phys_txq;
886 u16 data_size;
887 u64 reserved1;
888 u64 buf_dma_addr_ptp;
889 u64 buf_cookie_misc;
890};
891
892/* HW RX descriptor for PPv2.2 */
893struct mvpp22_rx_desc {
894 u32 status;
895 u16 reserved1;
896 u16 data_size;
897 u32 reserved2;
898 u32 reserved3;
899 u64 buf_dma_addr_key_hash;
900 u64 buf_cookie_misc;
901};
902
Thomas Petazzonie3645a02017-02-15 16:25:53 +0100903/* Opaque type used by the driver to manipulate the HW TX and RX
904 * descriptors
905 */
906struct mvpp2_tx_desc {
907 union {
908 struct mvpp21_tx_desc pp21;
Thomas Petazzoni56563ad2017-02-20 11:08:46 +0100909 struct mvpp22_tx_desc pp22;
Thomas Petazzonie3645a02017-02-15 16:25:53 +0100910 };
911};
912
913struct mvpp2_rx_desc {
914 union {
915 struct mvpp21_rx_desc pp21;
Thomas Petazzoni56563ad2017-02-20 11:08:46 +0100916 struct mvpp22_rx_desc pp22;
Thomas Petazzonie3645a02017-02-15 16:25:53 +0100917 };
918};
919
Stefan Roese96c19042016-02-10 07:22:10 +0100920/* Per-CPU Tx queue control */
921struct mvpp2_txq_pcpu {
922 int cpu;
923
924 /* Number of Tx DMA descriptors in the descriptor ring */
925 int size;
926
927 /* Number of currently used Tx DMA descriptor in the
928 * descriptor ring
929 */
930 int count;
931
932 /* Number of Tx DMA descriptors reserved for each CPU */
933 int reserved_num;
934
935 /* Index of last TX DMA descriptor that was inserted */
936 int txq_put_index;
937
938 /* Index of the TX DMA descriptor to be cleaned up */
939 int txq_get_index;
940};
941
942struct mvpp2_tx_queue {
943 /* Physical number of this Tx queue */
944 u8 id;
945
946 /* Logical number of this Tx queue */
947 u8 log_id;
948
949 /* Number of Tx DMA descriptors in the descriptor ring */
950 int size;
951
952 /* Number of currently used Tx DMA descriptor in the descriptor ring */
953 int count;
954
955 /* Per-CPU control of physical Tx queues */
956 struct mvpp2_txq_pcpu __percpu *pcpu;
957
958 u32 done_pkts_coal;
959
 960	/* Virtual address of the Tx DMA descriptors array */
961 struct mvpp2_tx_desc *descs;
962
963 /* DMA address of the Tx DMA descriptors array */
Thomas Petazzonic49aff22017-02-20 10:27:51 +0100964 dma_addr_t descs_dma;
Stefan Roese96c19042016-02-10 07:22:10 +0100965
966 /* Index of the last Tx DMA descriptor */
967 int last_desc;
968
969 /* Index of the next Tx DMA descriptor to process */
970 int next_desc_to_proc;
971};
972
973struct mvpp2_rx_queue {
974 /* RX queue number, in the range 0-31 for physical RXQs */
975 u8 id;
976
977 /* Num of rx descriptors in the rx descriptor ring */
978 int size;
979
980 u32 pkts_coal;
981 u32 time_coal;
982
983 /* Virtual address of the RX DMA descriptors array */
984 struct mvpp2_rx_desc *descs;
985
986 /* DMA address of the RX DMA descriptors array */
Thomas Petazzonic49aff22017-02-20 10:27:51 +0100987 dma_addr_t descs_dma;
Stefan Roese96c19042016-02-10 07:22:10 +0100988
989 /* Index of the last RX DMA descriptor */
990 int last_desc;
991
992 /* Index of the next RX DMA descriptor to process */
993 int next_desc_to_proc;
994
995 /* ID of port to which physical RXQ is mapped */
996 int port;
997
998 /* Port's logic RXQ number to which physical RXQ is mapped */
999 int logic_rxq;
1000};
1001
1002union mvpp2_prs_tcam_entry {
1003 u32 word[MVPP2_PRS_TCAM_WORDS];
1004 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1005};
1006
1007union mvpp2_prs_sram_entry {
1008 u32 word[MVPP2_PRS_SRAM_WORDS];
1009 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1010};
1011
1012struct mvpp2_prs_entry {
1013 u32 index;
1014 union mvpp2_prs_tcam_entry tcam;
1015 union mvpp2_prs_sram_entry sram;
1016};
1017
1018struct mvpp2_prs_shadow {
1019 bool valid;
1020 bool finish;
1021
1022 /* Lookup ID */
1023 int lu;
1024
1025 /* User defined offset */
1026 int udf;
1027
1028 /* Result info */
1029 u32 ri;
1030 u32 ri_mask;
1031};
1032
1033struct mvpp2_cls_flow_entry {
1034 u32 index;
1035 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1036};
1037
1038struct mvpp2_cls_lookup_entry {
1039 u32 lkpid;
1040 u32 way;
1041 u32 data;
1042};
1043
1044struct mvpp2_bm_pool {
1045 /* Pool number in the range 0-7 */
1046 int id;
1047 enum mvpp2_bm_type type;
1048
1049 /* Buffer Pointers Pool External (BPPE) size */
1050 int size;
1051 /* Number of buffers for this pool */
1052 int buf_num;
1053 /* Pool buffer size */
1054 int buf_size;
1055 /* Packet size */
1056 int pkt_size;
1057
1058 /* BPPE virtual base address */
Stefan Roesefeb0b332017-02-15 12:46:18 +01001059 unsigned long *virt_addr;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01001060 /* BPPE DMA base address */
1061 dma_addr_t dma_addr;
Stefan Roese96c19042016-02-10 07:22:10 +01001062
1063 /* Ports using BM pool */
1064 u32 port_map;
1065
1066 /* Occupied buffers indicator */
1067 int in_use_thresh;
1068};
1069
Stefan Roese96c19042016-02-10 07:22:10 +01001070/* Static declarations */
1071
1072/* Number of RXQs used by single port */
1073static int rxq_number = MVPP2_DEFAULT_RXQ;
1074/* Number of TXQs used by single port */
1075static int txq_number = MVPP2_DEFAULT_TXQ;
1076
1077#define MVPP2_DRIVER_NAME "mvpp2"
1078#define MVPP2_DRIVER_VERSION "1.0"
1079
1080/*
1081 * U-Boot internal data, mostly uncached buffers for descriptors and data
1082 */
1083struct buffer_location {
1084 struct mvpp2_tx_desc *aggr_tx_descs;
1085 struct mvpp2_tx_desc *tx_descs;
1086 struct mvpp2_rx_desc *rx_descs;
Stefan Roesefeb0b332017-02-15 12:46:18 +01001087 unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
1088 unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
Stefan Roese96c19042016-02-10 07:22:10 +01001089 int first_rxq;
1090};
1091
1092/*
1093 * All 4 interfaces use the same global buffer, since only one interface
1094 * can be enabled at once
1095 */
1096static struct buffer_location buffer_loc;
1097
1098/*
1099 * Page table entries are set to 1MB, or multiples of 1MB
 1100 * (not < 1MB). The driver uses few buffer descriptors, so 1MB of BD space is enough.
1101 */
1102#define BD_SPACE (1 << 20)
1103
1104/* Utility/helper methods */
1105
1106static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1107{
1108 writel(data, priv->base + offset);
1109}
1110
1111static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1112{
1113 return readl(priv->base + offset);
1114}
1115
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001116static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1117 struct mvpp2_tx_desc *tx_desc,
1118 dma_addr_t dma_addr)
1119{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001120 if (port->priv->hw_version == MVPP21) {
1121 tx_desc->pp21.buf_dma_addr = dma_addr;
1122 } else {
1123 u64 val = (u64)dma_addr;
1124
1125 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1126 tx_desc->pp22.buf_dma_addr_ptp |= val;
1127 }
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001128}
1129
1130static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1131 struct mvpp2_tx_desc *tx_desc,
1132 size_t size)
1133{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001134 if (port->priv->hw_version == MVPP21)
1135 tx_desc->pp21.data_size = size;
1136 else
1137 tx_desc->pp22.data_size = size;
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001138}
1139
1140static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1141 struct mvpp2_tx_desc *tx_desc,
1142 unsigned int txq)
1143{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001144 if (port->priv->hw_version == MVPP21)
1145 tx_desc->pp21.phys_txq = txq;
1146 else
1147 tx_desc->pp22.phys_txq = txq;
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001148}
1149
1150static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1151 struct mvpp2_tx_desc *tx_desc,
1152 unsigned int command)
1153{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001154 if (port->priv->hw_version == MVPP21)
1155 tx_desc->pp21.command = command;
1156 else
1157 tx_desc->pp22.command = command;
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001158}
1159
1160static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1161 struct mvpp2_tx_desc *tx_desc,
1162 unsigned int offset)
1163{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001164 if (port->priv->hw_version == MVPP21)
1165 tx_desc->pp21.packet_offset = offset;
1166 else
1167 tx_desc->pp22.packet_offset = offset;
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001168}
1169
1170static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1171 struct mvpp2_rx_desc *rx_desc)
1172{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001173 if (port->priv->hw_version == MVPP21)
1174 return rx_desc->pp21.buf_dma_addr;
1175 else
1176 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001177}
1178
1179static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1180 struct mvpp2_rx_desc *rx_desc)
1181{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001182 if (port->priv->hw_version == MVPP21)
1183 return rx_desc->pp21.buf_cookie;
1184 else
1185 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001186}
1187
1188static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1189 struct mvpp2_rx_desc *rx_desc)
1190{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001191 if (port->priv->hw_version == MVPP21)
1192 return rx_desc->pp21.data_size;
1193 else
1194 return rx_desc->pp22.data_size;
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001195}
1196
1197static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1198 struct mvpp2_rx_desc *rx_desc)
1199{
Thomas Petazzoni56563ad2017-02-20 11:08:46 +01001200 if (port->priv->hw_version == MVPP21)
1201 return rx_desc->pp21.status;
1202 else
1203 return rx_desc->pp22.status;
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01001204}
1205
Stefan Roese96c19042016-02-10 07:22:10 +01001206static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1207{
1208 txq_pcpu->txq_get_index++;
1209 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1210 txq_pcpu->txq_get_index = 0;
1211}
1212
1213/* Get number of physical egress port */
1214static inline int mvpp2_egress_port(struct mvpp2_port *port)
1215{
1216 return MVPP2_MAX_TCONT + port->id;
1217}
1218
1219/* Get number of physical TXQ */
1220static inline int mvpp2_txq_phys(int port, int txq)
1221{
1222 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1223}
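/*
 * Illustrative numbering: with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * port 0 / txq 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128, and
 * port 1 / txq 0 maps to (16 + 1) * 8 + 0 = 136.
 */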
1224
1225/* Parser configuration routines */
1226
1227/* Update parser tcam and sram hw entries */
1228static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1229{
1230 int i;
1231
1232 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1233 return -EINVAL;
1234
1235 /* Clear entry invalidation bit */
1236 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1237
1238 /* Write tcam index - indirect access */
1239 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1240 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1241 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1242
1243 /* Write sram index - indirect access */
1244 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1245 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1246 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1247
1248 return 0;
1249}
1250
1251/* Read tcam entry from hw */
1252static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1253{
1254 int i;
1255
1256 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1257 return -EINVAL;
1258
1259 /* Write tcam index - indirect access */
1260 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1261
1262 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1263 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1264 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1265 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1266
1267 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1268 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1269
1270 /* Write sram index - indirect access */
1271 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1272 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1273 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1274
1275 return 0;
1276}
1277
1278/* Invalidate tcam hw entry */
1279static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1280{
1281 /* Write index - indirect access */
1282 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1283 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1284 MVPP2_PRS_TCAM_INV_MASK);
1285}
1286
1287/* Enable shadow table entry and set its lookup ID */
1288static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1289{
1290 priv->prs_shadow[index].valid = true;
1291 priv->prs_shadow[index].lu = lu;
1292}
1293
1294/* Update ri fields in shadow table entry */
1295static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1296 unsigned int ri, unsigned int ri_mask)
1297{
1298 priv->prs_shadow[index].ri_mask = ri_mask;
1299 priv->prs_shadow[index].ri = ri;
1300}
1301
1302/* Update lookup field in tcam sw entry */
1303static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1304{
1305 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1306
1307 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1308 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1309}
1310
1311/* Update mask for single port in tcam sw entry */
1312static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1313 unsigned int port, bool add)
1314{
1315 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1316
1317 if (add)
1318 pe->tcam.byte[enable_off] &= ~(1 << port);
1319 else
1320 pe->tcam.byte[enable_off] |= 1 << port;
1321}
1322
1323/* Update port map in tcam sw entry */
1324static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1325 unsigned int ports)
1326{
1327 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1328 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1329
1330 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1331 pe->tcam.byte[enable_off] &= ~port_mask;
1332 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1333}
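/*
 * Example of the inverted (active-low) port enable encoding above:
 * ports = BIT(0) (port 0 only) stores 0xfe in the enable byte, and
 * mvpp2_prs_tcam_port_map_get() below recovers 0x01 from it.
 */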
1334
1335/* Obtain port map from tcam sw entry */
1336static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1337{
1338 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1339
1340 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1341}
1342
1343/* Set byte of data and its enable bits in tcam sw entry */
1344static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1345 unsigned int offs, unsigned char byte,
1346 unsigned char enable)
1347{
1348 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1349 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1350}
1351
1352/* Get byte of data and its enable bits from tcam sw entry */
1353static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1354 unsigned int offs, unsigned char *byte,
1355 unsigned char *enable)
1356{
1357 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1358 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1359}
1360
1361/* Set ethertype in tcam sw entry */
1362static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1363 unsigned short ethertype)
1364{
1365 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1366 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1367}
1368
1369/* Set bits in sram sw entry */
1370static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1371 int val)
1372{
1373 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1374}
1375
1376/* Clear bits in sram sw entry */
1377static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1378 int val)
1379{
1380 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1381}
1382
1383/* Update ri bits in sram sw entry */
1384static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1385 unsigned int bits, unsigned int mask)
1386{
1387 unsigned int i;
1388
1389 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1390 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1391
1392 if (!(mask & BIT(i)))
1393 continue;
1394
1395 if (bits & BIT(i))
1396 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1397 else
1398 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1399
1400 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1401 }
1402}
1403
1404/* Update ai bits in sram sw entry */
1405static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1406 unsigned int bits, unsigned int mask)
1407{
1408 unsigned int i;
1409 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1410
1411 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1412
1413 if (!(mask & BIT(i)))
1414 continue;
1415
1416 if (bits & BIT(i))
1417 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1418 else
1419 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1420
1421 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1422 }
1423}
1424
1425/* Read ai bits from sram sw entry */
1426static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1427{
1428 u8 bits;
1429 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1430 int ai_en_off = ai_off + 1;
1431 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1432
1433 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1434 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1435
1436 return bits;
1437}
1438
1439/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 1440 * lookup iteration
1441 */
1442static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1443 unsigned int lu)
1444{
1445 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1446
1447 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1448 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1449 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1450}
1451
1452/* In the sram sw entry set sign and value of the next lookup offset
1453 * and the offset value generated to the classifier
1454 */
1455static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1456 unsigned int op)
1457{
1458 /* Set sign */
1459 if (shift < 0) {
1460 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1461 shift = 0 - shift;
1462 } else {
1463 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1464 }
1465
1466 /* Set value */
1467 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1468 (unsigned char)shift;
1469
1470 /* Reset and set operation */
1471 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1472 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1473 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1474
1475 /* Set base offset as current */
1476 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1477}
1478
1479/* In the sram sw entry set sign and value of the user defined offset
1480 * generated to the classifier
1481 */
1482static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1483 unsigned int type, int offset,
1484 unsigned int op)
1485{
1486 /* Set sign */
1487 if (offset < 0) {
1488 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1489 offset = 0 - offset;
1490 } else {
1491 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1492 }
1493
1494 /* Set value */
1495 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1496 MVPP2_PRS_SRAM_UDF_MASK);
1497 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1498 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1499 MVPP2_PRS_SRAM_UDF_BITS)] &=
1500 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1501 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1502 MVPP2_PRS_SRAM_UDF_BITS)] |=
1503 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1504
1505 /* Set offset type */
1506 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1507 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1508 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1509
1510 /* Set offset operation */
1511 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1512 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1513 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1514
1515 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1516 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1517 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1518 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1519
1520 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1521 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1522 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1523
1524 /* Set base offset as current */
1525 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1526}
1527
1528/* Find parser flow entry */
1529static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1530{
1531 struct mvpp2_prs_entry *pe;
1532 int tid;
1533
1534 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1535 if (!pe)
1536 return NULL;
1537 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1538
 1539	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1540 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1541 u8 bits;
1542
1543 if (!priv->prs_shadow[tid].valid ||
1544 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1545 continue;
1546
1547 pe->index = tid;
1548 mvpp2_prs_hw_read(priv, pe);
1549 bits = mvpp2_prs_sram_ai_get(pe);
1550
 1551		/* Sram stores the classification lookup ID in AI bits [5:0] */
1552 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1553 return pe;
1554 }
1555 kfree(pe);
1556
1557 return NULL;
1558}
1559
1560/* Return first free tcam index, seeking from start to end */
1561static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1562 unsigned char end)
1563{
1564 int tid;
1565
1566 if (start > end)
1567 swap(start, end);
1568
1569 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1570 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1571
1572 for (tid = start; tid <= end; tid++) {
1573 if (!priv->prs_shadow[tid].valid)
1574 return tid;
1575 }
1576
1577 return -EINVAL;
1578}
1579
1580/* Enable/disable dropping all mac da's */
1581static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1582{
1583 struct mvpp2_prs_entry pe;
1584
1585 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1586		/* Entry exists - update port only */
1587 pe.index = MVPP2_PE_DROP_ALL;
1588 mvpp2_prs_hw_read(priv, &pe);
1589 } else {
1590 /* Entry doesn't exist - create new */
1591 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1592 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1593 pe.index = MVPP2_PE_DROP_ALL;
1594
1595 /* Non-promiscuous mode for all ports - DROP unknown packets */
1596 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1597 MVPP2_PRS_RI_DROP_MASK);
1598
1599 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1600 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1601
1602 /* Update shadow table */
1603 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1604
1605 /* Mask all ports */
1606 mvpp2_prs_tcam_port_map_set(&pe, 0);
1607 }
1608
1609 /* Update port mask */
1610 mvpp2_prs_tcam_port_set(&pe, port, add);
1611
1612 mvpp2_prs_hw_write(priv, &pe);
1613}
1614
1615/* Set port to promiscuous mode */
1616static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1617{
1618 struct mvpp2_prs_entry pe;
1619
1620 /* Promiscuous mode - Accept unknown packets */
1621
1622 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1623		/* Entry exists - update port only */
1624 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1625 mvpp2_prs_hw_read(priv, &pe);
1626 } else {
1627 /* Entry doesn't exist - create new */
1628 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1629 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1630 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1631
1632 /* Continue - set next lookup */
1633 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1634
1635 /* Set result info bits */
1636 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1637 MVPP2_PRS_RI_L2_CAST_MASK);
1638
1639 /* Shift to ethertype */
1640 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1641 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1642
1643 /* Mask all ports */
1644 mvpp2_prs_tcam_port_map_set(&pe, 0);
1645
1646 /* Update shadow table */
1647 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1648 }
1649
1650 /* Update port mask */
1651 mvpp2_prs_tcam_port_set(&pe, port, add);
1652
1653 mvpp2_prs_hw_write(priv, &pe);
1654}
1655
1656/* Accept multicast */
1657static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1658 bool add)
1659{
1660 struct mvpp2_prs_entry pe;
1661 unsigned char da_mc;
1662
1663 /* Ethernet multicast address first byte is
1664 * 0x01 for IPv4 and 0x33 for IPv6
1665 */
1666 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1667
1668 if (priv->prs_shadow[index].valid) {
1669		/* Entry exists - update port only */
1670 pe.index = index;
1671 mvpp2_prs_hw_read(priv, &pe);
1672 } else {
1673 /* Entry doesn't exist - create new */
1674 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1675 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1676 pe.index = index;
1677
1678 /* Continue - set next lookup */
1679 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1680
1681 /* Set result info bits */
1682 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1683 MVPP2_PRS_RI_L2_CAST_MASK);
1684
1685 /* Update tcam entry data first byte */
1686 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1687
1688 /* Shift to ethertype */
1689 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1690 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1691
1692 /* Mask all ports */
1693 mvpp2_prs_tcam_port_map_set(&pe, 0);
1694
1695 /* Update shadow table */
1696 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1697 }
1698
1699 /* Update port mask */
1700 mvpp2_prs_tcam_port_set(&pe, port, add);
1701
1702 mvpp2_prs_hw_write(priv, &pe);
1703}
1704
1705/* Parser per-port initialization */
1706static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1707 int lu_max, int offset)
1708{
1709 u32 val;
1710
1711 /* Set lookup ID */
1712 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1713 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1714 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1715 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1716
1717 /* Set maximum number of loops for packet received from port */
1718 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1719 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1720 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1721 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1722
1723 /* Set initial offset for packet header extraction for the first
1724 * searching loop
1725 */
1726 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1727 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1728 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1729 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1730}
1731
1732/* Default flow entries initialization for all ports */
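/* One flow entry is reserved per port at index MVPP2_PE_FIRST_DEFAULT_FLOW -
 * port, with the port number reused as the flow ID in the SRAM AI field;
 * mvpp2_prs_flow_find() above matches on exactly those AI bits.
 */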
1733static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1734{
1735 struct mvpp2_prs_entry pe;
1736 int port;
1737
1738 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1739 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1740 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1741 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1742
1743 /* Mask all ports */
1744 mvpp2_prs_tcam_port_map_set(&pe, 0);
1745
1746		/* Set flow ID */
1747 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1748 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1749
1750 /* Update shadow table and hw entry */
1751 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1752 mvpp2_prs_hw_write(priv, &pe);
1753 }
1754}
1755
1756/* Set default entry for Marvell Header field */
1757static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1758{
1759 struct mvpp2_prs_entry pe;
1760
1761 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1762
1763 pe.index = MVPP2_PE_MH_DEFAULT;
1764 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1765 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1766 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1767 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1768
1769 /* Unmask all ports */
1770 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1771
1772 /* Update shadow table and hw entry */
1773 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1774 mvpp2_prs_hw_write(priv, &pe);
1775}
1776
1777/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1778 * multicast MAC addresses
1779 */
1780static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1781{
1782 struct mvpp2_prs_entry pe;
1783
1784 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1785
1786 /* Non-promiscuous mode for all ports - DROP unknown packets */
1787 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1788 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1789
1790 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1791 MVPP2_PRS_RI_DROP_MASK);
1792 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1793 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1794
1795 /* Unmask all ports */
1796 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1797
1798 /* Update shadow table and hw entry */
1799 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1800 mvpp2_prs_hw_write(priv, &pe);
1801
1802	/* placeholders only - no ports */
1803 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1804 mvpp2_prs_mac_promisc_set(priv, 0, false);
1805 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1806 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1807}
1808
1809/* Match basic ethertypes */
1810static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1811{
1812 struct mvpp2_prs_entry pe;
1813 int tid;
1814
1815 /* Ethertype: PPPoE */
1816 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1817 MVPP2_PE_LAST_FREE_TID);
1818 if (tid < 0)
1819 return tid;
1820
1821 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1822 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1823 pe.index = tid;
1824
1825 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1826
1827 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1828 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1829 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1830 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1831 MVPP2_PRS_RI_PPPOE_MASK);
1832
1833 /* Update shadow table and hw entry */
1834 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1835 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1836 priv->prs_shadow[pe.index].finish = false;
1837 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1838 MVPP2_PRS_RI_PPPOE_MASK);
1839 mvpp2_prs_hw_write(priv, &pe);
1840
1841 /* Ethertype: ARP */
1842 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1843 MVPP2_PE_LAST_FREE_TID);
1844 if (tid < 0)
1845 return tid;
1846
1847 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1848 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1849 pe.index = tid;
1850
1851 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
1852
1853	/* Generate flow in the next iteration */
1854 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1855 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1856 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1857 MVPP2_PRS_RI_L3_PROTO_MASK);
1858 /* Set L3 offset */
1859 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1860 MVPP2_ETH_TYPE_LEN,
1861 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1862
1863 /* Update shadow table and hw entry */
1864 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1865 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1866 priv->prs_shadow[pe.index].finish = true;
1867 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1868 MVPP2_PRS_RI_L3_PROTO_MASK);
1869 mvpp2_prs_hw_write(priv, &pe);
1870
1871 /* Ethertype: LBTD */
1872 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1873 MVPP2_PE_LAST_FREE_TID);
1874 if (tid < 0)
1875 return tid;
1876
1877 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1878 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1879 pe.index = tid;
1880
1881 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1882
1883	/* Generate flow in the next iteration */
1884 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1885 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1886 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1887 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1888 MVPP2_PRS_RI_CPU_CODE_MASK |
1889 MVPP2_PRS_RI_UDF3_MASK);
1890 /* Set L3 offset */
1891 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1892 MVPP2_ETH_TYPE_LEN,
1893 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1894
1895 /* Update shadow table and hw entry */
1896 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1897 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1898 priv->prs_shadow[pe.index].finish = true;
1899 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1900 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1901 MVPP2_PRS_RI_CPU_CODE_MASK |
1902 MVPP2_PRS_RI_UDF3_MASK);
1903 mvpp2_prs_hw_write(priv, &pe);
1904
1905 /* Ethertype: IPv4 without options */
1906 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1907 MVPP2_PE_LAST_FREE_TID);
1908 if (tid < 0)
1909 return tid;
1910
1911 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1912 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1913 pe.index = tid;
1914
1915 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
1916 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1917 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1918 MVPP2_PRS_IPV4_HEAD_MASK |
1919 MVPP2_PRS_IPV4_IHL_MASK);
1920
1921 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1922 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1923 MVPP2_PRS_RI_L3_PROTO_MASK);
1924 /* Skip eth_type + 4 bytes of IP header */
1925 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1926 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1927 /* Set L3 offset */
1928 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1929 MVPP2_ETH_TYPE_LEN,
1930 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1931
1932 /* Update shadow table and hw entry */
1933 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1934 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1935 priv->prs_shadow[pe.index].finish = false;
1936 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1937 MVPP2_PRS_RI_L3_PROTO_MASK);
1938 mvpp2_prs_hw_write(priv, &pe);
1939
1940 /* Ethertype: IPv4 with options */
1941 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1942 MVPP2_PE_LAST_FREE_TID);
1943 if (tid < 0)
1944 return tid;
1945
1946 pe.index = tid;
1947
1948 /* Clear tcam data before updating */
1949 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
1950 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
1951
1952 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1953 MVPP2_PRS_IPV4_HEAD,
1954 MVPP2_PRS_IPV4_HEAD_MASK);
1955
1956 /* Clear ri before updating */
1957 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1958 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1959 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1960 MVPP2_PRS_RI_L3_PROTO_MASK);
1961
1962 /* Update shadow table and hw entry */
1963 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1964 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1965 priv->prs_shadow[pe.index].finish = false;
1966 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1967 MVPP2_PRS_RI_L3_PROTO_MASK);
1968 mvpp2_prs_hw_write(priv, &pe);
1969
1970 /* Ethertype: IPv6 without options */
1971 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1972 MVPP2_PE_LAST_FREE_TID);
1973 if (tid < 0)
1974 return tid;
1975
1976 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1977 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1978 pe.index = tid;
1979
1980 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
1981
1982 /* Skip DIP of IPV6 header */
1983 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1984 MVPP2_MAX_L3_ADDR_SIZE,
1985 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1986 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1987 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1988 MVPP2_PRS_RI_L3_PROTO_MASK);
1989 /* Set L3 offset */
1990 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1991 MVPP2_ETH_TYPE_LEN,
1992 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1993
1994 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1995 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1996 priv->prs_shadow[pe.index].finish = false;
1997 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1998 MVPP2_PRS_RI_L3_PROTO_MASK);
1999 mvpp2_prs_hw_write(priv, &pe);
2000
2001 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2002 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2003 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2004 pe.index = MVPP2_PE_ETH_TYPE_UN;
2005
2006 /* Unmask all ports */
2007 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2008
2009	/* Generate flow in the next iteration */
2010 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2011 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2012 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2013 MVPP2_PRS_RI_L3_PROTO_MASK);
2014	/* Set L3 offset even though the L3 protocol is unknown */
2015 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2016 MVPP2_ETH_TYPE_LEN,
2017 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2018
2019 /* Update shadow table and hw entry */
2020 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2021 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2022 priv->prs_shadow[pe.index].finish = true;
2023 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2024 MVPP2_PRS_RI_L3_PROTO_MASK);
2025 mvpp2_prs_hw_write(priv, &pe);
2026
2027 return 0;
2028}
2029
2030/* Parser default initialization */
2031static int mvpp2_prs_default_init(struct udevice *dev,
2032 struct mvpp2 *priv)
2033{
2034 int err, index, i;
2035
2036 /* Enable tcam table */
2037 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2038
2039 /* Clear all tcam and sram entries */
2040 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2041 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2042 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2043 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2044
2045 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2046 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2047 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2048 }
2049
2050 /* Invalidate all tcam entries */
2051 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2052 mvpp2_prs_hw_inv(priv, index);
2053
2054 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2055 sizeof(struct mvpp2_prs_shadow),
2056 GFP_KERNEL);
2057 if (!priv->prs_shadow)
2058 return -ENOMEM;
2059
2060 /* Always start from lookup = 0 */
2061 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2062 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2063 MVPP2_PRS_PORT_LU_MAX, 0);
2064
2065 mvpp2_prs_def_flow_init(priv);
2066
2067 mvpp2_prs_mh_init(priv);
2068
2069 mvpp2_prs_mac_init(priv);
2070
2071 err = mvpp2_prs_etype_init(priv);
2072 if (err)
2073 return err;
2074
2075 return 0;
2076}
2077
2078/* Compare MAC DA with tcam entry data */
2079static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2080 const u8 *da, unsigned char *mask)
2081{
2082 unsigned char tcam_byte, tcam_mask;
2083 int index;
2084
2085 for (index = 0; index < ETH_ALEN; index++) {
2086 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2087 if (tcam_mask != mask[index])
2088 return false;
2089
2090 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2091 return false;
2092 }
2093
2094 return true;
2095}
2096
2097/* Find tcam entry with matched pair <MAC DA, port> */
2098static struct mvpp2_prs_entry *
2099mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2100 unsigned char *mask, int udf_type)
2101{
2102 struct mvpp2_prs_entry *pe;
2103 int tid;
2104
2105 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2106 if (!pe)
2107 return NULL;
2108 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2109
2110	/* Go through all entries with MVPP2_PRS_LU_MAC */
2111 for (tid = MVPP2_PE_FIRST_FREE_TID;
2112 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2113 unsigned int entry_pmap;
2114
2115 if (!priv->prs_shadow[tid].valid ||
2116 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2117 (priv->prs_shadow[tid].udf != udf_type))
2118 continue;
2119
2120 pe->index = tid;
2121 mvpp2_prs_hw_read(priv, pe);
2122 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2123
2124 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2125 entry_pmap == pmap)
2126 return pe;
2127 }
2128 kfree(pe);
2129
2130 return NULL;
2131}
2132
2133/* Update parser's mac da entry */
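/* Rough flow of the helper below: look for an existing <MAC DA, port> entry
 * first; when adding and none exists, grab a free TCAM index below the first
 * MAC range entry, enable the port in the entry's port map, program the DA
 * match bytes and result info, and shift to the ethertype for the next
 * lookup. When removing, the entry is invalidated once its port map is empty.
 */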
2134static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2135 const u8 *da, bool add)
2136{
2137 struct mvpp2_prs_entry *pe;
2138 unsigned int pmap, len, ri;
2139 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2140 int tid;
2141
2142	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2143 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2144 MVPP2_PRS_UDF_MAC_DEF);
2145
2146 /* No such entry */
2147 if (!pe) {
2148 if (!add)
2149 return 0;
2150
2151 /* Create new TCAM entry */
2152		/* Find first range MAC entry */
2153 for (tid = MVPP2_PE_FIRST_FREE_TID;
2154 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2155 if (priv->prs_shadow[tid].valid &&
2156 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2157 (priv->prs_shadow[tid].udf ==
2158 MVPP2_PRS_UDF_MAC_RANGE))
2159 break;
2160
2161		/* Go through all entries from first to last */
2162 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2163 tid - 1);
2164 if (tid < 0)
2165 return tid;
2166
2167 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2168 if (!pe)
2169 return -1;
2170 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2171 pe->index = tid;
2172
2173 /* Mask all ports */
2174 mvpp2_prs_tcam_port_map_set(pe, 0);
2175 }
2176
2177 /* Update port mask */
2178 mvpp2_prs_tcam_port_set(pe, port, add);
2179
2180 /* Invalidate the entry if no ports are left enabled */
2181 pmap = mvpp2_prs_tcam_port_map_get(pe);
2182 if (pmap == 0) {
2183 if (add) {
2184 kfree(pe);
2185 return -1;
2186 }
2187 mvpp2_prs_hw_inv(priv, pe->index);
2188 priv->prs_shadow[pe->index].valid = false;
2189 kfree(pe);
2190 return 0;
2191 }
2192
2193 /* Continue - set next lookup */
2194 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2195
2196 /* Set match on DA */
2197 len = ETH_ALEN;
2198 while (len--)
2199 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2200
2201 /* Set result info bits */
2202 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2203
2204 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2205 MVPP2_PRS_RI_MAC_ME_MASK);
2206 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2207 MVPP2_PRS_RI_MAC_ME_MASK);
2208
2209 /* Shift to ethertype */
2210 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2211 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2212
2213 /* Update shadow table and hw entry */
2214 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2215 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2216 mvpp2_prs_hw_write(priv, pe);
2217
2218 kfree(pe);
2219
2220 return 0;
2221}
2222
2223static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2224{
2225 int err;
2226
2227 /* Remove old parser entry */
2228 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2229 false);
2230 if (err)
2231 return err;
2232
2233 /* Add new parser entry */
2234 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2235 if (err)
2236 return err;
2237
2238 /* Set addr in the device */
2239 memcpy(port->dev_addr, da, ETH_ALEN);
2240
2241 return 0;
2242}
2243
2244/* Set prs flow for the port */
2245static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2246{
2247 struct mvpp2_prs_entry *pe;
2248 int tid;
2249
2250 pe = mvpp2_prs_flow_find(port->priv, port->id);
2251
2252	/* No such entry exists */
2253	if (!pe) {
2254		/* Go through all entries from last to first */
2255 tid = mvpp2_prs_tcam_first_free(port->priv,
2256 MVPP2_PE_LAST_FREE_TID,
2257 MVPP2_PE_FIRST_FREE_TID);
2258 if (tid < 0)
2259 return tid;
2260
2261 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2262 if (!pe)
2263 return -ENOMEM;
2264
2265 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2266 pe->index = tid;
2267
2268		/* Set flow ID */
2269 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2270 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2271
2272 /* Update shadow table */
2273 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2274 }
2275
2276 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2277 mvpp2_prs_hw_write(port->priv, pe);
2278 kfree(pe);
2279
2280 return 0;
2281}
2282
2283/* Classifier configuration routines */
2284
2285/* Update classification flow table registers */
2286static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2287 struct mvpp2_cls_flow_entry *fe)
2288{
2289 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2290 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2291 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2292 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2293}
2294
2295/* Update classification lookup table register */
2296static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2297 struct mvpp2_cls_lookup_entry *le)
2298{
2299 u32 val;
2300
2301 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2302 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2303 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2304}
2305
2306/* Classifier default initialization */
2307static void mvpp2_cls_init(struct mvpp2 *priv)
2308{
2309 struct mvpp2_cls_lookup_entry le;
2310 struct mvpp2_cls_flow_entry fe;
2311 int index;
2312
2313 /* Enable classifier */
2314 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2315
2316 /* Clear classifier flow table */
2317 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
2318 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2319 fe.index = index;
2320 mvpp2_cls_flow_write(priv, &fe);
2321 }
2322
2323 /* Clear classifier lookup table */
2324 le.data = 0;
2325 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2326 le.lkpid = index;
2327 le.way = 0;
2328 mvpp2_cls_lookup_write(priv, &le);
2329
2330 le.way = 1;
2331 mvpp2_cls_lookup_write(priv, &le);
2332 }
2333}
2334
2335static void mvpp2_cls_port_config(struct mvpp2_port *port)
2336{
2337 struct mvpp2_cls_lookup_entry le;
2338 u32 val;
2339
2340 /* Set way for the port */
2341 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2342 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2343 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2344
2345 /* Pick the entry to be accessed in lookup ID decoding table
2346 * according to the way and lkpid.
2347 */
2348 le.lkpid = port->id;
2349 le.way = 0;
2350 le.data = 0;
2351
2352 /* Set initial CPU queue for receiving packets */
2353 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2354 le.data |= port->first_rxq;
2355
2356 /* Disable classification engines */
2357 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2358
2359 /* Update lookup ID table entry */
2360 mvpp2_cls_lookup_write(port->priv, &le);
2361}
2362
2363/* Set CPU queue number for oversize packets */
2364static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2365{
2366 u32 val;
2367
2368 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2369 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2370
2371 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2372 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2373
2374 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2375 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2376 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2377}
2378
2379/* Buffer Manager configuration routines */
2380
2381/* Create pool */
2382static int mvpp2_bm_pool_create(struct udevice *dev,
2383 struct mvpp2 *priv,
2384 struct mvpp2_bm_pool *bm_pool, int size)
2385{
2386 u32 val;
2387
2388	/* Number of buffer pointers must be a multiple of 16, as per
2389 * hardware constraints
2390 */
2391 if (!IS_ALIGNED(size, 16))
2392 return -EINVAL;
2393
2394	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2395	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2396	if (!bm_pool->virt_addr)
2397 return -ENOMEM;
2398
2399	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2400			MVPP2_BM_POOL_PTR_ALIGN)) {
2401		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2402 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2403 return -ENOMEM;
2404 }
2405
2406 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2407		    lower_32_bits(bm_pool->dma_addr));
2408	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2409
2410 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2411 val |= MVPP2_BM_START_MASK;
2412 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2413
2414 bm_pool->type = MVPP2_BM_FREE;
2415 bm_pool->size = size;
2416 bm_pool->pkt_size = 0;
2417 bm_pool->buf_num = 0;
2418
2419 return 0;
2420}
2421
2422/* Set pool buffer size */
2423static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2424 struct mvpp2_bm_pool *bm_pool,
2425 int buf_size)
2426{
2427 u32 val;
2428
2429 bm_pool->buf_size = buf_size;
2430
2431 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2432 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2433}
2434
2435/* Free all buffers from the pool */
2436static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2437 struct mvpp2_bm_pool *bm_pool)
2438{
2439 bm_pool->buf_num = 0;
2440}
2441
2442/* Cleanup pool */
2443static int mvpp2_bm_pool_destroy(struct udevice *dev,
2444 struct mvpp2 *priv,
2445 struct mvpp2_bm_pool *bm_pool)
2446{
2447 u32 val;
2448
2449 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2450 if (bm_pool->buf_num) {
2451 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2452 return 0;
2453 }
2454
2455 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2456 val |= MVPP2_BM_STOP_MASK;
2457 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2458
2459 return 0;
2460}
2461
2462static int mvpp2_bm_pools_init(struct udevice *dev,
2463 struct mvpp2 *priv)
2464{
2465 int i, err, size;
2466 struct mvpp2_bm_pool *bm_pool;
2467
2468 /* Create all pools with maximum size */
2469 size = MVPP2_BM_POOL_SIZE_MAX;
2470 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2471 bm_pool = &priv->bm_pools[i];
2472 bm_pool->id = i;
2473 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2474 if (err)
2475 goto err_unroll_pools;
2476 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2477 }
2478 return 0;
2479
2480err_unroll_pools:
2481	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2482 for (i = i - 1; i >= 0; i--)
2483 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2484 return err;
2485}
2486
2487static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2488{
2489 int i, err;
2490
2491 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2492 /* Mask BM all interrupts */
2493 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2494 /* Clear BM cause register */
2495 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2496 }
2497
2498 /* Allocate and initialize BM pools */
2499 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2500 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2501 if (!priv->bm_pools)
2502 return -ENOMEM;
2503
2504 err = mvpp2_bm_pools_init(dev, priv);
2505 if (err < 0)
2506 return err;
2507 return 0;
2508}
2509
2510/* Attach long pool to rxq */
2511static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2512 int lrxq, int long_pool)
2513{
2514	u32 val, mask;
2515	int prxq;
2516
2517 /* Get queue physical ID */
2518 prxq = port->rxqs[lrxq]->id;
2519
2520	if (port->priv->hw_version == MVPP21)
2521 mask = MVPP21_RXQ_POOL_LONG_MASK;
2522 else
2523 mask = MVPP22_RXQ_POOL_LONG_MASK;
2524
2525	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2526 val &= ~mask;
2527 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
2528	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2529}
2530
2531/* Set pool number in a BM cookie */
2532static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2533{
2534 u32 bm;
2535
2536 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2537 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2538
2539 return bm;
2540}
2541
2542/* Get pool number from a BM cookie */
2543static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2544{
2545 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2546}
2547
2548/* Release buffer to BM */
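/* Releasing a buffer takes a pair of register writes: the "cookie" value goes
 * to MVPP2_BM_VIRT_RLS_REG and the DMA address to MVPP2_BM_PHY_RLS_REG. On
 * PPv2.2 the upper 32 bits of both addresses are staged in
 * MVPP22_BM_ADDR_HIGH_RLS_REG beforehand (only relevant for 64-bit
 * dma_addr_t/phys_addr_t builds).
 */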
2549static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2550				     dma_addr_t buf_dma_addr,
2551				     unsigned long buf_phys_addr)
2552{
2553	if (port->priv->hw_version == MVPP22) {
2554 u32 val = 0;
2555
2556 if (sizeof(dma_addr_t) == 8)
2557 val |= upper_32_bits(buf_dma_addr) &
2558 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2559
2560 if (sizeof(phys_addr_t) == 8)
2561 val |= (upper_32_bits(buf_phys_addr)
2562 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2563 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2564
2565 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2566 }
2567
2568	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2569 * returned in the "cookie" field of the RX
2570 * descriptor. Instead of storing the virtual address, we
2571 * store the physical address
2572 */
2573 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2574	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
2575}
2576
2577/* Refill BM pool */
2578static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2579			      dma_addr_t dma_addr,
2580			      phys_addr_t phys_addr)
2581{
2582 int pool = mvpp2_bm_cookie_pool_get(bm);
2583
2584	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2585}
2586
2587/* Allocate buffers for the pool */
2588static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2589 struct mvpp2_bm_pool *bm_pool, int buf_num)
2590{
2591 int i;
2592
2593 if (buf_num < 0 ||
2594 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2595 netdev_err(port->dev,
2596 "cannot allocate %d buffers for pool %d\n",
2597 buf_num, bm_pool->id);
2598 return 0;
2599 }
2600
2601	for (i = 0; i < buf_num; i++) {
2602		mvpp2_bm_pool_put(port, bm_pool->id,
2603				  (dma_addr_t)buffer_loc.rx_buffer[i],
2604				  (unsigned long)buffer_loc.rx_buffer[i]);
2605
2606	}
2607
2608 /* Update BM driver with number of buffers added to pool */
2609 bm_pool->buf_num += i;
2610 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2611
2612 return i;
2613}
2614
2615/* Notify the driver that BM pool is being used as specific type and return the
2616 * pool pointer on success
2617 */
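/* In this U-Boot port only the long (software forwarding) pool appears to be
 * set up, see mvpp2_swf_bm_pool_init() below; the default buffer count is
 * MVPP2_BM_LONG_BUF_NUM or MVPP2_BM_SHORT_BUF_NUM depending on the type.
 */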
2618static struct mvpp2_bm_pool *
2619mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2620 int pkt_size)
2621{
2622 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2623 int num;
2624
2625 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2626 netdev_err(port->dev, "mixing pool types is forbidden\n");
2627 return NULL;
2628 }
2629
2630 if (new_pool->type == MVPP2_BM_FREE)
2631 new_pool->type = type;
2632
2633 /* Allocate buffers in case BM pool is used as long pool, but packet
2634	 * size doesn't match MTU or BM pool hasn't been used yet
2635 */
2636 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2637 (new_pool->pkt_size == 0)) {
2638 int pkts_num;
2639
2640 /* Set default buffer number or free all the buffers in case
2641 * the pool is not empty
2642 */
2643 pkts_num = new_pool->buf_num;
2644 if (pkts_num == 0)
2645 pkts_num = type == MVPP2_BM_SWF_LONG ?
2646 MVPP2_BM_LONG_BUF_NUM :
2647 MVPP2_BM_SHORT_BUF_NUM;
2648 else
2649 mvpp2_bm_bufs_free(NULL,
2650 port->priv, new_pool);
2651
2652 new_pool->pkt_size = pkt_size;
2653
2654 /* Allocate buffers for this pool */
2655 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2656 if (num != pkts_num) {
2657			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2658 new_pool->id, num, pkts_num);
2659 return NULL;
2660 }
2661 }
2662
2663 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2664 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2665
2666 return new_pool;
2667}
2668
2669/* Initialize pools for swf */
2670static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2671{
2672 int rxq;
2673
2674 if (!port->pool_long) {
2675 port->pool_long =
2676 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2677 MVPP2_BM_SWF_LONG,
2678 port->pkt_size);
2679 if (!port->pool_long)
2680 return -ENOMEM;
2681
2682 port->pool_long->port_map |= (1 << port->id);
2683
2684 for (rxq = 0; rxq < rxq_number; rxq++)
2685 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2686 }
2687
2688 return 0;
2689}
2690
2691/* Port configuration routines */
2692
2693static void mvpp2_port_mii_set(struct mvpp2_port *port)
2694{
2695 u32 val;
2696
2697 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2698
2699 switch (port->phy_interface) {
2700 case PHY_INTERFACE_MODE_SGMII:
2701 val |= MVPP2_GMAC_INBAND_AN_MASK;
2702 break;
2703 case PHY_INTERFACE_MODE_RGMII:
2704 val |= MVPP2_GMAC_PORT_RGMII_MASK;
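		/* fall through - RGMII also clears the PCS enable bit below */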
2705 default:
2706 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2707 }
2708
2709 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2710}
2711
2712static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2713{
2714 u32 val;
2715
2716 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2717 val |= MVPP2_GMAC_FC_ADV_EN;
2718 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2719}
2720
2721static void mvpp2_port_enable(struct mvpp2_port *port)
2722{
2723 u32 val;
2724
2725 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2726 val |= MVPP2_GMAC_PORT_EN_MASK;
2727 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2728 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2729}
2730
2731static void mvpp2_port_disable(struct mvpp2_port *port)
2732{
2733 u32 val;
2734
2735 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2736 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2737 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2738}
2739
2740/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2741static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2742{
2743 u32 val;
2744
2745 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2746 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2747 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2748}
2749
2750/* Configure loopback port */
2751static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2752{
2753 u32 val;
2754
2755 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2756
2757 if (port->speed == 1000)
2758 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2759 else
2760 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2761
2762 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2763 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2764 else
2765 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2766
2767 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2768}
2769
2770static void mvpp2_port_reset(struct mvpp2_port *port)
2771{
2772 u32 val;
2773
2774 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2775 ~MVPP2_GMAC_PORT_RESET_MASK;
2776 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2777
2778 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2779 MVPP2_GMAC_PORT_RESET_MASK)
2780 continue;
2781}
2782
2783/* Change maximum receive size of the port */
2784static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2785{
2786 u32 val;
2787
2788 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2789 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2790 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2791 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2792 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2793}
2794
2795/* Set defaults to the MVPP2 port */
2796static void mvpp2_defaults_set(struct mvpp2_port *port)
2797{
2798 int tx_port_num, val, queue, ptxq, lrxq;
2799
2800	if (port->priv->hw_version == MVPP21) {
2801 /* Configure port to loopback if needed */
2802 if (port->flags & MVPP2_F_LOOPBACK)
2803 mvpp2_port_loopback_set(port);
2804
2805		/* Update TX FIFO MIN Threshold */
2806 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2807 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2808 /* Min. TX threshold must be less than minimal packet length */
2809 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2810 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2811 }
2812
2813 /* Disable Legacy WRR, Disable EJP, Release from reset */
2814 tx_port_num = mvpp2_egress_port(port);
2815 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2816 tx_port_num);
2817 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2818
2819 /* Close bandwidth for all queues */
2820 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2821 ptxq = mvpp2_txq_phys(port->id, queue);
2822 mvpp2_write(port->priv,
2823 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2824 }
2825
2826 /* Set refill period to 1 usec, refill tokens
2827 * and bucket size to maximum
2828 */
2829 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2830 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2831 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2832 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2833 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2834 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2835 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2836 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2837
2838 /* Set MaximumLowLatencyPacketSize value to 256 */
2839 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2840 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2841 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2842
2843 /* Enable Rx cache snoop */
2844 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2845 queue = port->rxqs[lrxq]->id;
2846 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2847 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2848 MVPP2_SNOOP_BUF_HDR_MASK;
2849 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2850 }
2851}
2852
2853/* Enable/disable receiving packets */
2854static void mvpp2_ingress_enable(struct mvpp2_port *port)
2855{
2856 u32 val;
2857 int lrxq, queue;
2858
2859 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2860 queue = port->rxqs[lrxq]->id;
2861 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2862 val &= ~MVPP2_RXQ_DISABLE_MASK;
2863 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2864 }
2865}
2866
2867static void mvpp2_ingress_disable(struct mvpp2_port *port)
2868{
2869 u32 val;
2870 int lrxq, queue;
2871
2872 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2873 queue = port->rxqs[lrxq]->id;
2874 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2875 val |= MVPP2_RXQ_DISABLE_MASK;
2876 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2877 }
2878}
2879
2880/* Enable transmit via physical egress queue
2881 * - HW starts taking descriptors from DRAM
2882 */
2883static void mvpp2_egress_enable(struct mvpp2_port *port)
2884{
2885 u32 qmap;
2886 int queue;
2887 int tx_port_num = mvpp2_egress_port(port);
2888
2889 /* Enable all initialized TXs. */
2890 qmap = 0;
2891 for (queue = 0; queue < txq_number; queue++) {
2892 struct mvpp2_tx_queue *txq = port->txqs[queue];
2893
2894 if (txq->descs != NULL)
2895 qmap |= (1 << queue);
2896 }
2897
2898 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2899 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2900}
2901
2902/* Disable transmit via physical egress queue
2903 * - HW doesn't take descriptors from DRAM
2904 */
2905static void mvpp2_egress_disable(struct mvpp2_port *port)
2906{
2907 u32 reg_data;
2908 int delay;
2909 int tx_port_num = mvpp2_egress_port(port);
2910
2911 /* Issue stop command for active channels only */
2912 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2913 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2914 MVPP2_TXP_SCHED_ENQ_MASK;
2915 if (reg_data != 0)
2916 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2917 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2918
2919 /* Wait for all Tx activity to terminate. */
2920 delay = 0;
2921 do {
2922 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2923 netdev_warn(port->dev,
2924 "Tx stop timed out, status=0x%08x\n",
2925 reg_data);
2926 break;
2927 }
2928 mdelay(1);
2929 delay++;
2930
2931 /* Check port TX Command register that all
2932 * Tx queues are stopped
2933 */
2934 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2935 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2936}
2937
2938/* Rx descriptors helper methods */
2939
2940/* Get number of Rx descriptors occupied by received packets */
2941static inline int
2942mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2943{
2944 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2945
2946 return val & MVPP2_RXQ_OCCUPIED_MASK;
2947}
2948
2949/* Update Rx queue status with the number of occupied and available
2950 * Rx descriptor slots.
2951 */
2952static inline void
2953mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2954 int used_count, int free_count)
2955{
2956	/* Decrement the number of used descriptors and increment the
2957	 * number of free descriptors.
2958	 */
2959 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2960
2961 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2962}
2963
2964/* Get pointer to next RX descriptor to be processed by SW */
2965static inline struct mvpp2_rx_desc *
2966mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2967{
2968 int rx_desc = rxq->next_desc_to_proc;
2969
2970 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2971 prefetch(rxq->descs + rxq->next_desc_to_proc);
2972 return rxq->descs + rx_desc;
2973}
2974
2975/* Set rx queue offset */
2976static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2977 int prxq, int offset)
2978{
2979 u32 val;
2980
2981 /* Convert offset from bytes to units of 32 bytes */
2982 offset = offset >> 5;
2983
2984 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2985 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2986
2987	/* Offset is in units of 32 bytes */
2988 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2989 MVPP2_RXQ_PACKET_OFFSET_MASK);
2990
2991 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2992}
2993
2994/* Obtain BM cookie information from descriptor */
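/* The cookie packs the BM pool number at MVPP2_BM_COOKIE_POOL_OFFS and the
 * CPU number at MVPP2_BM_COOKIE_CPU_OFFS; mvpp2_bm_cookie_pool_get() above
 * extracts the pool field again when the buffer is refilled.
 */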
2995static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
2996 struct mvpp2_rx_desc *rx_desc)
2997{
2998	int cpu = smp_processor_id();
2999	int pool;
3000
3001 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
3002 MVPP2_RXD_BM_POOL_ID_MASK) >>
3003 MVPP2_RXD_BM_POOL_ID_OFFS;
3004
3005 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
3006 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
3007}
3008
3009/* Tx descriptors helper methods */
3010
3011/* Get number of Tx descriptors waiting to be transmitted by HW */
3012static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
3013 struct mvpp2_tx_queue *txq)
3014{
3015 u32 val;
3016
3017 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3018 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3019
3020 return val & MVPP2_TXQ_PENDING_MASK;
3021}
3022
3023/* Get pointer to next Tx descriptor to be processed (send) by HW */
3024static struct mvpp2_tx_desc *
3025mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
3026{
3027 int tx_desc = txq->next_desc_to_proc;
3028
3029 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
3030 return txq->descs + tx_desc;
3031}
3032
3033/* Update HW with number of aggregated Tx descriptors to be sent */
3034static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
3035{
3036 /* aggregated access - relevant TXQ number is written in TX desc */
3037 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
3038}
3039
3040/* Get number of sent descriptors and decrement counter.
3041 * The number of sent descriptors is returned.
3042 * Per-CPU access
3043 */
3044static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
3045 struct mvpp2_tx_queue *txq)
3046{
3047 u32 val;
3048
3049 /* Reading status reg resets transmitted descriptor counter */
3050 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
3051
3052 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
3053 MVPP2_TRANSMITTED_COUNT_OFFSET;
3054}
3055
3056static void mvpp2_txq_sent_counter_clear(void *arg)
3057{
3058 struct mvpp2_port *port = arg;
3059 int queue;
3060
3061 for (queue = 0; queue < txq_number; queue++) {
3062 int id = port->txqs[queue]->id;
3063
3064 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
3065 }
3066}
3067
3068/* Set max sizes for Tx queues */
3069static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3070{
3071 u32 val, size, mtu;
3072 int txq, tx_port_num;
3073
3074 mtu = port->pkt_size * 8;
3075 if (mtu > MVPP2_TXP_MTU_MAX)
3076 mtu = MVPP2_TXP_MTU_MAX;
3077
3078 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
3079 mtu = 3 * mtu;
3080
3081 /* Indirect access to registers */
3082 tx_port_num = mvpp2_egress_port(port);
3083 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3084
3085 /* Set MTU */
3086 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
3087 val &= ~MVPP2_TXP_MTU_MAX;
3088 val |= mtu;
3089 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
3090
3091	/* TXP token size and all TXQ token sizes must be larger than the MTU */
3092 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
3093 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
3094 if (size < mtu) {
3095 size = mtu;
3096 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
3097 val |= size;
3098 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3099 }
3100
3101 for (txq = 0; txq < txq_number; txq++) {
3102 val = mvpp2_read(port->priv,
3103 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
3104 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
3105
3106 if (size < mtu) {
3107 size = mtu;
3108 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
3109 val |= size;
3110 mvpp2_write(port->priv,
3111 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
3112 val);
3113 }
3114 }
3115}
3116
3117/* Free Tx queue skbuffs */
3118static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
3119 struct mvpp2_tx_queue *txq,
3120 struct mvpp2_txq_pcpu *txq_pcpu, int num)
3121{
3122 int i;
3123
3124 for (i = 0; i < num; i++)
3125 mvpp2_txq_inc_get(txq_pcpu);
3126}
3127
3128static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
3129 u32 cause)
3130{
3131 int queue = fls(cause) - 1;
3132
3133 return port->rxqs[queue];
3134}
3135
3136static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
3137 u32 cause)
3138{
3139 int queue = fls(cause) - 1;
3140
3141 return port->txqs[queue];
3142}
3143
3144/* Rx/Tx queue initialization/cleanup methods */
3145
3146/* Allocate and initialize descriptors for aggr TXQ */
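/* In this U-Boot port the aggregated TXQ descriptors come from the static
 * buffer_loc area instead of a DMA allocation. Note that PPv2.1 takes the
 * full 32-bit descriptor base address, while PPv2.2 expects it shifted right
 * by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS.
 */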
3147static int mvpp2_aggr_txq_init(struct udevice *dev,
3148 struct mvpp2_tx_queue *aggr_txq,
3149 int desc_num, int cpu,
3150 struct mvpp2 *priv)
3151{
3152	u32 txq_dma;
3153
3154	/* Allocate memory for TX descriptors */
3155	aggr_txq->descs = buffer_loc.aggr_tx_descs;
3156	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
3157	if (!aggr_txq->descs)
3158 return -ENOMEM;
3159
3160 /* Make sure descriptor address is cache line size aligned */
3161 BUG_ON(aggr_txq->descs !=
3162 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3163
3164 aggr_txq->last_desc = aggr_txq->size - 1;
3165
3166 /* Aggr TXQ no reset WA */
3167 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
3168 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
3169
3170	/* Set Tx descriptors queue starting address indirect
3171 * access
3172 */
3173 if (priv->hw_version == MVPP21)
3174 txq_dma = aggr_txq->descs_dma;
3175 else
3176 txq_dma = aggr_txq->descs_dma >>
3177 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
3178
3179 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
3180	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
3181
3182 return 0;
3183}
3184
3185/* Create a specified Rx queue */
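/* As with the TX queues, the RX descriptor ring lives in the static
 * buffer_loc area; PPv2.1 is given the full DMA address while PPv2.2 expects
 * it shifted right by MVPP22_DESC_ADDR_OFFS.
 */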
3186static int mvpp2_rxq_init(struct mvpp2_port *port,
3187 struct mvpp2_rx_queue *rxq)
3188
3189{
3190	u32 rxq_dma;
3191
3192	rxq->size = port->rx_ring_size;
3193
3194 /* Allocate memory for RX descriptors */
3195 rxq->descs = buffer_loc.rx_descs;
3196	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
3197	if (!rxq->descs)
3198 return -ENOMEM;
3199
3200 BUG_ON(rxq->descs !=
3201 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3202
3203 rxq->last_desc = rxq->size - 1;
3204
3205 /* Zero occupied and non-occupied counters - direct access */
3206 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3207
3208 /* Set Rx descriptors queue starting address - indirect access */
3209 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3210	if (port->priv->hw_version == MVPP21)
3211 rxq_dma = rxq->descs_dma;
3212 else
3213 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
3214 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
3215	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
3216 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
3217
3218 /* Set Offset */
3219 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
3220
3221 /* Add number of descriptors ready for receiving packets */
3222 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3223
3224 return 0;
3225}
3226
3227/* Push packets received by the RXQ to BM pool */
3228static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3229 struct mvpp2_rx_queue *rxq)
3230{
3231 int rx_received, i;
3232
3233 rx_received = mvpp2_rxq_received(port, rxq->id);
3234 if (!rx_received)
3235 return;
3236
3237 for (i = 0; i < rx_received; i++) {
3238 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3239		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
3240
3241		mvpp2_pool_refill(port, bm,
3242				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3243				  mvpp2_rxdesc_cookie_get(port, rx_desc));
3244	}
3245 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3246}
3247
3248/* Cleanup Rx queue */
3249static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3250 struct mvpp2_rx_queue *rxq)
3251{
3252 mvpp2_rxq_drop_pkts(port, rxq);
3253
3254 rxq->descs = NULL;
3255 rxq->last_desc = 0;
3256 rxq->next_desc_to_proc = 0;
3257	rxq->descs_dma = 0;
3258
3259 /* Clear Rx descriptors queue starting address and size;
3260 * free descriptor number
3261 */
3262 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3263 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3264 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3265 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3266}
3267
3268/* Create and initialize a Tx queue */
3269static int mvpp2_txq_init(struct mvpp2_port *port,
3270 struct mvpp2_tx_queue *txq)
3271{
3272 u32 val;
3273 int cpu, desc, desc_per_txq, tx_port_num;
3274 struct mvpp2_txq_pcpu *txq_pcpu;
3275
3276 txq->size = port->tx_ring_size;
3277
3278 /* Allocate memory for Tx descriptors */
3279 txq->descs = buffer_loc.tx_descs;
3280	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
3281	if (!txq->descs)
3282 return -ENOMEM;
3283
3284 /* Make sure descriptor address is cache line size aligned */
3285 BUG_ON(txq->descs !=
3286 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3287
3288 txq->last_desc = txq->size - 1;
3289
3290 /* Set Tx descriptors queue starting address - indirect access */
3291 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3292	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
3293	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3294 MVPP2_TXQ_DESC_SIZE_MASK);
3295 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
3296 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3297 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3298 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3299 val &= ~MVPP2_TXQ_PENDING_MASK;
3300 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3301
3302 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3303 * for each existing TXQ.
3304	 * TCONTs for the PON port must be continuous from 0 to MVPP2_MAX_TCONT;
3305	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS.
3306 */
3307 desc_per_txq = 16;
3308 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3309 (txq->log_id * desc_per_txq);
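	/*
	 * Worked example (assuming MVPP2_MAX_TXQ is 8): port 1, logical
	 * txq 0 is assigned prefetch descriptors 128..143 (1 * 8 * 16).
	 */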
3310
3311 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3312 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
Thomas Petazzoni5555f072017-02-16 08:03:37 +01003313 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Stefan Roese96c19042016-02-10 07:22:10 +01003314
3315 /* WRR / EJP configuration - indirect access */
3316 tx_port_num = mvpp2_egress_port(port);
3317 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3318
3319 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3320 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3321 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3322 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3323 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3324
3325 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3326 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3327 val);
3328
3329 for_each_present_cpu(cpu) {
3330 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3331 txq_pcpu->size = txq->size;
3332 }
3333
3334 return 0;
3335}
3336
3337/* Free allocated TXQ resources */
3338static void mvpp2_txq_deinit(struct mvpp2_port *port,
3339 struct mvpp2_tx_queue *txq)
3340{
3341 txq->descs = NULL;
3342 txq->last_desc = 0;
3343 txq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003344 txq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01003345
3346 /* Set minimum bandwidth for disabled TXQs */
3347 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3348
3349 /* Set Tx descriptors queue starting address and size */
3350 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3351 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3352 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3353}
3354
3355/* Cleanup Tx ports */
3356static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3357{
3358 struct mvpp2_txq_pcpu *txq_pcpu;
3359 int delay, pending, cpu;
3360 u32 val;
3361
3362 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3363 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3364 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3365 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3366
3367	/* The queue has been stopped, so wait for all packets
3368	 * to be transmitted.
3369 */
3370 delay = 0;
3371 do {
3372 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3373 netdev_warn(port->dev,
3374 "port %d: cleaning queue %d timed out\n",
3375 port->id, txq->log_id);
3376 break;
3377 }
3378 mdelay(1);
3379 delay++;
3380
3381 pending = mvpp2_txq_pend_desc_num_get(port, txq);
3382 } while (pending);
3383
3384 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3385 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3386
3387 for_each_present_cpu(cpu) {
3388 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3389
3390 /* Release all packets */
3391 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3392
3393 /* Reset queue */
3394 txq_pcpu->count = 0;
3395 txq_pcpu->txq_put_index = 0;
3396 txq_pcpu->txq_get_index = 0;
3397 }
3398}
3399
3400/* Cleanup all Tx queues */
3401static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3402{
3403 struct mvpp2_tx_queue *txq;
3404 int queue;
3405 u32 val;
3406
3407 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3408
3409 /* Reset Tx ports and delete Tx queues */
3410 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3411 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3412
3413 for (queue = 0; queue < txq_number; queue++) {
3414 txq = port->txqs[queue];
3415 mvpp2_txq_clean(port, txq);
3416 mvpp2_txq_deinit(port, txq);
3417 }
3418
3419 mvpp2_txq_sent_counter_clear(port);
3420
3421 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3422 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3423}
3424
3425/* Cleanup all Rx queues */
3426static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3427{
3428 int queue;
3429
3430 for (queue = 0; queue < rxq_number; queue++)
3431 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3432}
3433
3434/* Init all Rx queues for port */
3435static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3436{
3437 int queue, err;
3438
3439 for (queue = 0; queue < rxq_number; queue++) {
3440 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3441 if (err)
3442 goto err_cleanup;
3443 }
3444 return 0;
3445
3446err_cleanup:
3447 mvpp2_cleanup_rxqs(port);
3448 return err;
3449}
3450
3451/* Init all tx queues for port */
3452static int mvpp2_setup_txqs(struct mvpp2_port *port)
3453{
3454 struct mvpp2_tx_queue *txq;
3455 int queue, err;
3456
3457 for (queue = 0; queue < txq_number; queue++) {
3458 txq = port->txqs[queue];
3459 err = mvpp2_txq_init(port, txq);
3460 if (err)
3461 goto err_cleanup;
3462 }
3463
3464 mvpp2_txq_sent_counter_clear(port);
3465 return 0;
3466
3467err_cleanup:
3468 mvpp2_cleanup_txqs(port);
3469 return err;
3470}
3471
3472/* Adjust link */
3473static void mvpp2_link_event(struct mvpp2_port *port)
3474{
3475 struct phy_device *phydev = port->phy_dev;
3476 int status_change = 0;
3477 u32 val;
3478
3479 if (phydev->link) {
3480 if ((port->speed != phydev->speed) ||
3481 (port->duplex != phydev->duplex)) {
3482 u32 val;
3483
3484 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3485 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3486 MVPP2_GMAC_CONFIG_GMII_SPEED |
3487 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3488 MVPP2_GMAC_AN_SPEED_EN |
3489 MVPP2_GMAC_AN_DUPLEX_EN);
3490
3491 if (phydev->duplex)
3492 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3493
3494 if (phydev->speed == SPEED_1000)
3495 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3496 else if (phydev->speed == SPEED_100)
3497 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3498
3499 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3500
3501 port->duplex = phydev->duplex;
3502 port->speed = phydev->speed;
3503 }
3504 }
3505
3506 if (phydev->link != port->link) {
3507 if (!phydev->link) {
3508 port->duplex = -1;
3509 port->speed = 0;
3510 }
3511
3512 port->link = phydev->link;
3513 status_change = 1;
3514 }
3515
3516 if (status_change) {
3517 if (phydev->link) {
3518 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3519 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3520 MVPP2_GMAC_FORCE_LINK_DOWN);
3521 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3522 mvpp2_egress_enable(port);
3523 mvpp2_ingress_enable(port);
3524 } else {
3525 mvpp2_ingress_disable(port);
3526 mvpp2_egress_disable(port);
3527 }
3528 }
3529}
3530
3531/* Main RX/TX processing routines */
3532
3533/* Display more error info */
3534static void mvpp2_rx_error(struct mvpp2_port *port,
3535 struct mvpp2_rx_desc *rx_desc)
3536{
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003537 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3538 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01003539
3540 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3541 case MVPP2_RXD_ERR_CRC:
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003542 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
3543 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01003544 break;
3545 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003546 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
3547 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01003548 break;
3549 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003550 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
3551 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01003552 break;
3553 }
3554}
3555
3556/* Return the buffer to the BM pool (the same buffer is always reused in U-Boot) */
3557static int mvpp2_rx_refill(struct mvpp2_port *port,
3558 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003559 u32 bm, dma_addr_t dma_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01003560{
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003561 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01003562 return 0;
3563}
3564
3565/* Set hw internals when starting port */
3566static void mvpp2_start_dev(struct mvpp2_port *port)
3567{
3568 mvpp2_gmac_max_rx_size_set(port);
3569 mvpp2_txp_max_tx_size_set(port);
3570
3571 mvpp2_port_enable(port);
3572}
3573
3574/* Set hw internals when stopping port */
3575static void mvpp2_stop_dev(struct mvpp2_port *port)
3576{
3577 /* Stop new packets from arriving to RXQs */
3578 mvpp2_ingress_disable(port);
3579
3580 mvpp2_egress_disable(port);
3581 mvpp2_port_disable(port);
3582}
3583
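/* Connect to the PHY on first use (configure it, start it and wait for
 * link); on subsequent calls simply re-enable egress and ingress.
 */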
3584static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3585{
3586 struct phy_device *phy_dev;
3587
3588 if (!port->init || port->link == 0) {
3589 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3590 port->phy_interface);
3591 port->phy_dev = phy_dev;
3592 if (!phy_dev) {
3593 netdev_err(port->dev, "cannot connect to phy\n");
3594 return -ENODEV;
3595 }
3596 phy_dev->supported &= PHY_GBIT_FEATURES;
3597 phy_dev->advertising = phy_dev->supported;
3598
3599 port->phy_dev = phy_dev;
3600 port->link = 0;
3601 port->duplex = 0;
3602 port->speed = 0;
3603
3604 phy_config(phy_dev);
3605 phy_startup(phy_dev);
3606 if (!phy_dev->link) {
3607 printf("%s: No link\n", phy_dev->dev->name);
3608 return -1;
3609 }
3610
3611 port->init = 1;
3612 } else {
3613 mvpp2_egress_enable(port);
3614 mvpp2_ingress_enable(port);
3615 }
3616
3617 return 0;
3618}
3619
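/* Bring the port up: install parser entries for the broadcast and the
 * port's own MAC address, set the default flow, allocate the Rx/Tx
 * queues, connect the PHY and start the MAC.
 */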
3620static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3621{
3622 unsigned char mac_bcast[ETH_ALEN] = {
3623 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3624 int err;
3625
3626 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3627 if (err) {
3628 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3629 return err;
3630 }
3631 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3632 port->dev_addr, true);
3633 if (err) {
3634 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
3635 return err;
3636 }
3637 err = mvpp2_prs_def_flow(port);
3638 if (err) {
3639 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3640 return err;
3641 }
3642
3643 /* Allocate the Rx/Tx queues */
3644 err = mvpp2_setup_rxqs(port);
3645 if (err) {
3646 netdev_err(port->dev, "cannot allocate Rx queues\n");
3647 return err;
3648 }
3649
3650 err = mvpp2_setup_txqs(port);
3651 if (err) {
3652 netdev_err(port->dev, "cannot allocate Tx queues\n");
3653 return err;
3654 }
3655
3656 err = mvpp2_phy_connect(dev, port);
3657 if (err < 0)
3658 return err;
3659
3660 mvpp2_link_event(port);
3661
3662 mvpp2_start_dev(port);
3663
3664 return 0;
3665}
3666
3667/* No Device ops here in U-Boot */
3668
3669/* Driver initialization */
3670
3671static void mvpp2_port_power_up(struct mvpp2_port *port)
3672{
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01003673 struct mvpp2 *priv = port->priv;
3674
Stefan Roese96c19042016-02-10 07:22:10 +01003675 mvpp2_port_mii_set(port);
3676 mvpp2_port_periodic_xon_disable(port);
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01003677 if (priv->hw_version == MVPP21)
3678 mvpp2_port_fc_adv_enable(port);
Stefan Roese96c19042016-02-10 07:22:10 +01003679 mvpp2_port_reset(port);
3680}
3681
3682/* Initialize port HW */
3683static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3684{
3685 struct mvpp2 *priv = port->priv;
3686 struct mvpp2_txq_pcpu *txq_pcpu;
3687 int queue, cpu, err;
3688
3689 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
3690 return -EINVAL;
3691
3692 /* Disable port */
3693 mvpp2_egress_disable(port);
3694 mvpp2_port_disable(port);
3695
3696 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3697 GFP_KERNEL);
3698 if (!port->txqs)
3699 return -ENOMEM;
3700
3701 /* Associate physical Tx queues to this port and initialize.
3702 * The mapping is predefined.
3703 */
3704 for (queue = 0; queue < txq_number; queue++) {
3705 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3706 struct mvpp2_tx_queue *txq;
3707
3708 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3709 if (!txq)
3710 return -ENOMEM;
3711
3712 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3713 GFP_KERNEL);
3714 if (!txq->pcpu)
3715 return -ENOMEM;
3716
3717 txq->id = queue_phy_id;
3718 txq->log_id = queue;
3719 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3720 for_each_present_cpu(cpu) {
3721 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3722 txq_pcpu->cpu = cpu;
3723 }
3724
3725 port->txqs[queue] = txq;
3726 }
3727
3728 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3729 GFP_KERNEL);
3730 if (!port->rxqs)
3731 return -ENOMEM;
3732
3733	/* Allocate and initialize the Rx queues for this port */
3734 for (queue = 0; queue < rxq_number; queue++) {
3735 struct mvpp2_rx_queue *rxq;
3736
3737 /* Map physical Rx queue to port's logical Rx queue */
3738 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3739 if (!rxq)
3740 return -ENOMEM;
3741 /* Map this Rx queue to a physical queue */
3742 rxq->id = port->first_rxq + queue;
3743 rxq->port = port->id;
3744 rxq->logic_rxq = queue;
3745
3746 port->rxqs[queue] = rxq;
3747 }
3748
3749 /* Configure Rx queue group interrupt for this port */
3750 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ);
3751
3752 /* Create Rx descriptor rings */
3753 for (queue = 0; queue < rxq_number; queue++) {
3754 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3755
3756 rxq->size = port->rx_ring_size;
3757 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3758 rxq->time_coal = MVPP2_RX_COAL_USEC;
3759 }
3760
3761 mvpp2_ingress_disable(port);
3762
3763 /* Port default configuration */
3764 mvpp2_defaults_set(port);
3765
3766 /* Port's classifier configuration */
3767 mvpp2_cls_oversize_rxq_set(port);
3768 mvpp2_cls_port_config(port);
3769
3770 /* Provide an initial Rx packet size */
3771 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3772
3773 /* Initialize pools for swf */
3774 err = mvpp2_swf_bm_pool_init(port);
3775 if (err)
3776 return err;
3777
3778 return 0;
3779}
3780
3781/* Ports initialization */
3782static int mvpp2_port_probe(struct udevice *dev,
3783 struct mvpp2_port *port,
3784 int port_node,
3785 struct mvpp2 *priv,
3786 int *next_first_rxq)
3787{
3788 int phy_node;
3789 u32 id;
3790 u32 phyaddr;
3791 const char *phy_mode_str;
3792 int phy_mode = -1;
3793 int priv_common_regs_num = 2;
3794 int err;
3795
3796 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3797 if (phy_node < 0) {
3798 dev_err(&pdev->dev, "missing phy\n");
3799 return -ENODEV;
3800 }
3801
3802 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3803 if (phy_mode_str)
3804 phy_mode = phy_get_interface_by_name(phy_mode_str);
3805 if (phy_mode == -1) {
3806 dev_err(&pdev->dev, "incorrect phy mode\n");
3807 return -EINVAL;
3808 }
3809
3810 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3811 if (id == -1) {
3812 dev_err(&pdev->dev, "missing port-id value\n");
3813 return -EINVAL;
3814 }
3815
3816 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3817
3818 port->priv = priv;
3819 port->id = id;
3820 port->first_rxq = *next_first_rxq;
3821 port->phy_node = phy_node;
3822 port->phy_interface = phy_mode;
3823 port->phyaddr = phyaddr;
3824
Thomas Petazzoni5555f072017-02-16 08:03:37 +01003825 if (priv->hw_version == MVPP21) {
3826 port->base = (void __iomem *)dev_get_addr_index(
3827 dev->parent, priv_common_regs_num + id);
3828 if (IS_ERR(port->base))
3829 return PTR_ERR(port->base);
3830 } else {
3831 u32 gop_id;
3832
3833 gop_id = fdtdec_get_int(gd->fdt_blob, port_node,
3834 "gop-port-id", -1);
3835		if (gop_id == -1) {
3836 dev_err(&pdev->dev, "missing gop-port-id value\n");
3837 return -EINVAL;
3838 }
3839
3840 port->base = priv->iface_base + MVPP22_PORT_BASE +
3841 gop_id * MVPP22_PORT_OFFSET;
3842 }
Stefan Roese96c19042016-02-10 07:22:10 +01003843
3844 port->tx_ring_size = MVPP2_MAX_TXD;
3845 port->rx_ring_size = MVPP2_MAX_RXD;
3846
3847 err = mvpp2_port_init(dev, port);
3848 if (err < 0) {
3849 dev_err(&pdev->dev, "failed to init port %d\n", id);
3850 return err;
3851 }
3852 mvpp2_port_power_up(port);
3853
3854 /* Increment the first Rx queue number to be used by the next port */
3855 *next_first_rxq += CONFIG_MV_ETH_RXQ;
3856 priv->port_list[id] = port;
3857 return 0;
3858}
3859
3860/* Initialize decoding windows */
3861static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3862 struct mvpp2 *priv)
3863{
3864 u32 win_enable;
3865 int i;
3866
3867 for (i = 0; i < 6; i++) {
3868 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3869 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3870
3871 if (i < 4)
3872 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3873 }
3874
3875 win_enable = 0;
3876
3877 for (i = 0; i < dram->num_cs; i++) {
3878 const struct mbus_dram_window *cs = dram->cs + i;
3879
3880 mvpp2_write(priv, MVPP2_WIN_BASE(i),
3881 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3882 dram->mbus_dram_target_id);
3883
3884 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3885 (cs->size - 1) & 0xffff0000);
3886
3887 win_enable |= (1 << i);
3888 }
3889
3890 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3891}
3892
3893/* Initialize Rx FIFOs */
3894static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3895{
3896 int port;
3897
3898 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3899 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3900 MVPP2_RX_FIFO_PORT_DATA_SIZE);
3901 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3902 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3903 }
3904
3905 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3906 MVPP2_RX_FIFO_PORT_MIN_PKT);
3907 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3908}
3909
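/* Configure the default AXI attributes (cache and domain codes) used
 * for BM, descriptor and packet data accesses. Only used on PPv2.2.
 */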
Thomas Petazzonica560ab2017-02-16 08:41:07 +01003910static void mvpp2_axi_init(struct mvpp2 *priv)
3911{
3912 u32 val, rdval, wrval;
3913
3914 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
3915
3916 /* AXI Bridge Configuration */
3917
3918 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
3919 << MVPP22_AXI_ATTR_CACHE_OFFS;
3920 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3921 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
3922
3923 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
3924 << MVPP22_AXI_ATTR_CACHE_OFFS;
3925 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3926 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
3927
3928 /* BM */
3929 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
3930 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
3931
3932 /* Descriptors */
3933 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
3934 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
3935 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
3936 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
3937
3938 /* Buffer Data */
3939 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
3940 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
3941
3942 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
3943 << MVPP22_AXI_CODE_CACHE_OFFS;
3944 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
3945 << MVPP22_AXI_CODE_DOMAIN_OFFS;
3946 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
3947 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
3948
3949 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
3950 << MVPP22_AXI_CODE_CACHE_OFFS;
3951 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3952 << MVPP22_AXI_CODE_DOMAIN_OFFS;
3953
3954 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
3955
3956 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
3957 << MVPP22_AXI_CODE_CACHE_OFFS;
3958 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3959 << MVPP22_AXI_CODE_DOMAIN_OFFS;
3960
3961 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
3962}
3963
Stefan Roese96c19042016-02-10 07:22:10 +01003964/* Initialize network controller common part HW */
3965static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3966{
3967 const struct mbus_dram_target_info *dram_target_info;
3968 int err, i;
3969 u32 val;
3970
3971 /* Checks for hardware constraints (U-Boot uses only one rxq) */
3972 if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) {
3973 dev_err(&pdev->dev, "invalid queue size parameter\n");
3974 return -EINVAL;
3975 }
3976
3977 /* MBUS windows configuration */
3978 dram_target_info = mvebu_mbus_dram_info();
3979 if (dram_target_info)
3980 mvpp2_conf_mbus_windows(dram_target_info, priv);
3981
Thomas Petazzonica560ab2017-02-16 08:41:07 +01003982 if (priv->hw_version == MVPP22)
3983 mvpp2_axi_init(priv);
3984
Stefan Roese96c19042016-02-10 07:22:10 +01003985 /* Disable HW PHY polling */
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01003986 if (priv->hw_version == MVPP21) {
3987 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3988 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
3989 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3990 } else {
3991 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
3992 val &= ~MVPP22_SMI_POLLING_EN;
3993 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
3994 }
Stefan Roese96c19042016-02-10 07:22:10 +01003995
3996 /* Allocate and initialize aggregated TXQs */
3997 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
3998 sizeof(struct mvpp2_tx_queue),
3999 GFP_KERNEL);
4000 if (!priv->aggr_txqs)
4001 return -ENOMEM;
4002
4003 for_each_present_cpu(i) {
4004 priv->aggr_txqs[i].id = i;
4005 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
4006 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
4007 MVPP2_AGGR_TXQ_SIZE, i, priv);
4008 if (err < 0)
4009 return err;
4010 }
4011
4012 /* Rx Fifo Init */
4013 mvpp2_rx_fifo_init(priv);
4014
4015 /* Reset Rx queue group interrupt configuration */
4016 for (i = 0; i < MVPP2_MAX_PORTS; i++)
4017 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i),
4018 CONFIG_MV_ETH_RXQ);
4019
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01004020 if (priv->hw_version == MVPP21)
4021 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
4022 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Stefan Roese96c19042016-02-10 07:22:10 +01004023
4024	/* Allow cache snoop when transmitting packets */
4025 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
4026
4027 /* Buffer Manager initialization */
4028 err = mvpp2_bm_init(dev, priv);
4029 if (err < 0)
4030 return err;
4031
4032 /* Parser default initialization */
4033 err = mvpp2_prs_default_init(dev, priv);
4034 if (err < 0)
4035 return err;
4036
4037 /* Classifier default initialization */
4038 mvpp2_cls_init(priv);
4039
4040 return 0;
4041}
4042
4043/* SMI / MDIO functions */
4044
4045static int smi_wait_ready(struct mvpp2 *priv)
4046{
4047 u32 timeout = MVPP2_SMI_TIMEOUT;
4048 u32 smi_reg;
4049
4050 /* wait till the SMI is not busy */
4051 do {
4052 /* read smi register */
4053 smi_reg = readl(priv->lms_base + MVPP2_SMI);
4054 if (timeout-- == 0) {
4055 printf("Error: SMI busy timeout\n");
4056 return -EFAULT;
4057 }
4058 } while (smi_reg & MVPP2_SMI_BUSY);
4059
4060 return 0;
4061}
4062
4063/*
4064 * mpp2_mdio_read - miiphy_read callback function.
4065 *
4066 * Returns the 16-bit PHY register value, or a negative error code on failure
4067 */
4068static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
4069{
4070 struct mvpp2 *priv = bus->priv;
4071 u32 smi_reg;
4072 u32 timeout;
4073
4074 /* check parameters */
4075 if (addr > MVPP2_PHY_ADDR_MASK) {
4076 printf("Error: Invalid PHY address %d\n", addr);
4077 return -EFAULT;
4078 }
4079
4080 if (reg > MVPP2_PHY_REG_MASK) {
4081 printf("Err: Invalid register offset %d\n", reg);
4082 return -EFAULT;
4083 }
4084
4085 /* wait till the SMI is not busy */
4086 if (smi_wait_ready(priv) < 0)
4087 return -EFAULT;
4088
4089	/* fill in the PHY address, register offset and the read opcode */
4090 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
4091 | (reg << MVPP2_SMI_REG_ADDR_OFFS)
4092 | MVPP2_SMI_OPCODE_READ;
4093
4094 /* write the smi register */
4095 writel(smi_reg, priv->lms_base + MVPP2_SMI);
4096
4097 /* wait till read value is ready */
4098 timeout = MVPP2_SMI_TIMEOUT;
4099
4100 do {
4101 /* read smi register */
4102 smi_reg = readl(priv->lms_base + MVPP2_SMI);
4103 if (timeout-- == 0) {
4104 printf("Err: SMI read ready timeout\n");
4105 return -EFAULT;
4106 }
4107 } while (!(smi_reg & MVPP2_SMI_READ_VALID));
4108
4109 /* Wait for the data to update in the SMI register */
4110 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
4111 ;
4112
4113 return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK;
4114}
4115
4116/*
4117 * mpp2_mdio_write - miiphy_write callback function.
4118 *
4119 * Returns 0 if the write succeeds, or a negative error code on bad
4120 * parameters or on SMI timeout
4121 */
4122static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
4123 u16 value)
4124{
4125 struct mvpp2 *priv = bus->priv;
4126 u32 smi_reg;
4127
4128 /* check parameters */
4129 if (addr > MVPP2_PHY_ADDR_MASK) {
4130 printf("Error: Invalid PHY address %d\n", addr);
4131 return -EFAULT;
4132 }
4133
4134 if (reg > MVPP2_PHY_REG_MASK) {
4135 printf("Err: Invalid register offset %d\n", reg);
4136 return -EFAULT;
4137 }
4138
4139 /* wait till the SMI is not busy */
4140 if (smi_wait_ready(priv) < 0)
4141 return -EFAULT;
4142
4143 /* fill the phy addr and reg offset and write opcode and data */
4144 smi_reg = value << MVPP2_SMI_DATA_OFFS;
4145 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
4146 | (reg << MVPP2_SMI_REG_ADDR_OFFS);
4147 smi_reg &= ~MVPP2_SMI_OPCODE_READ;
4148
4149 /* write the smi register */
4150 writel(smi_reg, priv->lms_base + MVPP2_SMI);
4151
4152 return 0;
4153}
4154
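/* Receive one packet: check the RX cause register, fetch the next RX
 * descriptor, return the buffer to the BM pool on error, otherwise
 * refill the pool and hand the payload to the caller.
 */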
4155static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
4156{
4157 struct mvpp2_port *port = dev_get_priv(dev);
4158 struct mvpp2_rx_desc *rx_desc;
4159 struct mvpp2_bm_pool *bm_pool;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004160 dma_addr_t dma_addr;
Stefan Roese96c19042016-02-10 07:22:10 +01004161 u32 bm, rx_status;
4162 int pool, rx_bytes, err;
4163 int rx_received;
4164 struct mvpp2_rx_queue *rxq;
4165 u32 cause_rx_tx, cause_rx, cause_misc;
4166 u8 *data;
4167
4168 cause_rx_tx = mvpp2_read(port->priv,
4169 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4170 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4171 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4172 if (!cause_rx_tx && !cause_misc)
4173 return 0;
4174
4175 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4176
4177 /* Process RX packets */
4178 cause_rx |= port->pending_cause_rx;
4179 rxq = mvpp2_get_rx_queue(port, cause_rx);
4180
4181	/* Get the number of received packets */
4182 rx_received = mvpp2_rxq_received(port, rxq->id);
4183
4184 /* Return if no packets are received */
4185 if (!rx_received)
4186 return 0;
4187
4188 rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004189 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
4190 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
4191 rx_bytes -= MVPP2_MH_SIZE;
4192 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01004193
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004194 bm = mvpp2_bm_cookie_build(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01004195 pool = mvpp2_bm_cookie_pool_get(bm);
4196 bm_pool = &port->priv->bm_pools[pool];
4197
Stefan Roese96c19042016-02-10 07:22:10 +01004198 /* In case of an error, release the requested buffer pointer
4199 * to the Buffer Manager. This request process is controlled
4200 * by the hardware, and the information about the buffer is
4201 * comprised by the RX descriptor.
4202 */
4203 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
4204 mvpp2_rx_error(port, rx_desc);
4205 /* Return the buffer to the pool */
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004206 mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01004207 return 0;
4208 }
4209
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004210 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01004211 if (err) {
4212 netdev_err(port->dev, "failed to refill BM pools\n");
4213 return 0;
4214 }
4215
4216 /* Update Rx queue management counters */
4217 mb();
4218 mvpp2_rxq_status_update(port, rxq->id, 1, 1);
4219
4220	/* Hand the packet to the caller - skip the 32-byte RX headroom and the 2-byte Marvell header */
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004221 data = (u8 *)dma_addr + 2 + 32;
Stefan Roese96c19042016-02-10 07:22:10 +01004222
4223 if (rx_bytes <= 0)
4224 return 0;
4225
4226 /*
4227	 * No cache invalidation is needed here, since the rx_buffers are
4228	 * located in an uncached memory region
4229 */
4230 *packetp = data;
4231
4232 return rx_bytes;
4233}
4234
4235/* Drain Txq */
4236static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4237 int enable)
4238{
4239 u32 val;
4240
4241 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4242 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4243 if (enable)
4244 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4245 else
4246 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4247 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4248}
4249
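/* Transmit one packet: fill a descriptor in the aggregated TXQ, flush
 * the packet from the D-cache, kick the queue and busy-wait until the
 * hardware reports the descriptor as sent.
 */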
4250static int mvpp2_send(struct udevice *dev, void *packet, int length)
4251{
4252 struct mvpp2_port *port = dev_get_priv(dev);
4253 struct mvpp2_tx_queue *txq, *aggr_txq;
4254 struct mvpp2_tx_desc *tx_desc;
4255 int tx_done;
4256 int timeout;
4257
4258 txq = port->txqs[0];
4259 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
4260
4261 /* Get a descriptor for the first part of the packet */
4262 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004263 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4264 mvpp2_txdesc_size_set(port, tx_desc, length);
4265 mvpp2_txdesc_offset_set(port, tx_desc,
4266 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
4267 mvpp2_txdesc_dma_addr_set(port, tx_desc,
4268 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
Stefan Roese96c19042016-02-10 07:22:10 +01004269 /* First and Last descriptor */
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004270 mvpp2_txdesc_cmd_set(port, tx_desc,
4271 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
4272 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
Stefan Roese96c19042016-02-10 07:22:10 +01004273
4274 /* Flush tx data */
Stefan Roeseb4268e22017-02-16 13:58:37 +01004275 flush_dcache_range((unsigned long)packet,
4276 (unsigned long)packet + ALIGN(length, PKTALIGN));
Stefan Roese96c19042016-02-10 07:22:10 +01004277
4278 /* Enable transmit */
4279 mb();
4280 mvpp2_aggr_txq_pend_desc_add(port, 1);
4281
4282 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4283
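	/* Wait until the physical TXQ reports no more pending descriptors */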
4284 timeout = 0;
4285 do {
4286 if (timeout++ > 10000) {
4287 printf("timeout: packet not sent from aggregated to phys TXQ\n");
4288 return 0;
4289 }
4290 tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
4291 } while (tx_done);
4292
4293 /* Enable TXQ drain */
4294 mvpp2_txq_drain(port, txq, 1);
4295
4296 timeout = 0;
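	/* With drain enabled, wait until the sent-descriptor counter confirms transmission */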
4297 do {
4298 if (timeout++ > 10000) {
4299 printf("timeout: packet not sent\n");
4300 return 0;
4301 }
4302 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4303 } while (!tx_done);
4304
4305 /* Disable TXQ drain */
4306 mvpp2_txq_drain(port, txq, 0);
4307
4308 return 0;
4309}
4310
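/* eth_ops start callback: program the current MAC address into the
 * parser, power the port up and open it.
 */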
4311static int mvpp2_start(struct udevice *dev)
4312{
4313 struct eth_pdata *pdata = dev_get_platdata(dev);
4314 struct mvpp2_port *port = dev_get_priv(dev);
4315
4316 /* Load current MAC address */
4317 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
4318
4319	/* Reconfigure the parser to accept the original MAC address */
4320 mvpp2_prs_update_mac_da(port, port->dev_addr);
4321
4322 mvpp2_port_power_up(port);
4323
4324 mvpp2_open(dev, port);
4325
4326 return 0;
4327}
4328
4329static void mvpp2_stop(struct udevice *dev)
4330{
4331 struct mvpp2_port *port = dev_get_priv(dev);
4332
4333 mvpp2_stop_dev(port);
4334 mvpp2_cleanup_rxqs(port);
4335 mvpp2_cleanup_txqs(port);
4336}
4337
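/* UCLASS_ETH probe: initialize the network controller common part, then
 * probe this port from its device tree node.
 */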
4338static int mvpp2_probe(struct udevice *dev)
4339{
4340 struct mvpp2_port *port = dev_get_priv(dev);
4341 struct mvpp2 *priv = dev_get_priv(dev->parent);
4342 int err;
4343
4344 /* Initialize network controller */
4345 err = mvpp2_init(dev, priv);
4346 if (err < 0) {
4347 dev_err(&pdev->dev, "failed to initialize controller\n");
4348 return err;
4349 }
4350
Simon Glassdd79d6e2017-01-17 16:52:55 -07004351 return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv,
Stefan Roese96c19042016-02-10 07:22:10 +01004352 &buffer_loc.first_rxq);
4353}
4354
4355static const struct eth_ops mvpp2_ops = {
4356 .start = mvpp2_start,
4357 .send = mvpp2_send,
4358 .recv = mvpp2_recv,
4359 .stop = mvpp2_stop,
4360};
4361
4362static struct driver mvpp2_driver = {
4363 .name = "mvpp2",
4364 .id = UCLASS_ETH,
4365 .probe = mvpp2_probe,
4366 .ops = &mvpp2_ops,
4367 .priv_auto_alloc_size = sizeof(struct mvpp2_port),
4368 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
4369};
4370
4371/*
4372 * Use a MISC device for the base controller and bind its port
4373 * instances (child nodes) as UCLASS_ETH devices.
4374 */
4375static int mvpp2_base_probe(struct udevice *dev)
4376{
4377 struct mvpp2 *priv = dev_get_priv(dev);
4378 struct mii_dev *bus;
4379 void *bd_space;
4380 u32 size = 0;
4381 int i;
4382
Thomas Petazzoni51ccb412017-02-15 14:08:59 +01004383 /* Save hw-version */
4384 priv->hw_version = dev_get_driver_data(dev);
4385
Stefan Roese96c19042016-02-10 07:22:10 +01004386 /*
4387 * U-Boot special buffer handling:
4388 *
4389 * Allocate the buffer area for descriptors and rx_buffers. This is
4390 * only done once for all interfaces, as only one interface can be
4391 * active at a time. Make this area DMA-safe by disabling the D-cache.
4392 */
4393
4394 /* Align buffer area for descs and rx_buffers to 1MiB */
4395 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
Stefan Roesefeb0b332017-02-15 12:46:18 +01004396 mmu_set_region_dcache_behaviour((unsigned long)bd_space,
4397 BD_SPACE, DCACHE_OFF);
Stefan Roese96c19042016-02-10 07:22:10 +01004398
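	/*
	 * Carve the uncached area up: aggregated Tx descriptors first, then
	 * the Tx and Rx descriptor rings, the BM pool arrays and finally the
	 * Rx buffers.
	 */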
4399 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
4400 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
4401
Stefan Roesefeb0b332017-02-15 12:46:18 +01004402 buffer_loc.tx_descs =
4403 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004404 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
4405
Stefan Roesefeb0b332017-02-15 12:46:18 +01004406 buffer_loc.rx_descs =
4407 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004408 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
4409
4410 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
Stefan Roesefeb0b332017-02-15 12:46:18 +01004411 buffer_loc.bm_pool[i] =
4412 (unsigned long *)((unsigned long)bd_space + size);
Thomas Petazzoni3520a332017-02-20 11:29:16 +01004413 if (priv->hw_version == MVPP21)
4414 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
4415 else
4416 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
Stefan Roese96c19042016-02-10 07:22:10 +01004417 }
4418
4419 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
Stefan Roesefeb0b332017-02-15 12:46:18 +01004420 buffer_loc.rx_buffer[i] =
4421 (unsigned long *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004422 size += RX_BUFFER_SIZE;
4423 }
4424
4425 /* Save base addresses for later use */
4426 priv->base = (void *)dev_get_addr_index(dev, 0);
4427 if (IS_ERR(priv->base))
4428 return PTR_ERR(priv->base);
4429
Thomas Petazzoni5555f072017-02-16 08:03:37 +01004430 if (priv->hw_version == MVPP21) {
4431 priv->lms_base = (void *)dev_get_addr_index(dev, 1);
4432 if (IS_ERR(priv->lms_base))
4433 return PTR_ERR(priv->lms_base);
4434 } else {
4435 priv->iface_base = (void *)dev_get_addr_index(dev, 1);
4436 if (IS_ERR(priv->iface_base))
4437 return PTR_ERR(priv->iface_base);
4438 }
Stefan Roese96c19042016-02-10 07:22:10 +01004439
4440 /* Finally create and register the MDIO bus driver */
4441 bus = mdio_alloc();
4442 if (!bus) {
4443 printf("Failed to allocate MDIO bus\n");
4444 return -ENOMEM;
4445 }
4446
4447 bus->read = mpp2_mdio_read;
4448 bus->write = mpp2_mdio_write;
4449	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
4450 bus->priv = (void *)priv;
4451 priv->bus = bus;
4452
4453 return mdio_register(bus);
4454}
4455
4456static int mvpp2_base_bind(struct udevice *parent)
4457{
4458 const void *blob = gd->fdt_blob;
Simon Glassdd79d6e2017-01-17 16:52:55 -07004459 int node = dev_of_offset(parent);
Stefan Roese96c19042016-02-10 07:22:10 +01004460 struct uclass_driver *drv;
4461 struct udevice *dev;
4462 struct eth_pdata *plat;
4463 char *name;
4464 int subnode;
4465 u32 id;
4466
4467 /* Lookup eth driver */
4468 drv = lists_uclass_lookup(UCLASS_ETH);
4469 if (!drv) {
4470 puts("Cannot find eth driver\n");
4471 return -ENOENT;
4472 }
4473
Simon Glass499c29e2016-10-02 17:59:29 -06004474 fdt_for_each_subnode(subnode, blob, node) {
Stefan Roese96c19042016-02-10 07:22:10 +01004475 /* Skip disabled ports */
4476 if (!fdtdec_get_is_enabled(blob, subnode))
4477 continue;
4478
4479 plat = calloc(1, sizeof(*plat));
4480 if (!plat)
4481 return -ENOMEM;
4482
4483 id = fdtdec_get_int(blob, subnode, "port-id", -1);
4484
4485		name = calloc(1, 16);
		if (!name)
			return -ENOMEM;
4486		sprintf(name, "mvpp2-%d", id);
4487
4488 /* Create child device UCLASS_ETH and bind it */
4489 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
Simon Glassdd79d6e2017-01-17 16:52:55 -07004490 dev_set_of_offset(dev, subnode);
Stefan Roese96c19042016-02-10 07:22:10 +01004491 }
4492
4493 return 0;
4494}
4495
4496static const struct udevice_id mvpp2_ids[] = {
Thomas Petazzoni51ccb412017-02-15 14:08:59 +01004497 {
4498 .compatible = "marvell,armada-375-pp2",
4499 .data = MVPP21,
4500 },
Stefan Roese96c19042016-02-10 07:22:10 +01004501 { }
4502};
4503
4504U_BOOT_DRIVER(mvpp2_base) = {
4505 .name = "mvpp2_base",
4506 .id = UCLASS_MISC,
4507 .of_match = mvpp2_ids,
4508 .bind = mvpp2_base_bind,
4509 .probe = mvpp2_base_probe,
4510 .priv_auto_alloc_size = sizeof(struct mvpp2),
4511};