1/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * U-Boot version:
9 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <common.h>
17#include <dm.h>
18#include <dm/device-internal.h>
19#include <dm/lists.h>
20#include <net.h>
21#include <netdev.h>
22#include <config.h>
23#include <malloc.h>
24#include <asm/io.h>
25#include <linux/errno.h>
26#include <phy.h>
27#include <miiphy.h>
28#include <watchdog.h>
29#include <asm/arch/cpu.h>
30#include <asm/arch/soc.h>
31#include <linux/compat.h>
32#include <linux/mbus.h>
33
34DECLARE_GLOBAL_DATA_PTR;
35
36/* Some linux -> U-Boot compatibility stuff */
37#define netdev_err(dev, fmt, args...) \
38 printf(fmt, ##args)
39#define netdev_warn(dev, fmt, args...) \
40 printf(fmt, ##args)
41#define netdev_info(dev, fmt, args...) \
42 printf(fmt, ##args)
43#define netdev_dbg(dev, fmt, args...) \
44 printf(fmt, ##args)
45
46#define ETH_ALEN 6 /* Octets in one ethernet addr */
47
48#define __verify_pcpu_ptr(ptr) \
49do { \
50 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
51 (void)__vpp_verify; \
52} while (0)
53
54#define VERIFY_PERCPU_PTR(__p) \
55({ \
56 __verify_pcpu_ptr(__p); \
57 (typeof(*(__p)) __kernel __force *)(__p); \
58})
59
60#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
61#define smp_processor_id() 0
62#define num_present_cpus() 1
63#define for_each_present_cpu(cpu) \
64 for ((cpu) = 0; (cpu) < 1; (cpu)++)
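/*
 * These stubs reduce the Linux per-CPU and SMP helpers to a single CPU
 * (always CPU 0), which is sufficient for the single-core context in which
 * this U-Boot driver runs.
 */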
65
66#define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)
67
68#define CONFIG_NR_CPUS 1
69#define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */
70
71/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
72#define WRAP (2 + ETH_HLEN + 4 + 32)
73#define MTU 1500
74#define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
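/*
 * Example, assuming ARCH_DMA_MINALIGN is 64 (typical for ARMv8 targets):
 * WRAP = 2 + 14 + 4 + 32 = 52 bytes, so RX_BUFFER_SIZE evaluates to
 * ALIGN(1500 + 52, 64) = 1600 bytes per receive buffer.
 */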
75
76#define MVPP2_SMI_TIMEOUT 10000
77
78/* RX Fifo Registers */
79#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
80#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
81#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
82#define MVPP2_RX_FIFO_INIT_REG 0x64
83
84/* RX DMA Top Registers */
85#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
86#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
87#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
88#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
89#define MVPP2_POOL_BUF_SIZE_OFFSET 5
90#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
91#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
92#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
93#define MVPP2_RXQ_POOL_SHORT_OFFS 20
94#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
95#define MVPP2_RXQ_POOL_LONG_OFFS 24
96#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
97#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
98#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
99#define MVPP2_RXQ_DISABLE_MASK BIT(31)
100
101/* Parser Registers */
102#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
103#define MVPP2_PRS_PORT_LU_MAX 0xf
104#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
105#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
106#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
107#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
108#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
109#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
110#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
111#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
112#define MVPP2_PRS_TCAM_IDX_REG 0x1100
113#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
114#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
115#define MVPP2_PRS_SRAM_IDX_REG 0x1200
116#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
117#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
118#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
119
120/* Classifier Registers */
121#define MVPP2_CLS_MODE_REG 0x1800
122#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
123#define MVPP2_CLS_PORT_WAY_REG 0x1810
124#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
125#define MVPP2_CLS_LKP_INDEX_REG 0x1814
126#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
127#define MVPP2_CLS_LKP_TBL_REG 0x1818
128#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
129#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
130#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
131#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
132#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
133#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
134#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
135#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
136#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
137#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
138#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
139#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
140
141/* Descriptor Manager Top Registers */
142#define MVPP2_RXQ_NUM_REG 0x2040
143#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
144#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
145#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
146#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
147#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
148#define MVPP2_RXQ_NUM_NEW_OFFSET 16
149#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
150#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
151#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
152#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
153#define MVPP2_RXQ_THRESH_REG 0x204c
154#define MVPP2_OCCUPIED_THRESH_OFFSET 0
155#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
156#define MVPP2_RXQ_INDEX_REG 0x2050
157#define MVPP2_TXQ_NUM_REG 0x2080
158#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
159#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
160#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
161#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
162#define MVPP2_TXQ_THRESH_REG 0x2094
163#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
164#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
165#define MVPP2_TXQ_INDEX_REG 0x2098
166#define MVPP2_TXQ_PREF_BUF_REG 0x209c
167#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
168#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
169#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
170#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
171#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
172#define MVPP2_TXQ_PENDING_REG 0x20a0
173#define MVPP2_TXQ_PENDING_MASK 0x3fff
174#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
175#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
176#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
177#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
178#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
179#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
180#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
181#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
182#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
183#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
184#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
185#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
186#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
187#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
188#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
189#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
190
191/* MBUS bridge registers */
192#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
193#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
194#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
195#define MVPP2_BASE_ADDR_ENABLE 0x4060
196
197/* Interrupt Cause and Mask registers */
198#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
199#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
200#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
201#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
202#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
203#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
204#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
205#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
206#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
207#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
208#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
209#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
210#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
211#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
212#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
213#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
214#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
215#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
216#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
217#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
218
219/* Buffer Manager registers */
220#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
221#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
222#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
223#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
224#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
225#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
226#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
227#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
228#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
229#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
230#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
231#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
232#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
233#define MVPP2_BM_START_MASK BIT(0)
234#define MVPP2_BM_STOP_MASK BIT(1)
235#define MVPP2_BM_STATE_MASK BIT(4)
236#define MVPP2_BM_LOW_THRESH_OFFS 8
237#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
238#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
239 MVPP2_BM_LOW_THRESH_OFFS)
240#define MVPP2_BM_HIGH_THRESH_OFFS 16
241#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
242#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
243 MVPP2_BM_HIGH_THRESH_OFFS)
244#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
245#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
246#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
247#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
248#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
249#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
250#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
251#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
252#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
253#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
254#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
255#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
256#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
257#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
258#define MVPP2_BM_VIRT_RLS_REG 0x64c0
259#define MVPP2_BM_MC_RLS_REG 0x64c4
260#define MVPP2_BM_MC_ID_MASK 0xfff
261#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
262
263/* TX Scheduler registers */
264#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
265#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
266#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
267#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
268#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
269#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
270#define MVPP2_TXP_SCHED_MTU_REG 0x801c
271#define MVPP2_TXP_MTU_MAX 0x7FFFF
272#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
273#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
274#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
275#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
276#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
277#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
278#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
279#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
280#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
281#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
282#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
283#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
284#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
285#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
286
287/* TX general registers */
288#define MVPP2_TX_SNOOP_REG 0x8800
289#define MVPP2_TX_PORT_FLUSH_REG 0x8810
290#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
291
292/* LMS registers */
293#define MVPP2_SRC_ADDR_MIDDLE 0x24
294#define MVPP2_SRC_ADDR_HIGH 0x28
295#define MVPP2_PHY_AN_CFG0_REG 0x34
296#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
297#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
298#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
299
300/* Per-port registers */
301#define MVPP2_GMAC_CTRL_0_REG 0x0
302#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
303#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
304#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
305#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
306#define MVPP2_GMAC_CTRL_1_REG 0x4
307#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
308#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
309#define MVPP2_GMAC_PCS_LB_EN_BIT 6
310#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
311#define MVPP2_GMAC_SA_LOW_OFFS 7
312#define MVPP2_GMAC_CTRL_2_REG 0x8
313#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
314#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
315#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
316#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
317#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
318#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
319#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
320#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
321#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
322#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
323#define MVPP2_GMAC_FC_ADV_EN BIT(9)
324#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
325#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
326#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
327#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
328#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
329#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
330 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
331
332#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
333
334/* Descriptor ring Macros */
335#define MVPP2_QUEUE_NEXT_DESC(q, index) \
336 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
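/*
 * Example: for a ring with last_desc == 15, stepping from index 0 with
 * MVPP2_QUEUE_NEXT_DESC() yields 1, 2, ..., 15 and then wraps back to
 * descriptor 0.
 */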
337
338/* SMI: 0xc0054 -> offset 0x54 to lms_base */
339#define MVPP2_SMI 0x0054
340#define MVPP2_PHY_REG_MASK 0x1f
341/* SMI register fields */
342#define MVPP2_SMI_DATA_OFFS 0 /* Data */
343#define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS)
344#define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
345#define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/
346#define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
347#define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS)
348#define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */
349#define MVPP2_SMI_BUSY (1 << 28) /* Busy */
350
351#define MVPP2_PHY_ADDR_MASK 0x1f
352#define MVPP2_PHY_REG_MASK 0x1f
353
354/* Various constants */
355
356/* Coalescing */
357#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
358#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
359#define MVPP2_RX_COAL_PKTS 32
360#define MVPP2_RX_COAL_USEC 100
361
 362/* The two-byte Marvell header. It either contains a special value used
 363 * by Marvell switches when a specific hardware mode is enabled (not
 364 * supported by this driver) or is filled automatically with zeroes on
 365 * the RX side. Since those two bytes sit at the front of the Ethernet
 366 * header, they automatically leave the IP header aligned on a 4-byte
 367 * boundary: the hardware skips those two bytes on its
 368 * own.
 369 */
370#define MVPP2_MH_SIZE 2
371#define MVPP2_ETH_TYPE_LEN 2
372#define MVPP2_PPPOE_HDR_SIZE 8
373#define MVPP2_VLAN_TAG_LEN 4
374
375/* Lbtd 802.3 type */
376#define MVPP2_IP_LBDT_TYPE 0xfffa
377
378#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
379#define MVPP2_TX_CSUM_MAX_SIZE 9800
380
381/* Timeout constants */
382#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
383#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
384
385#define MVPP2_TX_MTU_MAX 0x7ffff
386
387/* Maximum number of T-CONTs of PON port */
388#define MVPP2_MAX_TCONT 16
389
390/* Maximum number of supported ports */
391#define MVPP2_MAX_PORTS 4
392
393/* Maximum number of TXQs used by single port */
394#define MVPP2_MAX_TXQ 8
395
396/* Maximum number of RXQs used by single port */
397#define MVPP2_MAX_RXQ 8
398
399/* Default number of TXQs in use */
400#define MVPP2_DEFAULT_TXQ 1
401
 402/* Default number of RXQs in use */
403#define MVPP2_DEFAULT_RXQ 1
404#define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */
405
406/* Total number of RXQs available to all ports */
407#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
408
409/* Max number of Rx descriptors */
410#define MVPP2_MAX_RXD 16
411
412/* Max number of Tx descriptors */
413#define MVPP2_MAX_TXD 16
414
415/* Amount of Tx descriptors that can be reserved at once by CPU */
416#define MVPP2_CPU_DESC_CHUNK 64
417
418/* Max number of Tx descriptors in each aggregated queue */
419#define MVPP2_AGGR_TXQ_SIZE 256
420
421/* Descriptor aligned size */
422#define MVPP2_DESC_ALIGNED_SIZE 32
423
424/* Descriptor alignment mask */
425#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
426
427/* RX FIFO constants */
428#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
429#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
430#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
431
432/* RX buffer constants */
433#define MVPP2_SKB_SHINFO_SIZE \
434 0
435
436#define MVPP2_RX_PKT_SIZE(mtu) \
437 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
438 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
439
440#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
441#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
442#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
443 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
444
445#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
446
447/* IPv6 max L3 address size */
448#define MVPP2_MAX_L3_ADDR_SIZE 16
449
450/* Port flags */
451#define MVPP2_F_LOOPBACK BIT(0)
452
453/* Marvell tag types */
454enum mvpp2_tag_type {
455 MVPP2_TAG_TYPE_NONE = 0,
456 MVPP2_TAG_TYPE_MH = 1,
457 MVPP2_TAG_TYPE_DSA = 2,
458 MVPP2_TAG_TYPE_EDSA = 3,
459 MVPP2_TAG_TYPE_VLAN = 4,
460 MVPP2_TAG_TYPE_LAST = 5
461};
462
463/* Parser constants */
464#define MVPP2_PRS_TCAM_SRAM_SIZE 256
465#define MVPP2_PRS_TCAM_WORDS 6
466#define MVPP2_PRS_SRAM_WORDS 4
467#define MVPP2_PRS_FLOW_ID_SIZE 64
468#define MVPP2_PRS_FLOW_ID_MASK 0x3f
469#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
470#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
471#define MVPP2_PRS_IPV4_HEAD 0x40
472#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
473#define MVPP2_PRS_IPV4_MC 0xe0
474#define MVPP2_PRS_IPV4_MC_MASK 0xf0
475#define MVPP2_PRS_IPV4_BC_MASK 0xff
476#define MVPP2_PRS_IPV4_IHL 0x5
477#define MVPP2_PRS_IPV4_IHL_MASK 0xf
478#define MVPP2_PRS_IPV6_MC 0xff
479#define MVPP2_PRS_IPV6_MC_MASK 0xff
480#define MVPP2_PRS_IPV6_HOP_MASK 0xff
481#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
482#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
483#define MVPP2_PRS_DBL_VLANS_MAX 100
484
485/* Tcam structure:
486 * - lookup ID - 4 bits
487 * - port ID - 1 byte
488 * - additional information - 1 byte
489 * - header data - 8 bytes
490 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
491 */
492#define MVPP2_PRS_AI_BITS 8
493#define MVPP2_PRS_PORT_MASK 0xff
494#define MVPP2_PRS_LU_MASK 0xf
495#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
496 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
497#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
498 (((offs) * 2) - ((offs) % 2) + 2)
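/*
 * The header data bytes and their enable bits are interleaved in the TCAM
 * entry: MVPP2_PRS_TCAM_DATA_BYTE(0..3) maps to tcam.byte[] indices
 * 0, 1, 4, 5, while MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) maps to 2, 3, 6, 7.
 */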
499#define MVPP2_PRS_TCAM_AI_BYTE 16
500#define MVPP2_PRS_TCAM_PORT_BYTE 17
501#define MVPP2_PRS_TCAM_LU_BYTE 20
502#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
503#define MVPP2_PRS_TCAM_INV_WORD 5
504/* Tcam entries ID */
505#define MVPP2_PE_DROP_ALL 0
506#define MVPP2_PE_FIRST_FREE_TID 1
507#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
508#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
509#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
510#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
511#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
512#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
513#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
514#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
515#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
516#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
517#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
518#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
519#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
520#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
521#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
522#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
523#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
524#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
525#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
526#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
527#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
528#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
529#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
530#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
531#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
532
533/* Sram structure
534 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
535 */
536#define MVPP2_PRS_SRAM_RI_OFFS 0
537#define MVPP2_PRS_SRAM_RI_WORD 0
538#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
539#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
540#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
541#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
542#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
543#define MVPP2_PRS_SRAM_UDF_OFFS 73
544#define MVPP2_PRS_SRAM_UDF_BITS 8
545#define MVPP2_PRS_SRAM_UDF_MASK 0xff
546#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
547#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
548#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
549#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
550#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
551#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
552#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
553#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
554#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
555#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
556#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
557#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
558#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
559#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
560#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
561#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
562#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
563#define MVPP2_PRS_SRAM_AI_OFFS 90
564#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
565#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
566#define MVPP2_PRS_SRAM_AI_MASK 0xff
567#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
568#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
569#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
570#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
571
572/* Sram result info bits assignment */
573#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
574#define MVPP2_PRS_RI_DSA_MASK 0x2
575#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
576#define MVPP2_PRS_RI_VLAN_NONE 0x0
577#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
578#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
579#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
580#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
581#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
582#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
583#define MVPP2_PRS_RI_L2_UCAST 0x0
584#define MVPP2_PRS_RI_L2_MCAST BIT(9)
585#define MVPP2_PRS_RI_L2_BCAST BIT(10)
586#define MVPP2_PRS_RI_PPPOE_MASK 0x800
587#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
588#define MVPP2_PRS_RI_L3_UN 0x0
589#define MVPP2_PRS_RI_L3_IP4 BIT(12)
590#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
591#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
592#define MVPP2_PRS_RI_L3_IP6 BIT(14)
593#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
594#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
595#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
596#define MVPP2_PRS_RI_L3_UCAST 0x0
597#define MVPP2_PRS_RI_L3_MCAST BIT(15)
598#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
599#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
600#define MVPP2_PRS_RI_UDF3_MASK 0x300000
601#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
602#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
603#define MVPP2_PRS_RI_L4_TCP BIT(22)
604#define MVPP2_PRS_RI_L4_UDP BIT(23)
605#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
606#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
607#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
608#define MVPP2_PRS_RI_DROP_MASK 0x80000000
609
610/* Sram additional info bits assignment */
611#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
612#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
613#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
614#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
615#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
616#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
617#define MVPP2_PRS_SINGLE_VLAN_AI 0
618#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
619
620/* DSA/EDSA type */
621#define MVPP2_PRS_TAGGED true
622#define MVPP2_PRS_UNTAGGED false
623#define MVPP2_PRS_EDSA true
624#define MVPP2_PRS_DSA false
625
626/* MAC entries, shadow udf */
627enum mvpp2_prs_udf {
628 MVPP2_PRS_UDF_MAC_DEF,
629 MVPP2_PRS_UDF_MAC_RANGE,
630 MVPP2_PRS_UDF_L2_DEF,
631 MVPP2_PRS_UDF_L2_DEF_COPY,
632 MVPP2_PRS_UDF_L2_USER,
633};
634
635/* Lookup ID */
636enum mvpp2_prs_lookup {
637 MVPP2_PRS_LU_MH,
638 MVPP2_PRS_LU_MAC,
639 MVPP2_PRS_LU_DSA,
640 MVPP2_PRS_LU_VLAN,
641 MVPP2_PRS_LU_L2,
642 MVPP2_PRS_LU_PPPOE,
643 MVPP2_PRS_LU_IP4,
644 MVPP2_PRS_LU_IP6,
645 MVPP2_PRS_LU_FLOWS,
646 MVPP2_PRS_LU_LAST,
647};
648
649/* L3 cast enum */
650enum mvpp2_prs_l3_cast {
651 MVPP2_PRS_L3_UNI_CAST,
652 MVPP2_PRS_L3_MULTI_CAST,
653 MVPP2_PRS_L3_BROAD_CAST
654};
655
656/* Classifier constants */
657#define MVPP2_CLS_FLOWS_TBL_SIZE 512
658#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
659#define MVPP2_CLS_LKP_TBL_SIZE 64
660
661/* BM constants */
662#define MVPP2_BM_POOLS_NUM 1
663#define MVPP2_BM_LONG_BUF_NUM 16
664#define MVPP2_BM_SHORT_BUF_NUM 16
665#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
666#define MVPP2_BM_POOL_PTR_ALIGN 128
667#define MVPP2_BM_SWF_LONG_POOL(port) 0
668
669/* BM cookie (32 bits) definition */
670#define MVPP2_BM_COOKIE_POOL_OFFS 8
671#define MVPP2_BM_COOKIE_CPU_OFFS 24
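/*
 * Based on the offsets above, the 32-bit cookie carries the pool ID starting
 * at bit 8 and the originating CPU ID starting at bit 24, i.e.
 * cookie = (cpu << 24) | (pool << 8).
 */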
672
673/* BM short pool packet size
 674 * This value ensures that for SWF the total number
 675 * of bytes allocated for each buffer will be 512
676 */
677#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
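/*
 * With NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0 in this U-Boot port,
 * MVPP2_BM_SHORT_PKT_SIZE evaluates to 512 - 32 - 0 = 480 bytes of payload
 * per short-pool buffer.
 */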
678
679enum mvpp2_bm_type {
680 MVPP2_BM_FREE,
681 MVPP2_BM_SWF_LONG,
682 MVPP2_BM_SWF_SHORT
683};
684
685/* Definitions */
686
687/* Shared Packet Processor resources */
688struct mvpp2 {
689 /* Shared registers' base addresses */
690 void __iomem *base;
691 void __iomem *lms_base;
692
693 /* List of pointers to port structures */
694 struct mvpp2_port **port_list;
695
696 /* Aggregated TXQs */
697 struct mvpp2_tx_queue *aggr_txqs;
698
699 /* BM pools */
700 struct mvpp2_bm_pool *bm_pools;
701
702 /* PRS shadow table */
703 struct mvpp2_prs_shadow *prs_shadow;
704 /* PRS auxiliary table for double vlan entries control */
705 bool *prs_double_vlans;
706
707 /* Tclk value */
708 u32 tclk;
709
710 struct mii_dev *bus;
711};
712
713struct mvpp2_pcpu_stats {
714 u64 rx_packets;
715 u64 rx_bytes;
716 u64 tx_packets;
717 u64 tx_bytes;
718};
719
720struct mvpp2_port {
721 u8 id;
722
723 int irq;
724
725 struct mvpp2 *priv;
726
727 /* Per-port registers' base address */
728 void __iomem *base;
729
730 struct mvpp2_rx_queue **rxqs;
731 struct mvpp2_tx_queue **txqs;
732
733 int pkt_size;
734
735 u32 pending_cause_rx;
736
737 /* Per-CPU port control */
738 struct mvpp2_port_pcpu __percpu *pcpu;
739
740 /* Flags */
741 unsigned long flags;
742
743 u16 tx_ring_size;
744 u16 rx_ring_size;
745 struct mvpp2_pcpu_stats __percpu *stats;
746
747 struct phy_device *phy_dev;
748 phy_interface_t phy_interface;
749 int phy_node;
750 int phyaddr;
751 int init;
752 unsigned int link;
753 unsigned int duplex;
754 unsigned int speed;
755
756 struct mvpp2_bm_pool *pool_long;
757 struct mvpp2_bm_pool *pool_short;
758
759 /* Index of first port's physical RXQ */
760 u8 first_rxq;
761
762 u8 dev_addr[ETH_ALEN];
763};
764
765/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
766 * layout of the transmit and reception DMA descriptors, and their
767 * layout is therefore defined by the hardware design
768 */
769
770#define MVPP2_TXD_L3_OFF_SHIFT 0
771#define MVPP2_TXD_IP_HLEN_SHIFT 8
772#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
773#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
774#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
775#define MVPP2_TXD_PADDING_DISABLE BIT(23)
776#define MVPP2_TXD_L4_UDP BIT(24)
777#define MVPP2_TXD_L3_IP6 BIT(26)
778#define MVPP2_TXD_L_DESC BIT(28)
779#define MVPP2_TXD_F_DESC BIT(29)
780
781#define MVPP2_RXD_ERR_SUMMARY BIT(15)
782#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
783#define MVPP2_RXD_ERR_CRC 0x0
784#define MVPP2_RXD_ERR_OVERRUN BIT(13)
785#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
786#define MVPP2_RXD_BM_POOL_ID_OFFS 16
787#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
788#define MVPP2_RXD_HWF_SYNC BIT(21)
789#define MVPP2_RXD_L4_CSUM_OK BIT(22)
790#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
791#define MVPP2_RXD_L4_TCP BIT(25)
792#define MVPP2_RXD_L4_UDP BIT(26)
793#define MVPP2_RXD_L3_IP4 BIT(28)
794#define MVPP2_RXD_L3_IP6 BIT(30)
795#define MVPP2_RXD_BUF_HDR BIT(31)
796
797struct mvpp2_tx_desc {
798 u32 command; /* Options used by HW for packet transmitting.*/
799 u8 packet_offset; /* the offset from the buffer beginning */
800 u8 phys_txq; /* destination queue ID */
801 u16 data_size; /* data size of transmitted packet in bytes */
802 u32 buf_dma_addr; /* physical addr of transmitted buffer */
803 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
804 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
805 u32 reserved2; /* reserved (for future use) */
806};
807
808struct mvpp2_rx_desc {
809 u32 status; /* info about received packet */
810 u16 reserved1; /* parser_info (for future use, PnC) */
811 u16 data_size; /* size of received packet in bytes */
812 u32 buf_dma_addr; /* physical address of the buffer */
813 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
814 u16 reserved2; /* gem_port_id (for future use, PON) */
815 u16 reserved3; /* csum_l4 (for future use, PnC) */
816 u8 reserved4; /* bm_qset (for future use, BM) */
817 u8 reserved5;
818 u16 reserved6; /* classify_info (for future use, PnC) */
819 u32 reserved7; /* flow_id (for future use, PnC) */
820 u32 reserved8;
821};
822
823/* Per-CPU Tx queue control */
824struct mvpp2_txq_pcpu {
825 int cpu;
826
827 /* Number of Tx DMA descriptors in the descriptor ring */
828 int size;
829
830 /* Number of currently used Tx DMA descriptor in the
831 * descriptor ring
832 */
833 int count;
834
835 /* Number of Tx DMA descriptors reserved for each CPU */
836 int reserved_num;
837
838 /* Index of last TX DMA descriptor that was inserted */
839 int txq_put_index;
840
841 /* Index of the TX DMA descriptor to be cleaned up */
842 int txq_get_index;
843};
844
845struct mvpp2_tx_queue {
846 /* Physical number of this Tx queue */
847 u8 id;
848
849 /* Logical number of this Tx queue */
850 u8 log_id;
851
852 /* Number of Tx DMA descriptors in the descriptor ring */
853 int size;
854
855 /* Number of currently used Tx DMA descriptor in the descriptor ring */
856 int count;
857
858 /* Per-CPU control of physical Tx queues */
859 struct mvpp2_txq_pcpu __percpu *pcpu;
860
861 u32 done_pkts_coal;
862
 863 /* Virtual address of the Tx DMA descriptors array */
864 struct mvpp2_tx_desc *descs;
865
866 /* DMA address of the Tx DMA descriptors array */
867 dma_addr_t descs_dma;
868
869 /* Index of the last Tx DMA descriptor */
870 int last_desc;
871
872 /* Index of the next Tx DMA descriptor to process */
873 int next_desc_to_proc;
874};
875
876struct mvpp2_rx_queue {
877 /* RX queue number, in the range 0-31 for physical RXQs */
878 u8 id;
879
880 /* Num of rx descriptors in the rx descriptor ring */
881 int size;
882
883 u32 pkts_coal;
884 u32 time_coal;
885
886 /* Virtual address of the RX DMA descriptors array */
887 struct mvpp2_rx_desc *descs;
888
889 /* DMA address of the RX DMA descriptors array */
890 dma_addr_t descs_dma;
891
892 /* Index of the last RX DMA descriptor */
893 int last_desc;
894
895 /* Index of the next RX DMA descriptor to process */
896 int next_desc_to_proc;
897
898 /* ID of port to which physical RXQ is mapped */
899 int port;
900
901 /* Port's logic RXQ number to which physical RXQ is mapped */
902 int logic_rxq;
903};
904
905union mvpp2_prs_tcam_entry {
906 u32 word[MVPP2_PRS_TCAM_WORDS];
907 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
908};
909
910union mvpp2_prs_sram_entry {
911 u32 word[MVPP2_PRS_SRAM_WORDS];
912 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
913};
914
915struct mvpp2_prs_entry {
916 u32 index;
917 union mvpp2_prs_tcam_entry tcam;
918 union mvpp2_prs_sram_entry sram;
919};
920
921struct mvpp2_prs_shadow {
922 bool valid;
923 bool finish;
924
925 /* Lookup ID */
926 int lu;
927
928 /* User defined offset */
929 int udf;
930
931 /* Result info */
932 u32 ri;
933 u32 ri_mask;
934};
935
936struct mvpp2_cls_flow_entry {
937 u32 index;
938 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
939};
940
941struct mvpp2_cls_lookup_entry {
942 u32 lkpid;
943 u32 way;
944 u32 data;
945};
946
947struct mvpp2_bm_pool {
948 /* Pool number in the range 0-7 */
949 int id;
950 enum mvpp2_bm_type type;
951
952 /* Buffer Pointers Pool External (BPPE) size */
953 int size;
954 /* Number of buffers for this pool */
955 int buf_num;
956 /* Pool buffer size */
957 int buf_size;
958 /* Packet size */
959 int pkt_size;
960
961 /* BPPE virtual base address */
962 unsigned long *virt_addr;
963 /* BPPE DMA base address */
964 dma_addr_t dma_addr;
965
966 /* Ports using BM pool */
967 u32 port_map;
968
969 /* Occupied buffers indicator */
970 int in_use_thresh;
971};
972
973/* Static declarations */
974
975/* Number of RXQs used by single port */
976static int rxq_number = MVPP2_DEFAULT_RXQ;
977/* Number of TXQs used by single port */
978static int txq_number = MVPP2_DEFAULT_TXQ;
979
980#define MVPP2_DRIVER_NAME "mvpp2"
981#define MVPP2_DRIVER_VERSION "1.0"
982
983/*
984 * U-Boot internal data, mostly uncached buffers for descriptors and data
985 */
986struct buffer_location {
987 struct mvpp2_tx_desc *aggr_tx_descs;
988 struct mvpp2_tx_desc *tx_descs;
989 struct mvpp2_rx_desc *rx_descs;
990 unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
991 unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
992 int first_rxq;
993};
994
995/*
996 * All 4 interfaces use the same global buffer, since only one interface
997 * can be enabled at once
998 */
999static struct buffer_location buffer_loc;
1000
1001/*
1002 * Page table entries are set to 1MB, or multiples of 1MB
 1003 * (not < 1MB). The driver uses only a few buffer descriptors, so 1MB of BD space is sufficient.
1004 */
1005#define BD_SPACE (1 << 20)
1006
1007/* Utility/helper methods */
1008
1009static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1010{
1011 writel(data, priv->base + offset);
1012}
1013
1014static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1015{
1016 return readl(priv->base + offset);
1017}
1018
1019static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1020{
1021 txq_pcpu->txq_get_index++;
1022 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1023 txq_pcpu->txq_get_index = 0;
1024}
1025
1026/* Get number of physical egress port */
1027static inline int mvpp2_egress_port(struct mvpp2_port *port)
1028{
1029 return MVPP2_MAX_TCONT + port->id;
1030}
1031
1032/* Get number of physical TXQ */
1033static inline int mvpp2_txq_phys(int port, int txq)
1034{
1035 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1036}
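/*
 * Example: with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, port 0 / txq 0
 * maps to physical TXQ (16 + 0) * 8 + 0 = 128, i.e. the Ethernet ports'
 * queues start after those reserved for the 16 T-CONTs.
 */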
1037
1038/* Parser configuration routines */
1039
1040/* Update parser tcam and sram hw entries */
1041static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1042{
1043 int i;
1044
1045 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1046 return -EINVAL;
1047
1048 /* Clear entry invalidation bit */
1049 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1050
1051 /* Write tcam index - indirect access */
1052 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1053 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1054 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1055
1056 /* Write sram index - indirect access */
1057 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1058 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1059 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1060
1061 return 0;
1062}
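/*
 * Minimal usage sketch of the indirect-access pattern used throughout this
 * file: build a software copy of the entry, then push it to hardware (the
 * helpers referenced here are defined further below):
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	memset(&pe, 0, sizeof(pe));
 *	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
 *	pe.index = MVPP2_PE_MH_DEFAULT;
 *	mvpp2_prs_hw_write(priv, &pe);
 */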
1063
1064/* Read tcam entry from hw */
1065static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1066{
1067 int i;
1068
1069 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1070 return -EINVAL;
1071
1072 /* Write tcam index - indirect access */
1073 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1074
1075 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1076 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1077 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1078 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1079
1080 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1081 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1082
1083 /* Write sram index - indirect access */
1084 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1085 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1086 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1087
1088 return 0;
1089}
1090
1091/* Invalidate tcam hw entry */
1092static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1093{
1094 /* Write index - indirect access */
1095 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1096 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1097 MVPP2_PRS_TCAM_INV_MASK);
1098}
1099
1100/* Enable shadow table entry and set its lookup ID */
1101static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1102{
1103 priv->prs_shadow[index].valid = true;
1104 priv->prs_shadow[index].lu = lu;
1105}
1106
1107/* Update ri fields in shadow table entry */
1108static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1109 unsigned int ri, unsigned int ri_mask)
1110{
1111 priv->prs_shadow[index].ri_mask = ri_mask;
1112 priv->prs_shadow[index].ri = ri;
1113}
1114
1115/* Update lookup field in tcam sw entry */
1116static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1117{
1118 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1119
1120 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1121 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1122}
1123
1124/* Update mask for single port in tcam sw entry */
1125static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1126 unsigned int port, bool add)
1127{
1128 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1129
1130 if (add)
1131 pe->tcam.byte[enable_off] &= ~(1 << port);
1132 else
1133 pe->tcam.byte[enable_off] |= 1 << port;
1134}
1135
1136/* Update port map in tcam sw entry */
1137static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1138 unsigned int ports)
1139{
1140 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1141 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1142
1143 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1144 pe->tcam.byte[enable_off] &= ~port_mask;
1145 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1146}
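/*
 * Note that the enable byte holds the port map inverted: ports = BIT(0)
 * (only port 0 enabled) is stored as 0xfe, while ports = 0 (mask all ports)
 * is stored as 0xff.
 */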
1147
1148/* Obtain port map from tcam sw entry */
1149static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1150{
1151 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1152
1153 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1154}
1155
1156/* Set byte of data and its enable bits in tcam sw entry */
1157static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1158 unsigned int offs, unsigned char byte,
1159 unsigned char enable)
1160{
1161 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1162 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1163}
1164
1165/* Get byte of data and its enable bits from tcam sw entry */
1166static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1167 unsigned int offs, unsigned char *byte,
1168 unsigned char *enable)
1169{
1170 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1171 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1172}
1173
1174/* Set ethertype in tcam sw entry */
1175static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1176 unsigned short ethertype)
1177{
1178 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1179 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1180}
1181
1182/* Set bits in sram sw entry */
1183static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1184 int val)
1185{
1186 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1187}
1188
1189/* Clear bits in sram sw entry */
1190static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1191 int val)
1192{
1193 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1194}
1195
1196/* Update ri bits in sram sw entry */
1197static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1198 unsigned int bits, unsigned int mask)
1199{
1200 unsigned int i;
1201
1202 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1203 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1204
1205 if (!(mask & BIT(i)))
1206 continue;
1207
1208 if (bits & BIT(i))
1209 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1210 else
1211 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1212
1213 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1214 }
1215}
1216
1217/* Update ai bits in sram sw entry */
1218static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1219 unsigned int bits, unsigned int mask)
1220{
1221 unsigned int i;
1222 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1223
1224 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1225
1226 if (!(mask & BIT(i)))
1227 continue;
1228
1229 if (bits & BIT(i))
1230 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1231 else
1232 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1233
1234 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1235 }
1236}
1237
1238/* Read ai bits from sram sw entry */
1239static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1240{
1241 u8 bits;
1242 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1243 int ai_en_off = ai_off + 1;
1244 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1245
1246 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1247 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1248
1249 return bits;
1250}
1251
1252/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 1253 * lookup iteration
1254 */
1255static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1256 unsigned int lu)
1257{
1258 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1259
1260 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1261 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1262 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1263}
1264
1265/* In the sram sw entry set sign and value of the next lookup offset
1266 * and the offset value generated to the classifier
1267 */
1268static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1269 unsigned int op)
1270{
1271 /* Set sign */
1272 if (shift < 0) {
1273 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1274 shift = 0 - shift;
1275 } else {
1276 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1277 }
1278
1279 /* Set value */
1280 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1281 (unsigned char)shift;
1282
1283 /* Reset and set operation */
1284 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1285 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1286 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1287
1288 /* Set base offset as current */
1289 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1290}
1291
1292/* In the sram sw entry set sign and value of the user defined offset
1293 * generated to the classifier
1294 */
1295static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1296 unsigned int type, int offset,
1297 unsigned int op)
1298{
1299 /* Set sign */
1300 if (offset < 0) {
1301 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1302 offset = 0 - offset;
1303 } else {
1304 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1305 }
1306
1307 /* Set value */
1308 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1309 MVPP2_PRS_SRAM_UDF_MASK);
1310 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1311 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1312 MVPP2_PRS_SRAM_UDF_BITS)] &=
1313 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1314 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1315 MVPP2_PRS_SRAM_UDF_BITS)] |=
1316 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1317
1318 /* Set offset type */
1319 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1320 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1321 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1322
1323 /* Set offset operation */
1324 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1325 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1326 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1327
1328 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1329 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1330 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1331 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1332
1333 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1334 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1335 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1336
1337 /* Set base offset as current */
1338 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1339}
1340
1341/* Find parser flow entry */
1342static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1343{
1344 struct mvpp2_prs_entry *pe;
1345 int tid;
1346
1347 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1348 if (!pe)
1349 return NULL;
1350 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1351
 1352 /* Go through all entries with MVPP2_PRS_LU_FLOWS */
1353 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1354 u8 bits;
1355
1356 if (!priv->prs_shadow[tid].valid ||
1357 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1358 continue;
1359
1360 pe->index = tid;
1361 mvpp2_prs_hw_read(priv, pe);
1362 bits = mvpp2_prs_sram_ai_get(pe);
1363
 1364 /* Sram stores the classification lookup ID in AI bits [5:0] */
1365 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1366 return pe;
1367 }
1368 kfree(pe);
1369
1370 return NULL;
1371}
1372
1373/* Return first free tcam index, seeking from start to end */
1374static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1375 unsigned char end)
1376{
1377 int tid;
1378
1379 if (start > end)
1380 swap(start, end);
1381
1382 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1383 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1384
1385 for (tid = start; tid <= end; tid++) {
1386 if (!priv->prs_shadow[tid].valid)
1387 return tid;
1388 }
1389
1390 return -EINVAL;
1391}
1392
1393/* Enable/disable dropping all mac da's */
1394static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1395{
1396 struct mvpp2_prs_entry pe;
1397
1398 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
 1399 /* Entry exists - update port only */
1400 pe.index = MVPP2_PE_DROP_ALL;
1401 mvpp2_prs_hw_read(priv, &pe);
1402 } else {
1403 /* Entry doesn't exist - create new */
1404 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1405 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1406 pe.index = MVPP2_PE_DROP_ALL;
1407
1408 /* Non-promiscuous mode for all ports - DROP unknown packets */
1409 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1410 MVPP2_PRS_RI_DROP_MASK);
1411
1412 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1413 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1414
1415 /* Update shadow table */
1416 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1417
1418 /* Mask all ports */
1419 mvpp2_prs_tcam_port_map_set(&pe, 0);
1420 }
1421
1422 /* Update port mask */
1423 mvpp2_prs_tcam_port_set(&pe, port, add);
1424
1425 mvpp2_prs_hw_write(priv, &pe);
1426}
1427
1428/* Set port to promiscuous mode */
1429static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1430{
1431 struct mvpp2_prs_entry pe;
1432
1433 /* Promiscuous mode - Accept unknown packets */
1434
1435 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
 1436 /* Entry exists - update port only */
1437 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1438 mvpp2_prs_hw_read(priv, &pe);
1439 } else {
1440 /* Entry doesn't exist - create new */
1441 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1442 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1443 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1444
1445 /* Continue - set next lookup */
1446 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1447
1448 /* Set result info bits */
1449 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1450 MVPP2_PRS_RI_L2_CAST_MASK);
1451
1452 /* Shift to ethertype */
1453 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1454 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1455
1456 /* Mask all ports */
1457 mvpp2_prs_tcam_port_map_set(&pe, 0);
1458
1459 /* Update shadow table */
1460 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1461 }
1462
1463 /* Update port mask */
1464 mvpp2_prs_tcam_port_set(&pe, port, add);
1465
1466 mvpp2_prs_hw_write(priv, &pe);
1467}
1468
1469/* Accept multicast */
1470static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1471 bool add)
1472{
1473 struct mvpp2_prs_entry pe;
1474 unsigned char da_mc;
1475
1476 /* Ethernet multicast address first byte is
1477 * 0x01 for IPv4 and 0x33 for IPv6
1478 */
1479 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1480
1481 if (priv->prs_shadow[index].valid) {
 1482 /* Entry exists - update port only */
1483 pe.index = index;
1484 mvpp2_prs_hw_read(priv, &pe);
1485 } else {
1486 /* Entry doesn't exist - create new */
1487 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1488 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1489 pe.index = index;
1490
1491 /* Continue - set next lookup */
1492 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1493
1494 /* Set result info bits */
1495 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1496 MVPP2_PRS_RI_L2_CAST_MASK);
1497
1498 /* Update tcam entry data first byte */
1499 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1500
1501 /* Shift to ethertype */
1502 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1503 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1504
1505 /* Mask all ports */
1506 mvpp2_prs_tcam_port_map_set(&pe, 0);
1507
1508 /* Update shadow table */
1509 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1510 }
1511
1512 /* Update port mask */
1513 mvpp2_prs_tcam_port_set(&pe, port, add);
1514
1515 mvpp2_prs_hw_write(priv, &pe);
1516}
1517
1518/* Parser per-port initialization */
1519static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1520 int lu_max, int offset)
1521{
1522 u32 val;
1523
1524 /* Set lookup ID */
1525 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1526 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1527 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1528 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1529
1530 /* Set maximum number of loops for packet received from port */
1531 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1532 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1533 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1534 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1535
1536 /* Set initial offset for packet header extraction for the first
1537 * searching loop
1538 */
1539 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1540 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1541 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1542 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1543}
1544
1545/* Default flow entries initialization for all ports */
1546static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1547{
1548 struct mvpp2_prs_entry pe;
1549 int port;
1550
1551 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1552 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1553 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1554 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1555
1556 /* Mask all ports */
1557 mvpp2_prs_tcam_port_map_set(&pe, 0);
1558
1559 /* Set flow ID*/
1560 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1561 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1562
1563 /* Update shadow table and hw entry */
1564 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1565 mvpp2_prs_hw_write(priv, &pe);
1566 }
1567}
1568
1569/* Set default entry for Marvell Header field */
1570static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1571{
1572 struct mvpp2_prs_entry pe;
1573
1574 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1575
1576 pe.index = MVPP2_PE_MH_DEFAULT;
1577 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1578 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1579 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1580 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1581
1582 /* Unmask all ports */
1583 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1584
1585 /* Update shadow table and hw entry */
1586 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1587 mvpp2_prs_hw_write(priv, &pe);
1588}
1589
 1590/* Set default entries (place holders) for promiscuous, non-promiscuous and
1591 * multicast MAC addresses
1592 */
1593static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1594{
1595 struct mvpp2_prs_entry pe;
1596
1597 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1598
1599 /* Non-promiscuous mode for all ports - DROP unknown packets */
1600 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1601 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1602
1603 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1604 MVPP2_PRS_RI_DROP_MASK);
1605 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1606 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1607
1608 /* Unmask all ports */
1609 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1610
1611 /* Update shadow table and hw entry */
1612 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1613 mvpp2_prs_hw_write(priv, &pe);
1614
1615 /* place holders only - no ports */
1616 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1617 mvpp2_prs_mac_promisc_set(priv, 0, false);
1618 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1619 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1620}
1621
1622/* Match basic ethertypes */
1623static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1624{
1625 struct mvpp2_prs_entry pe;
1626 int tid;
1627
1628 /* Ethertype: PPPoE */
1629 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1630 MVPP2_PE_LAST_FREE_TID);
1631 if (tid < 0)
1632 return tid;
1633
1634 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1635 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1636 pe.index = tid;
1637
1638 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1639
1640 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1641 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1642 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1643 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1644 MVPP2_PRS_RI_PPPOE_MASK);
1645
1646 /* Update shadow table and hw entry */
1647 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1648 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1649 priv->prs_shadow[pe.index].finish = false;
1650 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1651 MVPP2_PRS_RI_PPPOE_MASK);
1652 mvpp2_prs_hw_write(priv, &pe);
1653
1654 /* Ethertype: ARP */
1655 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1656 MVPP2_PE_LAST_FREE_TID);
1657 if (tid < 0)
1658 return tid;
1659
1660 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1661 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1662 pe.index = tid;
1663
1664 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
1665
1666 /* Generate flow in the next iteration */
1667 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1668 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1669 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1670 MVPP2_PRS_RI_L3_PROTO_MASK);
1671 /* Set L3 offset */
1672 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1673 MVPP2_ETH_TYPE_LEN,
1674 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1675
1676 /* Update shadow table and hw entry */
1677 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1678 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1679 priv->prs_shadow[pe.index].finish = true;
1680 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1681 MVPP2_PRS_RI_L3_PROTO_MASK);
1682 mvpp2_prs_hw_write(priv, &pe);
1683
1684 /* Ethertype: LBTD */
1685 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1686 MVPP2_PE_LAST_FREE_TID);
1687 if (tid < 0)
1688 return tid;
1689
1690 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1691 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1692 pe.index = tid;
1693
1694 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1695
1696 /* Generate flow in the next iteration */
1697 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1698 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1699 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1700 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1701 MVPP2_PRS_RI_CPU_CODE_MASK |
1702 MVPP2_PRS_RI_UDF3_MASK);
1703 /* Set L3 offset */
1704 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1705 MVPP2_ETH_TYPE_LEN,
1706 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1707
1708 /* Update shadow table and hw entry */
1709 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1710 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1711 priv->prs_shadow[pe.index].finish = true;
1712 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1713 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1714 MVPP2_PRS_RI_CPU_CODE_MASK |
1715 MVPP2_PRS_RI_UDF3_MASK);
1716 mvpp2_prs_hw_write(priv, &pe);
1717
1718 /* Ethertype: IPv4 without options */
1719 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1720 MVPP2_PE_LAST_FREE_TID);
1721 if (tid < 0)
1722 return tid;
1723
1724 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1725 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1726 pe.index = tid;
1727
1728 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
1729 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1730 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1731 MVPP2_PRS_IPV4_HEAD_MASK |
1732 MVPP2_PRS_IPV4_IHL_MASK);
1733
1734 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1735 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1736 MVPP2_PRS_RI_L3_PROTO_MASK);
1737 /* Skip eth_type + 4 bytes of IP header */
1738 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1739 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1740 /* Set L3 offset */
1741 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1742 MVPP2_ETH_TYPE_LEN,
1743 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1744
1745 /* Update shadow table and hw entry */
1746 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1747 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1748 priv->prs_shadow[pe.index].finish = false;
1749 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1750 MVPP2_PRS_RI_L3_PROTO_MASK);
1751 mvpp2_prs_hw_write(priv, &pe);
1752
1753 /* Ethertype: IPv4 with options */
1754 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1755 MVPP2_PE_LAST_FREE_TID);
1756 if (tid < 0)
1757 return tid;
1758
1759 pe.index = tid;
1760
1761 /* Clear tcam data before updating */
1762 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
1763 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
1764
1765 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1766 MVPP2_PRS_IPV4_HEAD,
1767 MVPP2_PRS_IPV4_HEAD_MASK);
1768
1769 /* Clear ri before updating */
1770 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1771 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1772 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1773 MVPP2_PRS_RI_L3_PROTO_MASK);
1774
1775 /* Update shadow table and hw entry */
1776 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1777 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1778 priv->prs_shadow[pe.index].finish = false;
1779 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1780 MVPP2_PRS_RI_L3_PROTO_MASK);
1781 mvpp2_prs_hw_write(priv, &pe);
1782
1783 /* Ethertype: IPv6 without options */
1784 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1785 MVPP2_PE_LAST_FREE_TID);
1786 if (tid < 0)
1787 return tid;
1788
1789 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1790 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1791 pe.index = tid;
1792
1793 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
1794
1795 /* Skip DIP of IPV6 header */
1796 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1797 MVPP2_MAX_L3_ADDR_SIZE,
1798 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1799 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1800 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1801 MVPP2_PRS_RI_L3_PROTO_MASK);
1802 /* Set L3 offset */
1803 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1804 MVPP2_ETH_TYPE_LEN,
1805 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1806
1807 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1808 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1809 priv->prs_shadow[pe.index].finish = false;
1810 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1811 MVPP2_PRS_RI_L3_PROTO_MASK);
1812 mvpp2_prs_hw_write(priv, &pe);
1813
1814 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1815 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1816 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1817 pe.index = MVPP2_PE_ETH_TYPE_UN;
1818
1819 /* Unmask all ports */
1820 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1821
1822 /* Generate flow in the next iteration */
1823 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1824 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1825 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1826 MVPP2_PRS_RI_L3_PROTO_MASK);
1827 /* Set L3 offset even though the L3 protocol is unknown */
1828 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1829 MVPP2_ETH_TYPE_LEN,
1830 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1831
1832 /* Update shadow table and hw entry */
1833 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1834 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1835 priv->prs_shadow[pe.index].finish = true;
1836 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1837 MVPP2_PRS_RI_L3_PROTO_MASK);
1838 mvpp2_prs_hw_write(priv, &pe);
1839
1840 return 0;
1841}
1842
1843/* Parser default initialization */
1844static int mvpp2_prs_default_init(struct udevice *dev,
1845 struct mvpp2 *priv)
1846{
1847 int err, index, i;
1848
1849 /* Enable tcam table */
1850 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
1851
1852 /* Clear all tcam and sram entries */
1853 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
1854 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1855 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1856 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
1857
1858 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
1859 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1860 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
1861 }
1862
1863 /* Invalidate all tcam entries */
1864 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
1865 mvpp2_prs_hw_inv(priv, index);
1866
1867 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
1868 sizeof(struct mvpp2_prs_shadow),
1869 GFP_KERNEL);
1870 if (!priv->prs_shadow)
1871 return -ENOMEM;
1872
1873 /* Always start from lookup = 0 */
1874 for (index = 0; index < MVPP2_MAX_PORTS; index++)
1875 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
1876 MVPP2_PRS_PORT_LU_MAX, 0);
1877
1878 mvpp2_prs_def_flow_init(priv);
1879
1880 mvpp2_prs_mh_init(priv);
1881
1882 mvpp2_prs_mac_init(priv);
1883
1884 err = mvpp2_prs_etype_init(priv);
1885 if (err)
1886 return err;
1887
1888 return 0;
1889}
1890
1891/* Compare MAC DA with tcam entry data */
1892static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
1893 const u8 *da, unsigned char *mask)
1894{
1895 unsigned char tcam_byte, tcam_mask;
1896 int index;
1897
1898 for (index = 0; index < ETH_ALEN; index++) {
1899 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
1900 if (tcam_mask != mask[index])
1901 return false;
1902
1903 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
1904 return false;
1905 }
1906
1907 return true;
1908}
1909
1910/* Find tcam entry with matched pair <MAC DA, port> */
1911static struct mvpp2_prs_entry *
1912mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
1913 unsigned char *mask, int udf_type)
1914{
1915 struct mvpp2_prs_entry *pe;
1916 int tid;
1917
1918 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1919 if (!pe)
1920 return NULL;
1921 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
1922
1923 /* Go through all entries with MVPP2_PRS_LU_MAC */
1924 for (tid = MVPP2_PE_FIRST_FREE_TID;
1925 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1926 unsigned int entry_pmap;
1927
1928 if (!priv->prs_shadow[tid].valid ||
1929 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
1930 (priv->prs_shadow[tid].udf != udf_type))
1931 continue;
1932
1933 pe->index = tid;
1934 mvpp2_prs_hw_read(priv, pe);
1935 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
1936
1937 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
1938 entry_pmap == pmap)
1939 return pe;
1940 }
1941 kfree(pe);
1942
1943 return NULL;
1944}
1945
1946/* Update parser's mac da entry */
1947static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
1948 const u8 *da, bool add)
1949{
1950 struct mvpp2_prs_entry *pe;
1951 unsigned int pmap, len, ri;
1952 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1953 int tid;
1954
1955 /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
1956 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
1957 MVPP2_PRS_UDF_MAC_DEF);
1958
1959 /* No such entry */
1960 if (!pe) {
1961 if (!add)
1962 return 0;
1963
1964 /* Create new TCAM entry */
1965 /* Find the first range MAC entry */
1966 for (tid = MVPP2_PE_FIRST_FREE_TID;
1967 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
1968 if (priv->prs_shadow[tid].valid &&
1969 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
1970 (priv->prs_shadow[tid].udf ==
1971 MVPP2_PRS_UDF_MAC_RANGE))
1972 break;
1973
1974 /* Go through all entries from first to last */
1975 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1976 tid - 1);
1977 if (tid < 0)
1978 return tid;
1979
1980 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1981 if (!pe)
1982 return -ENOMEM;
1983 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
1984 pe->index = tid;
1985
1986 /* Mask all ports */
1987 mvpp2_prs_tcam_port_map_set(pe, 0);
1988 }
1989
1990 /* Update port mask */
1991 mvpp2_prs_tcam_port_set(pe, port, add);
1992
1993 /* Invalidate the entry if no ports are left enabled */
1994 pmap = mvpp2_prs_tcam_port_map_get(pe);
1995 if (pmap == 0) {
1996 if (add) {
1997 kfree(pe);
1998 return -1;
1999 }
2000 mvpp2_prs_hw_inv(priv, pe->index);
2001 priv->prs_shadow[pe->index].valid = false;
2002 kfree(pe);
2003 return 0;
2004 }
2005
2006 /* Continue - set next lookup */
2007 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2008
2009 /* Set match on DA */
2010 len = ETH_ALEN;
2011 while (len--)
2012 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2013
2014 /* Set result info bits */
2015 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2016
2017 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2018 MVPP2_PRS_RI_MAC_ME_MASK);
2019 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2020 MVPP2_PRS_RI_MAC_ME_MASK);
2021
2022 /* Shift to ethertype */
2023 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2024 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2025
2026 /* Update shadow table and hw entry */
2027 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2028 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2029 mvpp2_prs_hw_write(priv, pe);
2030
2031 kfree(pe);
2032
2033 return 0;
2034}
2035
2036static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2037{
2038 int err;
2039
2040 /* Remove old parser entry */
2041 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2042 false);
2043 if (err)
2044 return err;
2045
2046 /* Add new parser entry */
2047 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2048 if (err)
2049 return err;
2050
2051 /* Set addr in the device */
2052 memcpy(port->dev_addr, da, ETH_ALEN);
2053
2054 return 0;
2055}
2056
2057/* Set prs flow for the port */
2058static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2059{
2060 struct mvpp2_prs_entry *pe;
2061 int tid;
2062
2063 pe = mvpp2_prs_flow_find(port->priv, port->id);
2064
2065 /* No such entry exists */
2066 if (!pe) {
2067 /* Go through all entries from last to first */
2068 tid = mvpp2_prs_tcam_first_free(port->priv,
2069 MVPP2_PE_LAST_FREE_TID,
2070 MVPP2_PE_FIRST_FREE_TID);
2071 if (tid < 0)
2072 return tid;
2073
2074 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2075 if (!pe)
2076 return -ENOMEM;
2077
2078 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2079 pe->index = tid;
2080
2081 /* Set flow ID */
2082 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2083 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2084
2085 /* Update shadow table */
2086 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2087 }
2088
2089 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2090 mvpp2_prs_hw_write(port->priv, pe);
2091 kfree(pe);
2092
2093 return 0;
2094}
2095
2096/* Classifier configuration routines */
2097
2098/* Update classification flow table registers */
2099static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2100 struct mvpp2_cls_flow_entry *fe)
2101{
2102 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2103 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2104 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2105 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2106}
2107
2108/* Update classification lookup table register */
2109static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2110 struct mvpp2_cls_lookup_entry *le)
2111{
2112 u32 val;
2113
2114 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2115 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2116 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2117}
2118
2119/* Classifier default initialization */
2120static void mvpp2_cls_init(struct mvpp2 *priv)
2121{
2122 struct mvpp2_cls_lookup_entry le;
2123 struct mvpp2_cls_flow_entry fe;
2124 int index;
2125
2126 /* Enable classifier */
2127 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2128
2129 /* Clear classifier flow table */
2130 memset(&fe.data, 0, sizeof(fe.data));
2131 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2132 fe.index = index;
2133 mvpp2_cls_flow_write(priv, &fe);
2134 }
2135
2136 /* Clear classifier lookup table */
2137 le.data = 0;
2138 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2139 le.lkpid = index;
2140 le.way = 0;
2141 mvpp2_cls_lookup_write(priv, &le);
2142
2143 le.way = 1;
2144 mvpp2_cls_lookup_write(priv, &le);
2145 }
2146}
2147
2148static void mvpp2_cls_port_config(struct mvpp2_port *port)
2149{
2150 struct mvpp2_cls_lookup_entry le;
2151 u32 val;
2152
2153 /* Set way for the port */
2154 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2155 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2156 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2157
2158 /* Pick the entry to be accessed in lookup ID decoding table
2159 * according to the way and lkpid.
2160 */
2161 le.lkpid = port->id;
2162 le.way = 0;
2163 le.data = 0;
2164
2165 /* Set initial CPU queue for receiving packets */
2166 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2167 le.data |= port->first_rxq;
2168
2169 /* Disable classification engines */
2170 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2171
2172 /* Update lookup ID table entry */
2173 mvpp2_cls_lookup_write(port->priv, &le);
2174}
2175
2176/* Set CPU queue number for oversize packets */
2177static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2178{
2179 u32 val;
2180
2181 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2182 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2183
2184 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2185 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2186
2187 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2188 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2189 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2190}
2191
2192/* Buffer Manager configuration routines */
2193
2194/* Create pool */
2195static int mvpp2_bm_pool_create(struct udevice *dev,
2196 struct mvpp2 *priv,
2197 struct mvpp2_bm_pool *bm_pool, int size)
2198{
2199 u32 val;
2200
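 /* In this U-Boot port the pool memory comes from the statically
  * reserved buffer_loc area rather than a DMA allocation.
  */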
2201 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002202 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
Stefan Roese96c19042016-02-10 07:22:10 +01002203 if (!bm_pool->virt_addr)
2204 return -ENOMEM;
2205
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002206 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2207 MVPP2_BM_POOL_PTR_ALIGN)) {
Stefan Roese96c19042016-02-10 07:22:10 +01002208 dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2209 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2210 return -ENOMEM;
2211 }
2212
2213 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002214 bm_pool->dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002215 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2216
2217 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2218 val |= MVPP2_BM_START_MASK;
2219 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2220
2221 bm_pool->type = MVPP2_BM_FREE;
2222 bm_pool->size = size;
2223 bm_pool->pkt_size = 0;
2224 bm_pool->buf_num = 0;
2225
2226 return 0;
2227}
2228
2229/* Set pool buffer size */
2230static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2231 struct mvpp2_bm_pool *bm_pool,
2232 int buf_size)
2233{
2234 u32 val;
2235
2236 bm_pool->buf_size = buf_size;
2237
2238 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2239 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2240}
2241
2242/* Free all buffers from the pool */
2243static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2244 struct mvpp2_bm_pool *bm_pool)
2245{
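 /* Buffers are carved from the static buffer_loc area, so there is
  * nothing to actually free here; only the counter is reset.
  */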
2246 bm_pool->buf_num = 0;
2247}
2248
2249/* Cleanup pool */
2250static int mvpp2_bm_pool_destroy(struct udevice *dev,
2251 struct mvpp2 *priv,
2252 struct mvpp2_bm_pool *bm_pool)
2253{
2254 u32 val;
2255
2256 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2257 if (bm_pool->buf_num) {
2258 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2259 return 0;
2260 }
2261
2262 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2263 val |= MVPP2_BM_STOP_MASK;
2264 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2265
2266 return 0;
2267}
2268
2269static int mvpp2_bm_pools_init(struct udevice *dev,
2270 struct mvpp2 *priv)
2271{
2272 int i, err, size;
2273 struct mvpp2_bm_pool *bm_pool;
2274
2275 /* Create all pools with maximum size */
2276 size = MVPP2_BM_POOL_SIZE_MAX;
2277 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2278 bm_pool = &priv->bm_pools[i];
2279 bm_pool->id = i;
2280 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2281 if (err)
2282 goto err_unroll_pools;
2283 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2284 }
2285 return 0;
2286
2287err_unroll_pools:
2288 dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2289 for (i = i - 1; i >= 0; i--)
2290 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2291 return err;
2292}
2293
2294static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2295{
2296 int i, err;
2297
2298 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2299 /* Mask BM all interrupts */
2300 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2301 /* Clear BM cause register */
2302 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2303 }
2304
2305 /* Allocate and initialize BM pools */
2306 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2307 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2308 if (!priv->bm_pools)
2309 return -ENOMEM;
2310
2311 err = mvpp2_bm_pools_init(dev, priv);
2312 if (err < 0)
2313 return err;
2314 return 0;
2315}
2316
2317/* Attach long pool to rxq */
2318static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2319 int lrxq, int long_pool)
2320{
2321 u32 val;
2322 int prxq;
2323
2324 /* Get queue physical ID */
2325 prxq = port->rxqs[lrxq]->id;
2326
2327 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2328 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2329 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
2330 MVPP2_RXQ_POOL_LONG_MASK);
2331
2332 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2333}
2334
2335/* Set pool number in a BM cookie */
2336static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2337{
2338 u32 bm;
2339
2340 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2341 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2342
2343 return bm;
2344}
2345
2346/* Get pool number from a BM cookie */
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002347static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
Stefan Roese96c19042016-02-10 07:22:10 +01002348{
2349 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2350}
2351
2352/* Release buffer to BM */
2353static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002354 dma_addr_t buf_dma_addr,
Thomas Petazzoni09831762017-02-20 10:37:59 +01002355 unsigned long buf_phys_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01002356{
Thomas Petazzoni09831762017-02-20 10:37:59 +01002357 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2358 * returned in the "cookie" field of the RX
2359 * descriptor. Instead of storing the virtual address, we
2360 * store the physical address
2361 */
2362 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002363 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002364}
2365
2366/* Refill BM pool */
2367static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002368 dma_addr_t dma_addr,
Thomas Petazzoni09831762017-02-20 10:37:59 +01002369 phys_addr_t phys_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01002370{
2371 int pool = mvpp2_bm_cookie_pool_get(bm);
2372
Thomas Petazzoni09831762017-02-20 10:37:59 +01002373 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002374}
2375
2376/* Allocate buffers for the pool */
2377static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2378 struct mvpp2_bm_pool *bm_pool, int buf_num)
2379{
2380 int i;
Stefan Roese96c19042016-02-10 07:22:10 +01002381
2382 if (buf_num < 0 ||
2383 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2384 netdev_err(port->dev,
2385 "cannot allocate %d buffers for pool %d\n",
2386 buf_num, bm_pool->id);
2387 return 0;
2388 }
2389
Stefan Roese96c19042016-02-10 07:22:10 +01002390 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni1b085c52017-02-15 12:13:43 +01002391 mvpp2_bm_pool_put(port, bm_pool->id,
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002392 (dma_addr_t)buffer_loc.rx_buffer[i],
2393 (unsigned long)buffer_loc.rx_buffer[i]);
Thomas Petazzoni1b085c52017-02-15 12:13:43 +01002394
Stefan Roese96c19042016-02-10 07:22:10 +01002395 }
2396
2397 /* Update BM driver with number of buffers added to pool */
2398 bm_pool->buf_num += i;
2399 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2400
2401 return i;
2402}
2403
2404/* Notify the driver that the BM pool is being used as a specific type and return the
2405 * pool pointer on success
2406 */
2407static struct mvpp2_bm_pool *
2408mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2409 int pkt_size)
2410{
2411 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2412 int num;
2413
2414 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2415 netdev_err(port->dev, "mixing pool types is forbidden\n");
2416 return NULL;
2417 }
2418
2419 if (new_pool->type == MVPP2_BM_FREE)
2420 new_pool->type = type;
2421
2422 /* Allocate buffers in case BM pool is used as long pool, but packet
2423 * size doesn't match MTU or BM pool hasn't been used yet
2424 */
2425 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2426 (new_pool->pkt_size == 0)) {
2427 int pkts_num;
2428
2429 /* Set default buffer number or free all the buffers in case
2430 * the pool is not empty
2431 */
2432 pkts_num = new_pool->buf_num;
2433 if (pkts_num == 0)
2434 pkts_num = type == MVPP2_BM_SWF_LONG ?
2435 MVPP2_BM_LONG_BUF_NUM :
2436 MVPP2_BM_SHORT_BUF_NUM;
2437 else
2438 mvpp2_bm_bufs_free(NULL,
2439 port->priv, new_pool);
2440
2441 new_pool->pkt_size = pkt_size;
2442
2443 /* Allocate buffers for this pool */
2444 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2445 if (num != pkts_num) {
2446 netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2447 new_pool->id, num, pkts_num);
2448 return NULL;
2449 }
2450 }
2451
2452 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2453 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2454
2455 return new_pool;
2456}
2457
2458/* Initialize pools for swf */
2459static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2460{
2461 int rxq;
2462
2463 if (!port->pool_long) {
2464 port->pool_long =
2465 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2466 MVPP2_BM_SWF_LONG,
2467 port->pkt_size);
2468 if (!port->pool_long)
2469 return -ENOMEM;
2470
2471 port->pool_long->port_map |= (1 << port->id);
2472
2473 for (rxq = 0; rxq < rxq_number; rxq++)
2474 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2475 }
2476
2477 return 0;
2478}
2479
2480/* Port configuration routines */
2481
2482static void mvpp2_port_mii_set(struct mvpp2_port *port)
2483{
2484 u32 val;
2485
2486 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2487
2488 switch (port->phy_interface) {
2489 case PHY_INTERFACE_MODE_SGMII:
2490 val |= MVPP2_GMAC_INBAND_AN_MASK;
2491 break;
2492 case PHY_INTERFACE_MODE_RGMII:
2493 val |= MVPP2_GMAC_PORT_RGMII_MASK;
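 /* fall through: RGMII also takes the default path below to clear the PCS enable bit */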
2494 default:
2495 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2496 }
2497
2498 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2499}
2500
2501static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2502{
2503 u32 val;
2504
2505 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2506 val |= MVPP2_GMAC_FC_ADV_EN;
2507 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2508}
2509
2510static void mvpp2_port_enable(struct mvpp2_port *port)
2511{
2512 u32 val;
2513
2514 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2515 val |= MVPP2_GMAC_PORT_EN_MASK;
2516 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2517 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2518}
2519
2520static void mvpp2_port_disable(struct mvpp2_port *port)
2521{
2522 u32 val;
2523
2524 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2525 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2526 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2527}
2528
2529/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2530static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2531{
2532 u32 val;
2533
2534 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2535 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2536 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2537}
2538
2539/* Configure loopback port */
2540static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2541{
2542 u32 val;
2543
2544 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2545
2546 if (port->speed == 1000)
2547 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2548 else
2549 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2550
2551 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2552 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2553 else
2554 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2555
2556 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2557}
2558
2559static void mvpp2_port_reset(struct mvpp2_port *port)
2560{
2561 u32 val;
2562
2563 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2564 ~MVPP2_GMAC_PORT_RESET_MASK;
2565 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2566
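 /* Read the register back until the reset bit clears, i.e. the port is out of reset */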
2567 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2568 MVPP2_GMAC_PORT_RESET_MASK)
2569 continue;
2570}
2571
2572/* Change maximum receive size of the port */
2573static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2574{
2575 u32 val;
2576
2577 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2578 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
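 /* The max RX size field appears to be programmed in units of 2 bytes,
  * excluding the Marvell header, hence the division by 2 (assumption).
  */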
2579 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2580 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2581 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2582}
2583
2584/* Set defaults to the MVPP2 port */
2585static void mvpp2_defaults_set(struct mvpp2_port *port)
2586{
2587 int tx_port_num, val, queue, ptxq, lrxq;
2588
2589 /* Configure port to loopback if needed */
2590 if (port->flags & MVPP2_F_LOOPBACK)
2591 mvpp2_port_loopback_set(port);
2592
2593 /* Update TX FIFO MIN Threshold */
2594 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2595 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2596 /* Min. TX threshold must be less than minimal packet length */
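 /* The constant below is presumably the 64-byte minimum frame size minus
  * the 4-byte CRC and the 2-byte Marvell header (assumed breakdown).
  */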
2597 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2598 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2599
2600 /* Disable Legacy WRR, Disable EJP, Release from reset */
2601 tx_port_num = mvpp2_egress_port(port);
2602 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2603 tx_port_num);
2604 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2605
2606 /* Close bandwidth for all queues */
2607 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2608 ptxq = mvpp2_txq_phys(port->id, queue);
2609 mvpp2_write(port->priv,
2610 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2611 }
2612
2613 /* Set refill period to 1 usec, refill tokens
2614 * and bucket size to maximum
2615 */
2616 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2617 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2618 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2619 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2620 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2621 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2622 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2623 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2624
2625 /* Set MaximumLowLatencyPacketSize value to 256 */
2626 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2627 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2628 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2629
2630 /* Enable Rx cache snoop */
2631 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2632 queue = port->rxqs[lrxq]->id;
2633 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2634 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2635 MVPP2_SNOOP_BUF_HDR_MASK;
2636 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2637 }
2638}
2639
2640/* Enable/disable receiving packets */
2641static void mvpp2_ingress_enable(struct mvpp2_port *port)
2642{
2643 u32 val;
2644 int lrxq, queue;
2645
2646 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2647 queue = port->rxqs[lrxq]->id;
2648 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2649 val &= ~MVPP2_RXQ_DISABLE_MASK;
2650 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2651 }
2652}
2653
2654static void mvpp2_ingress_disable(struct mvpp2_port *port)
2655{
2656 u32 val;
2657 int lrxq, queue;
2658
2659 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2660 queue = port->rxqs[lrxq]->id;
2661 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2662 val |= MVPP2_RXQ_DISABLE_MASK;
2663 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2664 }
2665}
2666
2667/* Enable transmit via physical egress queue
2668 * - HW starts to take descriptors from DRAM
2669 */
2670static void mvpp2_egress_enable(struct mvpp2_port *port)
2671{
2672 u32 qmap;
2673 int queue;
2674 int tx_port_num = mvpp2_egress_port(port);
2675
2676 /* Enable all initialized TXs. */
2677 qmap = 0;
2678 for (queue = 0; queue < txq_number; queue++) {
2679 struct mvpp2_tx_queue *txq = port->txqs[queue];
2680
2681 if (txq->descs != NULL)
2682 qmap |= (1 << queue);
2683 }
2684
2685 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2686 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2687}
2688
2689/* Disable transmit via physical egress queue
2690 * - HW doesn't take descriptors from DRAM
2691 */
2692static void mvpp2_egress_disable(struct mvpp2_port *port)
2693{
2694 u32 reg_data;
2695 int delay;
2696 int tx_port_num = mvpp2_egress_port(port);
2697
2698 /* Issue stop command for active channels only */
2699 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2700 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2701 MVPP2_TXP_SCHED_ENQ_MASK;
2702 if (reg_data != 0)
2703 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2704 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2705
2706 /* Wait for all Tx activity to terminate. */
2707 delay = 0;
2708 do {
2709 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2710 netdev_warn(port->dev,
2711 "Tx stop timed out, status=0x%08x\n",
2712 reg_data);
2713 break;
2714 }
2715 mdelay(1);
2716 delay++;
2717
2718 /* Check port TX Command register that all
2719 * Tx queues are stopped
2720 */
2721 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2722 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2723}
2724
2725/* Rx descriptors helper methods */
2726
2727/* Get number of Rx descriptors occupied by received packets */
2728static inline int
2729mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2730{
2731 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2732
2733 return val & MVPP2_RXQ_OCCUPIED_MASK;
2734}
2735
2736/* Update Rx queue status with the number of occupied and available
2737 * Rx descriptor slots.
2738 */
2739static inline void
2740mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2741 int used_count, int free_count)
2742{
2743 /* Decrement the number of used descriptors and increment the
2744 * number of free descriptors.
2745 */
2746 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2747
2748 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2749}
2750
2751/* Get pointer to next RX descriptor to be processed by SW */
2752static inline struct mvpp2_rx_desc *
2753mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2754{
2755 int rx_desc = rxq->next_desc_to_proc;
2756
2757 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2758 prefetch(rxq->descs + rxq->next_desc_to_proc);
2759 return rxq->descs + rx_desc;
2760}
2761
2762/* Set rx queue offset */
2763static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2764 int prxq, int offset)
2765{
2766 u32 val;
2767
2768 /* Convert offset from bytes to units of 32 bytes */
2769 offset = offset >> 5;
2770
2771 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2772 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2773
2774 /* Offset is programmed in units of 32 bytes */
2775 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2776 MVPP2_RXQ_PACKET_OFFSET_MASK);
2777
2778 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2779}
2780
2781/* Obtain BM cookie information from descriptor */
2782static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
2783{
2784 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2785 MVPP2_RXD_BM_POOL_ID_OFFS;
2786 int cpu = smp_processor_id();
2787
2788 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
2789 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
2790}
2791
2792/* Tx descriptors helper methods */
2793
2794/* Get number of Tx descriptors waiting to be transmitted by HW */
2795static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
2796 struct mvpp2_tx_queue *txq)
2797{
2798 u32 val;
2799
2800 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
2801 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
2802
2803 return val & MVPP2_TXQ_PENDING_MASK;
2804}
2805
2806/* Get pointer to next Tx descriptor to be processed (send) by HW */
2807static struct mvpp2_tx_desc *
2808mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2809{
2810 int tx_desc = txq->next_desc_to_proc;
2811
2812 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2813 return txq->descs + tx_desc;
2814}
2815
2816/* Update HW with number of aggregated Tx descriptors to be sent */
2817static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2818{
2819 /* aggregated access - relevant TXQ number is written in TX desc */
2820 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2821}
2822
2823/* Get number of sent descriptors and decrement counter.
2824 * The number of sent descriptors is returned.
2825 * Per-CPU access
2826 */
2827static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2828 struct mvpp2_tx_queue *txq)
2829{
2830 u32 val;
2831
2832 /* Reading status reg resets transmitted descriptor counter */
2833 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
2834
2835 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2836 MVPP2_TRANSMITTED_COUNT_OFFSET;
2837}
2838
2839static void mvpp2_txq_sent_counter_clear(void *arg)
2840{
2841 struct mvpp2_port *port = arg;
2842 int queue;
2843
2844 for (queue = 0; queue < txq_number; queue++) {
2845 int id = port->txqs[queue]->id;
2846
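 /* The sent counter is clear-on-read (see mvpp2_txq_sent_desc_proc),
  * so a dummy read resets it.
  */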
2847 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
2848 }
2849}
2850
2851/* Set max sizes for Tx queues */
2852static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2853{
2854 u32 val, size, mtu;
2855 int txq, tx_port_num;
2856
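 /* The token bucket values appear to be counted in bits, hence the
  * bytes-to-bits conversion below (assumption).
  */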
2857 mtu = port->pkt_size * 8;
2858 if (mtu > MVPP2_TXP_MTU_MAX)
2859 mtu = MVPP2_TXP_MTU_MAX;
2860
2861 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2862 mtu = 3 * mtu;
2863
2864 /* Indirect access to registers */
2865 tx_port_num = mvpp2_egress_port(port);
2866 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2867
2868 /* Set MTU */
2869 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2870 val &= ~MVPP2_TXP_MTU_MAX;
2871 val |= mtu;
2872 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2873
2874 /* TXP token size and all TXQs token size must be larger than the MTU */
2875 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2876 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2877 if (size < mtu) {
2878 size = mtu;
2879 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2880 val |= size;
2881 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2882 }
2883
2884 for (txq = 0; txq < txq_number; txq++) {
2885 val = mvpp2_read(port->priv,
2886 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2887 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2888
2889 if (size < mtu) {
2890 size = mtu;
2891 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2892 val |= size;
2893 mvpp2_write(port->priv,
2894 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2895 val);
2896 }
2897 }
2898}
2899
2900/* Free Tx queue skbuffs */
2901static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2902 struct mvpp2_tx_queue *txq,
2903 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2904{
2905 int i;
2906
2907 for (i = 0; i < num; i++)
2908 mvpp2_txq_inc_get(txq_pcpu);
2909}
2910
2911static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2912 u32 cause)
2913{
2914 int queue = fls(cause) - 1;
2915
2916 return port->rxqs[queue];
2917}
2918
2919static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2920 u32 cause)
2921{
2922 int queue = fls(cause) - 1;
2923
2924 return port->txqs[queue];
2925}
2926
2927/* Rx/Tx queue initialization/cleanup methods */
2928
2929/* Allocate and initialize descriptors for aggr TXQ */
2930static int mvpp2_aggr_txq_init(struct udevice *dev,
2931 struct mvpp2_tx_queue *aggr_txq,
2932 int desc_num, int cpu,
2933 struct mvpp2 *priv)
2934{
2935 /* Allocate memory for TX descriptors */
2936 aggr_txq->descs = buffer_loc.aggr_tx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002937 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01002938 if (!aggr_txq->descs)
2939 return -ENOMEM;
2940
2941 /* Make sure descriptor address is cache line size aligned */
2942 BUG_ON(aggr_txq->descs !=
2943 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
2944
2945 aggr_txq->last_desc = aggr_txq->size - 1;
2946
2947 /* Aggr TXQ no reset WA */
2948 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2949 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
2950
2951 /* Set Tx descriptors queue starting address */
2952 /* indirect access */
2953 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002954 aggr_txq->descs_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01002955 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
2956
2957 return 0;
2958}
2959
2960/* Create a specified Rx queue */
2961static int mvpp2_rxq_init(struct mvpp2_port *port,
2962 struct mvpp2_rx_queue *rxq)
2963
2964{
2965 rxq->size = port->rx_ring_size;
2966
2967 /* Allocate memory for RX descriptors */
2968 rxq->descs = buffer_loc.rx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002969 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01002970 if (!rxq->descs)
2971 return -ENOMEM;
2972
2973 BUG_ON(rxq->descs !=
2974 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
2975
2976 rxq->last_desc = rxq->size - 1;
2977
2978 /* Zero occupied and non-occupied counters - direct access */
2979 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2980
2981 /* Set Rx descriptors queue starting address - indirect access */
2982 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002983 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01002984 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2985 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
2986
2987 /* Set Offset */
2988 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
2989
2990 /* Add number of descriptors ready for receiving packets */
2991 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2992
2993 return 0;
2994}
2995
2996/* Push packets received by the RXQ to BM pool */
2997static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
2998 struct mvpp2_rx_queue *rxq)
2999{
3000 int rx_received, i;
3001
3002 rx_received = mvpp2_rxq_received(port, rxq->id);
3003 if (!rx_received)
3004 return;
3005
3006 for (i = 0; i < rx_received; i++) {
3007 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3008 u32 bm = mvpp2_bm_cookie_build(rx_desc);
3009
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003010 mvpp2_pool_refill(port, bm, rx_desc->buf_dma_addr,
Stefan Roese96c19042016-02-10 07:22:10 +01003011 rx_desc->buf_cookie);
3012 }
3013 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3014}
3015
3016/* Cleanup Rx queue */
3017static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3018 struct mvpp2_rx_queue *rxq)
3019{
3020 mvpp2_rxq_drop_pkts(port, rxq);
3021
3022 rxq->descs = NULL;
3023 rxq->last_desc = 0;
3024 rxq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003025 rxq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01003026
3027 /* Clear Rx descriptors queue starting address and size;
3028 * free descriptor number
3029 */
3030 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3031 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3032 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3033 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3034}
3035
3036/* Create and initialize a Tx queue */
3037static int mvpp2_txq_init(struct mvpp2_port *port,
3038 struct mvpp2_tx_queue *txq)
3039{
3040 u32 val;
3041 int cpu, desc, desc_per_txq, tx_port_num;
3042 struct mvpp2_txq_pcpu *txq_pcpu;
3043
3044 txq->size = port->tx_ring_size;
3045
3046 /* Allocate memory for Tx descriptors */
3047 txq->descs = buffer_loc.tx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003048 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01003049 if (!txq->descs)
3050 return -ENOMEM;
3051
3052 /* Make sure descriptor address is cache line size aligned */
3053 BUG_ON(txq->descs !=
3054 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3055
3056 txq->last_desc = txq->size - 1;
3057
3058 /* Set Tx descriptors queue starting address - indirect access */
3059 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003060 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01003061 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3062 MVPP2_TXQ_DESC_SIZE_MASK);
3063 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
3064 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3065 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3066 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3067 val &= ~MVPP2_TXQ_PENDING_MASK;
3068 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3069
3070 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3071 * for each existing TXQ.
3072 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
3073 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
3074 */
3075 desc_per_txq = 16;
3076 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3077 (txq->log_id * desc_per_txq);
3078
3079 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3080 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3081 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
3082
3083 /* WRR / EJP configuration - indirect access */
3084 tx_port_num = mvpp2_egress_port(port);
3085 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3086
3087 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3088 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3089 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3090 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3091 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3092
3093 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3094 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3095 val);
3096
3097 for_each_present_cpu(cpu) {
3098 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3099 txq_pcpu->size = txq->size;
3100 }
3101
3102 return 0;
3103}
3104
3105/* Free allocated TXQ resources */
3106static void mvpp2_txq_deinit(struct mvpp2_port *port,
3107 struct mvpp2_tx_queue *txq)
3108{
3109 txq->descs = NULL;
3110 txq->last_desc = 0;
3111 txq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003112 txq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01003113
3114 /* Set minimum bandwidth for disabled TXQs */
3115 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3116
3117 /* Set Tx descriptors queue starting address and size */
3118 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3119 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3120 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3121}
3122
3123/* Cleanup Tx ports */
3124static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3125{
3126 struct mvpp2_txq_pcpu *txq_pcpu;
3127 int delay, pending, cpu;
3128 u32 val;
3129
3130 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3131 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3132 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3133 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3134
3135 /* The queue has been stopped, so wait for all packets
3136 * to be transmitted.
3137 */
3138 delay = 0;
3139 do {
3140 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3141 netdev_warn(port->dev,
3142 "port %d: cleaning queue %d timed out\n",
3143 port->id, txq->log_id);
3144 break;
3145 }
3146 mdelay(1);
3147 delay++;
3148
3149 pending = mvpp2_txq_pend_desc_num_get(port, txq);
3150 } while (pending);
3151
3152 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3153 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3154
3155 for_each_present_cpu(cpu) {
3156 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3157
3158 /* Release all packets */
3159 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3160
3161 /* Reset queue */
3162 txq_pcpu->count = 0;
3163 txq_pcpu->txq_put_index = 0;
3164 txq_pcpu->txq_get_index = 0;
3165 }
3166}
3167
3168/* Cleanup all Tx queues */
3169static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3170{
3171 struct mvpp2_tx_queue *txq;
3172 int queue;
3173 u32 val;
3174
3175 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3176
3177 /* Reset Tx ports and delete Tx queues */
3178 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3179 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3180
3181 for (queue = 0; queue < txq_number; queue++) {
3182 txq = port->txqs[queue];
3183 mvpp2_txq_clean(port, txq);
3184 mvpp2_txq_deinit(port, txq);
3185 }
3186
3187 mvpp2_txq_sent_counter_clear(port);
3188
3189 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3190 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3191}
3192
3193/* Cleanup all Rx queues */
3194static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3195{
3196 int queue;
3197
3198 for (queue = 0; queue < rxq_number; queue++)
3199 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3200}
3201
3202/* Init all Rx queues for port */
3203static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3204{
3205 int queue, err;
3206
3207 for (queue = 0; queue < rxq_number; queue++) {
3208 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3209 if (err)
3210 goto err_cleanup;
3211 }
3212 return 0;
3213
3214err_cleanup:
3215 mvpp2_cleanup_rxqs(port);
3216 return err;
3217}
3218
3219/* Init all tx queues for port */
3220static int mvpp2_setup_txqs(struct mvpp2_port *port)
3221{
3222 struct mvpp2_tx_queue *txq;
3223 int queue, err;
3224
3225 for (queue = 0; queue < txq_number; queue++) {
3226 txq = port->txqs[queue];
3227 err = mvpp2_txq_init(port, txq);
3228 if (err)
3229 goto err_cleanup;
3230 }
3231
3232 mvpp2_txq_sent_counter_clear(port);
3233 return 0;
3234
3235err_cleanup:
3236 mvpp2_cleanup_txqs(port);
3237 return err;
3238}
3239
3240/* Adjust link */
3241static void mvpp2_link_event(struct mvpp2_port *port)
3242{
3243 struct phy_device *phydev = port->phy_dev;
3244 int status_change = 0;
3245 u32 val;
3246
3247 if (phydev->link) {
3248 if ((port->speed != phydev->speed) ||
3249 (port->duplex != phydev->duplex)) {
3250 u32 val;
3251
3252 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3253 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3254 MVPP2_GMAC_CONFIG_GMII_SPEED |
3255 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3256 MVPP2_GMAC_AN_SPEED_EN |
3257 MVPP2_GMAC_AN_DUPLEX_EN);
3258
3259 if (phydev->duplex)
3260 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3261
3262 if (phydev->speed == SPEED_1000)
3263 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3264 else if (phydev->speed == SPEED_100)
3265 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3266
3267 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3268
3269 port->duplex = phydev->duplex;
3270 port->speed = phydev->speed;
3271 }
3272 }
3273
3274 if (phydev->link != port->link) {
3275 if (!phydev->link) {
3276 port->duplex = -1;
3277 port->speed = 0;
3278 }
3279
3280 port->link = phydev->link;
3281 status_change = 1;
3282 }
3283
3284 if (status_change) {
3285 if (phydev->link) {
3286 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3287 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3288 MVPP2_GMAC_FORCE_LINK_DOWN);
3289 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3290 mvpp2_egress_enable(port);
3291 mvpp2_ingress_enable(port);
3292 } else {
3293 mvpp2_ingress_disable(port);
3294 mvpp2_egress_disable(port);
3295 }
3296 }
3297}
3298
3299/* Main RX/TX processing routines */
3300
3301/* Display more error info */
3302static void mvpp2_rx_error(struct mvpp2_port *port,
3303 struct mvpp2_rx_desc *rx_desc)
3304{
3305 u32 status = rx_desc->status;
3306
3307 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3308 case MVPP2_RXD_ERR_CRC:
3309 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
3310 status, rx_desc->data_size);
3311 break;
3312 case MVPP2_RXD_ERR_OVERRUN:
3313 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
3314 status, rx_desc->data_size);
3315 break;
3316 case MVPP2_RXD_ERR_RESOURCE:
3317 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
3318 status, rx_desc->data_size);
3319 break;
3320 }
3321}
3322
3323/* Refill the BM pool with the given buffer (no skb handling in U-Boot) */
3324static int mvpp2_rx_refill(struct mvpp2_port *port,
3325 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003326 u32 bm, dma_addr_t dma_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01003327{
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003328 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01003329 return 0;
3330}
3331
3332/* Set hw internals when starting port */
3333static void mvpp2_start_dev(struct mvpp2_port *port)
3334{
3335 mvpp2_gmac_max_rx_size_set(port);
3336 mvpp2_txp_max_tx_size_set(port);
3337
3338 mvpp2_port_enable(port);
3339}
3340
3341/* Set hw internals when stopping port */
3342static void mvpp2_stop_dev(struct mvpp2_port *port)
3343{
3344 /* Stop new packets from arriving to RXQs */
3345 mvpp2_ingress_disable(port);
3346
3347 mvpp2_egress_disable(port);
3348 mvpp2_port_disable(port);
3349}
3350
3351static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3352{
3353 struct phy_device *phy_dev;
3354
3355 if (!port->init || port->link == 0) {
3356 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3357 port->phy_interface);
3358 port->phy_dev = phy_dev;
3359 if (!phy_dev) {
3360 netdev_err(port->dev, "cannot connect to phy\n");
3361 return -ENODEV;
3362 }
3363 phy_dev->supported &= PHY_GBIT_FEATURES;
3364 phy_dev->advertising = phy_dev->supported;
3365
3366 port->phy_dev = phy_dev;
3367 port->link = 0;
3368 port->duplex = 0;
3369 port->speed = 0;
3370
3371 phy_config(phy_dev);
3372 phy_startup(phy_dev);
3373 if (!phy_dev->link) {
3374 printf("%s: No link\n", phy_dev->dev->name);
3375 return -1;
3376 }
3377
3378 port->init = 1;
3379 } else {
3380 mvpp2_egress_enable(port);
3381 mvpp2_ingress_enable(port);
3382 }
3383
3384 return 0;
3385}
3386
3387static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3388{
3389 unsigned char mac_bcast[ETH_ALEN] = {
3390 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3391 int err;
3392
3393 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3394 if (err) {
3395 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3396 return err;
3397 }
3398 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3399 port->dev_addr, true);
3400 if (err) {
3401 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
3402 return err;
3403 }
3404 err = mvpp2_prs_def_flow(port);
3405 if (err) {
3406 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3407 return err;
3408 }
3409
3410 /* Allocate the Rx/Tx queues */
3411 err = mvpp2_setup_rxqs(port);
3412 if (err) {
3413 netdev_err(port->dev, "cannot allocate Rx queues\n");
3414 return err;
3415 }
3416
3417 err = mvpp2_setup_txqs(port);
3418 if (err) {
3419 netdev_err(port->dev, "cannot allocate Tx queues\n");
3420 return err;
3421 }
3422
3423 err = mvpp2_phy_connect(dev, port);
3424 if (err < 0)
3425 return err;
3426
3427 mvpp2_link_event(port);
3428
3429 mvpp2_start_dev(port);
3430
3431 return 0;
3432}
3433
3434/* No Device ops here in U-Boot */
3435
3436/* Driver initialization */
3437
3438static void mvpp2_port_power_up(struct mvpp2_port *port)
3439{
3440 mvpp2_port_mii_set(port);
3441 mvpp2_port_periodic_xon_disable(port);
3442 mvpp2_port_fc_adv_enable(port);
3443 mvpp2_port_reset(port);
3444}
3445
3446/* Initialize port HW */
3447static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3448{
3449 struct mvpp2 *priv = port->priv;
3450 struct mvpp2_txq_pcpu *txq_pcpu;
3451 int queue, cpu, err;
3452
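	/* Make sure this port's RX queue range fits into the controller's global RXQ space */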
3453 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
3454 return -EINVAL;
3455
3456 /* Disable port */
3457 mvpp2_egress_disable(port);
3458 mvpp2_port_disable(port);
3459
3460 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3461 GFP_KERNEL);
3462 if (!port->txqs)
3463 return -ENOMEM;
3464
3465 /* Associate physical Tx queues to this port and initialize.
3466 * The mapping is predefined.
3467 */
3468 for (queue = 0; queue < txq_number; queue++) {
3469 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3470 struct mvpp2_tx_queue *txq;
3471
3472 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3473 if (!txq)
3474 return -ENOMEM;
3475
3476 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3477 GFP_KERNEL);
3478 if (!txq->pcpu)
3479 return -ENOMEM;
3480
3481 txq->id = queue_phy_id;
3482 txq->log_id = queue;
3483 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3484 for_each_present_cpu(cpu) {
3485 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3486 txq_pcpu->cpu = cpu;
3487 }
3488
3489 port->txqs[queue] = txq;
3490 }
3491
3492 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3493 GFP_KERNEL);
3494 if (!port->rxqs)
3495 return -ENOMEM;
3496
3497 /* Allocate and initialize Rx queue for this port */
3498 for (queue = 0; queue < rxq_number; queue++) {
3499 struct mvpp2_rx_queue *rxq;
3500
3501 /* Map physical Rx queue to port's logical Rx queue */
3502 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3503 if (!rxq)
3504 return -ENOMEM;
3505 /* Map this Rx queue to a physical queue */
3506 rxq->id = port->first_rxq + queue;
3507 rxq->port = port->id;
3508 rxq->logic_rxq = queue;
3509
3510 port->rxqs[queue] = rxq;
3511 }
3512
3513 /* Configure Rx queue group interrupt for this port */
3514 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ);
3515
3516 /* Create Rx descriptor rings */
3517 for (queue = 0; queue < rxq_number; queue++) {
3518 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3519
3520 rxq->size = port->rx_ring_size;
3521 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3522 rxq->time_coal = MVPP2_RX_COAL_USEC;
3523 }
3524
3525 mvpp2_ingress_disable(port);
3526
3527 /* Port default configuration */
3528 mvpp2_defaults_set(port);
3529
3530 /* Port's classifier configuration */
3531 mvpp2_cls_oversize_rxq_set(port);
3532 mvpp2_cls_port_config(port);
3533
3534 /* Provide an initial Rx packet size */
3535 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3536
3537 /* Initialize pools for swf */
3538 err = mvpp2_swf_bm_pool_init(port);
3539 if (err)
3540 return err;
3541
3542 return 0;
3543}
3544
3545/* Ports initialization */
3546static int mvpp2_port_probe(struct udevice *dev,
3547 struct mvpp2_port *port,
3548 int port_node,
3549 struct mvpp2 *priv,
3550 int *next_first_rxq)
3551{
3552 int phy_node;
3553 u32 id;
3554 u32 phyaddr;
3555 const char *phy_mode_str;
3556 int phy_mode = -1;
3557 int priv_common_regs_num = 2;
3558 int err;
3559
3560 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3561 if (phy_node < 0) {
3562 dev_err(&pdev->dev, "missing phy\n");
3563 return -ENODEV;
3564 }
3565
3566 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3567 if (phy_mode_str)
3568 phy_mode = phy_get_interface_by_name(phy_mode_str);
3569 if (phy_mode == -1) {
3570 dev_err(&pdev->dev, "incorrect phy mode\n");
3571 return -EINVAL;
3572 }
3573
3574 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3575 if (id == -1) {
3576 dev_err(&pdev->dev, "missing port-id value\n");
3577 return -EINVAL;
3578 }
3579
3580 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3581
3582 port->priv = priv;
3583 port->id = id;
3584 port->first_rxq = *next_first_rxq;
3585 port->phy_node = phy_node;
3586 port->phy_interface = phy_mode;
3587 port->phyaddr = phyaddr;
3588
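	/*
	 * The per-port register window is taken from the parent node's 'reg'
	 * entries, after the two common register ranges (index
	 * priv_common_regs_num + id).
	 */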
3589 port->base = (void __iomem *)dev_get_addr_index(dev->parent,
3590 priv_common_regs_num
3591 + id);
3592 if (IS_ERR(port->base))
3593 return PTR_ERR(port->base);
3594
3595 port->tx_ring_size = MVPP2_MAX_TXD;
3596 port->rx_ring_size = MVPP2_MAX_RXD;
3597
3598 err = mvpp2_port_init(dev, port);
3599 if (err < 0) {
3600 dev_err(&pdev->dev, "failed to init port %d\n", id);
3601 return err;
3602 }
3603 mvpp2_port_power_up(port);
3604
3605 /* Increment the first Rx queue number to be used by the next port */
3606 *next_first_rxq += CONFIG_MV_ETH_RXQ;
3607 priv->port_list[id] = port;
3608 return 0;
3609}
3610
3611/* Initialize decoding windows */
3612static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3613 struct mvpp2 *priv)
3614{
3615 u32 win_enable;
3616 int i;
3617
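	/* Clear all six decoding windows first (only the first four have remap registers) */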
3618 for (i = 0; i < 6; i++) {
3619 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3620 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3621
3622 if (i < 4)
3623 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3624 }
3625
3626 win_enable = 0;
3627
3628 for (i = 0; i < dram->num_cs; i++) {
3629 const struct mbus_dram_window *cs = dram->cs + i;
3630
3631 mvpp2_write(priv, MVPP2_WIN_BASE(i),
3632 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3633 dram->mbus_dram_target_id);
3634
3635 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3636 (cs->size - 1) & 0xffff0000);
3637
3638 win_enable |= (1 << i);
3639 }
3640
3641 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3642}
3643
3644/* Initialize Rx FIFOs */
3645static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3646{
3647 int port;
3648
3649 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3650 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3651 MVPP2_RX_FIFO_PORT_DATA_SIZE);
3652 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3653 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3654 }
3655
3656 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3657 MVPP2_RX_FIFO_PORT_MIN_PKT);
3658 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3659}
3660
3661/* Initialize network controller common part HW */
3662static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3663{
3664 const struct mbus_dram_target_info *dram_target_info;
3665 int err, i;
3666 u32 val;
3667
3668 /* Checks for hardware constraints (U-Boot uses only one rxq) */
3669 if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) {
3670 dev_err(&pdev->dev, "invalid queue size parameter\n");
3671 return -EINVAL;
3672 }
3673
3674 /* MBUS windows configuration */
3675 dram_target_info = mvebu_mbus_dram_info();
3676 if (dram_target_info)
3677 mvpp2_conf_mbus_windows(dram_target_info, priv);
3678
3679 /* Disable HW PHY polling */
3680 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3681 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
3682 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3683
3684 /* Allocate and initialize aggregated TXQs */
3685 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
3686 sizeof(struct mvpp2_tx_queue),
3687 GFP_KERNEL);
3688 if (!priv->aggr_txqs)
3689 return -ENOMEM;
3690
3691 for_each_present_cpu(i) {
3692 priv->aggr_txqs[i].id = i;
3693 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
3694 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
3695 MVPP2_AGGR_TXQ_SIZE, i, priv);
3696 if (err < 0)
3697 return err;
3698 }
3699
3700 /* Rx Fifo Init */
3701 mvpp2_rx_fifo_init(priv);
3702
3703 /* Reset Rx queue group interrupt configuration */
3704 for (i = 0; i < MVPP2_MAX_PORTS; i++)
3705 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i),
3706 CONFIG_MV_ETH_RXQ);
3707
3708 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
3709 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
3710
3711	/* Allow cache snoop when transmitting packets */
3712 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
3713
3714 /* Buffer Manager initialization */
3715 err = mvpp2_bm_init(dev, priv);
3716 if (err < 0)
3717 return err;
3718
3719 /* Parser default initialization */
3720 err = mvpp2_prs_default_init(dev, priv);
3721 if (err < 0)
3722 return err;
3723
3724 /* Classifier default initialization */
3725 mvpp2_cls_init(priv);
3726
3727 return 0;
3728}
3729
3730/* SMI / MDIO functions */
3731
3732static int smi_wait_ready(struct mvpp2 *priv)
3733{
3734 u32 timeout = MVPP2_SMI_TIMEOUT;
3735 u32 smi_reg;
3736
3737 /* wait till the SMI is not busy */
3738 do {
3739 /* read smi register */
3740 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3741 if (timeout-- == 0) {
3742 printf("Error: SMI busy timeout\n");
3743 return -EFAULT;
3744 }
3745 } while (smi_reg & MVPP2_SMI_BUSY);
3746
3747 return 0;
3748}
3749
3750/*
3751 * mpp2_mdio_read - miiphy_read callback function.
3752 *
3753 * Returns 16bit phy register value, or -EFAULT on error
3754 */
3755static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
3756{
3757 struct mvpp2 *priv = bus->priv;
3758 u32 smi_reg;
3759 u32 timeout;
3760
3761 /* check parameters */
3762 if (addr > MVPP2_PHY_ADDR_MASK) {
3763 printf("Error: Invalid PHY address %d\n", addr);
3764 return -EFAULT;
3765 }
3766
3767 if (reg > MVPP2_PHY_REG_MASK) {
3768 printf("Err: Invalid register offset %d\n", reg);
3769 return -EFAULT;
3770 }
3771
3772 /* wait till the SMI is not busy */
3773 if (smi_wait_ready(priv) < 0)
3774 return -EFAULT;
3775
3776	/* fill the phy address and register offset and read opcode */
3777 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3778 | (reg << MVPP2_SMI_REG_ADDR_OFFS)
3779 | MVPP2_SMI_OPCODE_READ;
3780
3781 /* write the smi register */
3782 writel(smi_reg, priv->lms_base + MVPP2_SMI);
3783
3784 /* wait till read value is ready */
3785 timeout = MVPP2_SMI_TIMEOUT;
3786
3787 do {
3788 /* read smi register */
3789 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3790 if (timeout-- == 0) {
3791 printf("Err: SMI read ready timeout\n");
3792 return -EFAULT;
3793 }
3794 } while (!(smi_reg & MVPP2_SMI_READ_VALID));
3795
3796 /* Wait for the data to update in the SMI register */
3797 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
3798 ;
3799
3800 return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK;
3801}
3802
3803/*
3804 * mpp2_mdio_write - miiphy_write callback function.
3805 *
3806 * Returns 0 if the write succeeded, or -EFAULT on bad parameters
3807 * or on SMI busy timeout
3808 */
3809static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
3810 u16 value)
3811{
3812 struct mvpp2 *priv = bus->priv;
3813 u32 smi_reg;
3814
3815 /* check parameters */
3816 if (addr > MVPP2_PHY_ADDR_MASK) {
3817 printf("Error: Invalid PHY address %d\n", addr);
3818 return -EFAULT;
3819 }
3820
3821 if (reg > MVPP2_PHY_REG_MASK) {
3822 printf("Err: Invalid register offset %d\n", reg);
3823 return -EFAULT;
3824 }
3825
3826 /* wait till the SMI is not busy */
3827 if (smi_wait_ready(priv) < 0)
3828 return -EFAULT;
3829
3830 /* fill the phy addr and reg offset and write opcode and data */
3831 smi_reg = value << MVPP2_SMI_DATA_OFFS;
3832 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3833 | (reg << MVPP2_SMI_REG_ADDR_OFFS);
3834 smi_reg &= ~MVPP2_SMI_OPCODE_READ;
3835
3836 /* write the smi register */
3837 writel(smi_reg, priv->lms_base + MVPP2_SMI);
3838
3839 return 0;
3840}
3841
3842static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
3843{
3844 struct mvpp2_port *port = dev_get_priv(dev);
3845 struct mvpp2_rx_desc *rx_desc;
3846 struct mvpp2_bm_pool *bm_pool;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003847 dma_addr_t dma_addr;
Stefan Roese96c19042016-02-10 07:22:10 +01003848 u32 bm, rx_status;
3849 int pool, rx_bytes, err;
3850 int rx_received;
3851 struct mvpp2_rx_queue *rxq;
3852 u32 cause_rx_tx, cause_rx, cause_misc;
3853 u8 *data;
3854
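	/*
	 * Read the ISR cause register, drop the TX-done causes and return
	 * early when neither RX nor misc events are pending.
	 */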
3855 cause_rx_tx = mvpp2_read(port->priv,
3856 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
3857 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3858 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3859 if (!cause_rx_tx && !cause_misc)
3860 return 0;
3861
3862 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
3863
3864 /* Process RX packets */
3865 cause_rx |= port->pending_cause_rx;
3866 rxq = mvpp2_get_rx_queue(port, cause_rx);
3867
3868	/* Get the number of received packets pending in this RX queue */
3869 rx_received = mvpp2_rxq_received(port, rxq->id);
3870
3871 /* Return if no packets are received */
3872 if (!rx_received)
3873 return 0;
3874
3875 rx_desc = mvpp2_rxq_next_desc_get(rxq);
3876 rx_status = rx_desc->status;
3877 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003878 dma_addr = rx_desc->buf_dma_addr;
Stefan Roese96c19042016-02-10 07:22:10 +01003879
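	/* The BM cookie identifies which buffer pool this descriptor's buffer came from */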
3880 bm = mvpp2_bm_cookie_build(rx_desc);
3881 pool = mvpp2_bm_cookie_pool_get(bm);
3882 bm_pool = &port->priv->bm_pools[pool];
3883
Stefan Roese96c19042016-02-10 07:22:10 +01003884	/* In case of an error, release the buffer back to the Buffer
3885	 * Manager. The buffer return is handled by the hardware; the
3886	 * buffer's DMA address and cookie needed for the return are
3887	 * taken from the RX descriptor.
3888 */
3889 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
3890 mvpp2_rx_error(port, rx_desc);
3891 /* Return the buffer to the pool */
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003892 mvpp2_pool_refill(port, bm, rx_desc->buf_dma_addr,
Stefan Roese96c19042016-02-10 07:22:10 +01003893 rx_desc->buf_cookie);
3894 return 0;
3895 }
3896
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003897 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01003898 if (err) {
3899 netdev_err(port->dev, "failed to refill BM pools\n");
3900 return 0;
3901 }
3902
3903 /* Update Rx queue management counters */
3904 mb();
3905 mvpp2_rxq_status_update(port, rxq->id, 1, 1);
3906
3907	/* Hand the packet to the caller - skip the Marvell header and RX packet offset (2 + 32 bytes) */
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003908 data = (u8 *)dma_addr + 2 + 32;
Stefan Roese96c19042016-02-10 07:22:10 +01003909
3910 if (rx_bytes <= 0)
3911 return 0;
3912
3913 /*
3914 * No cache invalidation needed here, since the rx_buffer's are
3915	 * No cache invalidation is needed here, since the rx_buffers are
3916	 * located in an uncached memory region
3917 *packetp = data;
3918
3919 return rx_bytes;
3920}
3921
3922/* Drain Txq */
3923static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3924 int enable)
3925{
3926 u32 val;
3927
3928 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3929 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3930 if (enable)
3931 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3932 else
3933 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3934 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3935}
3936
3937static int mvpp2_send(struct udevice *dev, void *packet, int length)
3938{
3939 struct mvpp2_port *port = dev_get_priv(dev);
3940 struct mvpp2_tx_queue *txq, *aggr_txq;
3941 struct mvpp2_tx_desc *tx_desc;
3942 int tx_done;
3943 int timeout;
3944
3945 txq = port->txqs[0];
3946 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
3947
3948 /* Get a descriptor for the first part of the packet */
3949 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3950 tx_desc->phys_txq = txq->id;
3951 tx_desc->data_size = length;
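	/*
	 * The descriptor takes an aligned buffer address plus the offset of
	 * the packet within that alignment, so split the pointer accordingly.
	 */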
Stefan Roesefeb0b332017-02-15 12:46:18 +01003952 tx_desc->packet_offset = (unsigned long)packet & MVPP2_TX_DESC_ALIGN;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003953 tx_desc->buf_dma_addr = (unsigned long)packet & ~MVPP2_TX_DESC_ALIGN;
Stefan Roese96c19042016-02-10 07:22:10 +01003954 /* First and Last descriptor */
3955 tx_desc->command = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
3956 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3957
3958 /* Flush tx data */
Stefan Roeseb4268e22017-02-16 13:58:37 +01003959 flush_dcache_range((unsigned long)packet,
3960 (unsigned long)packet + ALIGN(length, PKTALIGN));
Stefan Roese96c19042016-02-10 07:22:10 +01003961
3962 /* Enable transmit */
3963 mb();
3964 mvpp2_aggr_txq_pend_desc_add(port, 1);
3965
3966 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3967
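	/* Wait until the physical TXQ reports no more pending descriptors */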
3968 timeout = 0;
3969 do {
3970 if (timeout++ > 10000) {
3971 printf("timeout: packet not sent from aggregated to phys TXQ\n");
3972 return 0;
3973 }
3974 tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
3975 } while (tx_done);
3976
3977 /* Enable TXQ drain */
3978 mvpp2_txq_drain(port, txq, 1);
3979
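	/* With drain enabled, wait until the hardware reports the descriptor as transmitted */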
3980 timeout = 0;
3981 do {
3982 if (timeout++ > 10000) {
3983 printf("timeout: packet not sent\n");
3984 return 0;
3985 }
3986 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
3987 } while (!tx_done);
3988
3989 /* Disable TXQ drain */
3990 mvpp2_txq_drain(port, txq, 0);
3991
3992 return 0;
3993}
3994
3995static int mvpp2_start(struct udevice *dev)
3996{
3997 struct eth_pdata *pdata = dev_get_platdata(dev);
3998 struct mvpp2_port *port = dev_get_priv(dev);
3999
4000 /* Load current MAC address */
4001 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
4002
4003	/* Reconfigure the parser to accept the original MAC address */
4004 mvpp2_prs_update_mac_da(port, port->dev_addr);
4005
4006 mvpp2_port_power_up(port);
4007
4008 mvpp2_open(dev, port);
4009
4010 return 0;
4011}
4012
4013static void mvpp2_stop(struct udevice *dev)
4014{
4015 struct mvpp2_port *port = dev_get_priv(dev);
4016
4017 mvpp2_stop_dev(port);
4018 mvpp2_cleanup_rxqs(port);
4019 mvpp2_cleanup_txqs(port);
4020}
4021
4022static int mvpp2_probe(struct udevice *dev)
4023{
4024 struct mvpp2_port *port = dev_get_priv(dev);
4025 struct mvpp2 *priv = dev_get_priv(dev->parent);
4026 int err;
4027
4028 /* Initialize network controller */
4029 err = mvpp2_init(dev, priv);
4030 if (err < 0) {
4031 dev_err(&pdev->dev, "failed to initialize controller\n");
4032 return err;
4033 }
4034
Simon Glassdd79d6e2017-01-17 16:52:55 -07004035 return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv,
Stefan Roese96c19042016-02-10 07:22:10 +01004036 &buffer_loc.first_rxq);
4037}
4038
4039static const struct eth_ops mvpp2_ops = {
4040 .start = mvpp2_start,
4041 .send = mvpp2_send,
4042 .recv = mvpp2_recv,
4043 .stop = mvpp2_stop,
4044};
4045
4046static struct driver mvpp2_driver = {
4047 .name = "mvpp2",
4048 .id = UCLASS_ETH,
4049 .probe = mvpp2_probe,
4050 .ops = &mvpp2_ops,
4051 .priv_auto_alloc_size = sizeof(struct mvpp2_port),
4052 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
4053};
4054
4055/*
4056 * Use a MISC device to bind the N port instances (child nodes) of the
4057 * network base controller to UCLASS_ETH.
4058 */
4059static int mvpp2_base_probe(struct udevice *dev)
4060{
4061 struct mvpp2 *priv = dev_get_priv(dev);
4062 struct mii_dev *bus;
4063 void *bd_space;
4064 u32 size = 0;
4065 int i;
4066
4067 /*
4068 * U-Boot special buffer handling:
4069 *
4070	 * Allocate the buffer area for descriptors and rx_buffers. This is
4071	 * only done once for all interfaces, since only one interface can be
4072	 * active at a time. Make this area DMA-safe by disabling the D-cache.
4073 */
4074
4075 /* Align buffer area for descs and rx_buffers to 1MiB */
4076 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
Stefan Roesefeb0b332017-02-15 12:46:18 +01004077 mmu_set_region_dcache_behaviour((unsigned long)bd_space,
4078 BD_SPACE, DCACHE_OFF);
Stefan Roese96c19042016-02-10 07:22:10 +01004079
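	/* Carve the uncached area into descriptor rings, BM pool arrays and RX buffers */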
4080 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
4081 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
4082
Stefan Roesefeb0b332017-02-15 12:46:18 +01004083 buffer_loc.tx_descs =
4084 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004085 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
4086
Stefan Roesefeb0b332017-02-15 12:46:18 +01004087 buffer_loc.rx_descs =
4088 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004089 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
4090
4091 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
Stefan Roesefeb0b332017-02-15 12:46:18 +01004092 buffer_loc.bm_pool[i] =
4093 (unsigned long *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004094 size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32);
4095 }
4096
4097 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
Stefan Roesefeb0b332017-02-15 12:46:18 +01004098 buffer_loc.rx_buffer[i] =
4099 (unsigned long *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004100 size += RX_BUFFER_SIZE;
4101 }
4102
4103 /* Save base addresses for later use */
4104 priv->base = (void *)dev_get_addr_index(dev, 0);
4105 if (IS_ERR(priv->base))
4106 return PTR_ERR(priv->base);
4107
4108 priv->lms_base = (void *)dev_get_addr_index(dev, 1);
4109 if (IS_ERR(priv->lms_base))
4110 return PTR_ERR(priv->lms_base);
4111
4112 /* Finally create and register the MDIO bus driver */
4113 bus = mdio_alloc();
4114 if (!bus) {
4115 printf("Failed to allocate MDIO bus\n");
4116 return -ENOMEM;
4117 }
4118
4119 bus->read = mpp2_mdio_read;
4120 bus->write = mpp2_mdio_write;
4121	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
4122 bus->priv = (void *)priv;
4123 priv->bus = bus;
4124
4125 return mdio_register(bus);
4126}
4127
4128static int mvpp2_base_bind(struct udevice *parent)
4129{
4130 const void *blob = gd->fdt_blob;
Simon Glassdd79d6e2017-01-17 16:52:55 -07004131 int node = dev_of_offset(parent);
Stefan Roese96c19042016-02-10 07:22:10 +01004132 struct uclass_driver *drv;
4133 struct udevice *dev;
4134 struct eth_pdata *plat;
4135 char *name;
4136 int subnode;
4137 u32 id;
4138
4139 /* Lookup eth driver */
4140 drv = lists_uclass_lookup(UCLASS_ETH);
4141 if (!drv) {
4142 puts("Cannot find eth driver\n");
4143 return -ENOENT;
4144 }
4145
Simon Glass499c29e2016-10-02 17:59:29 -06004146 fdt_for_each_subnode(subnode, blob, node) {
Stefan Roese96c19042016-02-10 07:22:10 +01004147 /* Skip disabled ports */
4148 if (!fdtdec_get_is_enabled(blob, subnode))
4149 continue;
4150
4151 plat = calloc(1, sizeof(*plat));
4152 if (!plat)
4153 return -ENOMEM;
4154
4155 id = fdtdec_get_int(blob, subnode, "port-id", -1);
4156
4157 name = calloc(1, 16);
4158 sprintf(name, "mvpp2-%d", id);
4159
4160 /* Create child device UCLASS_ETH and bind it */
4161 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
Simon Glassdd79d6e2017-01-17 16:52:55 -07004162 dev_set_of_offset(dev, subnode);
Stefan Roese96c19042016-02-10 07:22:10 +01004163 }
4164
4165 return 0;
4166}
4167
4168static const struct udevice_id mvpp2_ids[] = {
4169 { .compatible = "marvell,armada-375-pp2" },
4170 { }
4171};
4172
4173U_BOOT_DRIVER(mvpp2_base) = {
4174 .name = "mvpp2_base",
4175 .id = UCLASS_MISC,
4176 .of_match = mvpp2_ids,
4177 .bind = mvpp2_base_bind,
4178 .probe = mvpp2_base_probe,
4179 .priv_auto_alloc_size = sizeof(struct mvpp2),
4180};