1/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * U-Boot version:
9 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <common.h>
17#include <dm.h>
18#include <dm/device-internal.h>
19#include <dm/lists.h>
20#include <net.h>
21#include <netdev.h>
22#include <config.h>
23#include <malloc.h>
24#include <asm/io.h>
25#include <linux/errno.h>
26#include <phy.h>
27#include <miiphy.h>
28#include <watchdog.h>
29#include <asm/arch/cpu.h>
30#include <asm/arch/soc.h>
31#include <linux/compat.h>
32#include <linux/mbus.h>
33
34DECLARE_GLOBAL_DATA_PTR;
35
36/* Some linux -> U-Boot compatibility stuff */
37#define netdev_err(dev, fmt, args...) \
38 printf(fmt, ##args)
39#define netdev_warn(dev, fmt, args...) \
40 printf(fmt, ##args)
41#define netdev_info(dev, fmt, args...) \
42 printf(fmt, ##args)
43#define netdev_dbg(dev, fmt, args...) \
44 printf(fmt, ##args)
45
46#define ETH_ALEN 6 /* Octets in one ethernet addr */
47
48#define __verify_pcpu_ptr(ptr) \
49do { \
50 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
51 (void)__vpp_verify; \
52} while (0)
53
54#define VERIFY_PERCPU_PTR(__p) \
55({ \
56 __verify_pcpu_ptr(__p); \
57 (typeof(*(__p)) __kernel __force *)(__p); \
58})
59
60#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
61#define smp_processor_id() 0
62#define num_present_cpus() 1
63#define for_each_present_cpu(cpu) \
64 for ((cpu) = 0; (cpu) < 1; (cpu)++)
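/*
 * Note: with num_present_cpus() fixed to 1 and smp_processor_id() fixed to 0,
 * per_cpu_ptr(ptr, cpu) above collapses to the plain pointer, so every
 * "per-CPU" structure in this U-Boot port really has a single instance.
 */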
65
66#define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)
67
68#define CONFIG_NR_CPUS 1
69#define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */
70
71/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
72#define WRAP (2 + ETH_HLEN + 4 + 32)
73#define MTU 1500
74#define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
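/*
 * Worked example of the sizing above: WRAP = 2 + 14 + 4 + 32 = 52 bytes, so
 * MTU + WRAP = 1552 bytes, which ALIGN() rounds up to the next multiple of
 * ARCH_DMA_MINALIGN (e.g. 1600 bytes on platforms with 64-byte DMA alignment).
 */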
75
76#define MVPP2_SMI_TIMEOUT 10000
77
78/* RX Fifo Registers */
79#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
80#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
81#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
82#define MVPP2_RX_FIFO_INIT_REG 0x64
83
84/* RX DMA Top Registers */
85#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
86#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
87#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
88#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
89#define MVPP2_POOL_BUF_SIZE_OFFSET 5
90#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
91#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
92#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
93#define MVPP2_RXQ_POOL_SHORT_OFFS 20
94#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
95#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
96#define MVPP2_RXQ_POOL_LONG_OFFS 24
97#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
98#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
99#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
100#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
101#define MVPP2_RXQ_DISABLE_MASK BIT(31)
102
103/* Parser Registers */
104#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
105#define MVPP2_PRS_PORT_LU_MAX 0xf
106#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
107#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
108#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
109#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
110#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
111#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
112#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
113#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
114#define MVPP2_PRS_TCAM_IDX_REG 0x1100
115#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
116#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
117#define MVPP2_PRS_SRAM_IDX_REG 0x1200
118#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
119#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
120#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
121
122/* Classifier Registers */
123#define MVPP2_CLS_MODE_REG 0x1800
124#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
125#define MVPP2_CLS_PORT_WAY_REG 0x1810
126#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
127#define MVPP2_CLS_LKP_INDEX_REG 0x1814
128#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
129#define MVPP2_CLS_LKP_TBL_REG 0x1818
130#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
131#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
132#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
133#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
134#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
135#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
136#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
137#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
138#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
139#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
140#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
141#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
142
143/* Descriptor Manager Top Registers */
144#define MVPP2_RXQ_NUM_REG 0x2040
145#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
146#define MVPP22_DESC_ADDR_OFFS 8
147#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
148#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
149#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
150#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
151#define MVPP2_RXQ_NUM_NEW_OFFSET 16
152#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
153#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
154#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
155#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
156#define MVPP2_RXQ_THRESH_REG 0x204c
157#define MVPP2_OCCUPIED_THRESH_OFFSET 0
158#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
159#define MVPP2_RXQ_INDEX_REG 0x2050
160#define MVPP2_TXQ_NUM_REG 0x2080
161#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
162#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
163#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
164#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
165#define MVPP2_TXQ_THRESH_REG 0x2094
166#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
167#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
168#define MVPP2_TXQ_INDEX_REG 0x2098
169#define MVPP2_TXQ_PREF_BUF_REG 0x209c
170#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
171#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
172#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
173#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
174#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
175#define MVPP2_TXQ_PENDING_REG 0x20a0
176#define MVPP2_TXQ_PENDING_MASK 0x3fff
177#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
178#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
179#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
180#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
181#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
182#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
183#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
184#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
185#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
186#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
187#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
188#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
189#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
190#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
191#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
192#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
193#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
194
195/* MBUS bridge registers */
196#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
197#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
198#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
199#define MVPP2_BASE_ADDR_ENABLE 0x4060
200
201/* AXI Bridge Registers */
202#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
203#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
204#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
205#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
206#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
207#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
208#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
209#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
210#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
211#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
212#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
213#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
214
215/* Values for AXI Bridge registers */
216#define MVPP22_AXI_ATTR_CACHE_OFFS 0
217#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
218
219#define MVPP22_AXI_CODE_CACHE_OFFS 0
220#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
221
222#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
223#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
224#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
225
226#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
227#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
228
229/* Interrupt Cause and Mask registers */
230#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
231#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
232
233#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
234#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
235#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
236#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
237
238#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
239#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
240
241#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
242#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
243#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
244#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
245
246#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
247#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
248#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
249#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
250#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
251#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
252#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
253#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
254#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
255#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
256#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
257#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
258#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
259#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
260#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
261#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
262#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
263#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
264
265/* Buffer Manager registers */
266#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
267#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
268#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
269#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
270#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
271#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
272#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
273#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
274#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
275#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
276#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
277#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
278#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
279#define MVPP2_BM_START_MASK BIT(0)
280#define MVPP2_BM_STOP_MASK BIT(1)
281#define MVPP2_BM_STATE_MASK BIT(4)
282#define MVPP2_BM_LOW_THRESH_OFFS 8
283#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
284#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
285 MVPP2_BM_LOW_THRESH_OFFS)
286#define MVPP2_BM_HIGH_THRESH_OFFS 16
287#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
288#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
289 MVPP2_BM_HIGH_THRESH_OFFS)
290#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
291#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
292#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
293#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
294#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
295#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
296#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
297#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
298#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
299#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
300#define MVPP2_BM_ADDR_HIGH_ALLOC 0x6444
301#define MVPP2_BM_ADDR_HIGH_PHYS_MASK 0xff
302#define MVPP2_BM_ADDR_HIGH_VIRT_MASK 0xff00
303#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT 8
304#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
305#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
306#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
307#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
308#define MVPP2_BM_VIRT_RLS_REG 0x64c0
309#define MVPP21_BM_MC_RLS_REG 0x64c4
310#define MVPP2_BM_MC_ID_MASK 0xfff
311#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
312#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
313#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
314#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
315#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
316#define MVPP22_BM_MC_RLS_REG 0x64d4
317
318/* TX Scheduler registers */
319#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
320#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
321#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
322#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
323#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
324#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
325#define MVPP2_TXP_SCHED_MTU_REG 0x801c
326#define MVPP2_TXP_MTU_MAX 0x7FFFF
327#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
328#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
329#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
330#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
331#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
332#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
333#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
334#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
335#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
336#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
337#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
338#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
339#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
340#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
341
342/* TX general registers */
343#define MVPP2_TX_SNOOP_REG 0x8800
344#define MVPP2_TX_PORT_FLUSH_REG 0x8810
345#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
346
347/* LMS registers */
348#define MVPP2_SRC_ADDR_MIDDLE 0x24
349#define MVPP2_SRC_ADDR_HIGH 0x28
350#define MVPP2_PHY_AN_CFG0_REG 0x34
351#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
352#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
353#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
354
355/* Per-port registers */
356#define MVPP2_GMAC_CTRL_0_REG 0x0
357#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
358#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
359#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
360#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
361#define MVPP2_GMAC_CTRL_1_REG 0x4
362#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
363#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
364#define MVPP2_GMAC_PCS_LB_EN_BIT 6
365#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
366#define MVPP2_GMAC_SA_LOW_OFFS 7
367#define MVPP2_GMAC_CTRL_2_REG 0x8
368#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
369#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
370#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
371#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
372#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
373#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
374#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
375#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
376#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
377#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
378#define MVPP2_GMAC_FC_ADV_EN BIT(9)
379#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
380#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
381#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
382#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
383#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
384#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
385 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
386
387#define MVPP22_SMI_MISC_CFG_REG 0x1204
388#define MVPP22_SMI_POLLING_EN BIT(10)
389
390#define MVPP22_PORT_BASE 0x30e00
391#define MVPP22_PORT_OFFSET 0x1000
392
393#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
394
395/* Descriptor ring Macros */
396#define MVPP2_QUEUE_NEXT_DESC(q, index) \
397 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
398
399/* SMI: 0xc0054 -> offset 0x54 to lms_base */
400#define MVPP21_SMI 0x0054
401/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
402#define MVPP22_SMI 0x1200
403#define MVPP2_PHY_REG_MASK 0x1f
404/* SMI register fields */
405#define MVPP2_SMI_DATA_OFFS 0 /* Data */
406#define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS)
407#define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
408#define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/
409#define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
410#define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS)
411#define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */
412#define MVPP2_SMI_BUSY (1 << 28) /* Busy */
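/*
 * Illustrative sketch only: an SMI read of register "reg" on PHY address
 * "phyaddr" would be composed from the fields above roughly as
 *
 *	smi_reg = (phyaddr << MVPP2_SMI_DEV_ADDR_OFFS) |
 *		  (reg << MVPP2_SMI_REG_ADDR_OFFS) |
 *		  MVPP2_SMI_OPCODE_READ;
 *
 * with MVPP2_SMI_BUSY polled before issuing the command and
 * MVPP2_SMI_READ_VALID checked before taking the 16-bit data field.
 */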
413
414#define MVPP2_PHY_ADDR_MASK 0x1f
415#define MVPP2_PHY_REG_MASK 0x1f
416
417/* Various constants */
418
419/* Coalescing */
420#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
421#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
422#define MVPP2_RX_COAL_PKTS 32
423#define MVPP2_RX_COAL_USEC 100
424
425/* The two bytes Marvell header. Either contains a special value used
426 * by Marvell switches when a specific hardware mode is enabled (not
427 * supported by this driver) or is filled automatically by zeroes on
428 * the RX side. Those two bytes being at the front of the Ethernet
429 * header, they allow the IP header to be aligned on a 4-byte
430 * boundary automatically: the hardware skips those two bytes on its
431 * own.
432 */
433#define MVPP2_MH_SIZE 2
434#define MVPP2_ETH_TYPE_LEN 2
435#define MVPP2_PPPOE_HDR_SIZE 8
436#define MVPP2_VLAN_TAG_LEN 4
437
438/* Lbtd 802.3 type */
439#define MVPP2_IP_LBDT_TYPE 0xfffa
440
441#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
442#define MVPP2_TX_CSUM_MAX_SIZE 9800
443
444/* Timeout constants */
445#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
446#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
447
448#define MVPP2_TX_MTU_MAX 0x7ffff
449
450/* Maximum number of T-CONTs of PON port */
451#define MVPP2_MAX_TCONT 16
452
453/* Maximum number of supported ports */
454#define MVPP2_MAX_PORTS 4
455
456/* Maximum number of TXQs used by single port */
457#define MVPP2_MAX_TXQ 8
458
459/* Default number of TXQs in use */
460#define MVPP2_DEFAULT_TXQ 1
461
462/* Default number of RXQs in use */
463#define MVPP2_DEFAULT_RXQ 1
464#define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */
465
466/* Max number of Rx descriptors */
467#define MVPP2_MAX_RXD 16
468
469/* Max number of Tx descriptors */
470#define MVPP2_MAX_TXD 16
471
472/* Amount of Tx descriptors that can be reserved at once by CPU */
473#define MVPP2_CPU_DESC_CHUNK 64
474
475/* Max number of Tx descriptors in each aggregated queue */
476#define MVPP2_AGGR_TXQ_SIZE 256
477
478/* Descriptor aligned size */
479#define MVPP2_DESC_ALIGNED_SIZE 32
480
481/* Descriptor alignment mask */
482#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
483
484/* RX FIFO constants */
485#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
486#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
487#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
488
489/* RX buffer constants */
490#define MVPP2_SKB_SHINFO_SIZE \
491 0
492
493#define MVPP2_RX_PKT_SIZE(mtu) \
494 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
495 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
496
497#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
498#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
499#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
500 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
501
502#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
503
504/* IPv6 max L3 address size */
505#define MVPP2_MAX_L3_ADDR_SIZE 16
506
507/* Port flags */
508#define MVPP2_F_LOOPBACK BIT(0)
509
510/* Marvell tag types */
511enum mvpp2_tag_type {
512 MVPP2_TAG_TYPE_NONE = 0,
513 MVPP2_TAG_TYPE_MH = 1,
514 MVPP2_TAG_TYPE_DSA = 2,
515 MVPP2_TAG_TYPE_EDSA = 3,
516 MVPP2_TAG_TYPE_VLAN = 4,
517 MVPP2_TAG_TYPE_LAST = 5
518};
519
520/* Parser constants */
521#define MVPP2_PRS_TCAM_SRAM_SIZE 256
522#define MVPP2_PRS_TCAM_WORDS 6
523#define MVPP2_PRS_SRAM_WORDS 4
524#define MVPP2_PRS_FLOW_ID_SIZE 64
525#define MVPP2_PRS_FLOW_ID_MASK 0x3f
526#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
527#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
528#define MVPP2_PRS_IPV4_HEAD 0x40
529#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
530#define MVPP2_PRS_IPV4_MC 0xe0
531#define MVPP2_PRS_IPV4_MC_MASK 0xf0
532#define MVPP2_PRS_IPV4_BC_MASK 0xff
533#define MVPP2_PRS_IPV4_IHL 0x5
534#define MVPP2_PRS_IPV4_IHL_MASK 0xf
535#define MVPP2_PRS_IPV6_MC 0xff
536#define MVPP2_PRS_IPV6_MC_MASK 0xff
537#define MVPP2_PRS_IPV6_HOP_MASK 0xff
538#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
539#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
540#define MVPP2_PRS_DBL_VLANS_MAX 100
541
542/* Tcam structure:
543 * - lookup ID - 4 bits
544 * - port ID - 1 byte
545 * - additional information - 1 byte
546 * - header data - 8 bytes
547 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
548 */
549#define MVPP2_PRS_AI_BITS 8
550#define MVPP2_PRS_PORT_MASK 0xff
551#define MVPP2_PRS_LU_MASK 0xf
552#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
553 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
554#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
555 (((offs) * 2) - ((offs) % 2) + 2)
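/*
 * Worked example of the byte swizzle above: data bytes 0 and 1 land in bytes
 * 0-1 of TCAM word 0 with their enable masks in bytes 2-3 of the same word;
 * data bytes 2 and 3 land in bytes 4-5 (word 1) with enables in bytes 6-7,
 * and so on, i.e. each 32-bit TCAM word carries two data bytes plus their
 * per-byte enable masks.
 */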
556#define MVPP2_PRS_TCAM_AI_BYTE 16
557#define MVPP2_PRS_TCAM_PORT_BYTE 17
558#define MVPP2_PRS_TCAM_LU_BYTE 20
559#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
560#define MVPP2_PRS_TCAM_INV_WORD 5
561/* Tcam entries ID */
562#define MVPP2_PE_DROP_ALL 0
563#define MVPP2_PE_FIRST_FREE_TID 1
564#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
565#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
566#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
567#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
568#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
569#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
570#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
571#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
572#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
573#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
574#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
575#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
576#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
577#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
578#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
579#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
580#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
581#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
582#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
583#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
584#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
585#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
586#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
587#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
588#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
589
590/* Sram structure
591 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
592 */
593#define MVPP2_PRS_SRAM_RI_OFFS 0
594#define MVPP2_PRS_SRAM_RI_WORD 0
595#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
596#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
597#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
598#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
599#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
600#define MVPP2_PRS_SRAM_UDF_OFFS 73
601#define MVPP2_PRS_SRAM_UDF_BITS 8
602#define MVPP2_PRS_SRAM_UDF_MASK 0xff
603#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
604#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
605#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
606#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
607#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
608#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
609#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
610#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
611#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
612#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
613#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
614#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
615#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
616#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
617#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
618#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
619#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
620#define MVPP2_PRS_SRAM_AI_OFFS 90
621#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
622#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
623#define MVPP2_PRS_SRAM_AI_MASK 0xff
624#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
625#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
626#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
627#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
628
629/* Sram result info bits assignment */
630#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
631#define MVPP2_PRS_RI_DSA_MASK 0x2
632#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
633#define MVPP2_PRS_RI_VLAN_NONE 0x0
634#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
635#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
636#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
637#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
638#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
639#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
640#define MVPP2_PRS_RI_L2_UCAST 0x0
641#define MVPP2_PRS_RI_L2_MCAST BIT(9)
642#define MVPP2_PRS_RI_L2_BCAST BIT(10)
643#define MVPP2_PRS_RI_PPPOE_MASK 0x800
644#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
645#define MVPP2_PRS_RI_L3_UN 0x0
646#define MVPP2_PRS_RI_L3_IP4 BIT(12)
647#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
648#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
649#define MVPP2_PRS_RI_L3_IP6 BIT(14)
650#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
651#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
652#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
653#define MVPP2_PRS_RI_L3_UCAST 0x0
654#define MVPP2_PRS_RI_L3_MCAST BIT(15)
655#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
656#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
657#define MVPP2_PRS_RI_UDF3_MASK 0x300000
658#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
659#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
660#define MVPP2_PRS_RI_L4_TCP BIT(22)
661#define MVPP2_PRS_RI_L4_UDP BIT(23)
662#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
663#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
664#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
665#define MVPP2_PRS_RI_DROP_MASK 0x80000000
666
667/* Sram additional info bits assignment */
668#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
669#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
670#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
671#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
672#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
673#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
674#define MVPP2_PRS_SINGLE_VLAN_AI 0
675#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
676
677/* DSA/EDSA type */
678#define MVPP2_PRS_TAGGED true
679#define MVPP2_PRS_UNTAGGED false
680#define MVPP2_PRS_EDSA true
681#define MVPP2_PRS_DSA false
682
683/* MAC entries, shadow udf */
684enum mvpp2_prs_udf {
685 MVPP2_PRS_UDF_MAC_DEF,
686 MVPP2_PRS_UDF_MAC_RANGE,
687 MVPP2_PRS_UDF_L2_DEF,
688 MVPP2_PRS_UDF_L2_DEF_COPY,
689 MVPP2_PRS_UDF_L2_USER,
690};
691
692/* Lookup ID */
693enum mvpp2_prs_lookup {
694 MVPP2_PRS_LU_MH,
695 MVPP2_PRS_LU_MAC,
696 MVPP2_PRS_LU_DSA,
697 MVPP2_PRS_LU_VLAN,
698 MVPP2_PRS_LU_L2,
699 MVPP2_PRS_LU_PPPOE,
700 MVPP2_PRS_LU_IP4,
701 MVPP2_PRS_LU_IP6,
702 MVPP2_PRS_LU_FLOWS,
703 MVPP2_PRS_LU_LAST,
704};
705
706/* L3 cast enum */
707enum mvpp2_prs_l3_cast {
708 MVPP2_PRS_L3_UNI_CAST,
709 MVPP2_PRS_L3_MULTI_CAST,
710 MVPP2_PRS_L3_BROAD_CAST
711};
712
713/* Classifier constants */
714#define MVPP2_CLS_FLOWS_TBL_SIZE 512
715#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
716#define MVPP2_CLS_LKP_TBL_SIZE 64
717
718/* BM constants */
719#define MVPP2_BM_POOLS_NUM 1
720#define MVPP2_BM_LONG_BUF_NUM 16
721#define MVPP2_BM_SHORT_BUF_NUM 16
722#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
723#define MVPP2_BM_POOL_PTR_ALIGN 128
724#define MVPP2_BM_SWF_LONG_POOL(port) 0
725
726/* BM cookie (32 bits) definition */
727#define MVPP2_BM_COOKIE_POOL_OFFS 8
728#define MVPP2_BM_COOKIE_CPU_OFFS 24
729
730/* BM short pool packet size
731 * These values ensure that for SWF the total number
732 * of bytes allocated for each buffer will be 512
733 */
734#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
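/*
 * With NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0 this evaluates to
 * MVPP2_BM_SHORT_PKT_SIZE = 512 - 32 = 480 bytes of packet data per
 * short-pool buffer.
 */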
735
736enum mvpp2_bm_type {
737 MVPP2_BM_FREE,
738 MVPP2_BM_SWF_LONG,
739 MVPP2_BM_SWF_SHORT
740};
741
742/* Definitions */
743
744/* Shared Packet Processor resources */
745struct mvpp2 {
746 /* Shared registers' base addresses */
747 void __iomem *base;
748 void __iomem *lms_base;
749 void __iomem *iface_base;
750 void __iomem *mdio_base;
751
752 /* List of pointers to port structures */
753 struct mvpp2_port **port_list;
754
755 /* Aggregated TXQs */
756 struct mvpp2_tx_queue *aggr_txqs;
757
758 /* BM pools */
759 struct mvpp2_bm_pool *bm_pools;
760
761 /* PRS shadow table */
762 struct mvpp2_prs_shadow *prs_shadow;
763 /* PRS auxiliary table for double vlan entries control */
764 bool *prs_double_vlans;
765
766 /* Tclk value */
767 u32 tclk;
768
769 /* HW version */
770 enum { MVPP21, MVPP22 } hw_version;
771
772 /* Maximum number of RXQs per port */
773 unsigned int max_port_rxqs;
774
775 struct mii_dev *bus;
776
777 int probe_done;
778};
779
780struct mvpp2_pcpu_stats {
781 u64 rx_packets;
782 u64 rx_bytes;
783 u64 tx_packets;
784 u64 tx_bytes;
785};
786
787struct mvpp2_port {
788 u8 id;
789
790 /* Index of the port from the "group of ports" complex point
791 * of view
792 */
793 int gop_id;
794
795 int irq;
796
797 struct mvpp2 *priv;
798
799 /* Per-port registers' base address */
800 void __iomem *base;
801
802 struct mvpp2_rx_queue **rxqs;
803 struct mvpp2_tx_queue **txqs;
804
805 int pkt_size;
806
807 u32 pending_cause_rx;
808
809 /* Per-CPU port control */
810 struct mvpp2_port_pcpu __percpu *pcpu;
811
812 /* Flags */
813 unsigned long flags;
814
815 u16 tx_ring_size;
816 u16 rx_ring_size;
817 struct mvpp2_pcpu_stats __percpu *stats;
818
819 struct phy_device *phy_dev;
820 phy_interface_t phy_interface;
821 int phy_node;
822 int phyaddr;
823 int init;
824 unsigned int link;
825 unsigned int duplex;
826 unsigned int speed;
827
828 struct mvpp2_bm_pool *pool_long;
829 struct mvpp2_bm_pool *pool_short;
830
831 /* Index of first port's physical RXQ */
832 u8 first_rxq;
833
834 u8 dev_addr[ETH_ALEN];
835};
836
837/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
838 * layout of the transmit and reception DMA descriptors, and their
839 * layout is therefore defined by the hardware design
840 */
841
842#define MVPP2_TXD_L3_OFF_SHIFT 0
843#define MVPP2_TXD_IP_HLEN_SHIFT 8
844#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
845#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
846#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
847#define MVPP2_TXD_PADDING_DISABLE BIT(23)
848#define MVPP2_TXD_L4_UDP BIT(24)
849#define MVPP2_TXD_L3_IP6 BIT(26)
850#define MVPP2_TXD_L_DESC BIT(28)
851#define MVPP2_TXD_F_DESC BIT(29)
852
853#define MVPP2_RXD_ERR_SUMMARY BIT(15)
854#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
855#define MVPP2_RXD_ERR_CRC 0x0
856#define MVPP2_RXD_ERR_OVERRUN BIT(13)
857#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
858#define MVPP2_RXD_BM_POOL_ID_OFFS 16
859#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
860#define MVPP2_RXD_HWF_SYNC BIT(21)
861#define MVPP2_RXD_L4_CSUM_OK BIT(22)
862#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
863#define MVPP2_RXD_L4_TCP BIT(25)
864#define MVPP2_RXD_L4_UDP BIT(26)
865#define MVPP2_RXD_L3_IP4 BIT(28)
866#define MVPP2_RXD_L3_IP6 BIT(30)
867#define MVPP2_RXD_BUF_HDR BIT(31)
868
869/* HW TX descriptor for PPv2.1 */
870struct mvpp21_tx_desc {
871 u32 command; /* Options used by HW for packet transmitting.*/
872 u8 packet_offset; /* the offset from the buffer beginning */
873 u8 phys_txq; /* destination queue ID */
874 u16 data_size; /* data size of transmitted packet in bytes */
875 u32 buf_dma_addr; /* physical addr of transmitted buffer */
876 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
877 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
878 u32 reserved2; /* reserved (for future use) */
879};
880
881/* HW RX descriptor for PPv2.1 */
882struct mvpp21_rx_desc {
883 u32 status; /* info about received packet */
884 u16 reserved1; /* parser_info (for future use, PnC) */
885 u16 data_size; /* size of received packet in bytes */
886 u32 buf_dma_addr; /* physical address of the buffer */
887 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
888 u16 reserved2; /* gem_port_id (for future use, PON) */
889 u16 reserved3; /* csum_l4 (for future use, PnC) */
890 u8 reserved4; /* bm_qset (for future use, BM) */
891 u8 reserved5;
892 u16 reserved6; /* classify_info (for future use, PnC) */
893 u32 reserved7; /* flow_id (for future use, PnC) */
894 u32 reserved8;
895};
896
897/* HW TX descriptor for PPv2.2 */
898struct mvpp22_tx_desc {
899 u32 command;
900 u8 packet_offset;
901 u8 phys_txq;
902 u16 data_size;
903 u64 reserved1;
904 u64 buf_dma_addr_ptp;
905 u64 buf_cookie_misc;
906};
907
908/* HW RX descriptor for PPv2.2 */
909struct mvpp22_rx_desc {
910 u32 status;
911 u16 reserved1;
912 u16 data_size;
913 u32 reserved2;
914 u32 reserved3;
915 u64 buf_dma_addr_key_hash;
916 u64 buf_cookie_misc;
917};
918
919/* Opaque type used by the driver to manipulate the HW TX and RX
920 * descriptors
921 */
922struct mvpp2_tx_desc {
923 union {
924 struct mvpp21_tx_desc pp21;
925 struct mvpp22_tx_desc pp22;
926 };
927};
928
929struct mvpp2_rx_desc {
930 union {
931 struct mvpp21_rx_desc pp21;
932 struct mvpp22_rx_desc pp22;
933 };
934};
935
936/* Per-CPU Tx queue control */
937struct mvpp2_txq_pcpu {
938 int cpu;
939
940 /* Number of Tx DMA descriptors in the descriptor ring */
941 int size;
942
943 /* Number of currently used Tx DMA descriptor in the
944 * descriptor ring
945 */
946 int count;
947
948 /* Number of Tx DMA descriptors reserved for each CPU */
949 int reserved_num;
950
951 /* Index of last TX DMA descriptor that was inserted */
952 int txq_put_index;
953
954 /* Index of the TX DMA descriptor to be cleaned up */
955 int txq_get_index;
956};
957
958struct mvpp2_tx_queue {
959 /* Physical number of this Tx queue */
960 u8 id;
961
962 /* Logical number of this Tx queue */
963 u8 log_id;
964
965 /* Number of Tx DMA descriptors in the descriptor ring */
966 int size;
967
968 /* Number of currently used Tx DMA descriptor in the descriptor ring */
969 int count;
970
971 /* Per-CPU control of physical Tx queues */
972 struct mvpp2_txq_pcpu __percpu *pcpu;
973
974 u32 done_pkts_coal;
975
976 /* Virtual address of the Tx DMA descriptors array */
977 struct mvpp2_tx_desc *descs;
978
979 /* DMA address of the Tx DMA descriptors array */
980 dma_addr_t descs_dma;
981
982 /* Index of the last Tx DMA descriptor */
983 int last_desc;
984
985 /* Index of the next Tx DMA descriptor to process */
986 int next_desc_to_proc;
987};
988
989struct mvpp2_rx_queue {
990 /* RX queue number, in the range 0-31 for physical RXQs */
991 u8 id;
992
993 /* Num of rx descriptors in the rx descriptor ring */
994 int size;
995
996 u32 pkts_coal;
997 u32 time_coal;
998
999 /* Virtual address of the RX DMA descriptors array */
1000 struct mvpp2_rx_desc *descs;
1001
1002 /* DMA address of the RX DMA descriptors array */
1003 dma_addr_t descs_dma;
1004
1005 /* Index of the last RX DMA descriptor */
1006 int last_desc;
1007
1008 /* Index of the next RX DMA descriptor to process */
1009 int next_desc_to_proc;
1010
1011 /* ID of port to which physical RXQ is mapped */
1012 int port;
1013
1014 /* Port's logic RXQ number to which physical RXQ is mapped */
1015 int logic_rxq;
1016};
1017
1018union mvpp2_prs_tcam_entry {
1019 u32 word[MVPP2_PRS_TCAM_WORDS];
1020 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1021};
1022
1023union mvpp2_prs_sram_entry {
1024 u32 word[MVPP2_PRS_SRAM_WORDS];
1025 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1026};
1027
1028struct mvpp2_prs_entry {
1029 u32 index;
1030 union mvpp2_prs_tcam_entry tcam;
1031 union mvpp2_prs_sram_entry sram;
1032};
1033
1034struct mvpp2_prs_shadow {
1035 bool valid;
1036 bool finish;
1037
1038 /* Lookup ID */
1039 int lu;
1040
1041 /* User defined offset */
1042 int udf;
1043
1044 /* Result info */
1045 u32 ri;
1046 u32 ri_mask;
1047};
1048
1049struct mvpp2_cls_flow_entry {
1050 u32 index;
1051 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1052};
1053
1054struct mvpp2_cls_lookup_entry {
1055 u32 lkpid;
1056 u32 way;
1057 u32 data;
1058};
1059
1060struct mvpp2_bm_pool {
1061 /* Pool number in the range 0-7 */
1062 int id;
1063 enum mvpp2_bm_type type;
1064
1065 /* Buffer Pointers Pool External (BPPE) size */
1066 int size;
1067 /* Number of buffers for this pool */
1068 int buf_num;
1069 /* Pool buffer size */
1070 int buf_size;
1071 /* Packet size */
1072 int pkt_size;
1073
1074 /* BPPE virtual base address */
1075 unsigned long *virt_addr;
1076 /* BPPE DMA base address */
1077 dma_addr_t dma_addr;
1078
1079 /* Ports using BM pool */
1080 u32 port_map;
1081
1082 /* Occupied buffers indicator */
1083 int in_use_thresh;
1084};
1085
1086/* Static declarations */
1087
1088/* Number of RXQs used by single port */
1089static int rxq_number = MVPP2_DEFAULT_RXQ;
1090/* Number of TXQs used by single port */
1091static int txq_number = MVPP2_DEFAULT_TXQ;
1092
1093#define MVPP2_DRIVER_NAME "mvpp2"
1094#define MVPP2_DRIVER_VERSION "1.0"
1095
1096/*
1097 * U-Boot internal data, mostly uncached buffers for descriptors and data
1098 */
1099struct buffer_location {
1100 struct mvpp2_tx_desc *aggr_tx_descs;
1101 struct mvpp2_tx_desc *tx_descs;
1102 struct mvpp2_rx_desc *rx_descs;
1103 unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
1104 unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
1105 int first_rxq;
1106};
1107
1108/*
1109 * All 4 interfaces use the same global buffer, since only one interface
1110 * can be enabled at once
1111 */
1112static struct buffer_location buffer_loc;
1113
1114/*
1115 * Page table entries are set to 1MB, or multiples of 1MB
1116 * (not < 1MB). The driver uses few buffer descriptors, so 1MB of BD space is enough.
1117 */
1118#define BD_SPACE (1 << 20)
1119
1120/* Utility/helper methods */
1121
1122static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1123{
1124 writel(data, priv->base + offset);
1125}
1126
1127static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1128{
1129 return readl(priv->base + offset);
1130}
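/*
 * Illustrative only: many queue registers are accessed indirectly by first
 * latching the queue number, e.g. (sketch, not taken verbatim from this file):
 *
 *	mvpp2_write(priv, MVPP2_RXQ_NUM_REG, rxq->id);
 *	val = mvpp2_read(priv, MVPP2_RXQ_DESC_ADDR_REG);
 */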
1131
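/*
 * The mvpp2_txdesc_*() / mvpp2_rxdesc_*() accessors below hide the difference
 * between the 32-bit PPv2.1 descriptor fields and the PPv2.2 layout, where
 * DMA addresses and cookies occupy the low 41 bits of 64-bit fields (hence
 * the GENMASK_ULL(40, 0) masking).
 */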
1132static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1133 struct mvpp2_tx_desc *tx_desc,
1134 dma_addr_t dma_addr)
1135{
1136 if (port->priv->hw_version == MVPP21) {
1137 tx_desc->pp21.buf_dma_addr = dma_addr;
1138 } else {
1139 u64 val = (u64)dma_addr;
1140
1141 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1142 tx_desc->pp22.buf_dma_addr_ptp |= val;
1143 }
1144}
1145
1146static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1147 struct mvpp2_tx_desc *tx_desc,
1148 size_t size)
1149{
1150 if (port->priv->hw_version == MVPP21)
1151 tx_desc->pp21.data_size = size;
1152 else
1153 tx_desc->pp22.data_size = size;
1154}
1155
1156static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1157 struct mvpp2_tx_desc *tx_desc,
1158 unsigned int txq)
1159{
1160 if (port->priv->hw_version == MVPP21)
1161 tx_desc->pp21.phys_txq = txq;
1162 else
1163 tx_desc->pp22.phys_txq = txq;
1164}
1165
1166static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1167 struct mvpp2_tx_desc *tx_desc,
1168 unsigned int command)
1169{
1170 if (port->priv->hw_version == MVPP21)
1171 tx_desc->pp21.command = command;
1172 else
1173 tx_desc->pp22.command = command;
1174}
1175
1176static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1177 struct mvpp2_tx_desc *tx_desc,
1178 unsigned int offset)
1179{
1180 if (port->priv->hw_version == MVPP21)
1181 tx_desc->pp21.packet_offset = offset;
1182 else
1183 tx_desc->pp22.packet_offset = offset;
1184}
1185
1186static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1187 struct mvpp2_rx_desc *rx_desc)
1188{
1189 if (port->priv->hw_version == MVPP21)
1190 return rx_desc->pp21.buf_dma_addr;
1191 else
1192 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
1193}
1194
1195static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1196 struct mvpp2_rx_desc *rx_desc)
1197{
1198 if (port->priv->hw_version == MVPP21)
1199 return rx_desc->pp21.buf_cookie;
1200 else
1201 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
1202}
1203
1204static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1205 struct mvpp2_rx_desc *rx_desc)
1206{
1207 if (port->priv->hw_version == MVPP21)
1208 return rx_desc->pp21.data_size;
1209 else
1210 return rx_desc->pp22.data_size;
1211}
1212
1213static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1214 struct mvpp2_rx_desc *rx_desc)
1215{
1216 if (port->priv->hw_version == MVPP21)
1217 return rx_desc->pp21.status;
1218 else
1219 return rx_desc->pp22.status;
1220}
1221
1222static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1223{
1224 txq_pcpu->txq_get_index++;
1225 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1226 txq_pcpu->txq_get_index = 0;
1227}
1228
1229/* Get number of physical egress port */
1230static inline int mvpp2_egress_port(struct mvpp2_port *port)
1231{
1232 return MVPP2_MAX_TCONT + port->id;
1233}
1234
1235/* Get number of physical TXQ */
1236static inline int mvpp2_txq_phys(int port, int txq)
1237{
1238 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1239}
1240
1241/* Parser configuration routines */
1242
1243/* Update parser tcam and sram hw entries */
1244static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1245{
1246 int i;
1247
1248 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1249 return -EINVAL;
1250
1251 /* Clear entry invalidation bit */
1252 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1253
1254 /* Write tcam index - indirect access */
1255 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1256 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1257 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1258
1259 /* Write sram index - indirect access */
1260 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1261 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1262 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1263
1264 return 0;
1265}
1266
1267/* Read tcam entry from hw */
1268static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1269{
1270 int i;
1271
1272 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1273 return -EINVAL;
1274
1275 /* Write tcam index - indirect access */
1276 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1277
1278 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1279 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1280 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1281 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1282
1283 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1284 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1285
1286 /* Write sram index - indirect access */
1287 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1288 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1289 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1290
1291 return 0;
1292}
1293
1294/* Invalidate tcam hw entry */
1295static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1296{
1297 /* Write index - indirect access */
1298 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1299 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1300 MVPP2_PRS_TCAM_INV_MASK);
1301}
1302
1303/* Enable shadow table entry and set its lookup ID */
1304static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1305{
1306 priv->prs_shadow[index].valid = true;
1307 priv->prs_shadow[index].lu = lu;
1308}
1309
1310/* Update ri fields in shadow table entry */
1311static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1312 unsigned int ri, unsigned int ri_mask)
1313{
1314 priv->prs_shadow[index].ri_mask = ri_mask;
1315 priv->prs_shadow[index].ri = ri;
1316}
1317
1318/* Update lookup field in tcam sw entry */
1319static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1320{
1321 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1322
1323 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1324 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1325}
1326
1327/* Update mask for single port in tcam sw entry */
1328static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1329 unsigned int port, bool add)
1330{
1331 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1332
1333 if (add)
1334 pe->tcam.byte[enable_off] &= ~(1 << port);
1335 else
1336 pe->tcam.byte[enable_off] |= 1 << port;
1337}
1338
1339/* Update port map in tcam sw entry */
1340static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1341 unsigned int ports)
1342{
1343 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1344 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1345
1346 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1347 pe->tcam.byte[enable_off] &= ~port_mask;
1348 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1349}
1350
1351/* Obtain port map from tcam sw entry */
1352static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1353{
1354 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1355
1356 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1357}
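/*
 * Note: the TCAM port byte uses inverted polarity - a cleared enable bit
 * means the port participates in the lookup - which is why the port map is
 * complemented both when written (port_map_set) and when read back
 * (port_map_get).
 */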
1358
1359/* Set byte of data and its enable bits in tcam sw entry */
1360static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1361 unsigned int offs, unsigned char byte,
1362 unsigned char enable)
1363{
1364 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1365 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1366}
1367
1368/* Get byte of data and its enable bits from tcam sw entry */
1369static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1370 unsigned int offs, unsigned char *byte,
1371 unsigned char *enable)
1372{
1373 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1374 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1375}
1376
1377/* Set ethertype in tcam sw entry */
1378static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1379 unsigned short ethertype)
1380{
1381 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1382 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1383}
1384
1385/* Set bits in sram sw entry */
1386static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1387 int val)
1388{
1389 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1390}
1391
1392/* Clear bits in sram sw entry */
1393static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1394 int val)
1395{
1396 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1397}
1398
1399/* Update ri bits in sram sw entry */
1400static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1401 unsigned int bits, unsigned int mask)
1402{
1403 unsigned int i;
1404
1405 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1406 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1407
1408 if (!(mask & BIT(i)))
1409 continue;
1410
1411 if (bits & BIT(i))
1412 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1413 else
1414 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1415
1416 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1417 }
1418}
1419
1420/* Update ai bits in sram sw entry */
1421static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1422 unsigned int bits, unsigned int mask)
1423{
1424 unsigned int i;
1425 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1426
1427 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1428
1429 if (!(mask & BIT(i)))
1430 continue;
1431
1432 if (bits & BIT(i))
1433 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1434 else
1435 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1436
1437 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1438 }
1439}
1440
1441/* Read ai bits from sram sw entry */
1442static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1443{
1444 u8 bits;
1445 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1446 int ai_en_off = ai_off + 1;
1447 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1448
1449 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1450 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1451
1452 return bits;
1453}
1454
1455/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1456 * lookup iteration
1457 */
1458static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1459 unsigned int lu)
1460{
1461 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1462
1463 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1464 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1465 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1466}
1467
1468/* In the sram sw entry set sign and value of the next lookup offset
1469 * and the offset value generated to the classifier
1470 */
1471static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1472 unsigned int op)
1473{
1474 /* Set sign */
1475 if (shift < 0) {
1476 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1477 shift = 0 - shift;
1478 } else {
1479 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1480 }
1481
1482 /* Set value */
1483 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1484 (unsigned char)shift;
1485
1486 /* Reset and set operation */
1487 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1488 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1489 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1490
1491 /* Set base offset as current */
1492 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1493}
1494
1495/* In the sram sw entry set sign and value of the user defined offset
1496 * generated to the classifier
1497 */
1498static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1499 unsigned int type, int offset,
1500 unsigned int op)
1501{
1502 /* Set sign */
1503 if (offset < 0) {
1504 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1505 offset = 0 - offset;
1506 } else {
1507 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1508 }
1509
1510 /* Set value */
1511 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1512 MVPP2_PRS_SRAM_UDF_MASK);
1513 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1514 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1515 MVPP2_PRS_SRAM_UDF_BITS)] &=
1516 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1517 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1518 MVPP2_PRS_SRAM_UDF_BITS)] |=
1519 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1520
1521 /* Set offset type */
1522 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1523 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1524 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1525
1526 /* Set offset operation */
1527 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1528 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1529 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1530
1531 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1532 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1533 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1534 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1535
1536 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1537 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1538 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1539
1540 /* Set base offset as current */
1541 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1542}
1543
1544/* Find parser flow entry */
1545static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1546{
1547 struct mvpp2_prs_entry *pe;
1548 int tid;
1549
1550 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1551 if (!pe)
1552 return NULL;
1553 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1554
1555 /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1556 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1557 u8 bits;
1558
1559 if (!priv->prs_shadow[tid].valid ||
1560 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1561 continue;
1562
1563 pe->index = tid;
1564 mvpp2_prs_hw_read(priv, pe);
1565 bits = mvpp2_prs_sram_ai_get(pe);
1566
1567		/* Sram stores the classification lookup ID in AI bits [5:0] */
1568 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1569 return pe;
1570 }
1571 kfree(pe);
1572
1573 return NULL;
1574}
1575
1576/* Return first free tcam index, seeking from start to end */
1577static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1578 unsigned char end)
1579{
1580 int tid;
1581
1582 if (start > end)
1583 swap(start, end);
1584
1585 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1586 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1587
1588 for (tid = start; tid <= end; tid++) {
1589 if (!priv->prs_shadow[tid].valid)
1590 return tid;
1591 }
1592
1593 return -EINVAL;
1594}
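/* Typical usage (see mvpp2_prs_etype_init() and mvpp2_prs_def_flow()):
 *
 *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *					MVPP2_PE_LAST_FREE_TID);
 *
 * Passing the bounds in reverse order scans the same range, since start and
 * end are swapped internally; -EINVAL is returned when no free entry is left.
 */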
1595
1596/* Enable/disable dropping all mac da's */
1597static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1598{
1599 struct mvpp2_prs_entry pe;
1600
1601 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1602		/* Entry exists - update port only */
1603 pe.index = MVPP2_PE_DROP_ALL;
1604 mvpp2_prs_hw_read(priv, &pe);
1605 } else {
1606 /* Entry doesn't exist - create new */
1607 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1608 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1609 pe.index = MVPP2_PE_DROP_ALL;
1610
1611 /* Non-promiscuous mode for all ports - DROP unknown packets */
1612 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1613 MVPP2_PRS_RI_DROP_MASK);
1614
1615 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1616 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1617
1618 /* Update shadow table */
1619 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1620
1621 /* Mask all ports */
1622 mvpp2_prs_tcam_port_map_set(&pe, 0);
1623 }
1624
1625 /* Update port mask */
1626 mvpp2_prs_tcam_port_set(&pe, port, add);
1627
1628 mvpp2_prs_hw_write(priv, &pe);
1629}
1630
1631/* Set port to promiscuous mode */
1632static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1633{
1634 struct mvpp2_prs_entry pe;
1635
1636 /* Promiscuous mode - Accept unknown packets */
1637
1638 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1639		/* Entry exists - update port only */
1640 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1641 mvpp2_prs_hw_read(priv, &pe);
1642 } else {
1643 /* Entry doesn't exist - create new */
1644 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1645 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1646 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1647
1648 /* Continue - set next lookup */
1649 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1650
1651 /* Set result info bits */
1652 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1653 MVPP2_PRS_RI_L2_CAST_MASK);
1654
1655 /* Shift to ethertype */
1656 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1657 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1658
1659 /* Mask all ports */
1660 mvpp2_prs_tcam_port_map_set(&pe, 0);
1661
1662 /* Update shadow table */
1663 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1664 }
1665
1666 /* Update port mask */
1667 mvpp2_prs_tcam_port_set(&pe, port, add);
1668
1669 mvpp2_prs_hw_write(priv, &pe);
1670}
1671
1672/* Accept multicast */
1673static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1674 bool add)
1675{
1676 struct mvpp2_prs_entry pe;
1677 unsigned char da_mc;
1678
1679	/* The first byte of an Ethernet multicast address is
1680	 * 0x01 for IPv4 and 0x33 for IPv6
1681 */
1682 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1683
1684 if (priv->prs_shadow[index].valid) {
1685		/* Entry exists - update port only */
1686 pe.index = index;
1687 mvpp2_prs_hw_read(priv, &pe);
1688 } else {
1689 /* Entry doesn't exist - create new */
1690 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1691 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1692 pe.index = index;
1693
1694 /* Continue - set next lookup */
1695 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1696
1697 /* Set result info bits */
1698 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1699 MVPP2_PRS_RI_L2_CAST_MASK);
1700
1701 /* Update tcam entry data first byte */
1702 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1703
1704 /* Shift to ethertype */
1705 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1706 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1707
1708 /* Mask all ports */
1709 mvpp2_prs_tcam_port_map_set(&pe, 0);
1710
1711 /* Update shadow table */
1712 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1713 }
1714
1715 /* Update port mask */
1716 mvpp2_prs_tcam_port_set(&pe, port, add);
1717
1718 mvpp2_prs_hw_write(priv, &pe);
1719}
1720
1721/* Parser per-port initialization */
1722static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1723 int lu_max, int offset)
1724{
1725 u32 val;
1726
1727 /* Set lookup ID */
1728 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1729 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1730 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1731 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1732
1733 /* Set maximum number of loops for packet received from port */
1734 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1735 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1736 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1737 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1738
1739 /* Set initial offset for packet header extraction for the first
1740 * searching loop
1741 */
1742 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1743 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1744 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1745 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1746}
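/* mvpp2_prs_default_init() below calls this once per port as
 * mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 * MVPP2_PRS_PORT_LU_MAX, 0), i.e. every port starts parsing at the
 * Marvell Header lookup with a zero initial header offset.
 */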
1747
1748/* Default flow entries initialization for all ports */
1749static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1750{
1751 struct mvpp2_prs_entry pe;
1752 int port;
1753
1754 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1755 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1756 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1757 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1758
1759 /* Mask all ports */
1760 mvpp2_prs_tcam_port_map_set(&pe, 0);
1761
1762		/* Set flow ID */
1763 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1764 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1765
1766 /* Update shadow table and hw entry */
1767 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1768 mvpp2_prs_hw_write(priv, &pe);
1769 }
1770}
1771
1772/* Set default entry for Marvell Header field */
1773static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1774{
1775 struct mvpp2_prs_entry pe;
1776
1777 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1778
1779 pe.index = MVPP2_PE_MH_DEFAULT;
1780 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1781 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1782 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1783 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1784
1785 /* Unmask all ports */
1786 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1787
1788 /* Update shadow table and hw entry */
1789 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1790 mvpp2_prs_hw_write(priv, &pe);
1791}
1792
1793/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1794 * multicast MAC addresses
1795 */
1796static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1797{
1798 struct mvpp2_prs_entry pe;
1799
1800 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1801
1802 /* Non-promiscuous mode for all ports - DROP unknown packets */
1803 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1804 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1805
1806 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1807 MVPP2_PRS_RI_DROP_MASK);
1808 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1809 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1810
1811 /* Unmask all ports */
1812 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1813
1814 /* Update shadow table and hw entry */
1815 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1816 mvpp2_prs_hw_write(priv, &pe);
1817
1818	/* Placeholders only - no ports */
1819 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1820 mvpp2_prs_mac_promisc_set(priv, 0, false);
1821 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1822 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1823}
1824
1825/* Match basic ethertypes */
1826static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1827{
1828 struct mvpp2_prs_entry pe;
1829 int tid;
1830
1831 /* Ethertype: PPPoE */
1832 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1833 MVPP2_PE_LAST_FREE_TID);
1834 if (tid < 0)
1835 return tid;
1836
1837 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1838 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1839 pe.index = tid;
1840
1841 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1842
1843 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1844 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1845 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1846 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1847 MVPP2_PRS_RI_PPPOE_MASK);
1848
1849 /* Update shadow table and hw entry */
1850 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1851 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1852 priv->prs_shadow[pe.index].finish = false;
1853 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1854 MVPP2_PRS_RI_PPPOE_MASK);
1855 mvpp2_prs_hw_write(priv, &pe);
1856
1857 /* Ethertype: ARP */
1858 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1859 MVPP2_PE_LAST_FREE_TID);
1860 if (tid < 0)
1861 return tid;
1862
1863 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1864 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1865 pe.index = tid;
1866
1867 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
1868
1869	/* Generate flow in the next iteration */
1870 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1871 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1872 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1873 MVPP2_PRS_RI_L3_PROTO_MASK);
1874 /* Set L3 offset */
1875 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1876 MVPP2_ETH_TYPE_LEN,
1877 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1878
1879 /* Update shadow table and hw entry */
1880 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1881 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1882 priv->prs_shadow[pe.index].finish = true;
1883 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1884 MVPP2_PRS_RI_L3_PROTO_MASK);
1885 mvpp2_prs_hw_write(priv, &pe);
1886
1887 /* Ethertype: LBTD */
1888 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1889 MVPP2_PE_LAST_FREE_TID);
1890 if (tid < 0)
1891 return tid;
1892
1893 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1894 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1895 pe.index = tid;
1896
1897 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1898
1899	/* Generate flow in the next iteration */
1900 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1901 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1902 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1903 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1904 MVPP2_PRS_RI_CPU_CODE_MASK |
1905 MVPP2_PRS_RI_UDF3_MASK);
1906 /* Set L3 offset */
1907 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1908 MVPP2_ETH_TYPE_LEN,
1909 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1910
1911 /* Update shadow table and hw entry */
1912 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1913 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1914 priv->prs_shadow[pe.index].finish = true;
1915 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1916 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1917 MVPP2_PRS_RI_CPU_CODE_MASK |
1918 MVPP2_PRS_RI_UDF3_MASK);
1919 mvpp2_prs_hw_write(priv, &pe);
1920
1921 /* Ethertype: IPv4 without options */
1922 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1923 MVPP2_PE_LAST_FREE_TID);
1924 if (tid < 0)
1925 return tid;
1926
1927 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1928 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1929 pe.index = tid;
1930
1931 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
1932 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1933 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1934 MVPP2_PRS_IPV4_HEAD_MASK |
1935 MVPP2_PRS_IPV4_IHL_MASK);
1936
1937 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1938 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1939 MVPP2_PRS_RI_L3_PROTO_MASK);
1940 /* Skip eth_type + 4 bytes of IP header */
1941 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1942 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1943 /* Set L3 offset */
1944 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1945 MVPP2_ETH_TYPE_LEN,
1946 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1947
1948 /* Update shadow table and hw entry */
1949 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1950 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1951 priv->prs_shadow[pe.index].finish = false;
1952 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1953 MVPP2_PRS_RI_L3_PROTO_MASK);
1954 mvpp2_prs_hw_write(priv, &pe);
1955
1956 /* Ethertype: IPv4 with options */
1957 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1958 MVPP2_PE_LAST_FREE_TID);
1959 if (tid < 0)
1960 return tid;
1961
1962 pe.index = tid;
1963
1964 /* Clear tcam data before updating */
1965 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
1966 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
1967
1968 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1969 MVPP2_PRS_IPV4_HEAD,
1970 MVPP2_PRS_IPV4_HEAD_MASK);
1971
1972 /* Clear ri before updating */
1973 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1974 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1975 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1976 MVPP2_PRS_RI_L3_PROTO_MASK);
1977
1978 /* Update shadow table and hw entry */
1979 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1980 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1981 priv->prs_shadow[pe.index].finish = false;
1982 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1983 MVPP2_PRS_RI_L3_PROTO_MASK);
1984 mvpp2_prs_hw_write(priv, &pe);
1985
1986 /* Ethertype: IPv6 without options */
1987 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1988 MVPP2_PE_LAST_FREE_TID);
1989 if (tid < 0)
1990 return tid;
1991
1992 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1993 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1994 pe.index = tid;
1995
1996 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
1997
1998 /* Skip DIP of IPV6 header */
1999 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2000 MVPP2_MAX_L3_ADDR_SIZE,
2001 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2002 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2003 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2004 MVPP2_PRS_RI_L3_PROTO_MASK);
2005 /* Set L3 offset */
2006 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2007 MVPP2_ETH_TYPE_LEN,
2008 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2009
2010 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2011 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2012 priv->prs_shadow[pe.index].finish = false;
2013 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2014 MVPP2_PRS_RI_L3_PROTO_MASK);
2015 mvpp2_prs_hw_write(priv, &pe);
2016
2017 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2018 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2019 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2020 pe.index = MVPP2_PE_ETH_TYPE_UN;
2021
2022 /* Unmask all ports */
2023 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2024
2025	/* Generate flow in the next iteration */
2026 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2027 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2028 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2029 MVPP2_PRS_RI_L3_PROTO_MASK);
2030	/* Set L3 offset even though the L3 protocol is unknown */
2031 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2032 MVPP2_ETH_TYPE_LEN,
2033 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2034
2035 /* Update shadow table and hw entry */
2036 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2037 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2038 priv->prs_shadow[pe.index].finish = true;
2039 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2040 MVPP2_PRS_RI_L3_PROTO_MASK);
2041 mvpp2_prs_hw_write(priv, &pe);
2042
2043 return 0;
2044}
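/* After mvpp2_prs_etype_init() the MVPP2_PRS_LU_L2 stage recognises PPPoE,
 * ARP, LBTD, IPv4 (with and without options) and IPv6; any other ethertype
 * falls through to MVPP2_PE_ETH_TYPE_UN and is tagged MVPP2_PRS_RI_L3_UN.
 */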
2045
2046/* Parser default initialization */
2047static int mvpp2_prs_default_init(struct udevice *dev,
2048 struct mvpp2 *priv)
2049{
2050 int err, index, i;
2051
2052 /* Enable tcam table */
2053 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2054
2055 /* Clear all tcam and sram entries */
2056 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2057 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2058 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2059 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2060
2061 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2062 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2063 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2064 }
2065
2066 /* Invalidate all tcam entries */
2067 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2068 mvpp2_prs_hw_inv(priv, index);
2069
2070 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2071 sizeof(struct mvpp2_prs_shadow),
2072 GFP_KERNEL);
2073 if (!priv->prs_shadow)
2074 return -ENOMEM;
2075
2076 /* Always start from lookup = 0 */
2077 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2078 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2079 MVPP2_PRS_PORT_LU_MAX, 0);
2080
2081 mvpp2_prs_def_flow_init(priv);
2082
2083 mvpp2_prs_mh_init(priv);
2084
2085 mvpp2_prs_mac_init(priv);
2086
2087 err = mvpp2_prs_etype_init(priv);
2088 if (err)
2089 return err;
2090
2091 return 0;
2092}
2093
2094/* Compare MAC DA with tcam entry data */
2095static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2096 const u8 *da, unsigned char *mask)
2097{
2098 unsigned char tcam_byte, tcam_mask;
2099 int index;
2100
2101 for (index = 0; index < ETH_ALEN; index++) {
2102 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2103 if (tcam_mask != mask[index])
2104 return false;
2105
2106 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2107 return false;
2108 }
2109
2110 return true;
2111}
2112
2113/* Find tcam entry with matched pair <MAC DA, port> */
2114static struct mvpp2_prs_entry *
2115mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2116 unsigned char *mask, int udf_type)
2117{
2118 struct mvpp2_prs_entry *pe;
2119 int tid;
2120
2121 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2122 if (!pe)
2123 return NULL;
2124 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2125
2126	/* Go through all the entries with MVPP2_PRS_LU_MAC */
2127 for (tid = MVPP2_PE_FIRST_FREE_TID;
2128 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2129 unsigned int entry_pmap;
2130
2131 if (!priv->prs_shadow[tid].valid ||
2132 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2133 (priv->prs_shadow[tid].udf != udf_type))
2134 continue;
2135
2136 pe->index = tid;
2137 mvpp2_prs_hw_read(priv, pe);
2138 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2139
2140 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2141 entry_pmap == pmap)
2142 return pe;
2143 }
2144 kfree(pe);
2145
2146 return NULL;
2147}
2148
2149/* Update parser's mac da entry */
2150static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2151 const u8 *da, bool add)
2152{
2153 struct mvpp2_prs_entry *pe;
2154 unsigned int pmap, len, ri;
2155 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2156 int tid;
2157
2158	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2159 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2160 MVPP2_PRS_UDF_MAC_DEF);
2161
2162 /* No such entry */
2163 if (!pe) {
2164 if (!add)
2165 return 0;
2166
2167 /* Create new TCAM entry */
2168		/* Find the first range mac entry */
2169 for (tid = MVPP2_PE_FIRST_FREE_TID;
2170 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2171 if (priv->prs_shadow[tid].valid &&
2172 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2173 (priv->prs_shadow[tid].udf ==
2174 MVPP2_PRS_UDF_MAC_RANGE))
2175 break;
2176
2177		/* Go through all the entries from first to last */
2178 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2179 tid - 1);
2180 if (tid < 0)
2181 return tid;
2182
2183 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2184 if (!pe)
2185			return -ENOMEM;
2186 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2187 pe->index = tid;
2188
2189 /* Mask all ports */
2190 mvpp2_prs_tcam_port_map_set(pe, 0);
2191 }
2192
2193 /* Update port mask */
2194 mvpp2_prs_tcam_port_set(pe, port, add);
2195
2196 /* Invalidate the entry if no ports are left enabled */
2197 pmap = mvpp2_prs_tcam_port_map_get(pe);
2198 if (pmap == 0) {
2199 if (add) {
2200 kfree(pe);
2201 return -1;
2202 }
2203 mvpp2_prs_hw_inv(priv, pe->index);
2204 priv->prs_shadow[pe->index].valid = false;
2205 kfree(pe);
2206 return 0;
2207 }
2208
2209 /* Continue - set next lookup */
2210 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2211
2212 /* Set match on DA */
2213 len = ETH_ALEN;
2214 while (len--)
2215 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2216
2217 /* Set result info bits */
2218 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2219
2220 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2221 MVPP2_PRS_RI_MAC_ME_MASK);
2222 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2223 MVPP2_PRS_RI_MAC_ME_MASK);
2224
2225 /* Shift to ethertype */
2226 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2227 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2228
2229 /* Update shadow table and hw entry */
2230 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2231 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2232 mvpp2_prs_hw_write(priv, pe);
2233
2234 kfree(pe);
2235
2236 return 0;
2237}
2238
2239static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2240{
2241 int err;
2242
2243 /* Remove old parser entry */
2244 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2245 false);
2246 if (err)
2247 return err;
2248
2249 /* Add new parser entry */
2250 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2251 if (err)
2252 return err;
2253
2254 /* Set addr in the device */
2255 memcpy(port->dev_addr, da, ETH_ALEN);
2256
2257 return 0;
2258}
2259
2260/* Set prs flow for the port */
2261static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2262{
2263 struct mvpp2_prs_entry *pe;
2264 int tid;
2265
2266 pe = mvpp2_prs_flow_find(port->priv, port->id);
2267
2268	/* No such entry exists */
2269 if (!pe) {
2270		/* Go through all the entries from last to first */
2271 tid = mvpp2_prs_tcam_first_free(port->priv,
2272 MVPP2_PE_LAST_FREE_TID,
2273 MVPP2_PE_FIRST_FREE_TID);
2274 if (tid < 0)
2275 return tid;
2276
2277 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2278 if (!pe)
2279 return -ENOMEM;
2280
2281 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2282 pe->index = tid;
2283
2284		/* Set flow ID */
2285 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2286 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2287
2288 /* Update shadow table */
2289 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2290 }
2291
2292 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2293 mvpp2_prs_hw_write(port->priv, pe);
2294 kfree(pe);
2295
2296 return 0;
2297}
2298
2299/* Classifier configuration routines */
2300
2301/* Update classification flow table registers */
2302static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2303 struct mvpp2_cls_flow_entry *fe)
2304{
2305 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2306 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2307 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2308 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2309}
2310
2311/* Update classification lookup table register */
2312static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2313 struct mvpp2_cls_lookup_entry *le)
2314{
2315 u32 val;
2316
2317 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2318 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2319 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2320}
2321
2322/* Classifier default initialization */
2323static void mvpp2_cls_init(struct mvpp2 *priv)
2324{
2325 struct mvpp2_cls_lookup_entry le;
2326 struct mvpp2_cls_flow_entry fe;
2327 int index;
2328
2329 /* Enable classifier */
2330 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2331
2332 /* Clear classifier flow table */
2333	memset(&fe.data, 0, sizeof(fe.data));
2334 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2335 fe.index = index;
2336 mvpp2_cls_flow_write(priv, &fe);
2337 }
2338
2339 /* Clear classifier lookup table */
2340 le.data = 0;
2341 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2342 le.lkpid = index;
2343 le.way = 0;
2344 mvpp2_cls_lookup_write(priv, &le);
2345
2346 le.way = 1;
2347 mvpp2_cls_lookup_write(priv, &le);
2348 }
2349}
2350
2351static void mvpp2_cls_port_config(struct mvpp2_port *port)
2352{
2353 struct mvpp2_cls_lookup_entry le;
2354 u32 val;
2355
2356 /* Set way for the port */
2357 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2358 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2359 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2360
2361	/* Pick the entry to be accessed in the lookup ID decoding table
2362 * according to the way and lkpid.
2363 */
2364 le.lkpid = port->id;
2365 le.way = 0;
2366 le.data = 0;
2367
2368 /* Set initial CPU queue for receiving packets */
2369 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2370 le.data |= port->first_rxq;
2371
2372 /* Disable classification engines */
2373 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2374
2375 /* Update lookup ID table entry */
2376 mvpp2_cls_lookup_write(port->priv, &le);
2377}
2378
2379/* Set CPU queue number for oversize packets */
2380static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2381{
2382 u32 val;
2383
2384 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2385 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2386
2387 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2388 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2389
2390 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2391 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2392 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2393}
2394
2395/* Buffer Manager configuration routines */
2396
2397/* Create pool */
2398static int mvpp2_bm_pool_create(struct udevice *dev,
2399 struct mvpp2 *priv,
2400 struct mvpp2_bm_pool *bm_pool, int size)
2401{
2402 u32 val;
2403
Thomas Petazzoni3520a332017-02-20 11:29:16 +01002404 /* Number of buffer pointers must be a multiple of 16, as per
2405 * hardware constraints
2406 */
2407 if (!IS_ALIGNED(size, 16))
2408 return -EINVAL;
2409
Stefan Roese96c19042016-02-10 07:22:10 +01002410 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002411 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
Stefan Roese96c19042016-02-10 07:22:10 +01002412 if (!bm_pool->virt_addr)
2413 return -ENOMEM;
2414
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002415 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2416 MVPP2_BM_POOL_PTR_ALIGN)) {
2417		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2418 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2419 return -ENOMEM;
2420 }
2421
2422 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzoni3520a332017-02-20 11:29:16 +01002423 lower_32_bits(bm_pool->dma_addr));
Stefan Roese96c19042016-02-10 07:22:10 +01002424 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2425
2426 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2427 val |= MVPP2_BM_START_MASK;
2428 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2429
2430 bm_pool->type = MVPP2_BM_FREE;
2431 bm_pool->size = size;
2432 bm_pool->pkt_size = 0;
2433 bm_pool->buf_num = 0;
2434
2435 return 0;
2436}
2437
2438/* Set pool buffer size */
2439static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2440 struct mvpp2_bm_pool *bm_pool,
2441 int buf_size)
2442{
2443 u32 val;
2444
2445 bm_pool->buf_size = buf_size;
2446
2447 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2448 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2449}
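/* The buffer size is rounded up to a multiple of
 * (1 << MVPP2_POOL_BUF_SIZE_OFFSET) bytes before being programmed,
 * presumably because the hardware ignores the low-order bits of
 * MVPP2_POOL_BUF_SIZE_REG; e.g. a 1518 byte request is padded up
 * accordingly.
 */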
2450
2451/* Free all buffers from the pool */
2452static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2453 struct mvpp2_bm_pool *bm_pool)
2454{
2455 bm_pool->buf_num = 0;
2456}
2457
2458/* Cleanup pool */
2459static int mvpp2_bm_pool_destroy(struct udevice *dev,
2460 struct mvpp2 *priv,
2461 struct mvpp2_bm_pool *bm_pool)
2462{
2463 u32 val;
2464
2465 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2466 if (bm_pool->buf_num) {
2467 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2468 return 0;
2469 }
2470
2471 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2472 val |= MVPP2_BM_STOP_MASK;
2473 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2474
2475 return 0;
2476}
2477
2478static int mvpp2_bm_pools_init(struct udevice *dev,
2479 struct mvpp2 *priv)
2480{
2481 int i, err, size;
2482 struct mvpp2_bm_pool *bm_pool;
2483
2484 /* Create all pools with maximum size */
2485 size = MVPP2_BM_POOL_SIZE_MAX;
2486 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2487 bm_pool = &priv->bm_pools[i];
2488 bm_pool->id = i;
2489 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2490 if (err)
2491 goto err_unroll_pools;
2492 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2493 }
2494 return 0;
2495
2496err_unroll_pools:
2497	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2498 for (i = i - 1; i >= 0; i--)
2499 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2500 return err;
2501}
2502
2503static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2504{
2505 int i, err;
2506
2507 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2508 /* Mask BM all interrupts */
2509 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2510 /* Clear BM cause register */
2511 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2512 }
2513
2514 /* Allocate and initialize BM pools */
2515 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2516 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2517 if (!priv->bm_pools)
2518 return -ENOMEM;
2519
2520 err = mvpp2_bm_pools_init(dev, priv);
2521 if (err < 0)
2522 return err;
2523 return 0;
2524}
2525
2526/* Attach long pool to rxq */
2527static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2528 int lrxq, int long_pool)
2529{
Thomas Petazzoni2321c922017-02-16 06:53:51 +01002530 u32 val, mask;
Stefan Roese96c19042016-02-10 07:22:10 +01002531 int prxq;
2532
2533 /* Get queue physical ID */
2534 prxq = port->rxqs[lrxq]->id;
2535
Thomas Petazzoni2321c922017-02-16 06:53:51 +01002536 if (port->priv->hw_version == MVPP21)
2537 mask = MVPP21_RXQ_POOL_LONG_MASK;
2538 else
2539 mask = MVPP22_RXQ_POOL_LONG_MASK;
Stefan Roese96c19042016-02-10 07:22:10 +01002540
Thomas Petazzoni2321c922017-02-16 06:53:51 +01002541 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2542 val &= ~mask;
2543 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Stefan Roese96c19042016-02-10 07:22:10 +01002544 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2545}
2546
2547/* Set pool number in a BM cookie */
2548static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2549{
2550 u32 bm;
2551
2552 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2553 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2554
2555 return bm;
2556}
2557
2558/* Get pool number from a BM cookie */
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002559static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
Stefan Roese96c19042016-02-10 07:22:10 +01002560{
2561 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2562}
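/* Example cookie round-trip (the field positions are whatever the
 * MVPP2_BM_COOKIE_*_OFFS macros define, not spelled out here):
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// pool == 3
 *
 * mvpp2_bm_cookie_build() below additionally encodes the CPU number in the
 * same word.
 */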
2563
2564/* Release buffer to BM */
2565static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002566 dma_addr_t buf_dma_addr,
Thomas Petazzoni09831762017-02-20 10:37:59 +01002567 unsigned long buf_phys_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01002568{
Thomas Petazzoni3520a332017-02-20 11:29:16 +01002569 if (port->priv->hw_version == MVPP22) {
2570 u32 val = 0;
2571
2572 if (sizeof(dma_addr_t) == 8)
2573 val |= upper_32_bits(buf_dma_addr) &
2574 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2575
2576 if (sizeof(phys_addr_t) == 8)
2577 val |= (upper_32_bits(buf_phys_addr)
2578 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2579 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2580
2581 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2582 }
2583
Thomas Petazzoni09831762017-02-20 10:37:59 +01002584 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2585 * returned in the "cookie" field of the RX
2586 * descriptor. Instead of storing the virtual address, we
2587 * store the physical address
2588 */
2589 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002590 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002591}
2592
2593/* Refill BM pool */
2594static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002595 dma_addr_t dma_addr,
Thomas Petazzoni09831762017-02-20 10:37:59 +01002596 phys_addr_t phys_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01002597{
2598 int pool = mvpp2_bm_cookie_pool_get(bm);
2599
Thomas Petazzoni09831762017-02-20 10:37:59 +01002600 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002601}
2602
2603/* Allocate buffers for the pool */
2604static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2605 struct mvpp2_bm_pool *bm_pool, int buf_num)
2606{
2607 int i;
Stefan Roese96c19042016-02-10 07:22:10 +01002608
2609 if (buf_num < 0 ||
2610 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2611 netdev_err(port->dev,
2612 "cannot allocate %d buffers for pool %d\n",
2613 buf_num, bm_pool->id);
2614 return 0;
2615 }
2616
Stefan Roese96c19042016-02-10 07:22:10 +01002617 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni1b085c52017-02-15 12:13:43 +01002618 mvpp2_bm_pool_put(port, bm_pool->id,
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002619 (dma_addr_t)buffer_loc.rx_buffer[i],
2620 (unsigned long)buffer_loc.rx_buffer[i]);
Thomas Petazzoni1b085c52017-02-15 12:13:43 +01002621
Stefan Roese96c19042016-02-10 07:22:10 +01002622 }
2623
2624 /* Update BM driver with number of buffers added to pool */
2625 bm_pool->buf_num += i;
2626 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2627
2628 return i;
2629}
2630
2631/* Notify the driver that a BM pool is being used as a specific type and return the
2632 * pool pointer on success
2633 */
2634static struct mvpp2_bm_pool *
2635mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2636 int pkt_size)
2637{
2638 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2639 int num;
2640
2641 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2642 netdev_err(port->dev, "mixing pool types is forbidden\n");
2643 return NULL;
2644 }
2645
2646 if (new_pool->type == MVPP2_BM_FREE)
2647 new_pool->type = type;
2648
2649 /* Allocate buffers in case BM pool is used as long pool, but packet
2650 * size doesn't match MTU or the BM pool hasn't been used yet
2651 */
2652 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2653 (new_pool->pkt_size == 0)) {
2654 int pkts_num;
2655
2656 /* Set default buffer number or free all the buffers in case
2657 * the pool is not empty
2658 */
2659 pkts_num = new_pool->buf_num;
2660 if (pkts_num == 0)
2661 pkts_num = type == MVPP2_BM_SWF_LONG ?
2662 MVPP2_BM_LONG_BUF_NUM :
2663 MVPP2_BM_SHORT_BUF_NUM;
2664 else
2665 mvpp2_bm_bufs_free(NULL,
2666 port->priv, new_pool);
2667
2668 new_pool->pkt_size = pkt_size;
2669
2670 /* Allocate buffers for this pool */
2671 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2672 if (num != pkts_num) {
2673			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2674 new_pool->id, num, pkts_num);
2675 return NULL;
2676 }
2677 }
2678
2679 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2680 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2681
2682 return new_pool;
2683}
2684
2685/* Initialize pools for swf */
2686static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2687{
2688 int rxq;
2689
2690 if (!port->pool_long) {
2691 port->pool_long =
2692 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2693 MVPP2_BM_SWF_LONG,
2694 port->pkt_size);
2695 if (!port->pool_long)
2696 return -ENOMEM;
2697
2698 port->pool_long->port_map |= (1 << port->id);
2699
2700 for (rxq = 0; rxq < rxq_number; rxq++)
2701 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2702 }
2703
2704 return 0;
2705}
2706
2707/* Port configuration routines */
2708
2709static void mvpp2_port_mii_set(struct mvpp2_port *port)
2710{
2711 u32 val;
2712
2713 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2714
2715 switch (port->phy_interface) {
2716 case PHY_INTERFACE_MODE_SGMII:
2717 val |= MVPP2_GMAC_INBAND_AN_MASK;
2718 break;
2719 case PHY_INTERFACE_MODE_RGMII:
2720 val |= MVPP2_GMAC_PORT_RGMII_MASK;
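		/* fall through - the default case also clears the PCS enable bit */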
2721 default:
2722 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2723 }
2724
2725 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2726}
2727
2728static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2729{
2730 u32 val;
2731
2732 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2733 val |= MVPP2_GMAC_FC_ADV_EN;
2734 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2735}
2736
2737static void mvpp2_port_enable(struct mvpp2_port *port)
2738{
2739 u32 val;
2740
2741 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2742 val |= MVPP2_GMAC_PORT_EN_MASK;
2743 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2744 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2745}
2746
2747static void mvpp2_port_disable(struct mvpp2_port *port)
2748{
2749 u32 val;
2750
2751 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2752 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2753 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2754}
2755
2756/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2757static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2758{
2759 u32 val;
2760
2761 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2762 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2763 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2764}
2765
2766/* Configure loopback port */
2767static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2768{
2769 u32 val;
2770
2771 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2772
2773 if (port->speed == 1000)
2774 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2775 else
2776 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2777
2778 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2779 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2780 else
2781 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2782
2783 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2784}
2785
2786static void mvpp2_port_reset(struct mvpp2_port *port)
2787{
2788 u32 val;
2789
2790 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2791 ~MVPP2_GMAC_PORT_RESET_MASK;
2792 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2793
2794 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2795 MVPP2_GMAC_PORT_RESET_MASK)
2796 continue;
2797}
2798
2799/* Change maximum receive size of the port */
2800static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2801{
2802 u32 val;
2803
2804 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2805 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2806 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2807 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2808 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2809}
2810
2811/* Set defaults to the MVPP2 port */
2812static void mvpp2_defaults_set(struct mvpp2_port *port)
2813{
2814 int tx_port_num, val, queue, ptxq, lrxq;
2815
Thomas Petazzoni58159ee2017-02-16 06:57:24 +01002816 if (port->priv->hw_version == MVPP21) {
2817 /* Configure port to loopback if needed */
2818 if (port->flags & MVPP2_F_LOOPBACK)
2819 mvpp2_port_loopback_set(port);
Stefan Roese96c19042016-02-10 07:22:10 +01002820
Thomas Petazzoni58159ee2017-02-16 06:57:24 +01002821 /* Update TX FIFO MIN Threshold */
2822 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2823 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2824 /* Min. TX threshold must be less than minimal packet length */
2825 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2826 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2827 }
Stefan Roese96c19042016-02-10 07:22:10 +01002828
2829 /* Disable Legacy WRR, Disable EJP, Release from reset */
2830 tx_port_num = mvpp2_egress_port(port);
2831 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2832 tx_port_num);
2833 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2834
2835 /* Close bandwidth for all queues */
2836 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2837 ptxq = mvpp2_txq_phys(port->id, queue);
2838 mvpp2_write(port->priv,
2839 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2840 }
2841
2842 /* Set refill period to 1 usec, refill tokens
2843 * and bucket size to maximum
2844 */
2845 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2846 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2847 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2848 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2849 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2850 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2851 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2852 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2853
2854 /* Set MaximumLowLatencyPacketSize value to 256 */
2855 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2856 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2857 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2858
2859 /* Enable Rx cache snoop */
2860 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2861 queue = port->rxqs[lrxq]->id;
2862 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2863 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2864 MVPP2_SNOOP_BUF_HDR_MASK;
2865 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2866 }
2867}
2868
2869/* Enable/disable receiving packets */
2870static void mvpp2_ingress_enable(struct mvpp2_port *port)
2871{
2872 u32 val;
2873 int lrxq, queue;
2874
2875 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2876 queue = port->rxqs[lrxq]->id;
2877 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2878 val &= ~MVPP2_RXQ_DISABLE_MASK;
2879 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2880 }
2881}
2882
2883static void mvpp2_ingress_disable(struct mvpp2_port *port)
2884{
2885 u32 val;
2886 int lrxq, queue;
2887
2888 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2889 queue = port->rxqs[lrxq]->id;
2890 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2891 val |= MVPP2_RXQ_DISABLE_MASK;
2892 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2893 }
2894}
2895
2896/* Enable transmit via physical egress queue
2897 * - HW starts taking descriptors from DRAM
2898 */
2899static void mvpp2_egress_enable(struct mvpp2_port *port)
2900{
2901 u32 qmap;
2902 int queue;
2903 int tx_port_num = mvpp2_egress_port(port);
2904
2905 /* Enable all initialized TXs. */
2906 qmap = 0;
2907 for (queue = 0; queue < txq_number; queue++) {
2908 struct mvpp2_tx_queue *txq = port->txqs[queue];
2909
2910 if (txq->descs != NULL)
2911 qmap |= (1 << queue);
2912 }
2913
2914 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2915 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2916}
2917
2918/* Disable transmit via physical egress queue
2919 * - HW doesn't take descriptors from DRAM
2920 */
2921static void mvpp2_egress_disable(struct mvpp2_port *port)
2922{
2923 u32 reg_data;
2924 int delay;
2925 int tx_port_num = mvpp2_egress_port(port);
2926
2927 /* Issue stop command for active channels only */
2928 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2929 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2930 MVPP2_TXP_SCHED_ENQ_MASK;
2931 if (reg_data != 0)
2932 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2933 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2934
2935 /* Wait for all Tx activity to terminate. */
2936 delay = 0;
2937 do {
2938 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2939 netdev_warn(port->dev,
2940 "Tx stop timed out, status=0x%08x\n",
2941 reg_data);
2942 break;
2943 }
2944 mdelay(1);
2945 delay++;
2946
2947 /* Check port TX Command register that all
2948 * Tx queues are stopped
2949 */
2950 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2951 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2952}
2953
2954/* Rx descriptors helper methods */
2955
2956/* Get number of Rx descriptors occupied by received packets */
2957static inline int
2958mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2959{
2960 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2961
2962 return val & MVPP2_RXQ_OCCUPIED_MASK;
2963}
2964
2965/* Update Rx queue status with the number of occupied and available
2966 * Rx descriptor slots.
2967 */
2968static inline void
2969mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2970 int used_count, int free_count)
2971{
2972	/* Decrement the number of used descriptors and increment the
2973	 * number of free descriptors.
2974 */
2975 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2976
2977 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2978}
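/* Example: after processing 4 descriptors and refilling 4 buffers a caller
 * does mvpp2_rxq_status_update(port, rxq->id, 4, 4); both counts are packed
 * into a single register write, with the free count shifted by
 * MVPP2_RXQ_NUM_NEW_OFFSET.
 */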
2979
2980/* Get pointer to next RX descriptor to be processed by SW */
2981static inline struct mvpp2_rx_desc *
2982mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2983{
2984 int rx_desc = rxq->next_desc_to_proc;
2985
2986 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2987 prefetch(rxq->descs + rxq->next_desc_to_proc);
2988 return rxq->descs + rx_desc;
2989}
2990
2991/* Set rx queue offset */
2992static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2993 int prxq, int offset)
2994{
2995 u32 val;
2996
2997 /* Convert offset from bytes to units of 32 bytes */
2998 offset = offset >> 5;
2999
3000 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3001 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3002
3003	/* Offset is in units of 32 bytes */
3004 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3005 MVPP2_RXQ_PACKET_OFFSET_MASK);
3006
3007 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3008}
3009
3010/* Obtain BM cookie information from descriptor */
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003011static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
3012 struct mvpp2_rx_desc *rx_desc)
Stefan Roese96c19042016-02-10 07:22:10 +01003013{
Stefan Roese96c19042016-02-10 07:22:10 +01003014 int cpu = smp_processor_id();
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003015 int pool;
3016
3017 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
3018 MVPP2_RXD_BM_POOL_ID_MASK) >>
3019 MVPP2_RXD_BM_POOL_ID_OFFS;
Stefan Roese96c19042016-02-10 07:22:10 +01003020
3021 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
3022 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
3023}
3024
3025/* Tx descriptors helper methods */
3026
3027/* Get number of Tx descriptors waiting to be transmitted by HW */
3028static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
3029 struct mvpp2_tx_queue *txq)
3030{
3031 u32 val;
3032
3033 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3034 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3035
3036 return val & MVPP2_TXQ_PENDING_MASK;
3037}
3038
3039/* Get pointer to next Tx descriptor to be processed (send) by HW */
3040static struct mvpp2_tx_desc *
3041mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
3042{
3043 int tx_desc = txq->next_desc_to_proc;
3044
3045 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
3046 return txq->descs + tx_desc;
3047}
3048
3049/* Update HW with number of aggregated Tx descriptors to be sent */
3050static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
3051{
3052 /* aggregated access - relevant TXQ number is written in TX desc */
3053 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
3054}
3055
3056/* Get number of sent descriptors and decrement counter.
3057 * The number of sent descriptors is returned.
3058 * Per-CPU access
3059 */
3060static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
3061 struct mvpp2_tx_queue *txq)
3062{
3063 u32 val;
3064
3065 /* Reading status reg resets transmitted descriptor counter */
3066 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
3067
3068 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
3069 MVPP2_TRANSMITTED_COUNT_OFFSET;
3070}
3071
3072static void mvpp2_txq_sent_counter_clear(void *arg)
3073{
3074 struct mvpp2_port *port = arg;
3075 int queue;
3076
3077 for (queue = 0; queue < txq_number; queue++) {
3078 int id = port->txqs[queue]->id;
3079
3080 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
3081 }
3082}
3083
3084/* Set max sizes for Tx queues */
3085static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3086{
3087 u32 val, size, mtu;
3088 int txq, tx_port_num;
3089
3090 mtu = port->pkt_size * 8;
3091 if (mtu > MVPP2_TXP_MTU_MAX)
3092 mtu = MVPP2_TXP_MTU_MAX;
3093
3094 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
3095 mtu = 3 * mtu;
3096
3097 /* Indirect access to registers */
3098 tx_port_num = mvpp2_egress_port(port);
3099 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3100
3101 /* Set MTU */
3102 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
3103 val &= ~MVPP2_TXP_MTU_MAX;
3104 val |= mtu;
3105 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
3106
3107	/* TXP token size and all TXQs token size must be larger than MTU */
3108 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
3109 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
3110 if (size < mtu) {
3111 size = mtu;
3112 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
3113 val |= size;
3114 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3115 }
3116
3117 for (txq = 0; txq < txq_number; txq++) {
3118 val = mvpp2_read(port->priv,
3119 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
3120 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
3121
3122 if (size < mtu) {
3123 size = mtu;
3124 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
3125 val |= size;
3126 mvpp2_write(port->priv,
3127 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
3128 val);
3129 }
3130 }
3131}
3132
3133/* Free Tx queue skbuffs */
3134static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
3135 struct mvpp2_tx_queue *txq,
3136 struct mvpp2_txq_pcpu *txq_pcpu, int num)
3137{
3138 int i;
3139
3140 for (i = 0; i < num; i++)
3141 mvpp2_txq_inc_get(txq_pcpu);
3142}
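/* In this U-Boot port there are no skbs to unmap or free, so "freeing" a Tx
 * buffer only advances the per-CPU get index via mvpp2_txq_inc_get().
 */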
3143
3144static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
3145 u32 cause)
3146{
3147 int queue = fls(cause) - 1;
3148
3149 return port->rxqs[queue];
3150}
3151
3152static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
3153 u32 cause)
3154{
3155 int queue = fls(cause) - 1;
3156
3157 return port->txqs[queue];
3158}
3159
3160/* Rx/Tx queue initialization/cleanup methods */
3161
3162/* Allocate and initialize descriptors for aggr TXQ */
3163static int mvpp2_aggr_txq_init(struct udevice *dev,
3164 struct mvpp2_tx_queue *aggr_txq,
3165 int desc_num, int cpu,
3166 struct mvpp2 *priv)
3167{
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01003168 u32 txq_dma;
3169
Stefan Roese96c19042016-02-10 07:22:10 +01003170 /* Allocate memory for TX descriptors */
3171 aggr_txq->descs = buffer_loc.aggr_tx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003172 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01003173 if (!aggr_txq->descs)
3174 return -ENOMEM;
3175
3176 /* Make sure descriptor address is cache line size aligned */
3177 BUG_ON(aggr_txq->descs !=
3178 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3179
3180 aggr_txq->last_desc = aggr_txq->size - 1;
3181
3182 /* Aggr TXQ no reset WA */
3183 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
3184 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
3185
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01003186 /* Set Tx descriptors queue starting address indirect
3187 * access
3188 */
3189 if (priv->hw_version == MVPP21)
3190 txq_dma = aggr_txq->descs_dma;
3191 else
3192 txq_dma = aggr_txq->descs_dma >>
3193 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
3194
3195 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01003196 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
3197
3198 return 0;
3199}
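/* On MVPP21 the full 32-bit descriptor DMA address is programmed, while on
 * MVPP22 only the bits above MVPP22_AGGR_TXQ_DESC_ADDR_OFFS are written -
 * presumably the hardware derives the low-order bits from the required
 * alignment of the descriptor ring.
 */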
3200
3201/* Create a specified Rx queue */
3202static int mvpp2_rxq_init(struct mvpp2_port *port,
3203 struct mvpp2_rx_queue *rxq)
3204
3205{
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01003206 u32 rxq_dma;
3207
Stefan Roese96c19042016-02-10 07:22:10 +01003208 rxq->size = port->rx_ring_size;
3209
3210 /* Allocate memory for RX descriptors */
3211 rxq->descs = buffer_loc.rx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003212 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01003213 if (!rxq->descs)
3214 return -ENOMEM;
3215
3216 BUG_ON(rxq->descs !=
3217 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3218
3219 rxq->last_desc = rxq->size - 1;
3220
3221 /* Zero occupied and non-occupied counters - direct access */
3222 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3223
3224 /* Set Rx descriptors queue starting address - indirect access */
3225 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01003226 if (port->priv->hw_version == MVPP21)
3227 rxq_dma = rxq->descs_dma;
3228 else
3229 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
3230 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01003231 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
3232 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
3233
3234 /* Set Offset */
3235 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
3236
3237 /* Add number of descriptors ready for receiving packets */
3238 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3239
3240 return 0;
3241}
3242
3243/* Push packets received by the RXQ to BM pool */
3244static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3245 struct mvpp2_rx_queue *rxq)
3246{
3247 int rx_received, i;
3248
3249 rx_received = mvpp2_rxq_received(port, rxq->id);
3250 if (!rx_received)
3251 return;
3252
3253 for (i = 0; i < rx_received; i++) {
3254 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003255 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01003256
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003257 mvpp2_pool_refill(port, bm,
3258 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3259 mvpp2_rxdesc_cookie_get(port, rx_desc));
Stefan Roese96c19042016-02-10 07:22:10 +01003260 }
3261 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3262}
3263
3264/* Cleanup Rx queue */
3265static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3266 struct mvpp2_rx_queue *rxq)
3267{
3268 mvpp2_rxq_drop_pkts(port, rxq);
3269
3270 rxq->descs = NULL;
3271 rxq->last_desc = 0;
3272 rxq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003273 rxq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01003274
3275 /* Clear Rx descriptors queue starting address and size;
3276 * free descriptor number
3277 */
3278 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3279 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3280 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3281 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3282}
3283
3284/* Create and initialize a Tx queue */
3285static int mvpp2_txq_init(struct mvpp2_port *port,
3286 struct mvpp2_tx_queue *txq)
3287{
3288 u32 val;
3289 int cpu, desc, desc_per_txq, tx_port_num;
3290 struct mvpp2_txq_pcpu *txq_pcpu;
3291
3292 txq->size = port->tx_ring_size;
3293
3294 /* Allocate memory for Tx descriptors */
3295 txq->descs = buffer_loc.tx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003296 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01003297 if (!txq->descs)
3298 return -ENOMEM;
3299
3300 /* Make sure descriptor address is cache line size aligned */
3301 BUG_ON(txq->descs !=
3302 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3303
3304 txq->last_desc = txq->size - 1;
3305
3306 /* Set Tx descriptors queue starting address - indirect access */
3307 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003308 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01003309 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3310 MVPP2_TXQ_DESC_SIZE_MASK);
3311 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
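/* Clear the reserved and pending descriptor counters for this queue */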
3312 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3313 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3314 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3315 val &= ~MVPP2_TXQ_PENDING_MASK;
3316 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3317
3318 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3319 * for each existing TXQ.
3320 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
3321 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
3322 */
3323 desc_per_txq = 16;
3324 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3325 (txq->log_id * desc_per_txq);
3326
3327 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3328 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
Thomas Petazzoni5555f072017-02-16 08:03:37 +01003329 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Stefan Roese96c19042016-02-10 07:22:10 +01003330
3331 /* WRR / EJP configuration - indirect access */
3332 tx_port_num = mvpp2_egress_port(port);
3333 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3334
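/* Default egress scheduler settings: refill period 1 and maximum refill tokens, i.e. effectively no shaping on this queue */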
3335 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3336 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3337 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3338 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3339 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3340
3341 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3342 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3343 val);
3344
3345 for_each_present_cpu(cpu) {
3346 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3347 txq_pcpu->size = txq->size;
3348 }
3349
3350 return 0;
3351}
3352
3353/* Free allocated TXQ resources */
3354static void mvpp2_txq_deinit(struct mvpp2_port *port,
3355 struct mvpp2_tx_queue *txq)
3356{
3357 txq->descs = NULL;
3358 txq->last_desc = 0;
3359 txq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003360 txq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01003361
3362 /* Set minimum bandwidth for disabled TXQs */
3363 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3364
3365 /* Set Tx descriptors queue starting address and size */
3366 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3367 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3368 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3369}
3370
3371 /* Drain and clean up a Tx queue */
3372static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3373{
3374 struct mvpp2_txq_pcpu *txq_pcpu;
3375 int delay, pending, cpu;
3376 u32 val;
3377
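/* Select the queue and enable its drain mode so that pending descriptors are flushed out */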
3378 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3379 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3380 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3381 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3382
3383 /* The queue has been stopped, so wait for all pending packets
3384 * to be transmitted.
3385 */
3386 delay = 0;
3387 do {
3388 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3389 netdev_warn(port->dev,
3390 "port %d: cleaning queue %d timed out\n",
3391 port->id, txq->log_id);
3392 break;
3393 }
3394 mdelay(1);
3395 delay++;
3396
3397 pending = mvpp2_txq_pend_desc_num_get(port, txq);
3398 } while (pending);
3399
3400 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3401 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3402
3403 for_each_present_cpu(cpu) {
3404 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3405
3406 /* Release all packets */
3407 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3408
3409 /* Reset queue */
3410 txq_pcpu->count = 0;
3411 txq_pcpu->txq_put_index = 0;
3412 txq_pcpu->txq_get_index = 0;
3413 }
3414}
3415
3416/* Cleanup all Tx queues */
3417static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3418{
3419 struct mvpp2_tx_queue *txq;
3420 int queue;
3421 u32 val;
3422
3423 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3424
3425 /* Reset Tx ports and delete Tx queues */
3426 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3427 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3428
3429 for (queue = 0; queue < txq_number; queue++) {
3430 txq = port->txqs[queue];
3431 mvpp2_txq_clean(port, txq);
3432 mvpp2_txq_deinit(port, txq);
3433 }
3434
3435 mvpp2_txq_sent_counter_clear(port);
3436
3437 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3438 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3439}
3440
3441/* Cleanup all Rx queues */
3442static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3443{
3444 int queue;
3445
3446 for (queue = 0; queue < rxq_number; queue++)
3447 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3448}
3449
3450/* Init all Rx queues for port */
3451static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3452{
3453 int queue, err;
3454
3455 for (queue = 0; queue < rxq_number; queue++) {
3456 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3457 if (err)
3458 goto err_cleanup;
3459 }
3460 return 0;
3461
3462err_cleanup:
3463 mvpp2_cleanup_rxqs(port);
3464 return err;
3465}
3466
3467/* Init all tx queues for port */
3468static int mvpp2_setup_txqs(struct mvpp2_port *port)
3469{
3470 struct mvpp2_tx_queue *txq;
3471 int queue, err;
3472
3473 for (queue = 0; queue < txq_number; queue++) {
3474 txq = port->txqs[queue];
3475 err = mvpp2_txq_init(port, txq);
3476 if (err)
3477 goto err_cleanup;
3478 }
3479
3480 mvpp2_txq_sent_counter_clear(port);
3481 return 0;
3482
3483err_cleanup:
3484 mvpp2_cleanup_txqs(port);
3485 return err;
3486}
3487
3488/* Adjust link */
3489static void mvpp2_link_event(struct mvpp2_port *port)
3490{
3491 struct phy_device *phydev = port->phy_dev;
3492 int status_change = 0;
3493 u32 val;
3494
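/* Mirror the PHY's negotiated speed/duplex into the GMAC config; the autoneg bits are cleared since the MAC is programmed by software here */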
3495 if (phydev->link) {
3496 if ((port->speed != phydev->speed) ||
3497 (port->duplex != phydev->duplex)) {
3498 u32 val;
3499
3500 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3501 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3502 MVPP2_GMAC_CONFIG_GMII_SPEED |
3503 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3504 MVPP2_GMAC_AN_SPEED_EN |
3505 MVPP2_GMAC_AN_DUPLEX_EN);
3506
3507 if (phydev->duplex)
3508 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3509
3510 if (phydev->speed == SPEED_1000)
3511 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3512 else if (phydev->speed == SPEED_100)
3513 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3514
3515 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3516
3517 port->duplex = phydev->duplex;
3518 port->speed = phydev->speed;
3519 }
3520 }
3521
3522 if (phydev->link != port->link) {
3523 if (!phydev->link) {
3524 port->duplex = -1;
3525 port->speed = 0;
3526 }
3527
3528 port->link = phydev->link;
3529 status_change = 1;
3530 }
3531
3532 if (status_change) {
3533 if (phydev->link) {
3534 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3535 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3536 MVPP2_GMAC_FORCE_LINK_DOWN);
3537 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3538 mvpp2_egress_enable(port);
3539 mvpp2_ingress_enable(port);
3540 } else {
3541 mvpp2_ingress_disable(port);
3542 mvpp2_egress_disable(port);
3543 }
3544 }
3545}
3546
3547/* Main RX/TX processing routines */
3548
3549/* Display more error info */
3550static void mvpp2_rx_error(struct mvpp2_port *port,
3551 struct mvpp2_rx_desc *rx_desc)
3552{
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003553 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3554 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01003555
3556 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3557 case MVPP2_RXD_ERR_CRC:
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003558 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
3559 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01003560 break;
3561 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003562 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
3563 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01003564 break;
3565 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003566 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
3567 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01003568 break;
3569 }
3570}
3571
3572 /* Return the buffer to the BM pool (U-Boot reuses the same buffer, no skb allocation) */
3573static int mvpp2_rx_refill(struct mvpp2_port *port,
3574 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003575 u32 bm, dma_addr_t dma_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01003576{
Thomas Petazzonic49aff22017-02-20 10:27:51 +01003577 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01003578 return 0;
3579}
3580
3581/* Set hw internals when starting port */
3582static void mvpp2_start_dev(struct mvpp2_port *port)
3583{
3584 mvpp2_gmac_max_rx_size_set(port);
3585 mvpp2_txp_max_tx_size_set(port);
3586
3587 mvpp2_port_enable(port);
3588}
3589
3590/* Set hw internals when stopping port */
3591static void mvpp2_stop_dev(struct mvpp2_port *port)
3592{
3593 /* Stop new packets from arriving to RXQs */
3594 mvpp2_ingress_disable(port);
3595
3596 mvpp2_egress_disable(port);
3597 mvpp2_port_disable(port);
3598}
3599
3600static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3601{
3602 struct phy_device *phy_dev;
3603
3604 if (!port->init || port->link == 0) {
3605 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3606 port->phy_interface);
3607 port->phy_dev = phy_dev;
3608 if (!phy_dev) {
3609 netdev_err(port->dev, "cannot connect to phy\n");
3610 return -ENODEV;
3611 }
3612 phy_dev->supported &= PHY_GBIT_FEATURES;
3613 phy_dev->advertising = phy_dev->supported;
3614
3616 port->link = 0;
3617 port->duplex = 0;
3618 port->speed = 0;
3619
3620 phy_config(phy_dev);
3621 phy_startup(phy_dev);
3622 if (!phy_dev->link) {
3623 printf("%s: No link\n", phy_dev->dev->name);
3624 return -1;
3625 }
3626
3627 port->init = 1;
3628 } else {
3629 mvpp2_egress_enable(port);
3630 mvpp2_ingress_enable(port);
3631 }
3632
3633 return 0;
3634}
3635
3636static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3637{
3638 unsigned char mac_bcast[ETH_ALEN] = {
3639 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3640 int err;
3641
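/* Teach the parser to accept broadcast frames and frames to our own MAC address, and install the default flow for this port */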
3642 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3643 if (err) {
3644 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3645 return err;
3646 }
3647 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3648 port->dev_addr, true);
3649 if (err) {
3650 netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
3651 return err;
3652 }
3653 err = mvpp2_prs_def_flow(port);
3654 if (err) {
3655 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3656 return err;
3657 }
3658
3659 /* Allocate the Rx/Tx queues */
3660 err = mvpp2_setup_rxqs(port);
3661 if (err) {
3662 netdev_err(port->dev, "cannot allocate Rx queues\n");
3663 return err;
3664 }
3665
3666 err = mvpp2_setup_txqs(port);
3667 if (err) {
3668 netdev_err(port->dev, "cannot allocate Tx queues\n");
3669 return err;
3670 }
3671
3672 err = mvpp2_phy_connect(dev, port);
3673 if (err < 0)
3674 return err;
3675
3676 mvpp2_link_event(port);
3677
3678 mvpp2_start_dev(port);
3679
3680 return 0;
3681}
3682
3683/* No Device ops here in U-Boot */
3684
3685/* Driver initialization */
3686
3687static void mvpp2_port_power_up(struct mvpp2_port *port)
3688{
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01003689 struct mvpp2 *priv = port->priv;
3690
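/* Basic MAC bring-up: select the MII mode, disable periodic XON, enable flow-control advertisement (PPv2.1 only) and reset the port */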
Stefan Roese96c19042016-02-10 07:22:10 +01003691 mvpp2_port_mii_set(port);
3692 mvpp2_port_periodic_xon_disable(port);
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01003693 if (priv->hw_version == MVPP21)
3694 mvpp2_port_fc_adv_enable(port);
Stefan Roese96c19042016-02-10 07:22:10 +01003695 mvpp2_port_reset(port);
3696}
3697
3698/* Initialize port HW */
3699static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3700{
3701 struct mvpp2 *priv = port->priv;
3702 struct mvpp2_txq_pcpu *txq_pcpu;
3703 int queue, cpu, err;
3704
Thomas Petazzoni38a23282017-02-16 09:03:16 +01003705 if (port->first_rxq + rxq_number >
3706 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Stefan Roese96c19042016-02-10 07:22:10 +01003707 return -EINVAL;
3708
3709 /* Disable port */
3710 mvpp2_egress_disable(port);
3711 mvpp2_port_disable(port);
3712
3713 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3714 GFP_KERNEL);
3715 if (!port->txqs)
3716 return -ENOMEM;
3717
3718 /* Associate physical Tx queues to this port and initialize.
3719 * The mapping is predefined.
3720 */
3721 for (queue = 0; queue < txq_number; queue++) {
3722 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3723 struct mvpp2_tx_queue *txq;
3724
3725 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3726 if (!txq)
3727 return -ENOMEM;
3728
3729 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3730 GFP_KERNEL);
3731 if (!txq->pcpu)
3732 return -ENOMEM;
3733
3734 txq->id = queue_phy_id;
3735 txq->log_id = queue;
3736 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3737 for_each_present_cpu(cpu) {
3738 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3739 txq_pcpu->cpu = cpu;
3740 }
3741
3742 port->txqs[queue] = txq;
3743 }
3744
3745 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3746 GFP_KERNEL);
3747 if (!port->rxqs)
3748 return -ENOMEM;
3749
3750 /* Allocate and initialize Rx queue for this port */
3751 for (queue = 0; queue < rxq_number; queue++) {
3752 struct mvpp2_rx_queue *rxq;
3753
3754 /* Map physical Rx queue to port's logical Rx queue */
3755 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3756 if (!rxq)
3757 return -ENOMEM;
3758 /* Map this Rx queue to a physical queue */
3759 rxq->id = port->first_rxq + queue;
3760 rxq->port = port->id;
3761 rxq->logic_rxq = queue;
3762
3763 port->rxqs[queue] = rxq;
3764 }
3765
3766 /* Configure Rx queue group interrupt for this port */
Thomas Petazzonif1077472017-02-16 08:46:37 +01003767 if (priv->hw_version == MVPP21) {
3768 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
3769 CONFIG_MV_ETH_RXQ);
3770 } else {
3771 u32 val;
3772
3773 val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
3774 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
3775
3776 val = (CONFIG_MV_ETH_RXQ <<
3777 MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
3778 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
3779 }
Stefan Roese96c19042016-02-10 07:22:10 +01003780
3781 /* Create Rx descriptor rings */
3782 for (queue = 0; queue < rxq_number; queue++) {
3783 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3784
3785 rxq->size = port->rx_ring_size;
3786 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3787 rxq->time_coal = MVPP2_RX_COAL_USEC;
3788 }
3789
3790 mvpp2_ingress_disable(port);
3791
3792 /* Port default configuration */
3793 mvpp2_defaults_set(port);
3794
3795 /* Port's classifier configuration */
3796 mvpp2_cls_oversize_rxq_set(port);
3797 mvpp2_cls_port_config(port);
3798
3799 /* Provide an initial Rx packet size */
3800 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3801
3802 /* Initialize pools for software forwarding (swf) */
3803 err = mvpp2_swf_bm_pool_init(port);
3804 if (err)
3805 return err;
3806
3807 return 0;
3808}
3809
3810/* Ports initialization */
3811static int mvpp2_port_probe(struct udevice *dev,
3812 struct mvpp2_port *port,
3813 int port_node,
Thomas Petazzoni38a23282017-02-16 09:03:16 +01003814 struct mvpp2 *priv)
Stefan Roese96c19042016-02-10 07:22:10 +01003815{
3816 int phy_node;
3817 u32 id;
3818 u32 phyaddr;
3819 const char *phy_mode_str;
3820 int phy_mode = -1;
3821 int priv_common_regs_num = 2;
3822 int err;
3823
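/* Parse the required properties from the port's DT node: phy handle, phy-mode and port-id */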
3824 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3825 if (phy_node < 0) {
3826 dev_err(&pdev->dev, "missing phy\n");
3827 return -ENODEV;
3828 }
3829
3830 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3831 if (phy_mode_str)
3832 phy_mode = phy_get_interface_by_name(phy_mode_str);
3833 if (phy_mode == -1) {
3834 dev_err(&pdev->dev, "incorrect phy mode\n");
3835 return -EINVAL;
3836 }
3837
3838 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3839 if (id == -1) {
3840 dev_err(&pdev->dev, "missing port-id value\n");
3841 return -EINVAL;
3842 }
3843
3844 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3845
3846 port->priv = priv;
3847 port->id = id;
Thomas Petazzoni38a23282017-02-16 09:03:16 +01003848 if (priv->hw_version == MVPP21)
3849 port->first_rxq = port->id * rxq_number;
3850 else
3851 port->first_rxq = port->id * priv->max_port_rxqs;
Stefan Roese96c19042016-02-10 07:22:10 +01003852 port->phy_node = phy_node;
3853 port->phy_interface = phy_mode;
3854 port->phyaddr = phyaddr;
3855
Thomas Petazzoni5555f072017-02-16 08:03:37 +01003856 if (priv->hw_version == MVPP21) {
3857 port->base = (void __iomem *)dev_get_addr_index(
3858 dev->parent, priv_common_regs_num + id);
3859 if (IS_ERR(port->base))
3860 return PTR_ERR(port->base);
3861 } else {
3862 u32 gop_id;
3863
3864 gop_id = fdtdec_get_int(gd->fdt_blob, port_node,
3865 "gop-port-id", -1);
3866 if (gop_id == -1) {
3867 dev_err(&pdev->dev, "missing gop-port-id value\n");
3868 return -EINVAL;
3869 }
3870
3871 port->base = priv->iface_base + MVPP22_PORT_BASE +
3872 gop_id * MVPP22_PORT_OFFSET;
3873 }
Stefan Roese96c19042016-02-10 07:22:10 +01003874
3875 port->tx_ring_size = MVPP2_MAX_TXD;
3876 port->rx_ring_size = MVPP2_MAX_RXD;
3877
3878 err = mvpp2_port_init(dev, port);
3879 if (err < 0) {
3880 dev_err(&pdev->dev, "failed to init port %d\n", id);
3881 return err;
3882 }
3883 mvpp2_port_power_up(port);
3884
Stefan Roese96c19042016-02-10 07:22:10 +01003885 priv->port_list[id] = port;
3886 return 0;
3887}
3888
3889/* Initialize decoding windows */
3890static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3891 struct mvpp2 *priv)
3892{
3893 u32 win_enable;
3894 int i;
3895
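/* Clear all six address decoding windows first, then program and enable one window per DRAM chip select */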
3896 for (i = 0; i < 6; i++) {
3897 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3898 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3899
3900 if (i < 4)
3901 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3902 }
3903
3904 win_enable = 0;
3905
3906 for (i = 0; i < dram->num_cs; i++) {
3907 const struct mbus_dram_window *cs = dram->cs + i;
3908
3909 mvpp2_write(priv, MVPP2_WIN_BASE(i),
3910 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3911 dram->mbus_dram_target_id);
3912
3913 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3914 (cs->size - 1) & 0xffff0000);
3915
3916 win_enable |= (1 << i);
3917 }
3918
3919 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3920}
3921
3922/* Initialize Rx FIFO's */
3923static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3924{
3925 int port;
3926
3927 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3928 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3929 MVPP2_RX_FIFO_PORT_DATA_SIZE);
3930 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3931 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3932 }
3933
3934 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3935 MVPP2_RX_FIFO_PORT_MIN_PKT);
3936 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3937}
3938
Thomas Petazzonica560ab2017-02-16 08:41:07 +01003939static void mvpp2_axi_init(struct mvpp2 *priv)
3940{
3941 u32 val, rdval, wrval;
3942
3943 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
3944
3945 /* AXI Bridge Configuration */
3946
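/* Use cacheable, outer-domain AXI attributes for BM, descriptor and buffer-data DMA; the normal (non-snoop) codes below stay non-cacheable in the system domain */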
3947 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
3948 << MVPP22_AXI_ATTR_CACHE_OFFS;
3949 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3950 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
3951
3952 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
3953 << MVPP22_AXI_ATTR_CACHE_OFFS;
3954 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3955 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
3956
3957 /* BM */
3958 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
3959 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
3960
3961 /* Descriptors */
3962 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
3963 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
3964 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
3965 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
3966
3967 /* Buffer Data */
3968 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
3969 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
3970
3971 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
3972 << MVPP22_AXI_CODE_CACHE_OFFS;
3973 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
3974 << MVPP22_AXI_CODE_DOMAIN_OFFS;
3975 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
3976 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
3977
3978 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
3979 << MVPP22_AXI_CODE_CACHE_OFFS;
3980 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3981 << MVPP22_AXI_CODE_DOMAIN_OFFS;
3982
3983 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
3984
3985 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
3986 << MVPP22_AXI_CODE_CACHE_OFFS;
3987 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
3988 << MVPP22_AXI_CODE_DOMAIN_OFFS;
3989
3990 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
3991}
3992
Stefan Roese96c19042016-02-10 07:22:10 +01003993/* Initialize network controller common part HW */
3994static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3995{
3996 const struct mbus_dram_target_info *dram_target_info;
3997 int err, i;
3998 u32 val;
3999
4000 /* Checks for hardware constraints (U-Boot uses only one rxq) */
Thomas Petazzoni38a23282017-02-16 09:03:16 +01004001 if ((rxq_number > priv->max_port_rxqs) ||
4002 (txq_number > MVPP2_MAX_TXQ)) {
Stefan Roese96c19042016-02-10 07:22:10 +01004003 dev_err(&pdev->dev, "invalid queue size parameter\n");
4004 return -EINVAL;
4005 }
4006
4007 /* MBUS windows configuration */
4008 dram_target_info = mvebu_mbus_dram_info();
4009 if (dram_target_info)
4010 mvpp2_conf_mbus_windows(dram_target_info, priv);
4011
Thomas Petazzonica560ab2017-02-16 08:41:07 +01004012 if (priv->hw_version == MVPP22)
4013 mvpp2_axi_init(priv);
4014
Stefan Roese96c19042016-02-10 07:22:10 +01004015 /* Disable HW PHY polling */
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01004016 if (priv->hw_version == MVPP21) {
4017 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
4018 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
4019 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
4020 } else {
4021 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
4022 val &= ~MVPP22_SMI_POLLING_EN;
4023 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
4024 }
Stefan Roese96c19042016-02-10 07:22:10 +01004025
4026 /* Allocate and initialize aggregated TXQs */
4027 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
4028 sizeof(struct mvpp2_tx_queue),
4029 GFP_KERNEL);
4030 if (!priv->aggr_txqs)
4031 return -ENOMEM;
4032
4033 for_each_present_cpu(i) {
4034 priv->aggr_txqs[i].id = i;
4035 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
4036 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
4037 MVPP2_AGGR_TXQ_SIZE, i, priv);
4038 if (err < 0)
4039 return err;
4040 }
4041
4042 /* Rx Fifo Init */
4043 mvpp2_rx_fifo_init(priv);
4044
4045 /* Reset Rx queue group interrupt configuration */
Thomas Petazzonif1077472017-02-16 08:46:37 +01004046 for (i = 0; i < MVPP2_MAX_PORTS; i++) {
4047 if (priv->hw_version == MVPP21) {
4048 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
4049 CONFIG_MV_ETH_RXQ);
4050 continue;
4051 } else {
4052 u32 val;
4053
4054 val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
4055 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
4056
4057 val = (CONFIG_MV_ETH_RXQ <<
4058 MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
4059 mvpp2_write(priv,
4060 MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
4061 }
4062 }
Stefan Roese96c19042016-02-10 07:22:10 +01004063
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01004064 if (priv->hw_version == MVPP21)
4065 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
4066 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Stefan Roese96c19042016-02-10 07:22:10 +01004067
4068 /* Allow cache snoop when transmitting packets */
4069 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
4070
4071 /* Buffer Manager initialization */
4072 err = mvpp2_bm_init(dev, priv);
4073 if (err < 0)
4074 return err;
4075
4076 /* Parser default initialization */
4077 err = mvpp2_prs_default_init(dev, priv);
4078 if (err < 0)
4079 return err;
4080
4081 /* Classifier default initialization */
4082 mvpp2_cls_init(priv);
4083
4084 return 0;
4085}
4086
4087/* SMI / MDIO functions */
4088
4089static int smi_wait_ready(struct mvpp2 *priv)
4090{
4091 u32 timeout = MVPP2_SMI_TIMEOUT;
4092 u32 smi_reg;
4093
4094 /* wait till the SMI is not busy */
4095 do {
4096 /* read smi register */
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004097 smi_reg = readl(priv->mdio_base);
Stefan Roese96c19042016-02-10 07:22:10 +01004098 if (timeout-- == 0) {
4099 printf("Error: SMI busy timeout\n");
4100 return -EFAULT;
4101 }
4102 } while (smi_reg & MVPP2_SMI_BUSY);
4103
4104 return 0;
4105}
4106
4107/*
4108 * mpp2_mdio_read - miiphy_read callback function.
4109 *
4110 * Returns the 16-bit phy register value, or a negative error code on failure
4111 */
4112static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
4113{
4114 struct mvpp2 *priv = bus->priv;
4115 u32 smi_reg;
4116 u32 timeout;
4117
4118 /* check parameters */
4119 if (addr > MVPP2_PHY_ADDR_MASK) {
4120 printf("Error: Invalid PHY address %d\n", addr);
4121 return -EFAULT;
4122 }
4123
4124 if (reg > MVPP2_PHY_REG_MASK) {
4125 printf("Err: Invalid register offset %d\n", reg);
4126 return -EFAULT;
4127 }
4128
4129 /* wait till the SMI is not busy */
4130 if (smi_wait_ready(priv) < 0)
4131 return -EFAULT;
4132
4133 /* fill the phy address and register offset and the read opcode */
4134 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
4135 | (reg << MVPP2_SMI_REG_ADDR_OFFS)
4136 | MVPP2_SMI_OPCODE_READ;
4137
4138 /* write the smi register */
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004139 writel(smi_reg, priv->mdio_base);
Stefan Roese96c19042016-02-10 07:22:10 +01004140
4141 /* wait till read value is ready */
4142 timeout = MVPP2_SMI_TIMEOUT;
4143
4144 do {
4145 /* read smi register */
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004146 smi_reg = readl(priv->mdio_base);
Stefan Roese96c19042016-02-10 07:22:10 +01004147 if (timeout-- == 0) {
4148 printf("Err: SMI read ready timeout\n");
4149 return -EFAULT;
4150 }
4151 } while (!(smi_reg & MVPP2_SMI_READ_VALID));
4152
4153 /* Wait for the data to update in the SMI register */
4154 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
4155 ;
4156
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004157 return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK;
Stefan Roese96c19042016-02-10 07:22:10 +01004158}
4159
4160/*
4161 * mpp2_mdio_write - miiphy_write callback function.
4162 *
4163 * Returns 0 if the write succeeded, or -EFAULT on bad
4164 * parameters or on timeout
4165 */
4166static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
4167 u16 value)
4168{
4169 struct mvpp2 *priv = bus->priv;
4170 u32 smi_reg;
4171
4172 /* check parameters */
4173 if (addr > MVPP2_PHY_ADDR_MASK) {
4174 printf("Error: Invalid PHY address %d\n", addr);
4175 return -EFAULT;
4176 }
4177
4178 if (reg > MVPP2_PHY_REG_MASK) {
4179 printf("Err: Invalid register offset %d\n", reg);
4180 return -EFAULT;
4181 }
4182
4183 /* wait till the SMI is not busy */
4184 if (smi_wait_ready(priv) < 0)
4185 return -EFAULT;
4186
4187 /* fill the phy addr and reg offset and write opcode and data */
4188 smi_reg = value << MVPP2_SMI_DATA_OFFS;
4189 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
4190 | (reg << MVPP2_SMI_REG_ADDR_OFFS);
4191 smi_reg &= ~MVPP2_SMI_OPCODE_READ;
4192
4193 /* write the smi register */
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004194 writel(smi_reg, priv->mdio_base);
Stefan Roese96c19042016-02-10 07:22:10 +01004195
4196 return 0;
4197}
4198
4199static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
4200{
4201 struct mvpp2_port *port = dev_get_priv(dev);
4202 struct mvpp2_rx_desc *rx_desc;
4203 struct mvpp2_bm_pool *bm_pool;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004204 dma_addr_t dma_addr;
Stefan Roese96c19042016-02-10 07:22:10 +01004205 u32 bm, rx_status;
4206 int pool, rx_bytes, err;
4207 int rx_received;
4208 struct mvpp2_rx_queue *rxq;
4209 u32 cause_rx_tx, cause_rx, cause_misc;
4210 u8 *data;
4211
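/* Check the RX/TX cause register for this port; if nothing is pending there is no packet to receive */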
4212 cause_rx_tx = mvpp2_read(port->priv,
4213 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4214 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4215 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4216 if (!cause_rx_tx && !cause_misc)
4217 return 0;
4218
4219 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4220
4221 /* Process RX packets */
4222 cause_rx |= port->pending_cause_rx;
4223 rxq = mvpp2_get_rx_queue(port, cause_rx);
4224
4225 /* Get the number of packets received on this queue */
4226 rx_received = mvpp2_rxq_received(port, rxq->id);
4227
4228 /* Return if no packets are received */
4229 if (!rx_received)
4230 return 0;
4231
4232 rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004233 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
4234 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
4235 rx_bytes -= MVPP2_MH_SIZE;
4236 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01004237
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004238 bm = mvpp2_bm_cookie_build(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01004239 pool = mvpp2_bm_cookie_pool_get(bm);
4240 bm_pool = &port->priv->bm_pools[pool];
4241
Stefan Roese96c19042016-02-10 07:22:10 +01004242 /* In case of an error, put the buffer back into the
 4243 * Buffer Manager pool. The information about the buffer
 4244 * (its DMA address and cookie) is taken from the RX
 4245 * descriptor.
 4246 */
4247 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
4248 mvpp2_rx_error(port, rx_desc);
4249 /* Return the buffer to the pool */
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004250 mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01004251 return 0;
4252 }
4253
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004254 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01004255 if (err) {
4256 netdev_err(port->dev, "failed to refill BM pools\n");
4257 return 0;
4258 }
4259
4260 /* Update Rx queue management counters */
4261 mb();
4262 mvpp2_rxq_status_update(port, rxq->id, 1, 1);
4263
4264 /* Give the packet to the stack - skip the 2-byte HW header and the 32 bytes of headroom in front of the packet */
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004265 data = (u8 *)dma_addr + 2 + 32;
Stefan Roese96c19042016-02-10 07:22:10 +01004266
4267 if (rx_bytes <= 0)
4268 return 0;
4269
4270 /*
4271 * No cache invalidation is needed here, since the rx_buffers are
4272 * located in an uncached memory region
4273 */
4274 *packetp = data;
4275
4276 return rx_bytes;
4277}
4278
4279/* Drain Txq */
4280static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4281 int enable)
4282{
4283 u32 val;
4284
4285 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4286 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4287 if (enable)
4288 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4289 else
4290 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4291 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4292}
4293
4294static int mvpp2_send(struct udevice *dev, void *packet, int length)
4295{
4296 struct mvpp2_port *port = dev_get_priv(dev);
4297 struct mvpp2_tx_queue *txq, *aggr_txq;
4298 struct mvpp2_tx_desc *tx_desc;
4299 int tx_done;
4300 int timeout;
4301
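/* U-Boot uses only the port's first TXQ and the per-CPU aggregated TXQ; each packet is sent with a single descriptor */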
4302 txq = port->txqs[0];
4303 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
4304
4305 /* Get a descriptor for the first part of the packet */
4306 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004307 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4308 mvpp2_txdesc_size_set(port, tx_desc, length);
4309 mvpp2_txdesc_offset_set(port, tx_desc,
4310 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
4311 mvpp2_txdesc_dma_addr_set(port, tx_desc,
4312 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
Stefan Roese96c19042016-02-10 07:22:10 +01004313 /* First and Last descriptor */
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004314 mvpp2_txdesc_cmd_set(port, tx_desc,
4315 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
4316 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
Stefan Roese96c19042016-02-10 07:22:10 +01004317
4318 /* Flush tx data */
Stefan Roeseb4268e22017-02-16 13:58:37 +01004319 flush_dcache_range((unsigned long)packet,
4320 (unsigned long)packet + ALIGN(length, PKTALIGN));
Stefan Roese96c19042016-02-10 07:22:10 +01004321
4322 /* Enable transmit */
4323 mb();
4324 mvpp2_aggr_txq_pend_desc_add(port, 1);
4325
4326 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4327
4328 timeout = 0;
4329 do {
4330 if (timeout++ > 10000) {
4331 printf("timeout: packet not sent from aggregated to phys TXQ\n");
4332 return 0;
4333 }
4334 tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
4335 } while (tx_done);
4336
4337 /* Enable TXQ drain */
4338 mvpp2_txq_drain(port, txq, 1);
4339
4340 timeout = 0;
4341 do {
4342 if (timeout++ > 10000) {
4343 printf("timeout: packet not sent\n");
4344 return 0;
4345 }
4346 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4347 } while (!tx_done);
4348
4349 /* Disable TXQ drain */
4350 mvpp2_txq_drain(port, txq, 0);
4351
4352 return 0;
4353}
4354
4355static int mvpp2_start(struct udevice *dev)
4356{
4357 struct eth_pdata *pdata = dev_get_platdata(dev);
4358 struct mvpp2_port *port = dev_get_priv(dev);
4359
4360 /* Load current MAC address */
4361 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
4362
4363 /* Reconfigure the parser to accept the original MAC address */
4364 mvpp2_prs_update_mac_da(port, port->dev_addr);
4365
4366 mvpp2_port_power_up(port);
4367
4368 mvpp2_open(dev, port);
4369
4370 return 0;
4371}
4372
4373static void mvpp2_stop(struct udevice *dev)
4374{
4375 struct mvpp2_port *port = dev_get_priv(dev);
4376
4377 mvpp2_stop_dev(port);
4378 mvpp2_cleanup_rxqs(port);
4379 mvpp2_cleanup_txqs(port);
4380}
4381
Stefan Roese96c19042016-02-10 07:22:10 +01004382static int mvpp2_base_probe(struct udevice *dev)
4383{
4384 struct mvpp2 *priv = dev_get_priv(dev);
4385 struct mii_dev *bus;
4386 void *bd_space;
4387 u32 size = 0;
4388 int i;
4389
Thomas Petazzoni51ccb412017-02-15 14:08:59 +01004390 /* Save hw-version */
4391 priv->hw_version = dev_get_driver_data(dev);
4392
Stefan Roese96c19042016-02-10 07:22:10 +01004393 /*
4394 * U-Boot special buffer handling:
4395 *
4396 * Allocate the buffer area for descs and rx_buffers. This is only
4397 * done once for all interfaces, as only one interface can be
4398 * active at a time. Make this area DMA-safe by disabling the D-cache.
4399 */
4400
4401 /* Align buffer area for descs and rx_buffers to 1MiB */
4402 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
Stefan Roesefeb0b332017-02-15 12:46:18 +01004403 mmu_set_region_dcache_behaviour((unsigned long)bd_space,
4404 BD_SPACE, DCACHE_OFF);
Stefan Roese96c19042016-02-10 07:22:10 +01004405
4406 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
4407 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
4408
Stefan Roesefeb0b332017-02-15 12:46:18 +01004409 buffer_loc.tx_descs =
4410 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004411 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
4412
Stefan Roesefeb0b332017-02-15 12:46:18 +01004413 buffer_loc.rx_descs =
4414 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004415 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
4416
4417 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
Stefan Roesefeb0b332017-02-15 12:46:18 +01004418 buffer_loc.bm_pool[i] =
4419 (unsigned long *)((unsigned long)bd_space + size);
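/* Each BM pool entry holds a buffer DMA address plus a cookie, i.e. two words per entry: 32-bit words on PPv2.1, 64-bit on PPv2.2 */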
Thomas Petazzoni3520a332017-02-20 11:29:16 +01004420 if (priv->hw_version == MVPP21)
4421 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
4422 else
4423 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
Stefan Roese96c19042016-02-10 07:22:10 +01004424 }
4425
4426 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
Stefan Roesefeb0b332017-02-15 12:46:18 +01004427 buffer_loc.rx_buffer[i] =
4428 (unsigned long *)((unsigned long)bd_space + size);
Stefan Roese96c19042016-02-10 07:22:10 +01004429 size += RX_BUFFER_SIZE;
4430 }
4431
4432 /* Save base addresses for later use */
4433 priv->base = (void *)dev_get_addr_index(dev, 0);
4434 if (IS_ERR(priv->base))
4435 return PTR_ERR(priv->base);
4436
Thomas Petazzoni5555f072017-02-16 08:03:37 +01004437 if (priv->hw_version == MVPP21) {
4438 priv->lms_base = (void *)dev_get_addr_index(dev, 1);
4439 if (IS_ERR(priv->lms_base))
4440 return PTR_ERR(priv->lms_base);
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004441
4442 priv->mdio_base = priv->lms_base + MVPP21_SMI;
Thomas Petazzoni5555f072017-02-16 08:03:37 +01004443 } else {
4444 priv->iface_base = (void *)dev_get_addr_index(dev, 1);
4445 if (IS_ERR(priv->iface_base))
4446 return PTR_ERR(priv->iface_base);
Stefan Roeseb71c2a32017-02-16 08:31:32 +01004447
4448 priv->mdio_base = priv->iface_base + MVPP22_SMI;
Thomas Petazzoni5555f072017-02-16 08:03:37 +01004449 }
Stefan Roese96c19042016-02-10 07:22:10 +01004450
Thomas Petazzoni38a23282017-02-16 09:03:16 +01004451 if (priv->hw_version == MVPP21)
4452 priv->max_port_rxqs = 8;
4453 else
4454 priv->max_port_rxqs = 32;
4455
Stefan Roese96c19042016-02-10 07:22:10 +01004456 /* Finally create and register the MDIO bus driver */
4457 bus = mdio_alloc();
4458 if (!bus) {
4459 printf("Failed to allocate MDIO bus\n");
4460 return -ENOMEM;
4461 }
4462
4463 bus->read = mpp2_mdio_read;
4464 bus->write = mpp2_mdio_write;
4465 snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
4466 bus->priv = (void *)priv;
4467 priv->bus = bus;
4468
4469 return mdio_register(bus);
4470}
4471
Stefan Roesed017cdf2017-02-16 15:26:06 +01004472static int mvpp2_probe(struct udevice *dev)
4473{
4474 struct mvpp2_port *port = dev_get_priv(dev);
4475 struct mvpp2 *priv = dev_get_priv(dev->parent);
4476 int err;
4477
4478 /* Only call the probe function for the parent once */
4479 if (!priv->probe_done) {
4480 err = mvpp2_base_probe(dev->parent);
 if (err)
 return err;
4481 priv->probe_done = 1;
4482 }
4483 /* Initialize network controller */
4484 err = mvpp2_init(dev, priv);
4485 if (err < 0) {
4486 dev_err(&pdev->dev, "failed to initialize controller\n");
4487 return err;
4488 }
4489
4490 return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
4491}
4492
4493static const struct eth_ops mvpp2_ops = {
4494 .start = mvpp2_start,
4495 .send = mvpp2_send,
4496 .recv = mvpp2_recv,
4497 .stop = mvpp2_stop,
4498};
4499
4500static struct driver mvpp2_driver = {
4501 .name = "mvpp2",
4502 .id = UCLASS_ETH,
4503 .probe = mvpp2_probe,
4504 .ops = &mvpp2_ops,
4505 .priv_auto_alloc_size = sizeof(struct mvpp2_port),
4506 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
4507};
4508
4509/*
4510 * Use a MISC device to bind the n instances (child nodes) of the
4511 * network base controller in UCLASS_ETH.
4512 */
Stefan Roese96c19042016-02-10 07:22:10 +01004513static int mvpp2_base_bind(struct udevice *parent)
4514{
4515 const void *blob = gd->fdt_blob;
Simon Glassdd79d6e2017-01-17 16:52:55 -07004516 int node = dev_of_offset(parent);
Stefan Roese96c19042016-02-10 07:22:10 +01004517 struct uclass_driver *drv;
4518 struct udevice *dev;
4519 struct eth_pdata *plat;
4520 char *name;
4521 int subnode;
4522 u32 id;
4523
4524 /* Lookup eth driver */
4525 drv = lists_uclass_lookup(UCLASS_ETH);
4526 if (!drv) {
4527 puts("Cannot find eth driver\n");
4528 return -ENOENT;
4529 }
4530
Simon Glass499c29e2016-10-02 17:59:29 -06004531 fdt_for_each_subnode(subnode, blob, node) {
Stefan Roese96c19042016-02-10 07:22:10 +01004532 /* Skip disabled ports */
4533 if (!fdtdec_get_is_enabled(blob, subnode))
4534 continue;
4535
4536 plat = calloc(1, sizeof(*plat));
4537 if (!plat)
4538 return -ENOMEM;
4539
4540 id = fdtdec_get_int(blob, subnode, "port-id", -1);
4541
4542 name = calloc(1, 16);
4543 snprintf(name, 16, "mvpp2-%d", id);
4544
4545 /* Create child device UCLASS_ETH and bind it */
4546 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
Simon Glassdd79d6e2017-01-17 16:52:55 -07004547 dev_set_of_offset(dev, subnode);
Stefan Roese96c19042016-02-10 07:22:10 +01004548 }
4549
4550 return 0;
4551}
4552
4553static const struct udevice_id mvpp2_ids[] = {
Thomas Petazzoni51ccb412017-02-15 14:08:59 +01004554 {
4555 .compatible = "marvell,armada-375-pp2",
4556 .data = MVPP21,
4557 },
Thomas Petazzonie595a232017-02-20 11:54:31 +01004558 {
4559 .compatible = "marvell,armada-7k-pp22",
4560 .data = MVPP22,
4561 },
Stefan Roese96c19042016-02-10 07:22:10 +01004562 { }
4563};
4564
4565U_BOOT_DRIVER(mvpp2_base) = {
4566 .name = "mvpp2_base",
4567 .id = UCLASS_MISC,
4568 .of_match = mvpp2_ids,
4569 .bind = mvpp2_base_bind,
Stefan Roese96c19042016-02-10 07:22:10 +01004570 .priv_auto_alloc_size = sizeof(struct mvpp2),
4571};