/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <cpu_func.h>
#include <dm.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <dm/device-internal.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/libfdt.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
#include <linux/mdio.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr) \
do { \
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
	(void)__vpp_verify; \
} while (0)

#define VERIFY_PERCPU_PTR(__p) \
({ \
	__verify_pcpu_ptr(__p); \
	(typeof(*(__p)) __kernel __force *)(__p); \
})

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id() 0
#define num_present_cpus() 1
#define for_each_present_cpu(cpu) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++)
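/*
 * U-Boot runs single-CPU, so the Linux per-CPU API above collapses to
 * the cpu == 0 case. For example, kernel-style code such as
 *
 *	struct mvpp2_txq_pcpu *txq_pcpu =
 *		per_cpu_ptr(txq->pcpu, smp_processor_id());
 *
 * simply yields the one and only instance, while the type check in
 * VERIFY_PERCPU_PTR() is still performed at compile time.
 */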

#define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP (2 + ETH_HLEN + 4 + 32)
#define MTU 1500
#define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
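/*
 * Worked example: WRAP = 2 + 14 + 4 + 32 = 52 bytes, so for the fixed
 * MTU of 1500 the raw size is 1552 bytes; with e.g. ARCH_DMA_MINALIGN
 * == 64 this rounds up to a 1600-byte RX buffer.
 */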

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS 0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12

#define MVPP22_AXI_CODE_CACHE_OFFS 0
#define MVPP22_AXI_CODE_DOMAIN_OFFS 4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8

#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
	MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
	MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC 0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK 0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK 0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP21_BM_MC_RLS_REG 0x64c4
#define MVPP2_BM_MC_ID_MASK 0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
#define MVPP22_BM_MC_RLS_REG 0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG 0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK 0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_EN_PCS_AN BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_EN_FC_AN BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
	MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG 0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_PORT_EN BIT(0)
#define MVPP22_XLG_MAC_RESETN BIT(1)
#define MVPP22_XLG_RX_FC_EN BIT(7)
#define MVPP22_XLG_MIBCNT_DIS BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG 0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS 0
#define MVPP22_XLG_MAX_RX_SIZE_MASK 0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG 0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC (1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN BIT(6)
#define MVPP22_XLG_MODE_DMA_1G BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG 0x0
#define MVPP22_XPCS_PCSRESET BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS 3
#define MVPP22_XPCS_PCSMODE_MASK (0x3 << \
	MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS 5
#define MVPP22_XPCS_LANEACTIVE_MASK (0x3 << \
	MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL 0x14
#define FORWARD_ERROR_CORRECTION_MASK BIT(10)

#define PCS_CLOCK_RESET 0x14c
#define TX_SD_CLK_RESET_MASK BIT(0)
#define RX_SD_CLK_RESET_MASK BIT(1)
#define MAC_CLK_RESET_MASK BIT(2)
#define CLK_DIVISION_RATIO_OFFS 4
#define CLK_DIVISION_RATIO_MASK (0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG 0x108
#define NETC_GOP_SOFT_RESET_OFFS 6
#define NETC_GOP_SOFT_RESET_MASK (0x1 << \
	NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG 0x110
#define NETC_BUS_WIDTH_SELECT_OFFS 1
#define NETC_BUS_WIDTH_SELECT_MASK (0x1 << \
	NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS 29
#define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << \
	NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS 31
#define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG 0x114
#define NETC_PORTS_ACTIVE_OFFSET(p) (0 + p)
#define NETC_PORTS_ACTIVE_MASK(p) (0x1 << \
	NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p) (28 + p)
#define NETC_PORT_GIG_RF_RESET_MASK(p) (0x1 << \
	NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG 0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS 0
#define NETC_GBE_PORT0_SGMII_MODE_MASK (0x1 << \
	NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS 1
#define NETC_GBE_PORT1_SGMII_MODE_MASK (0x1 << \
	NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS 2
#define NETC_GBE_PORT1_MII_MODE_MASK (0x1 << \
	NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG (MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN BIT(10)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
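/*
 * Example: in a 16-entry ring (last_desc == 15),
 * MVPP2_QUEUE_NEXT_DESC(q, 14) yields 15 and
 * MVPP2_QUEUE_NEXT_DESC(q, 15) wraps around to 0.
 */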

/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI 0x1200

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS 0x007000
#define MVPP22_XPCS 0x007400
#define MVPP22_PORT_BASE 0x007e00
#define MVPP22_PORT_OFFSET 0x001000
#define MVPP22_RFU1 0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM 4

/* TX FIFO minimum threshold values, one per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH 0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH 0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH 0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC2_RGMII = BIT(1),
	MV_NETC_GE_MAC3_SGMII = BIT(2),
	MV_NETC_GE_MAC3_RGMII = BIT(3),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
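/*
 * Alignment arithmetic: MVPP2_MH_SIZE (2) plus the 14-byte Ethernet
 * header makes 16 bytes, which is how the IP header ends up 4-byte
 * aligned as described above.
 */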

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ 1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 1
#define CFG_MV_ETH_RXQ 8 /* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE 0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE 0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE 0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE 0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE 0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE 0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK 0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
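/*
 * The two macros above interleave data and enable bytes inside the
 * 32-bit TCAM words: data bytes for offsets 0..3 land in byte[] slots
 * 0, 1, 4, 5 while their enable (mask) bytes land in slots 2, 3, 6, 7,
 * i.e. each word carries two data bytes in its low half and the two
 * matching enable bytes in its high half.
 */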
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 1
#define MVPP2_BM_LONG_BUF_NUM 16
#define MVPP2_BM_SHORT_BUF_NUM 16
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) 0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

/* BM short pool packet size
 * This value ensures that for SWF the total number of bytes
 * allocated for each buffer will be 512.
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
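/*
 * Worked example: MVPP2_RX_MAX_PKT_SIZE(512) = 512 - NET_SKB_PAD (32)
 * - MVPP2_SKB_SHINFO_SIZE (0) = 480 bytes of actual packet data per
 * 512-byte short-pool buffer.
 */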

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port within the GOP ("group of ports") complex */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
	struct udevice *mdio_dev;
	struct mii_dev *bus;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8 packet_offset;	/* the offset from the buffer beginning */
	u8 phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_dma_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8 reserved4;		/* bm_qset (for future use, BM) */
	u8 reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8 packet_offset;
	u8 phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
static int buffer_loc_init;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver needs only a few buffer descriptors, so 1MB of BD space
 * is sufficient.
 */
#define BD_SPACE (1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
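/*
 * On PPv2.2 the descriptor packs the 40-bit DMA address together with
 * PTP information in a single 64-bit field, hence the read-modify-write
 * of bits [40:0] above; PPv2.1 simply stores the 32-bit address
 * directly.
 */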

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
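/*
 * Example: with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, TXQ 2 of
 * ethernet port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138.
 */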

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
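/*
 * Typical (illustrative) usage, where tid is a caller-chosen TCAM
 * entry index: build the entry in software, then push both halves
 * through the indirect index/data registers in one go:
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	memset(&pe, 0, sizeof(pe));
 *	pe.index = tid;
 *	... fill pe.tcam / pe.sram via the helpers below ...
 *	mvpp2_prs_hw_write(priv, &pe);
 */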
1414
1415/* Read tcam entry from hw */
1416static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1417{
1418 int i;
1419
1420 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1421 return -EINVAL;
1422
1423 /* Write tcam index - indirect access */
1424 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1425
1426 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1427 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1428 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1429 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1430
1431 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1432 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1433
1434 /* Write sram index - indirect access */
1435 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1436 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1437 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1438
1439 return 0;
1440}
1441
1442/* Invalidate tcam hw entry */
1443static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1444{
1445 /* Write index - indirect access */
1446 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1447 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1448 MVPP2_PRS_TCAM_INV_MASK);
1449}
1450
1451/* Enable shadow table entry and set its lookup ID */
1452static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1453{
1454 priv->prs_shadow[index].valid = true;
1455 priv->prs_shadow[index].lu = lu;
1456}
1457
1458/* Update ri fields in shadow table entry */
1459static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1460 unsigned int ri, unsigned int ri_mask)
1461{
1462 priv->prs_shadow[index].ri_mask = ri_mask;
1463 priv->prs_shadow[index].ri = ri;
1464}
1465
1466/* Update lookup field in tcam sw entry */
1467static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1468{
1469 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1470
1471 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1472 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1473}
1474
1475/* Update mask for single port in tcam sw entry */
1476static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1477 unsigned int port, bool add)
1478{
1479 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1480
1481 if (add)
1482 pe->tcam.byte[enable_off] &= ~(1 << port);
1483 else
1484 pe->tcam.byte[enable_off] |= 1 << port;
1485}
1486
1487/* Update port map in tcam sw entry */
1488static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1489 unsigned int ports)
1490{
1491 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1492 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1493
1494 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1495 pe->tcam.byte[enable_off] &= ~port_mask;
1496 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1497}
1498
1499/* Obtain port map from tcam sw entry */
1500static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1501{
1502 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1503
1504 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1505}
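
/*
 * Worked example (illustrative; assumes MVPP2_PRS_PORT_MASK == 0xff):
 * the port map is stored inverted in the enable byte, so
 * mvpp2_prs_tcam_port_map_set(pe, BIT(0)) writes ~0x01 & 0xff = 0xfe
 * there, and mvpp2_prs_tcam_port_map_get(pe) recovers
 * ~0xfe & 0xff = 0x01 again.
 */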
1506
1507/* Set byte of data and its enable bits in tcam sw entry */
1508static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1509 unsigned int offs, unsigned char byte,
1510 unsigned char enable)
1511{
1512 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1513 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1514}
1515
1516/* Get byte of data and its enable bits from tcam sw entry */
1517static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1518 unsigned int offs, unsigned char *byte,
1519 unsigned char *enable)
1520{
1521 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1522 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1523}
1524
1525/* Set ethertype in tcam sw entry */
1526static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1527 unsigned short ethertype)
1528{
1529 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1530 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1531}
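
/*
 * Example (illustrative): matching IPv4 at the start of the tcam data,
 * i.e. ethertype 0x0800, sets data byte 0 to 0x08 and byte 1 to 0x00,
 * with both enable masks fully set:
 *
 *	mvpp2_prs_match_etype(pe, 0, PROT_IP);
 */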
1532
1533/* Set bits in sram sw entry */
1534static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1535 int val)
1536{
1537 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1538}
1539
1540/* Clear bits in sram sw entry */
1541static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1542 int val)
1543{
1544 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1545}
1546
1547/* Update ri bits in sram sw entry */
1548static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1549 unsigned int bits, unsigned int mask)
1550{
1551 unsigned int i;
1552
1553 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1554 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1555
1556 if (!(mask & BIT(i)))
1557 continue;
1558
1559 if (bits & BIT(i))
1560 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1561 else
1562 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1563
1564 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1565 }
1566}
1567
1568/* Update ai bits in sram sw entry */
1569static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1570 unsigned int bits, unsigned int mask)
1571{
1572 unsigned int i;
1573 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1574
1575 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1576
1577 if (!(mask & BIT(i)))
1578 continue;
1579
1580 if (bits & BIT(i))
1581 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1582 else
1583 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1584
1585 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1586 }
1587}
1588
1589/* Read ai bits from sram sw entry */
1590static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1591{
1592 u8 bits;
1593 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1594 int ai_en_off = ai_off + 1;
1595 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1596
1597 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1598 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1599
1600 return bits;
1601}
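
/*
 * The AI field may straddle a byte boundary: if it starts at bit 2 of
 * byte k, the value read back is (byte[k] >> 2) | (byte[k + 1] << 6),
 * truncated to the u8 return type.
 */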
1602
                                           1603/* Set the lookup ID field of the tcam key in the sram sw entry; it is used
                                           1604 * in the next lookup iteration
1605 */
1606static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1607 unsigned int lu)
1608{
1609 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1610
1611 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1612 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1613 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1614}
1615
1616/* In the sram sw entry set sign and value of the next lookup offset
1617 * and the offset value generated to the classifier
1618 */
1619static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1620 unsigned int op)
1621{
1622 /* Set sign */
1623 if (shift < 0) {
1624 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1625 shift = 0 - shift;
1626 } else {
1627 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1628 }
1629
1630 /* Set value */
1631 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1632 (unsigned char)shift;
1633
1634 /* Reset and set operation */
1635 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1636 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1637 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1638
1639 /* Set base offset as current */
1640 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1641}
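
/*
 * Usage sketch (as done for the MAC entries below): skip the 12 bytes
 * of DA + SA so that the next lookup starts at the ethertype:
 *
 *	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
 *				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 */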
1642
1643/* In the sram sw entry set sign and value of the user defined offset
1644 * generated to the classifier
1645 */
1646static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1647 unsigned int type, int offset,
1648 unsigned int op)
1649{
1650 /* Set sign */
1651 if (offset < 0) {
1652 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1653 offset = 0 - offset;
1654 } else {
1655 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1656 }
1657
1658 /* Set value */
1659 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1660 MVPP2_PRS_SRAM_UDF_MASK);
1661 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1662 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1663 MVPP2_PRS_SRAM_UDF_BITS)] &=
1664 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1665 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1666 MVPP2_PRS_SRAM_UDF_BITS)] |=
1667 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1668
1669 /* Set offset type */
1670 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1671 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1672 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1673
1674 /* Set offset operation */
1675 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1676 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1677 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1678
1679 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1680 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1681 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1682 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1683
1684 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1685 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1686 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1687
1688 /* Set base offset as current */
1689 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1690}
1691
1692/* Find parser flow entry */
1693static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1694{
1695 struct mvpp2_prs_entry *pe;
1696 int tid;
1697
1698 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1699 if (!pe)
1700 return NULL;
1701 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1702
                                           1703	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
1704 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1705 u8 bits;
1706
1707 if (!priv->prs_shadow[tid].valid ||
1708 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1709 continue;
1710
1711 pe->index = tid;
1712 mvpp2_prs_hw_read(priv, pe);
1713 bits = mvpp2_prs_sram_ai_get(pe);
1714
                                           1715		/* Sram stores the classification lookup ID in AI bits [5:0] */
1716 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1717 return pe;
1718 }
1719 kfree(pe);
1720
1721 return NULL;
1722}
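
/*
 * Note: the entry returned by mvpp2_prs_flow_find() is heap-allocated;
 * the caller owns it and must kfree() it, as mvpp2_prs_def_flow() does.
 */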
1723
1724/* Return first free tcam index, seeking from start to end */
1725static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1726 unsigned char end)
1727{
1728 int tid;
1729
1730 if (start > end)
1731 swap(start, end);
1732
1733 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1734 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1735
1736 for (tid = start; tid <= end; tid++) {
1737 if (!priv->prs_shadow[tid].valid)
1738 return tid;
1739 }
1740
1741 return -EINVAL;
1742}
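
/*
 * Note: start and end are swapped when start > end, so the scan itself
 * always walks upward; callers like mvpp2_prs_def_flow() pass the range
 * reversed to express the intended search direction.
 */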
1743
1744/* Enable/disable dropping all mac da's */
1745static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1746{
1747 struct mvpp2_prs_entry pe;
1748
1749 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
                                           1750		/* Entry exists - update port only */
1751 pe.index = MVPP2_PE_DROP_ALL;
1752 mvpp2_prs_hw_read(priv, &pe);
1753 } else {
1754 /* Entry doesn't exist - create new */
1755 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1756 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1757 pe.index = MVPP2_PE_DROP_ALL;
1758
1759 /* Non-promiscuous mode for all ports - DROP unknown packets */
1760 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1761 MVPP2_PRS_RI_DROP_MASK);
1762
1763 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1764 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1765
1766 /* Update shadow table */
1767 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1768
1769 /* Mask all ports */
1770 mvpp2_prs_tcam_port_map_set(&pe, 0);
1771 }
1772
1773 /* Update port mask */
1774 mvpp2_prs_tcam_port_set(&pe, port, add);
1775
1776 mvpp2_prs_hw_write(priv, &pe);
1777}
1778
1779/* Set port to promiscuous mode */
1780static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1781{
1782 struct mvpp2_prs_entry pe;
1783
1784 /* Promiscuous mode - Accept unknown packets */
1785
1786 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
                                           1787		/* Entry exists - update port only */
1788 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1789 mvpp2_prs_hw_read(priv, &pe);
1790 } else {
1791 /* Entry doesn't exist - create new */
1792 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1793 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1794 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1795
1796 /* Continue - set next lookup */
1797 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1798
1799 /* Set result info bits */
1800 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1801 MVPP2_PRS_RI_L2_CAST_MASK);
1802
1803 /* Shift to ethertype */
1804 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1805 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1806
1807 /* Mask all ports */
1808 mvpp2_prs_tcam_port_map_set(&pe, 0);
1809
1810 /* Update shadow table */
1811 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1812 }
1813
1814 /* Update port mask */
1815 mvpp2_prs_tcam_port_set(&pe, port, add);
1816
1817 mvpp2_prs_hw_write(priv, &pe);
1818}
1819
1820/* Accept multicast */
1821static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1822 bool add)
1823{
1824 struct mvpp2_prs_entry pe;
1825 unsigned char da_mc;
1826
1827 /* Ethernet multicast address first byte is
1828 * 0x01 for IPv4 and 0x33 for IPv6
1829 */
1830 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1831
1832 if (priv->prs_shadow[index].valid) {
                                           1833		/* Entry exists - update port only */
1834 pe.index = index;
1835 mvpp2_prs_hw_read(priv, &pe);
1836 } else {
1837 /* Entry doesn't exist - create new */
1838 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1839 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1840 pe.index = index;
1841
1842 /* Continue - set next lookup */
1843 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1844
1845 /* Set result info bits */
1846 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1847 MVPP2_PRS_RI_L2_CAST_MASK);
1848
1849 /* Update tcam entry data first byte */
1850 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1851
1852 /* Shift to ethertype */
1853 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1854 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1855
1856 /* Mask all ports */
1857 mvpp2_prs_tcam_port_map_set(&pe, 0);
1858
1859 /* Update shadow table */
1860 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1861 }
1862
1863 /* Update port mask */
1864 mvpp2_prs_tcam_port_set(&pe, port, add);
1865
1866 mvpp2_prs_hw_write(priv, &pe);
1867}
1868
1869/* Parser per-port initialization */
1870static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1871 int lu_max, int offset)
1872{
1873 u32 val;
1874
1875 /* Set lookup ID */
1876 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1877 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1878 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1879 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1880
1881 /* Set maximum number of loops for packet received from port */
1882 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1883 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1884 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1885 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1886
1887 /* Set initial offset for packet header extraction for the first
1888 * searching loop
1889 */
1890 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1891 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1892 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1893 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1894}
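
/*
 * Usage sketch (as in mvpp2_prs_default_init() below): every port starts
 * its first lookup at the Marvell Header with a zero header offset:
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 */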
1895
1896/* Default flow entries initialization for all ports */
1897static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1898{
1899 struct mvpp2_prs_entry pe;
1900 int port;
1901
1902 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1903 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1904 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1905 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1906
1907 /* Mask all ports */
1908 mvpp2_prs_tcam_port_map_set(&pe, 0);
1909
                                           1910		/* Set flow ID */
1911 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1912 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1913
1914 /* Update shadow table and hw entry */
1915 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1916 mvpp2_prs_hw_write(priv, &pe);
1917 }
1918}
1919
1920/* Set default entry for Marvell Header field */
1921static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1922{
1923 struct mvpp2_prs_entry pe;
1924
1925 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1926
1927 pe.index = MVPP2_PE_MH_DEFAULT;
1928 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1929 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1930 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1931 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1932
1933 /* Unmask all ports */
1934 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1935
1936 /* Update shadow table and hw entry */
1937 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1938 mvpp2_prs_hw_write(priv, &pe);
1939}
1940
                                           1941/* Set default entries (placeholder) for promiscuous, non-promiscuous and
1942 * multicast MAC addresses
1943 */
1944static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1945{
1946 struct mvpp2_prs_entry pe;
1947
1948 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1949
1950 /* Non-promiscuous mode for all ports - DROP unknown packets */
1951 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1952 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1953
1954 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1955 MVPP2_PRS_RI_DROP_MASK);
1956 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1957 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1958
1959 /* Unmask all ports */
1960 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1961
1962 /* Update shadow table and hw entry */
1963 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1964 mvpp2_prs_hw_write(priv, &pe);
1965
                                           1966	/* placeholders only - no ports */
1967 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1968 mvpp2_prs_mac_promisc_set(priv, 0, false);
1969 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1970 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1971}
1972
1973/* Match basic ethertypes */
1974static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1975{
1976 struct mvpp2_prs_entry pe;
1977 int tid;
1978
1979 /* Ethertype: PPPoE */
1980 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1981 MVPP2_PE_LAST_FREE_TID);
1982 if (tid < 0)
1983 return tid;
1984
1985 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1986 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1987 pe.index = tid;
1988
1989 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1990
1991 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1992 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1993 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1994 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1995 MVPP2_PRS_RI_PPPOE_MASK);
1996
1997 /* Update shadow table and hw entry */
1998 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1999 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2000 priv->prs_shadow[pe.index].finish = false;
2001 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2002 MVPP2_PRS_RI_PPPOE_MASK);
2003 mvpp2_prs_hw_write(priv, &pe);
2004
2005 /* Ethertype: ARP */
2006 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2007 MVPP2_PE_LAST_FREE_TID);
2008 if (tid < 0)
2009 return tid;
2010
2011 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2012 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2013 pe.index = tid;
2014
2015 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
2016
                                           2017	/* Generate flow in the next iteration */
2018 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2019 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2020 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2021 MVPP2_PRS_RI_L3_PROTO_MASK);
2022 /* Set L3 offset */
2023 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2024 MVPP2_ETH_TYPE_LEN,
2025 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2026
2027 /* Update shadow table and hw entry */
2028 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2029 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2030 priv->prs_shadow[pe.index].finish = true;
2031 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2032 MVPP2_PRS_RI_L3_PROTO_MASK);
2033 mvpp2_prs_hw_write(priv, &pe);
2034
2035 /* Ethertype: LBTD */
2036 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2037 MVPP2_PE_LAST_FREE_TID);
2038 if (tid < 0)
2039 return tid;
2040
2041 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2042 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2043 pe.index = tid;
2044
2045 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2046
                                           2047	/* Generate flow in the next iteration */
2048 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2049 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2050 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2051 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2052 MVPP2_PRS_RI_CPU_CODE_MASK |
2053 MVPP2_PRS_RI_UDF3_MASK);
2054 /* Set L3 offset */
2055 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2056 MVPP2_ETH_TYPE_LEN,
2057 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2058
2059 /* Update shadow table and hw entry */
2060 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2061 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2062 priv->prs_shadow[pe.index].finish = true;
2063 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2064 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2065 MVPP2_PRS_RI_CPU_CODE_MASK |
2066 MVPP2_PRS_RI_UDF3_MASK);
2067 mvpp2_prs_hw_write(priv, &pe);
2068
2069 /* Ethertype: IPv4 without options */
2070 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2071 MVPP2_PE_LAST_FREE_TID);
2072 if (tid < 0)
2073 return tid;
2074
2075 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2076 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2077 pe.index = tid;
2078
2079 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
2080 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2081 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2082 MVPP2_PRS_IPV4_HEAD_MASK |
2083 MVPP2_PRS_IPV4_IHL_MASK);
2084
2085 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2086 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2087 MVPP2_PRS_RI_L3_PROTO_MASK);
2088 /* Skip eth_type + 4 bytes of IP header */
2089 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2090 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2091 /* Set L3 offset */
2092 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2093 MVPP2_ETH_TYPE_LEN,
2094 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2095
2096 /* Update shadow table and hw entry */
2097 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2098 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2099 priv->prs_shadow[pe.index].finish = false;
2100 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2101 MVPP2_PRS_RI_L3_PROTO_MASK);
2102 mvpp2_prs_hw_write(priv, &pe);
2103
2104 /* Ethertype: IPv4 with options */
2105 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2106 MVPP2_PE_LAST_FREE_TID);
2107 if (tid < 0)
2108 return tid;
2109
2110 pe.index = tid;
2111
2112 /* Clear tcam data before updating */
2113 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2114 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2115
2116 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2117 MVPP2_PRS_IPV4_HEAD,
2118 MVPP2_PRS_IPV4_HEAD_MASK);
2119
2120 /* Clear ri before updating */
2121 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2122 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2123 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2124 MVPP2_PRS_RI_L3_PROTO_MASK);
2125
2126 /* Update shadow table and hw entry */
2127 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2128 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2129 priv->prs_shadow[pe.index].finish = false;
2130 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2131 MVPP2_PRS_RI_L3_PROTO_MASK);
2132 mvpp2_prs_hw_write(priv, &pe);
2133
2134 /* Ethertype: IPv6 without options */
2135 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2136 MVPP2_PE_LAST_FREE_TID);
2137 if (tid < 0)
2138 return tid;
2139
2140 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2141 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2142 pe.index = tid;
2143
2144 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
2145
2146 /* Skip DIP of IPV6 header */
2147 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2148 MVPP2_MAX_L3_ADDR_SIZE,
2149 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2150 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2151 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2152 MVPP2_PRS_RI_L3_PROTO_MASK);
2153 /* Set L3 offset */
2154 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2155 MVPP2_ETH_TYPE_LEN,
2156 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2157
2158 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2159 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2160 priv->prs_shadow[pe.index].finish = false;
2161 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2162 MVPP2_PRS_RI_L3_PROTO_MASK);
2163 mvpp2_prs_hw_write(priv, &pe);
2164
2165 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2166 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2167 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2168 pe.index = MVPP2_PE_ETH_TYPE_UN;
2169
2170 /* Unmask all ports */
2171 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2172
                                           2173	/* Generate flow in the next iteration */
2174 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2175 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2176 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2177 MVPP2_PRS_RI_L3_PROTO_MASK);
                                           2178	/* Set L3 offset even if it's an unknown L3 */
2179 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2180 MVPP2_ETH_TYPE_LEN,
2181 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2182
2183 /* Update shadow table and hw entry */
2184 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2185 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2186 priv->prs_shadow[pe.index].finish = true;
2187 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2188 MVPP2_PRS_RI_L3_PROTO_MASK);
2189 mvpp2_prs_hw_write(priv, &pe);
2190
2191 return 0;
2192}
2193
2194/* Parser default initialization */
2195static int mvpp2_prs_default_init(struct udevice *dev,
2196 struct mvpp2 *priv)
2197{
2198 int err, index, i;
2199
2200 /* Enable tcam table */
2201 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2202
2203 /* Clear all tcam and sram entries */
2204 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2205 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2206 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2207 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2208
2209 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2210 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2211 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2212 }
2213
2214 /* Invalidate all tcam entries */
2215 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2216 mvpp2_prs_hw_inv(priv, index);
2217
2218 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2219 sizeof(struct mvpp2_prs_shadow),
2220 GFP_KERNEL);
2221 if (!priv->prs_shadow)
2222 return -ENOMEM;
2223
2224 /* Always start from lookup = 0 */
2225 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2226 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2227 MVPP2_PRS_PORT_LU_MAX, 0);
2228
2229 mvpp2_prs_def_flow_init(priv);
2230
2231 mvpp2_prs_mh_init(priv);
2232
2233 mvpp2_prs_mac_init(priv);
2234
2235 err = mvpp2_prs_etype_init(priv);
2236 if (err)
2237 return err;
2238
2239 return 0;
2240}
2241
2242/* Compare MAC DA with tcam entry data */
2243static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2244 const u8 *da, unsigned char *mask)
2245{
2246 unsigned char tcam_byte, tcam_mask;
2247 int index;
2248
2249 for (index = 0; index < ETH_ALEN; index++) {
2250 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2251 if (tcam_mask != mask[index])
2252 return false;
2253
2254 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2255 return false;
2256 }
2257
2258 return true;
2259}
2260
2261/* Find tcam entry with matched pair <MAC DA, port> */
2262static struct mvpp2_prs_entry *
2263mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2264 unsigned char *mask, int udf_type)
2265{
2266 struct mvpp2_prs_entry *pe;
2267 int tid;
2268
2269 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2270 if (!pe)
2271 return NULL;
2272 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2273
                                           2274	/* Go through all entries with MVPP2_PRS_LU_MAC */
2275 for (tid = MVPP2_PE_FIRST_FREE_TID;
2276 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2277 unsigned int entry_pmap;
2278
2279 if (!priv->prs_shadow[tid].valid ||
2280 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2281 (priv->prs_shadow[tid].udf != udf_type))
2282 continue;
2283
2284 pe->index = tid;
2285 mvpp2_prs_hw_read(priv, pe);
2286 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2287
2288 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2289 entry_pmap == pmap)
2290 return pe;
2291 }
2292 kfree(pe);
2293
2294 return NULL;
2295}
2296
2297/* Update parser's mac da entry */
2298static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2299 const u8 *da, bool add)
2300{
2301 struct mvpp2_prs_entry *pe;
2302 unsigned int pmap, len, ri;
2303 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2304 int tid;
2305
                                           2306	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
2307 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2308 MVPP2_PRS_UDF_MAC_DEF);
2309
2310 /* No such entry */
2311 if (!pe) {
2312 if (!add)
2313 return 0;
2314
2315 /* Create new TCAM entry */
                                           2316		/* Find the first range mac entry */
2317 for (tid = MVPP2_PE_FIRST_FREE_TID;
2318 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2319 if (priv->prs_shadow[tid].valid &&
2320 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2321 (priv->prs_shadow[tid].udf ==
2322 MVPP2_PRS_UDF_MAC_RANGE))
2323 break;
2324
                                           2325		/* Go through all entries from first to last */
2326 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2327 tid - 1);
2328 if (tid < 0)
2329 return tid;
2330
2331 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2332 if (!pe)
                                           2333			return -ENOMEM;
2334 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2335 pe->index = tid;
2336
2337 /* Mask all ports */
2338 mvpp2_prs_tcam_port_map_set(pe, 0);
2339 }
2340
2341 /* Update port mask */
2342 mvpp2_prs_tcam_port_set(pe, port, add);
2343
2344 /* Invalidate the entry if no ports are left enabled */
2345 pmap = mvpp2_prs_tcam_port_map_get(pe);
2346 if (pmap == 0) {
2347 if (add) {
2348 kfree(pe);
                                           2349			return -EINVAL;
2350 }
2351 mvpp2_prs_hw_inv(priv, pe->index);
2352 priv->prs_shadow[pe->index].valid = false;
2353 kfree(pe);
2354 return 0;
2355 }
2356
2357 /* Continue - set next lookup */
2358 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2359
2360 /* Set match on DA */
2361 len = ETH_ALEN;
2362 while (len--)
2363 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2364
2365 /* Set result info bits */
2366 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2367
2368 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2369 MVPP2_PRS_RI_MAC_ME_MASK);
2370 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2371 MVPP2_PRS_RI_MAC_ME_MASK);
2372
2373 /* Shift to ethertype */
2374 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2375 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2376
2377 /* Update shadow table and hw entry */
2378 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2379 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2380 mvpp2_prs_hw_write(priv, pe);
2381
2382 kfree(pe);
2383
2384 return 0;
2385}
2386
2387static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2388{
2389 int err;
2390
2391 /* Remove old parser entry */
2392 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2393 false);
2394 if (err)
2395 return err;
2396
2397 /* Add new parser entry */
2398 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2399 if (err)
2400 return err;
2401
2402 /* Set addr in the device */
2403 memcpy(port->dev_addr, da, ETH_ALEN);
2404
2405 return 0;
2406}
2407
2408/* Set prs flow for the port */
2409static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2410{
2411 struct mvpp2_prs_entry *pe;
2412 int tid;
2413
2414 pe = mvpp2_prs_flow_find(port->priv, port->id);
2415
                                           2416	/* No such entry exists */
2417 if (!pe) {
                                           2418		/* Go through all entries from last to first */
2419 tid = mvpp2_prs_tcam_first_free(port->priv,
2420 MVPP2_PE_LAST_FREE_TID,
2421 MVPP2_PE_FIRST_FREE_TID);
2422 if (tid < 0)
2423 return tid;
2424
2425 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2426 if (!pe)
2427 return -ENOMEM;
2428
2429 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2430 pe->index = tid;
2431
                                           2432		/* Set flow ID */
2433 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2434 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2435
2436 /* Update shadow table */
2437 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2438 }
2439
2440 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2441 mvpp2_prs_hw_write(port->priv, pe);
2442 kfree(pe);
2443
2444 return 0;
2445}
2446
2447/* Classifier configuration routines */
2448
2449/* Update classification flow table registers */
2450static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2451 struct mvpp2_cls_flow_entry *fe)
2452{
2453 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2454 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2455 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2456 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2457}
2458
2459/* Update classification lookup table register */
2460static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2461 struct mvpp2_cls_lookup_entry *le)
2462{
2463 u32 val;
2464
2465 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2466 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2467 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2468}
2469
2470/* Classifier default initialization */
2471static void mvpp2_cls_init(struct mvpp2 *priv)
2472{
2473 struct mvpp2_cls_lookup_entry le;
2474 struct mvpp2_cls_flow_entry fe;
2475 int index;
2476
2477 /* Enable classifier */
2478 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2479
2480 /* Clear classifier flow table */
                                           2481	memset(&fe.data, 0, sizeof(fe.data));
2482 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2483 fe.index = index;
2484 mvpp2_cls_flow_write(priv, &fe);
2485 }
2486
2487 /* Clear classifier lookup table */
2488 le.data = 0;
2489 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2490 le.lkpid = index;
2491 le.way = 0;
2492 mvpp2_cls_lookup_write(priv, &le);
2493
2494 le.way = 1;
2495 mvpp2_cls_lookup_write(priv, &le);
2496 }
2497}
2498
2499static void mvpp2_cls_port_config(struct mvpp2_port *port)
2500{
2501 struct mvpp2_cls_lookup_entry le;
2502 u32 val;
2503
2504 /* Set way for the port */
2505 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2506 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2507 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2508
2509 /* Pick the entry to be accessed in lookup ID decoding table
2510 * according to the way and lkpid.
2511 */
2512 le.lkpid = port->id;
2513 le.way = 0;
2514 le.data = 0;
2515
2516 /* Set initial CPU queue for receiving packets */
2517 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2518 le.data |= port->first_rxq;
2519
2520 /* Disable classification engines */
2521 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2522
2523 /* Update lookup ID table entry */
2524 mvpp2_cls_lookup_write(port->priv, &le);
2525}
2526
2527/* Set CPU queue number for oversize packets */
2528static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2529{
2530 u32 val;
2531
2532 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2533 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2534
2535 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2536 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2537
2538 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2539 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2540 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2541}
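
/*
 * Illustrative split (assumes MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS == 3):
 * for first_rxq == 12 (0b1100) the LOW register gets 0b100 and
 * MVPP2_CLS_SWFWD_P2HQ_REG gets 0b1.
 */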
2542
2543/* Buffer Manager configuration routines */
2544
2545/* Create pool */
2546static int mvpp2_bm_pool_create(struct udevice *dev,
2547 struct mvpp2 *priv,
2548 struct mvpp2_bm_pool *bm_pool, int size)
2549{
2550 u32 val;
2551
Thomas Petazzoni3520a332017-02-20 11:29:16 +01002552 /* Number of buffer pointers must be a multiple of 16, as per
2553 * hardware constraints
2554 */
2555 if (!IS_ALIGNED(size, 16))
2556 return -EINVAL;
2557
Stefan Roese96c19042016-02-10 07:22:10 +01002558 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002559 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
Stefan Roese96c19042016-02-10 07:22:10 +01002560 if (!bm_pool->virt_addr)
2561 return -ENOMEM;
2562
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002563 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2564 MVPP2_BM_POOL_PTR_ALIGN)) {
Sean Anderson77a88792020-09-15 10:44:56 -04002565 dev_err(dev, "BM pool %d is not %d bytes aligned\n",
Stefan Roese96c19042016-02-10 07:22:10 +01002566 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2567 return -ENOMEM;
2568 }
2569
2570 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzoni3520a332017-02-20 11:29:16 +01002571 lower_32_bits(bm_pool->dma_addr));
Stefan Chulski115f76f2017-08-09 10:37:50 +03002572 if (priv->hw_version == MVPP22)
2573 mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
2574 (upper_32_bits(bm_pool->dma_addr) &
2575 MVPP22_BM_POOL_BASE_HIGH_MASK));
Stefan Roese96c19042016-02-10 07:22:10 +01002576 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2577
2578 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2579 val |= MVPP2_BM_START_MASK;
2580 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2581
2582 bm_pool->type = MVPP2_BM_FREE;
2583 bm_pool->size = size;
2584 bm_pool->pkt_size = 0;
2585 bm_pool->buf_num = 0;
2586
2587 return 0;
2588}
2589
2590/* Set pool buffer size */
2591static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2592 struct mvpp2_bm_pool *bm_pool,
2593 int buf_size)
2594{
2595 u32 val;
2596
2597 bm_pool->buf_size = buf_size;
2598
2599 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2600 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2601}
2602
2603/* Free all buffers from the pool */
2604static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2605 struct mvpp2_bm_pool *bm_pool)
2606{
Stefan Roese380b3232017-03-23 17:01:59 +01002607 int i;
2608
2609 for (i = 0; i < bm_pool->buf_num; i++) {
                                           2610		/* Drain the pool: reading the alloc reg pops one buffer */
2611 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
2612 }
2613
Stefan Roese96c19042016-02-10 07:22:10 +01002614 bm_pool->buf_num = 0;
2615}
2616
2617/* Cleanup pool */
2618static int mvpp2_bm_pool_destroy(struct udevice *dev,
2619 struct mvpp2 *priv,
2620 struct mvpp2_bm_pool *bm_pool)
2621{
2622 u32 val;
2623
2624 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2625 if (bm_pool->buf_num) {
2626 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2627 return 0;
2628 }
2629
2630 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2631 val |= MVPP2_BM_STOP_MASK;
2632 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2633
2634 return 0;
2635}
2636
2637static int mvpp2_bm_pools_init(struct udevice *dev,
2638 struct mvpp2 *priv)
2639{
2640 int i, err, size;
2641 struct mvpp2_bm_pool *bm_pool;
2642
2643 /* Create all pools with maximum size */
2644 size = MVPP2_BM_POOL_SIZE_MAX;
2645 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2646 bm_pool = &priv->bm_pools[i];
2647 bm_pool->id = i;
2648 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2649 if (err)
2650 goto err_unroll_pools;
Stefan Chulski36eb98a2017-08-09 10:37:52 +03002651 mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
Stefan Roese96c19042016-02-10 07:22:10 +01002652 }
2653 return 0;
2654
2655err_unroll_pools:
Sean Anderson77a88792020-09-15 10:44:56 -04002656 dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
Stefan Roese96c19042016-02-10 07:22:10 +01002657 for (i = i - 1; i >= 0; i--)
2658 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2659 return err;
2660}
2661
2662static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2663{
2664 int i, err;
2665
2666 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2667 /* Mask BM all interrupts */
2668 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2669 /* Clear BM cause register */
2670 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2671 }
2672
2673 /* Allocate and initialize BM pools */
2674 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2675 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2676 if (!priv->bm_pools)
2677 return -ENOMEM;
2678
2679 err = mvpp2_bm_pools_init(dev, priv);
2680 if (err < 0)
2681 return err;
2682 return 0;
2683}
2684
2685/* Attach long pool to rxq */
2686static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2687 int lrxq, int long_pool)
2688{
Thomas Petazzoni2321c922017-02-16 06:53:51 +01002689 u32 val, mask;
Stefan Roese96c19042016-02-10 07:22:10 +01002690 int prxq;
2691
2692 /* Get queue physical ID */
2693 prxq = port->rxqs[lrxq]->id;
2694
Thomas Petazzoni2321c922017-02-16 06:53:51 +01002695 if (port->priv->hw_version == MVPP21)
2696 mask = MVPP21_RXQ_POOL_LONG_MASK;
2697 else
2698 mask = MVPP22_RXQ_POOL_LONG_MASK;
Stefan Roese96c19042016-02-10 07:22:10 +01002699
Thomas Petazzoni2321c922017-02-16 06:53:51 +01002700 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2701 val &= ~mask;
2702 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Stefan Roese96c19042016-02-10 07:22:10 +01002703 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2704}
2705
2706/* Set pool number in a BM cookie */
2707static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2708{
2709 u32 bm;
2710
2711 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2712 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2713
2714 return bm;
2715}
2716
2717/* Get pool number from a BM cookie */
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002718static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
Stefan Roese96c19042016-02-10 07:22:10 +01002719{
2720 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2721}
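
/*
 * Worked example (illustrative; assumes MVPP2_BM_COOKIE_POOL_OFFS == 8):
 * mvpp2_bm_cookie_pool_set(0, 3) yields 0x300, and
 * mvpp2_bm_cookie_pool_get(0x300) recovers pool 3.
 */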
2722
2723/* Release buffer to BM */
2724static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002725 dma_addr_t buf_dma_addr,
Thomas Petazzoni09831762017-02-20 10:37:59 +01002726 unsigned long buf_phys_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01002727{
Thomas Petazzoni3520a332017-02-20 11:29:16 +01002728 if (port->priv->hw_version == MVPP22) {
2729 u32 val = 0;
2730
2731 if (sizeof(dma_addr_t) == 8)
2732 val |= upper_32_bits(buf_dma_addr) &
2733 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2734
2735 if (sizeof(phys_addr_t) == 8)
2736 val |= (upper_32_bits(buf_phys_addr)
2737 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2738 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2739
2740 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2741 }
2742
Thomas Petazzoni09831762017-02-20 10:37:59 +01002743 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2744 * returned in the "cookie" field of the RX
2745 * descriptor. Instead of storing the virtual address, we
2746 * store the physical address
2747 */
2748 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002749 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002750}
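
/*
 * Usage sketch (mirrors mvpp2_bm_bufs_add() below): in U-Boot the rx
 * buffers are identity-mapped, so the same address is passed as both
 * the DMA and the physical address:
 *
 *	mvpp2_bm_pool_put(port, bm_pool->id,
 *			  (dma_addr_t)buffer_loc.rx_buffer[i],
 *			  (unsigned long)buffer_loc.rx_buffer[i]);
 */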
2751
2752/* Refill BM pool */
2753static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01002754 dma_addr_t dma_addr,
Thomas Petazzoni09831762017-02-20 10:37:59 +01002755 phys_addr_t phys_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01002756{
2757 int pool = mvpp2_bm_cookie_pool_get(bm);
2758
Thomas Petazzoni09831762017-02-20 10:37:59 +01002759 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01002760}
2761
2762/* Allocate buffers for the pool */
2763static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2764 struct mvpp2_bm_pool *bm_pool, int buf_num)
2765{
2766 int i;
Stefan Roese96c19042016-02-10 07:22:10 +01002767
2768 if (buf_num < 0 ||
2769 (buf_num + bm_pool->buf_num > bm_pool->size)) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04002770 dev_err(port->phy_dev->dev,
2771 "cannot allocate %d buffers for pool %d\n", buf_num,
2772 bm_pool->id);
Stefan Roese96c19042016-02-10 07:22:10 +01002773 return 0;
2774 }
2775
Stefan Roese96c19042016-02-10 07:22:10 +01002776 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni1b085c52017-02-15 12:13:43 +01002777 mvpp2_bm_pool_put(port, bm_pool->id,
Thomas Petazzonia3c988f2017-02-15 12:31:53 +01002778 (dma_addr_t)buffer_loc.rx_buffer[i],
2779 (unsigned long)buffer_loc.rx_buffer[i]);
Thomas Petazzoni1b085c52017-02-15 12:13:43 +01002780
Stefan Roese96c19042016-02-10 07:22:10 +01002781 }
2782
2783 /* Update BM driver with number of buffers added to pool */
2784 bm_pool->buf_num += i;
Stefan Roese96c19042016-02-10 07:22:10 +01002785
2786 return i;
2787}
2788
                                           2789/* Notify the driver that the BM pool is being used as a specific type and
                                           2790 * return the pool pointer on success
2791 */
2792static struct mvpp2_bm_pool *
2793mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2794 int pkt_size)
2795{
2796 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2797 int num;
2798
2799 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04002800 dev_err(port->phy_dev->dev, "mixing pool types is forbidden\n");
Stefan Roese96c19042016-02-10 07:22:10 +01002801 return NULL;
2802 }
2803
2804 if (new_pool->type == MVPP2_BM_FREE)
2805 new_pool->type = type;
2806
2807 /* Allocate buffers in case BM pool is used as long pool, but packet
                                           2808 * size doesn't match MTU or BM pool hasn't been used yet
2809 */
2810 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2811 (new_pool->pkt_size == 0)) {
2812 int pkts_num;
2813
2814 /* Set default buffer number or free all the buffers in case
2815 * the pool is not empty
2816 */
2817 pkts_num = new_pool->buf_num;
2818 if (pkts_num == 0)
2819 pkts_num = type == MVPP2_BM_SWF_LONG ?
2820 MVPP2_BM_LONG_BUF_NUM :
2821 MVPP2_BM_SHORT_BUF_NUM;
2822 else
2823 mvpp2_bm_bufs_free(NULL,
2824 port->priv, new_pool);
2825
2826 new_pool->pkt_size = pkt_size;
2827
2828 /* Allocate buffers for this pool */
2829 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2830 if (num != pkts_num) {
Sean Anderson77a88792020-09-15 10:44:56 -04002831 dev_err(port->phy_dev->dev,
2832 "pool %d: %d of %d allocated\n", new_pool->id,
2833 num, pkts_num);
Stefan Roese96c19042016-02-10 07:22:10 +01002834 return NULL;
2835 }
2836 }
2837
Stefan Roese96c19042016-02-10 07:22:10 +01002838 return new_pool;
2839}
2840
2841/* Initialize pools for swf */
2842static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2843{
2844 int rxq;
2845
2846 if (!port->pool_long) {
2847 port->pool_long =
2848 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2849 MVPP2_BM_SWF_LONG,
2850 port->pkt_size);
2851 if (!port->pool_long)
2852 return -ENOMEM;
2853
2854 port->pool_long->port_map |= (1 << port->id);
2855
2856 for (rxq = 0; rxq < rxq_number; rxq++)
2857 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2858 }
2859
2860 return 0;
2861}
2862
2863/* Port configuration routines */
2864
2865static void mvpp2_port_mii_set(struct mvpp2_port *port)
2866{
2867 u32 val;
2868
2869 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2870
2871 switch (port->phy_interface) {
2872 case PHY_INTERFACE_MODE_SGMII:
2873 val |= MVPP2_GMAC_INBAND_AN_MASK;
2874 break;
Stefan Chulski237105f2021-05-03 08:08:46 +02002875 case PHY_INTERFACE_MODE_1000BASEX:
2876 case PHY_INTERFACE_MODE_2500BASEX:
2877 val &= ~MVPP2_GMAC_INBAND_AN_MASK;
2878 break;
Stefan Roese96c19042016-02-10 07:22:10 +01002879 case PHY_INTERFACE_MODE_RGMII:
Stefan Roesec20e4e42017-03-22 15:11:00 +01002880 case PHY_INTERFACE_MODE_RGMII_ID:
Stefan Roese96c19042016-02-10 07:22:10 +01002881 val |= MVPP2_GMAC_PORT_RGMII_MASK;
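		/* fall through - RGMII also needs the PCS enable bit cleared */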
2882 default:
2883 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2884 }
2885
2886 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2887}
2888
2889static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2890{
2891 u32 val;
2892
2893 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2894 val |= MVPP2_GMAC_FC_ADV_EN;
2895 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2896}
2897
2898static void mvpp2_port_enable(struct mvpp2_port *port)
2899{
2900 u32 val;
2901
2902 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2903 val |= MVPP2_GMAC_PORT_EN_MASK;
2904 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2905 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2906}
2907
2908static void mvpp2_port_disable(struct mvpp2_port *port)
2909{
2910 u32 val;
2911
2912 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2913 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2914 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2915}
2916
2917/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2918static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2919{
2920 u32 val;
2921
2922 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2923 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2924 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2925}
2926
2927/* Configure loopback port */
2928static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2929{
2930 u32 val;
2931
2932 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2933
2934 if (port->speed == 1000)
2935 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2936 else
2937 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2938
Stefan Chulski237105f2021-05-03 08:08:46 +02002939 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
2940 port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
2941 port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
Stefan Roese96c19042016-02-10 07:22:10 +01002942 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2943 else
2944 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2945
2946 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2947}
2948
2949static void mvpp2_port_reset(struct mvpp2_port *port)
2950{
2951 u32 val;
2952
2953 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2954 ~MVPP2_GMAC_PORT_RESET_MASK;
2955 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2956
2957 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2958 MVPP2_GMAC_PORT_RESET_MASK)
2959 continue;
2960}
2961
2962/* Change maximum receive size of the port */
2963static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2964{
2965 u32 val;
2966
2967 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2968 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2969 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2970 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2971 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2972}
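
/*
 * The max RX size field is in units of two bytes: e.g. with pkt_size
 * == 1518 and assuming MVPP2_MH_SIZE == 2, the field holds
 * (1518 - 2) / 2 = 758.
 */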
2973
Stefan Roese40e749b2017-03-22 15:07:30 +01002974/* PPv2.2 GoP/GMAC config */
2975
2976/* Set the MAC to reset or exit from reset */
2977static int gop_gmac_reset(struct mvpp2_port *port, int reset)
2978{
2979 u32 val;
2980
2981 /* read - modify - write */
2982 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2983 if (reset)
2984 val |= MVPP2_GMAC_PORT_RESET_MASK;
2985 else
2986 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
2987 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2988
2989 return 0;
2990}
2991
2992/*
2993 * gop_gpcs_mode_cfg
2994 *
                                           2995 * Configure the port to work with the Gig PCS or not.
2996 */
2997static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
2998{
2999 u32 val;
3000
3001 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3002 if (en)
3003 val |= MVPP2_GMAC_PCS_ENABLE_MASK;
3004 else
3005 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3006 /* enable / disable PCS on this port */
3007 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3008
3009 return 0;
3010}
3011
3012static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
3013{
3014 u32 val;
3015
3016 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3017 if (en)
3018 val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3019 else
3020 val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
                                           3021	/* enable / disable the 125MHz clock bypass on this port */
3022 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3023
3024 return 0;
3025}
3026
Stefan Roese40e749b2017-03-22 15:07:30 +01003027static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
3028{
3029 u32 val, thresh;
3030
3031 /*
3032 * Configure minimal level of the Tx FIFO before the lower part
3033 * starts to read a packet
3034 */
3035 thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3036 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3037 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3038 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3039 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3040
3041 /* Disable bypass of sync module */
3042 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3043 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3044 /* configure DP clock select according to mode */
3045 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3046 /* configure QSGMII bypass according to mode */
3047 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3048 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3049
Stefan Roese40e749b2017-03-22 15:07:30 +01003050 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3051 /* configure GIG MAC to SGMII mode */
3052 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3053 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3054
3055 /* configure AN */
3056 val = MVPP2_GMAC_EN_PCS_AN |
3057 MVPP2_GMAC_AN_BYPASS_EN |
3058 MVPP2_GMAC_AN_SPEED_EN |
3059 MVPP2_GMAC_EN_FC_AN |
3060 MVPP2_GMAC_AN_DUPLEX_EN |
3061 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3062 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3063}
3064
Stefan Chulski237105f2021-05-03 08:08:46 +02003065static void gop_gmac_2500basex_cfg(struct mvpp2_port *port)
3066{
3067 u32 val, thresh;
3068
3069 /*
3070 * Configure minimal level of the Tx FIFO before the lower part
3071 * starts to read a packet
3072 */
3073 thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
3074 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3075 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3076 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3077 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3078
3079 /* Disable bypass of sync module */
3080 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3081 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3082 /* configure DP clock select according to mode */
3083 val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3084 /* configure QSGMII bypass according to mode */
3085 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3086 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3087
3088 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3089 /*
3090 * Configure GIG MAC to 2500Base-X mode connected to a fiber
3091 * transceiver
3092 */
3093 val |= MVPP2_GMAC_PORT_TYPE_MASK;
3094 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3095
                                           3096	/* In 2500BaseX mode we can't negotiate speed, so PCS
                                           3097	 * AN runs with bypass enabled: the link is forced up
                                           3098	 * even when the partner does not reply to in-band
                                           3099	 * autonegotiation.
                                           3100	 */
Ben Peled03da3622021-05-03 08:08:49 +02003101 val = MVPP2_GMAC_AN_BYPASS_EN |
3102 MVPP2_GMAC_EN_PCS_AN |
Stefan Chulski237105f2021-05-03 08:08:46 +02003103 MVPP2_GMAC_CONFIG_GMII_SPEED |
3104 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3105 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3106 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3107}
3108
3109static void gop_gmac_1000basex_cfg(struct mvpp2_port *port)
3110{
3111 u32 val, thresh;
3112
3113 /*
3114 * Configure minimal level of the Tx FIFO before the lower part
3115 * starts to read a packet
3116 */
3117 thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3118 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3119 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3120 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3121 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3122
3123 /* Disable bypass of sync module */
3124 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3125 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3126 /* configure DP clock select according to mode */
3127 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3128 /* configure QSGMII bypass according to mode */
3129 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3130 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3131
3132 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3133 /* configure GIG MAC to 1000BASEX mode */
3134 val |= MVPP2_GMAC_PORT_TYPE_MASK;
3135 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3136
                                           3137	/* In 1000BaseX mode, we can't negotiate speed (it's
                                           3138	 * fixed at 1000), so PCS AN runs with bypass enabled:
                                           3139	 * the link is forced up even when the partner does
                                           3140	 * not reply to in-band autonegotiation.
                                           3141	 */
Ben Peled03da3622021-05-03 08:08:49 +02003142 val = MVPP2_GMAC_AN_BYPASS_EN |
3143 MVPP2_GMAC_EN_PCS_AN |
Stefan Chulski237105f2021-05-03 08:08:46 +02003144 MVPP2_GMAC_CONFIG_GMII_SPEED |
3145 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3146 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3147 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3148}
3149
Stefan Roese40e749b2017-03-22 15:07:30 +01003150static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
3151{
3152 u32 val, thresh;
3153
3154 /*
3155 * Configure minimal level of the Tx FIFO before the lower part
3156 * starts to read a packet
3157 */
3158 thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
3159 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3160 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3161 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3162 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3163
3164 /* Disable bypass of sync module */
3165 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3166 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3167 /* configure DP clock select according to mode */
3168 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3169 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3170 val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
3171 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3172
Stefan Roese40e749b2017-03-22 15:07:30 +01003173 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
 3174	/* configure GIG MAC to RGMII (non-1000BaseX) port type */
3175 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3176 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3177
 3178	/* configure AN: bypass, speed and duplex autoneg, FC autoneg (0xb8e8) */
3179 val = MVPP2_GMAC_AN_BYPASS_EN |
3180 MVPP2_GMAC_AN_SPEED_EN |
3181 MVPP2_GMAC_EN_FC_AN |
3182 MVPP2_GMAC_AN_DUPLEX_EN |
3183 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3184 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3185}
3186
3187/* Set the internal mux's to the required MAC in the GOP */
3188static int gop_gmac_mode_cfg(struct mvpp2_port *port)
3189{
3190 u32 val;
3191
3192 /* Set TX FIFO thresholds */
3193 switch (port->phy_interface) {
3194 case PHY_INTERFACE_MODE_SGMII:
Marcin Wojtas0db0f352021-05-03 08:08:53 +02003195 gop_gmac_sgmii_cfg(port);
Stefan Roese40e749b2017-03-22 15:07:30 +01003196 break;
Stefan Chulski237105f2021-05-03 08:08:46 +02003197 case PHY_INTERFACE_MODE_1000BASEX:
3198 gop_gmac_1000basex_cfg(port);
Ben Peled95c292c2021-05-03 08:08:51 +02003199 break;
Stefan Chulski237105f2021-05-03 08:08:46 +02003200
3201 case PHY_INTERFACE_MODE_2500BASEX:
3202 gop_gmac_2500basex_cfg(port);
Ben Peled95c292c2021-05-03 08:08:51 +02003203 break;
Stefan Chulski237105f2021-05-03 08:08:46 +02003204
Stefan Roese40e749b2017-03-22 15:07:30 +01003205 case PHY_INTERFACE_MODE_RGMII:
3206 case PHY_INTERFACE_MODE_RGMII_ID:
3207 gop_gmac_rgmii_cfg(port);
3208 break;
3209
3210 default:
3211 return -1;
3212 }
3213
 3214	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
3215 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3216 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3217 val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
3218 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3219
3220 /* PeriodicXonEn disable */
3221 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3222 val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3223 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3224
3225 return 0;
3226}
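
/*
 * A minimal sketch of the read-modify-write pattern the GMAC config
 * functions above all repeat (hypothetical helper, not wired into the
 * driver; port->base, readl() and writel() as used throughout):
 */
static inline void mvpp2_gmac_rmw(struct mvpp2_port *port, u32 offset,
				  u32 clear, u32 set)
{
	u32 val = readl(port->base + offset);

	val &= ~clear;
	val |= set;
	writel(val, port->base + offset);
}

/*
 * Usage would then be e.g.:
 * mvpp2_gmac_rmw(port, MVPP2_GMAC_CTRL_4_REG,
 *		  MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK,
 *		  MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK);
 */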
3227
3228static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
3229{
3230 u32 val;
3231
3232 /* relevant only for MAC0 (XLG0 and GMAC0) */
3233 if (port->gop_id > 0)
3234 return;
3235
3236 /* configure 1Gig MAC mode */
3237 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3238 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3239 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3240 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3241}
3242
3243static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
3244{
3245 u32 val;
3246
3247 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3248 if (reset)
3249 val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
3250 else
3251 val |= MVPP2_GMAC_SGMII_MODE_MASK;
3252 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3253
3254 return 0;
3255}
3256
Stefan Roese01647642017-03-22 15:09:38 +01003257static int gop_mpcs_mode(struct mvpp2_port *port)
3258{
3259 u32 val;
3260
3261 /* configure PCS40G COMMON CONTROL */
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003262 val = readl(port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3263 PCS40G_COMMON_CONTROL);
Stefan Roese01647642017-03-22 15:09:38 +01003264 val &= ~FORWARD_ERROR_CORRECTION_MASK;
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003265 writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3266 PCS40G_COMMON_CONTROL);
Stefan Roese01647642017-03-22 15:09:38 +01003267
3268 /* configure PCS CLOCK RESET */
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003269 val = readl(port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3270 PCS_CLOCK_RESET);
Stefan Roese01647642017-03-22 15:09:38 +01003271 val &= ~CLK_DIVISION_RATIO_MASK;
3272 val |= 1 << CLK_DIVISION_RATIO_OFFS;
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003273 writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3274 PCS_CLOCK_RESET);
Stefan Roese01647642017-03-22 15:09:38 +01003275
3276 val &= ~CLK_DIV_PHASE_SET_MASK;
3277 val |= MAC_CLK_RESET_MASK;
3278 val |= RX_SD_CLK_RESET_MASK;
3279 val |= TX_SD_CLK_RESET_MASK;
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003280 writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3281 PCS_CLOCK_RESET);
Stefan Roese01647642017-03-22 15:09:38 +01003282
3283 return 0;
3284}
3285
3286/* Set the internal mux's to the required MAC in the GOP */
3287static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
3288{
3289 u32 val;
3290
3291 /* configure 10G MAC mode */
3292 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3293 val |= MVPP22_XLG_RX_FC_EN;
3294 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3295
3296 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3297 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3298 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
3299 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3300
3301 /* read - modify - write */
3302 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
3303 val &= ~MVPP22_XLG_MODE_DMA_1G;
3304 val |= MVPP22_XLG_FORWARD_PFC_EN;
3305 val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
3306 val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
3307 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
3308
3309 /* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
3310 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
3311 val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
3312 val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
3313 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
3314
3315 /* unmask link change interrupt */
3316 val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
3317 val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
3318 val |= 1; /* unmask summary bit */
3319 writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
3320
3321 return 0;
3322}
3323
Stefan Roese01647642017-03-22 15:09:38 +01003324/* Set the MAC to reset or exit from reset */
3325static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
3326{
3327 u32 val;
3328
3329 /* read - modify - write */
3330 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3331 if (reset)
3332 val &= ~MVPP22_XLG_MAC_RESETN;
3333 else
3334 val |= MVPP22_XLG_MAC_RESETN;
3335 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3336
3337 return 0;
3338}
3339
Stefan Roese40e749b2017-03-22 15:07:30 +01003340/*
3341 * gop_port_init
3342 *
 3343 * Init physical port. Configures the port mode and all its elements
3344 * accordingly.
3345 * Does not verify that the selected mode/port number is valid at the
3346 * core level.
3347 */
3348static int gop_port_init(struct mvpp2_port *port)
3349{
3350 int mac_num = port->gop_id;
Stefan Roese01647642017-03-22 15:09:38 +01003351 int num_of_act_lanes;
Stefan Roese40e749b2017-03-22 15:07:30 +01003352
3353 if (mac_num >= MVPP22_GOP_MAC_NUM) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04003354		log_err("illegal port number %d\n", mac_num);
Stefan Roese40e749b2017-03-22 15:07:30 +01003355 return -1;
3356 }
3357
3358 switch (port->phy_interface) {
3359 case PHY_INTERFACE_MODE_RGMII:
3360 case PHY_INTERFACE_MODE_RGMII_ID:
3361 gop_gmac_reset(port, 1);
3362
3363 /* configure PCS */
3364 gop_gpcs_mode_cfg(port, 0);
3365 gop_bypass_clk_cfg(port, 1);
3366
3367 /* configure MAC */
3368 gop_gmac_mode_cfg(port);
3369 /* pcs unreset */
3370 gop_gpcs_reset(port, 0);
3371
3372 /* mac unreset */
3373 gop_gmac_reset(port, 0);
3374 break;
3375
3376 case PHY_INTERFACE_MODE_SGMII:
Stefan Chulski237105f2021-05-03 08:08:46 +02003377 case PHY_INTERFACE_MODE_1000BASEX:
3378 case PHY_INTERFACE_MODE_2500BASEX:
Stefan Roese40e749b2017-03-22 15:07:30 +01003379 /* configure PCS */
3380 gop_gpcs_mode_cfg(port, 1);
3381
3382 /* configure MAC */
3383 gop_gmac_mode_cfg(port);
3384 /* select proper Mac mode */
3385 gop_xlg_2_gig_mac_cfg(port);
3386
3387 /* pcs unreset */
3388 gop_gpcs_reset(port, 0);
3389 /* mac unreset */
3390 gop_gmac_reset(port, 0);
3391 break;
3392
Marek Vasut24b8e482023-03-21 18:25:53 +01003393 case PHY_INTERFACE_MODE_10GBASER:
3394 case PHY_INTERFACE_MODE_5GBASER:
3395 case PHY_INTERFACE_MODE_XAUI:
Stefan Roese01647642017-03-22 15:09:38 +01003396 num_of_act_lanes = 2;
3397 mac_num = 0;
3398 /* configure PCS */
Stefan Roese01647642017-03-22 15:09:38 +01003399 gop_mpcs_mode(port);
3400 /* configure MAC */
3401 gop_xlg_mac_mode_cfg(port, num_of_act_lanes);
3402
Stefan Roese01647642017-03-22 15:09:38 +01003403 /* mac unreset */
3404 gop_xlg_mac_reset(port, 0);
3405 break;
3406
Stefan Roese40e749b2017-03-22 15:07:30 +01003407 default:
Sean Andersonc7cbf092020-09-15 10:44:57 -04003408 log_err("Requested port mode (%d) not supported\n",
3409 port->phy_interface);
Stefan Roese40e749b2017-03-22 15:07:30 +01003410 return -1;
3411 }
3412
3413 return 0;
3414}
3415
Stefan Roese01647642017-03-22 15:09:38 +01003416static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
3417{
3418 u32 val;
3419
3420 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3421 if (enable) {
3422 /* Enable port and MIB counters update */
3423 val |= MVPP22_XLG_PORT_EN;
3424 val &= ~MVPP22_XLG_MIBCNT_DIS;
3425 } else {
3426 /* Disable port */
3427 val &= ~MVPP22_XLG_PORT_EN;
3428 }
3429 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3430}
3431
Stefan Roese40e749b2017-03-22 15:07:30 +01003432static void gop_port_enable(struct mvpp2_port *port, int enable)
3433{
3434 switch (port->phy_interface) {
3435 case PHY_INTERFACE_MODE_RGMII:
3436 case PHY_INTERFACE_MODE_RGMII_ID:
3437 case PHY_INTERFACE_MODE_SGMII:
Stefan Chulski237105f2021-05-03 08:08:46 +02003438 case PHY_INTERFACE_MODE_1000BASEX:
3439 case PHY_INTERFACE_MODE_2500BASEX:
Stefan Roese40e749b2017-03-22 15:07:30 +01003440 if (enable)
3441 mvpp2_port_enable(port);
3442 else
3443 mvpp2_port_disable(port);
3444 break;
3445
Marek Vasut24b8e482023-03-21 18:25:53 +01003446 case PHY_INTERFACE_MODE_10GBASER:
3447 case PHY_INTERFACE_MODE_5GBASER:
3448 case PHY_INTERFACE_MODE_XAUI:
Stefan Roese01647642017-03-22 15:09:38 +01003449 gop_xlg_mac_port_enable(port, enable);
3450
3451 break;
Stefan Roese40e749b2017-03-22 15:07:30 +01003452 default:
Sean Andersonc7cbf092020-09-15 10:44:57 -04003453 log_err("%s: Wrong port mode (%d)\n", __func__,
3454 port->phy_interface);
Stefan Roese40e749b2017-03-22 15:07:30 +01003455 return;
3456 }
3457}
3458
3459/* RFU1 functions */
3460static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
3461{
3462 return readl(priv->rfu1_base + offset);
3463}
3464
3465static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
3466{
3467 writel(data, priv->rfu1_base + offset);
3468}
3469
3470static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
3471{
3472 u32 val = 0;
3473
3474 if (gop_id == 2) {
Stefan Chulski237105f2021-05-03 08:08:46 +02003475 if (phy_type == PHY_INTERFACE_MODE_SGMII ||
3476 phy_type == PHY_INTERFACE_MODE_1000BASEX ||
3477 phy_type == PHY_INTERFACE_MODE_2500BASEX)
Stefan Roese40e749b2017-03-22 15:07:30 +01003478 val |= MV_NETC_GE_MAC2_SGMII;
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003479 else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
3480 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
3481 val |= MV_NETC_GE_MAC2_RGMII;
Stefan Roese40e749b2017-03-22 15:07:30 +01003482 }
3483
3484 if (gop_id == 3) {
Stefan Chulski237105f2021-05-03 08:08:46 +02003485 if (phy_type == PHY_INTERFACE_MODE_SGMII ||
3486 phy_type == PHY_INTERFACE_MODE_1000BASEX ||
3487 phy_type == PHY_INTERFACE_MODE_2500BASEX)
Stefan Roese40e749b2017-03-22 15:07:30 +01003488 val |= MV_NETC_GE_MAC3_SGMII;
3489 else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
3490 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
3491 val |= MV_NETC_GE_MAC3_RGMII;
3492 }
3493
3494 return val;
3495}
3496
3497static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
3498{
3499 u32 reg;
3500
3501 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
3502 reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));
3503
3504 val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
3505 val &= NETC_PORTS_ACTIVE_MASK(gop_id);
3506
3507 reg |= val;
3508
3509 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
3510}
3511
3512static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
3513{
3514 u32 reg;
3515
3516 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
3517 reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;
3518
3519 val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
3520 val &= NETC_GBE_PORT1_MII_MODE_MASK;
3521
3522 reg |= val;
3523
3524 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
3525}
3526
3527static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
3528{
3529 u32 reg;
3530
3531 reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
3532 reg &= ~NETC_GOP_SOFT_RESET_MASK;
3533
3534 val <<= NETC_GOP_SOFT_RESET_OFFS;
3535 val &= NETC_GOP_SOFT_RESET_MASK;
3536
3537 reg |= val;
3538
3539 gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
3540}
3541
3542static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
3543{
3544 u32 reg;
3545
3546 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3547 reg &= ~NETC_CLK_DIV_PHASE_MASK;
3548
3549 val <<= NETC_CLK_DIV_PHASE_OFFS;
3550 val &= NETC_CLK_DIV_PHASE_MASK;
3551
3552 reg |= val;
3553
3554 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3555}
3556
3557static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
3558{
3559 u32 reg;
3560
3561 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
3562 reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));
3563
3564 val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
3565 val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);
3566
3567 reg |= val;
3568
3569 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
3570}
3571
3572static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
3573 u32 val)
3574{
3575 u32 reg, mask, offset;
3576
3577 if (gop_id == 2) {
3578 mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
3579 offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
3580 } else {
3581 mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
3582 offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
3583 }
3584 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
3585 reg &= ~mask;
3586
3587 val <<= offset;
3588 val &= mask;
3589
3590 reg |= val;
3591
3592 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
3593}
3594
3595static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
3596{
3597 u32 reg;
3598
3599 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3600 reg &= ~NETC_BUS_WIDTH_SELECT_MASK;
3601
3602 val <<= NETC_BUS_WIDTH_SELECT_OFFS;
3603 val &= NETC_BUS_WIDTH_SELECT_MASK;
3604
3605 reg |= val;
3606
3607 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3608}
3609
3610static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
3611{
3612 u32 reg;
3613
3614 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3615 reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;
3616
3617 val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
3618 val &= NETC_GIG_RX_DATA_SAMPLE_MASK;
3619
3620 reg |= val;
3621
3622 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3623}
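
/*
 * The RFU1 field updates above all follow the same
 * clear/shift/mask/set sequence. A generic form is sketched here for
 * illustration only (hypothetical helper, not used by the driver):
 */
static void gop_rfu1_field_set(struct mvpp2 *priv, u32 offset, u32 mask,
			       int shift, u32 val)
{
	u32 reg = gop_rfu1_read(priv, offset);

	reg &= ~mask;
	reg |= (val << shift) & mask;
	gop_rfu1_write(priv, offset, reg);
}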
3624
3625static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
3626 enum mv_netc_phase phase)
3627{
3628 switch (phase) {
3629 case MV_NETC_FIRST_PHASE:
3630 /* Set Bus Width to HB mode = 1 */
3631 gop_netc_bus_width_select(priv, 1);
3632 /* Select RGMII mode */
3633 gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
3634 break;
3635
3636 case MV_NETC_SECOND_PHASE:
3637 /* De-assert the relevant port HB reset */
3638 gop_netc_port_rf_reset(priv, gop_id, 1);
3639 break;
3640 }
3641}
3642
3643static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
3644 enum mv_netc_phase phase)
3645{
3646 switch (phase) {
3647 case MV_NETC_FIRST_PHASE:
3648 /* Set Bus Width to HB mode = 1 */
3649 gop_netc_bus_width_select(priv, 1);
3650 /* Select SGMII mode */
3651 if (gop_id >= 1) {
3652 gop_netc_gbe_sgmii_mode_select(priv, gop_id,
3653 MV_NETC_GBE_SGMII);
3654 }
3655
3656 /* Configure the sample stages */
3657 gop_netc_sample_stages_timing(priv, 0);
3658 /* Configure the ComPhy Selector */
3659 /* gop_netc_com_phy_selector_config(netComplex); */
3660 break;
3661
3662 case MV_NETC_SECOND_PHASE:
3663 /* De-assert the relevant port HB reset */
3664 gop_netc_port_rf_reset(priv, gop_id, 1);
3665 break;
3666 }
3667}
3668
3669static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
3670{
3671 u32 c = priv->netc_config;
3672
3673 if (c & MV_NETC_GE_MAC2_SGMII)
3674 gop_netc_mac_to_sgmii(priv, 2, phase);
Stefan Chulskib3f12b52021-05-03 08:08:45 +02003675 else if (c & MV_NETC_GE_MAC2_RGMII)
Stefan Roese40e749b2017-03-22 15:07:30 +01003676 gop_netc_mac_to_xgmii(priv, 2, phase);
3677
3678 if (c & MV_NETC_GE_MAC3_SGMII) {
3679 gop_netc_mac_to_sgmii(priv, 3, phase);
3680 } else {
3681 gop_netc_mac_to_xgmii(priv, 3, phase);
3682 if (c & MV_NETC_GE_MAC3_RGMII)
3683 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
3684 else
3685 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
3686 }
3687
3688 /* Activate gop ports 0, 2, 3 */
3689 gop_netc_active_port(priv, 0, 1);
3690 gop_netc_active_port(priv, 2, 1);
3691 gop_netc_active_port(priv, 3, 1);
3692
3693 if (phase == MV_NETC_SECOND_PHASE) {
3694 /* Enable the GOP internal clock logic */
3695 gop_netc_gop_clock_logic_set(priv, 1);
3696 /* De-assert GOP unit reset */
3697 gop_netc_gop_reset(priv, 1);
3698 }
3699
3700 return 0;
3701}
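
/*
 * Hedged usage sketch: elsewhere in the driver (outside this excerpt)
 * the netcomplex configuration is accumulated per port and applied in
 * two phases, roughly:
 *
 *	priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
 *						   port->phy_interface);
 *	...
 *	gop_netc_init(priv, MV_NETC_FIRST_PHASE);
 *	...
 *	gop_netc_init(priv, MV_NETC_SECOND_PHASE);
 */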
3702
Stefan Roese96c19042016-02-10 07:22:10 +01003703/* Set defaults to the MVPP2 port */
3704static void mvpp2_defaults_set(struct mvpp2_port *port)
3705{
3706 int tx_port_num, val, queue, ptxq, lrxq;
3707
Thomas Petazzoni58159ee2017-02-16 06:57:24 +01003708 if (port->priv->hw_version == MVPP21) {
3709 /* Configure port to loopback if needed */
3710 if (port->flags & MVPP2_F_LOOPBACK)
3711 mvpp2_port_loopback_set(port);
Stefan Roese96c19042016-02-10 07:22:10 +01003712
Thomas Petazzoni58159ee2017-02-16 06:57:24 +01003713 /* Update TX FIFO MIN Threshold */
3714 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3715 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3716 /* Min. TX threshold must be less than minimal packet length */
3717 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3718 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3719 }
Stefan Roese96c19042016-02-10 07:22:10 +01003720
3721 /* Disable Legacy WRR, Disable EJP, Release from reset */
3722 tx_port_num = mvpp2_egress_port(port);
3723 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3724 tx_port_num);
3725 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3726
3727 /* Close bandwidth for all queues */
3728 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3729 ptxq = mvpp2_txq_phys(port->id, queue);
3730 mvpp2_write(port->priv,
3731 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3732 }
3733
3734 /* Set refill period to 1 usec, refill tokens
3735 * and bucket size to maximum
3736 */
3737 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
3738 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3739 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3740 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3741 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3742 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3743 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3744 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3745
3746 /* Set MaximumLowLatencyPacketSize value to 256 */
3747 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3748 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3749 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3750
3751 /* Enable Rx cache snoop */
3752 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3753 queue = port->rxqs[lrxq]->id;
3754 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3755 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3756 MVPP2_SNOOP_BUF_HDR_MASK;
3757 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3758 }
3759}
3760
3761/* Enable/disable receiving packets */
3762static void mvpp2_ingress_enable(struct mvpp2_port *port)
3763{
3764 u32 val;
3765 int lrxq, queue;
3766
3767 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3768 queue = port->rxqs[lrxq]->id;
3769 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3770 val &= ~MVPP2_RXQ_DISABLE_MASK;
3771 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3772 }
3773}
3774
3775static void mvpp2_ingress_disable(struct mvpp2_port *port)
3776{
3777 u32 val;
3778 int lrxq, queue;
3779
3780 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3781 queue = port->rxqs[lrxq]->id;
3782 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3783 val |= MVPP2_RXQ_DISABLE_MASK;
3784 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3785 }
3786}
3787
3788/* Enable transmit via physical egress queue
 3789 * - HW starts taking descriptors from DRAM
3790 */
3791static void mvpp2_egress_enable(struct mvpp2_port *port)
3792{
3793 u32 qmap;
3794 int queue;
3795 int tx_port_num = mvpp2_egress_port(port);
3796
3797 /* Enable all initialized TXs. */
3798 qmap = 0;
3799 for (queue = 0; queue < txq_number; queue++) {
3800 struct mvpp2_tx_queue *txq = port->txqs[queue];
3801
3802 if (txq->descs != NULL)
3803 qmap |= (1 << queue);
3804 }
3805
3806 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3807 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3808}
3809
3810/* Disable transmit via physical egress queue
3811 * - HW doesn't take descriptors from DRAM
3812 */
3813static void mvpp2_egress_disable(struct mvpp2_port *port)
3814{
3815 u32 reg_data;
3816 int delay;
3817 int tx_port_num = mvpp2_egress_port(port);
3818
3819 /* Issue stop command for active channels only */
3820 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3821 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
3822 MVPP2_TXP_SCHED_ENQ_MASK;
3823 if (reg_data != 0)
3824 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
3825 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
3826
3827 /* Wait for all Tx activity to terminate. */
3828 delay = 0;
3829 do {
3830 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04003831 dev_warn(port->phy_dev->dev,
3832 "Tx stop timed out, status=0x%08x\n",
3833 reg_data);
Stefan Roese96c19042016-02-10 07:22:10 +01003834 break;
3835 }
3836 mdelay(1);
3837 delay++;
3838
3839 /* Check port TX Command register that all
3840 * Tx queues are stopped
3841 */
3842 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
3843 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
3844}
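
/*
 * The stop sequence above open-codes a bounded 1 ms poll. The same
 * idiom as a standalone helper, sketched for illustration only
 * (hypothetical; the driver does not define this):
 */
static int mvpp2_poll_cleared(struct mvpp2 *priv, u32 reg, u32 mask,
			      int timeout_msec)
{
	while (mvpp2_read(priv, reg) & mask) {
		if (timeout_msec-- <= 0)
			return -ETIMEDOUT;
		mdelay(1);
	}

	return 0;
}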
3845
3846/* Rx descriptors helper methods */
3847
3848/* Get number of Rx descriptors occupied by received packets */
3849static inline int
3850mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3851{
3852 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
3853
3854 return val & MVPP2_RXQ_OCCUPIED_MASK;
3855}
3856
3857/* Update Rx queue status with the number of occupied and available
3858 * Rx descriptor slots.
3859 */
3860static inline void
3861mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3862 int used_count, int free_count)
3863{
 3864	/* Decrement the number of used descriptors and increment the
 3865	 * number of free descriptors.
3866 */
3867 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3868
3869 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3870}
3871
3872/* Get pointer to next RX descriptor to be processed by SW */
3873static inline struct mvpp2_rx_desc *
3874mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
3875{
3876 int rx_desc = rxq->next_desc_to_proc;
3877
3878 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
3879 prefetch(rxq->descs + rxq->next_desc_to_proc);
3880 return rxq->descs + rx_desc;
3881}
3882
3883/* Set rx queue offset */
3884static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
3885 int prxq, int offset)
3886{
3887 u32 val;
3888
3889 /* Convert offset from bytes to units of 32 bytes */
3890 offset = offset >> 5;
3891
3892 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3893 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3894
 3895	/* Offset is in units of 32 bytes */
3896 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3897 MVPP2_RXQ_PACKET_OFFSET_MASK);
3898
3899 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3900}
3901
3902/* Obtain BM cookie information from descriptor */
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003903static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
3904 struct mvpp2_rx_desc *rx_desc)
Stefan Roese96c19042016-02-10 07:22:10 +01003905{
Stefan Roese96c19042016-02-10 07:22:10 +01003906 int cpu = smp_processor_id();
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01003907 int pool;
3908
3909 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
3910 MVPP2_RXD_BM_POOL_ID_MASK) >>
3911 MVPP2_RXD_BM_POOL_ID_OFFS;
Stefan Roese96c19042016-02-10 07:22:10 +01003912
3913 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
3914 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
3915}
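
/*
 * For illustration: the cookie built above can be decoded again with
 * the same constants. These helpers are hypothetical and unused; they
 * only document the layout (pool id and cpu id, one byte each).
 */
static inline int mvpp2_bm_cookie_pool(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}

static inline int mvpp2_bm_cookie_cpu(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_CPU_OFFS) & 0xFF;
}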
3916
3917/* Tx descriptors helper methods */
3918
3919/* Get number of Tx descriptors waiting to be transmitted by HW */
3920static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
3921 struct mvpp2_tx_queue *txq)
3922{
3923 u32 val;
3924
3925 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3926 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3927
3928 return val & MVPP2_TXQ_PENDING_MASK;
3929}
3930
3931/* Get pointer to next Tx descriptor to be processed (send) by HW */
3932static struct mvpp2_tx_desc *
3933mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
3934{
3935 int tx_desc = txq->next_desc_to_proc;
3936
3937 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
3938 return txq->descs + tx_desc;
3939}
3940
3941/* Update HW with number of aggregated Tx descriptors to be sent */
3942static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
3943{
3944 /* aggregated access - relevant TXQ number is written in TX desc */
3945 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
3946}
3947
3948/* Get number of sent descriptors and decrement counter.
3949 * The number of sent descriptors is returned.
3950 * Per-CPU access
3951 */
3952static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
3953 struct mvpp2_tx_queue *txq)
3954{
3955 u32 val;
3956
3957 /* Reading status reg resets transmitted descriptor counter */
3958 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
3959
3960 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
3961 MVPP2_TRANSMITTED_COUNT_OFFSET;
3962}
3963
3964static void mvpp2_txq_sent_counter_clear(void *arg)
3965{
3966 struct mvpp2_port *port = arg;
3967 int queue;
3968
3969 for (queue = 0; queue < txq_number; queue++) {
3970 int id = port->txqs[queue]->id;
3971
3972 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
3973 }
3974}
3975
3976/* Set max sizes for Tx queues */
3977static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3978{
3979 u32 val, size, mtu;
3980 int txq, tx_port_num;
3981
3982 mtu = port->pkt_size * 8;
3983 if (mtu > MVPP2_TXP_MTU_MAX)
3984 mtu = MVPP2_TXP_MTU_MAX;
3985
3986 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
3987 mtu = 3 * mtu;
3988
3989 /* Indirect access to registers */
3990 tx_port_num = mvpp2_egress_port(port);
3991 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3992
3993 /* Set MTU */
3994 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
3995 val &= ~MVPP2_TXP_MTU_MAX;
3996 val |= mtu;
3997 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
3998
 3999	/* TXP token size and all TXQs token size must be larger than the MTU */
4000 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4001 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4002 if (size < mtu) {
4003 size = mtu;
4004 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4005 val |= size;
4006 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4007 }
4008
4009 for (txq = 0; txq < txq_number; txq++) {
4010 val = mvpp2_read(port->priv,
4011 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4012 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4013
4014 if (size < mtu) {
4015 size = mtu;
4016 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4017 val |= size;
4018 mvpp2_write(port->priv,
4019 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4020 val);
4021 }
4022 }
4023}
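
/*
 * Worked example for the sizing above, with an assumed pkt_size of
 * 1632 bytes: mtu = 1632 * 8 = 13056 (the scheduler appears to work
 * in bit units, hence the * 8), tripled by the workaround to 39168,
 * then programmed as the egress scheduler MTU and used as the floor
 * for the TXP and per-TXQ token sizes.
 */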
4024
4025/* Free Tx queue skbuffs */
4026static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4027 struct mvpp2_tx_queue *txq,
4028 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4029{
4030 int i;
4031
4032 for (i = 0; i < num; i++)
4033 mvpp2_txq_inc_get(txq_pcpu);
4034}
4035
4036static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4037 u32 cause)
4038{
4039 int queue = fls(cause) - 1;
4040
4041 return port->rxqs[queue];
4042}
4043
4044static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4045 u32 cause)
4046{
4047 int queue = fls(cause) - 1;
4048
4049 return port->txqs[queue];
4050}
4051
4052/* Rx/Tx queue initialization/cleanup methods */
4053
4054/* Allocate and initialize descriptors for aggr TXQ */
4055static int mvpp2_aggr_txq_init(struct udevice *dev,
4056 struct mvpp2_tx_queue *aggr_txq,
4057 int desc_num, int cpu,
4058 struct mvpp2 *priv)
4059{
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01004060 u32 txq_dma;
4061
Stefan Roese96c19042016-02-10 07:22:10 +01004062 /* Allocate memory for TX descriptors */
4063 aggr_txq->descs = buffer_loc.aggr_tx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004064 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01004065 if (!aggr_txq->descs)
4066 return -ENOMEM;
4067
4068 /* Make sure descriptor address is cache line size aligned */
4069 BUG_ON(aggr_txq->descs !=
4070 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4071
4072 aggr_txq->last_desc = aggr_txq->size - 1;
4073
4074 /* Aggr TXQ no reset WA */
4075 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4076 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4077
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01004078 /* Set Tx descriptors queue starting address indirect
4079 * access
4080 */
4081 if (priv->hw_version == MVPP21)
4082 txq_dma = aggr_txq->descs_dma;
4083 else
4084 txq_dma = aggr_txq->descs_dma >>
4085 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4086
4087 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01004088 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4089
4090 return 0;
4091}
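
/*
 * Hedged example of the PPv2.2 address programming above: assuming
 * MVPP22_AGGR_TXQ_DESC_ADDR_OFFS is 8, a 40-bit descriptor DMA
 * address such as 0x12_3456_7800 is written as
 * 0x1234567800 >> 8 = 0x12345678, dropping the alignment bits so the
 * address fits a 32-bit register.
 */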
4092
4093/* Create a specified Rx queue */
4094static int mvpp2_rxq_init(struct mvpp2_port *port,
4095 struct mvpp2_rx_queue *rxq)
4096
4097{
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01004098 u32 rxq_dma;
4099
Stefan Roese96c19042016-02-10 07:22:10 +01004100 rxq->size = port->rx_ring_size;
4101
4102 /* Allocate memory for RX descriptors */
4103 rxq->descs = buffer_loc.rx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004104 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01004105 if (!rxq->descs)
4106 return -ENOMEM;
4107
4108 BUG_ON(rxq->descs !=
4109 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4110
4111 rxq->last_desc = rxq->size - 1;
4112
4113 /* Zero occupied and non-occupied counters - direct access */
4114 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4115
4116 /* Set Rx descriptors queue starting address - indirect access */
4117 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzoni7f215c72017-02-20 11:36:57 +01004118 if (port->priv->hw_version == MVPP21)
4119 rxq_dma = rxq->descs_dma;
4120 else
4121 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
4122 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01004123 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4124 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4125
4126 /* Set Offset */
4127 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4128
4129 /* Add number of descriptors ready for receiving packets */
4130 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4131
4132 return 0;
4133}
4134
4135/* Push packets received by the RXQ to BM pool */
4136static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4137 struct mvpp2_rx_queue *rxq)
4138{
4139 int rx_received, i;
4140
4141 rx_received = mvpp2_rxq_received(port, rxq->id);
4142 if (!rx_received)
4143 return;
4144
4145 for (i = 0; i < rx_received; i++) {
4146 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004147 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01004148
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004149 mvpp2_pool_refill(port, bm,
4150 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4151 mvpp2_rxdesc_cookie_get(port, rx_desc));
Stefan Roese96c19042016-02-10 07:22:10 +01004152 }
4153 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4154}
4155
4156/* Cleanup Rx queue */
4157static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4158 struct mvpp2_rx_queue *rxq)
4159{
4160 mvpp2_rxq_drop_pkts(port, rxq);
4161
4162 rxq->descs = NULL;
4163 rxq->last_desc = 0;
4164 rxq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004165 rxq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01004166
4167 /* Clear Rx descriptors queue starting address and size;
4168 * free descriptor number
4169 */
4170 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4171 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4172 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4173 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4174}
4175
4176/* Create and initialize a Tx queue */
4177static int mvpp2_txq_init(struct mvpp2_port *port,
4178 struct mvpp2_tx_queue *txq)
4179{
4180 u32 val;
4181 int cpu, desc, desc_per_txq, tx_port_num;
4182 struct mvpp2_txq_pcpu *txq_pcpu;
4183
4184 txq->size = port->tx_ring_size;
4185
4186 /* Allocate memory for Tx descriptors */
4187 txq->descs = buffer_loc.tx_descs;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004188 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
Stefan Roese96c19042016-02-10 07:22:10 +01004189 if (!txq->descs)
4190 return -ENOMEM;
4191
4192 /* Make sure descriptor address is cache line size aligned */
4193 BUG_ON(txq->descs !=
4194 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4195
4196 txq->last_desc = txq->size - 1;
4197
4198 /* Set Tx descriptors queue starting address - indirect access */
4199 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004200 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
Stefan Roese96c19042016-02-10 07:22:10 +01004201 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4202 MVPP2_TXQ_DESC_SIZE_MASK);
4203 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4204 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4205 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4206 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4207 val &= ~MVPP2_TXQ_PENDING_MASK;
4208 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4209
4210 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4211 * for each existing TXQ.
4212 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
 4213	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
4214 */
4215 desc_per_txq = 16;
4216 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4217 (txq->log_id * desc_per_txq);
4218
4219 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4220 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
Thomas Petazzoni5555f072017-02-16 08:03:37 +01004221 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Stefan Roese96c19042016-02-10 07:22:10 +01004222
4223 /* WRR / EJP configuration - indirect access */
4224 tx_port_num = mvpp2_egress_port(port);
4225 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4226
4227 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4228 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4229 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4230 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4231 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4232
4233 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4234 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4235 val);
4236
4237 for_each_present_cpu(cpu) {
4238 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4239 txq_pcpu->size = txq->size;
4240 }
4241
4242 return 0;
4243}
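
/*
 * Worked example for the prefetch buffer layout above, assuming
 * MVPP2_MAX_TXQ is 8 as defined earlier in this file: with
 * desc_per_txq = 16, port 1 / logical queue 2 gets
 * desc = 1 * 8 * 16 + 2 * 16 = 160, i.e. prefetch slots 160..175,
 * with the threshold set to 16 / 2 = 8 descriptors.
 */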
4244
4245/* Free allocated TXQ resources */
4246static void mvpp2_txq_deinit(struct mvpp2_port *port,
4247 struct mvpp2_tx_queue *txq)
4248{
4249 txq->descs = NULL;
4250 txq->last_desc = 0;
4251 txq->next_desc_to_proc = 0;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004252 txq->descs_dma = 0;
Stefan Roese96c19042016-02-10 07:22:10 +01004253
4254 /* Set minimum bandwidth for disabled TXQs */
4255 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4256
4257 /* Set Tx descriptors queue starting address and size */
4258 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4259 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4260 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4261}
4262
4263/* Cleanup Tx ports */
4264static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4265{
4266 struct mvpp2_txq_pcpu *txq_pcpu;
4267 int delay, pending, cpu;
4268 u32 val;
4269
4270 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4271 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4272 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4273 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4274
 4275	/* The queue has been stopped, so wait for all packets
4276 * to be transmitted.
4277 */
4278 delay = 0;
4279 do {
4280 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004281 dev_warn(port->phy_dev->dev,
4282 "port %d: cleaning queue %d timed out\n",
4283 port->id, txq->log_id);
Stefan Roese96c19042016-02-10 07:22:10 +01004284 break;
4285 }
4286 mdelay(1);
4287 delay++;
4288
4289 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4290 } while (pending);
4291
4292 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4293 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4294
4295 for_each_present_cpu(cpu) {
4296 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4297
4298 /* Release all packets */
4299 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4300
4301 /* Reset queue */
4302 txq_pcpu->count = 0;
4303 txq_pcpu->txq_put_index = 0;
4304 txq_pcpu->txq_get_index = 0;
4305 }
4306}
4307
4308/* Cleanup all Tx queues */
4309static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4310{
4311 struct mvpp2_tx_queue *txq;
4312 int queue;
4313 u32 val;
4314
4315 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4316
4317 /* Reset Tx ports and delete Tx queues */
4318 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4319 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4320
4321 for (queue = 0; queue < txq_number; queue++) {
4322 txq = port->txqs[queue];
4323 mvpp2_txq_clean(port, txq);
4324 mvpp2_txq_deinit(port, txq);
4325 }
4326
4327 mvpp2_txq_sent_counter_clear(port);
4328
4329 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4330 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4331}
4332
4333/* Cleanup all Rx queues */
4334static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4335{
4336 int queue;
4337
4338 for (queue = 0; queue < rxq_number; queue++)
4339 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4340}
4341
4342/* Init all Rx queues for port */
4343static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4344{
4345 int queue, err;
4346
4347 for (queue = 0; queue < rxq_number; queue++) {
4348 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4349 if (err)
4350 goto err_cleanup;
4351 }
4352 return 0;
4353
4354err_cleanup:
4355 mvpp2_cleanup_rxqs(port);
4356 return err;
4357}
4358
4359/* Init all tx queues for port */
4360static int mvpp2_setup_txqs(struct mvpp2_port *port)
4361{
4362 struct mvpp2_tx_queue *txq;
4363 int queue, err;
4364
4365 for (queue = 0; queue < txq_number; queue++) {
4366 txq = port->txqs[queue];
4367 err = mvpp2_txq_init(port, txq);
4368 if (err)
4369 goto err_cleanup;
4370 }
4371
4372 mvpp2_txq_sent_counter_clear(port);
4373 return 0;
4374
4375err_cleanup:
4376 mvpp2_cleanup_txqs(port);
4377 return err;
4378}
4379
4380/* Adjust link */
4381static void mvpp2_link_event(struct mvpp2_port *port)
4382{
4383 struct phy_device *phydev = port->phy_dev;
4384 int status_change = 0;
4385 u32 val;
4386
4387 if (phydev->link) {
4388 if ((port->speed != phydev->speed) ||
4389 (port->duplex != phydev->duplex)) {
4390 u32 val;
4391
4392 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4393 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4394 MVPP2_GMAC_CONFIG_GMII_SPEED |
4395 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4396 MVPP2_GMAC_AN_SPEED_EN |
4397 MVPP2_GMAC_AN_DUPLEX_EN);
4398
4399 if (phydev->duplex)
4400 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4401
Stefan Chulski4de58cb2021-05-03 08:08:48 +02004402 if (phydev->speed == SPEED_1000 ||
 4403		    phydev->speed == SPEED_2500)
Stefan Roese96c19042016-02-10 07:22:10 +01004404 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4405 else if (phydev->speed == SPEED_100)
4406 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4407
4408 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4409
4410 port->duplex = phydev->duplex;
4411 port->speed = phydev->speed;
4412 }
4413 }
4414
4415 if (phydev->link != port->link) {
4416 if (!phydev->link) {
4417 port->duplex = -1;
4418 port->speed = 0;
4419 }
4420
4421 port->link = phydev->link;
4422 status_change = 1;
4423 }
4424
4425 if (status_change) {
4426 if (phydev->link) {
4427 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4428 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4429 MVPP2_GMAC_FORCE_LINK_DOWN);
4430 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4431 mvpp2_egress_enable(port);
4432 mvpp2_ingress_enable(port);
4433 } else {
4434 mvpp2_ingress_disable(port);
4435 mvpp2_egress_disable(port);
4436 }
4437 }
4438}
4439
4440/* Main RX/TX processing routines */
4441
4442/* Display more error info */
4443static void mvpp2_rx_error(struct mvpp2_port *port,
4444 struct mvpp2_rx_desc *rx_desc)
4445{
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01004446 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
4447 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01004448
4449 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4450 case MVPP2_RXD_ERR_CRC:
Sean Andersonc7cbf092020-09-15 10:44:57 -04004451 dev_err(port->phy_dev->dev,
4452 "bad rx status %08x (crc error), size=%zu\n", status,
4453 sz);
Stefan Roese96c19042016-02-10 07:22:10 +01004454 break;
4455 case MVPP2_RXD_ERR_OVERRUN:
Sean Andersonc7cbf092020-09-15 10:44:57 -04004456 dev_err(port->phy_dev->dev,
4457 "bad rx status %08x (overrun error), size=%zu\n",
4458 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01004459 break;
4460 case MVPP2_RXD_ERR_RESOURCE:
Sean Andersonc7cbf092020-09-15 10:44:57 -04004461 dev_err(port->phy_dev->dev,
4462 "bad rx status %08x (resource error), size=%zu\n",
4463 status, sz);
Stefan Roese96c19042016-02-10 07:22:10 +01004464 break;
4465 }
4466}
4467
 4468/* Refill the BM pool with the received buffer (no skb handling in U-Boot) */
4469static int mvpp2_rx_refill(struct mvpp2_port *port,
4470 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004471 u32 bm, dma_addr_t dma_addr)
Stefan Roese96c19042016-02-10 07:22:10 +01004472{
Thomas Petazzonic49aff22017-02-20 10:27:51 +01004473 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
Stefan Roese96c19042016-02-10 07:22:10 +01004474 return 0;
4475}
4476
4477/* Set hw internals when starting port */
4478static void mvpp2_start_dev(struct mvpp2_port *port)
4479{
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004480 switch (port->phy_interface) {
4481 case PHY_INTERFACE_MODE_RGMII:
4482 case PHY_INTERFACE_MODE_RGMII_ID:
4483 case PHY_INTERFACE_MODE_SGMII:
Stefan Chulski237105f2021-05-03 08:08:46 +02004484 case PHY_INTERFACE_MODE_1000BASEX:
4485 case PHY_INTERFACE_MODE_2500BASEX:
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004486 mvpp2_gmac_max_rx_size_set(port);
4487 default:
4488 break;
4489 }
4490
Stefan Roese96c19042016-02-10 07:22:10 +01004491 mvpp2_txp_max_tx_size_set(port);
4492
Stefan Roese40e749b2017-03-22 15:07:30 +01004493 if (port->priv->hw_version == MVPP21)
4494 mvpp2_port_enable(port);
4495 else
4496 gop_port_enable(port, 1);
Stefan Roese96c19042016-02-10 07:22:10 +01004497}
4498
4499/* Set hw internals when stopping port */
4500static void mvpp2_stop_dev(struct mvpp2_port *port)
4501{
 4502	/* Stop new packets from arriving at the RXQs */
4503 mvpp2_ingress_disable(port);
4504
4505 mvpp2_egress_disable(port);
Stefan Roese40e749b2017-03-22 15:07:30 +01004506
4507 if (port->priv->hw_version == MVPP21)
4508 mvpp2_port_disable(port);
4509 else
4510 gop_port_enable(port, 0);
Stefan Roese96c19042016-02-10 07:22:10 +01004511}
4512
Stefan Chulskib261d1b2019-08-15 18:08:41 -04004513static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
Stefan Roese96c19042016-02-10 07:22:10 +01004514{
4515 struct phy_device *phy_dev;
4516
4517 if (!port->init || port->link == 0) {
Nevo Hed5e975612019-08-15 18:08:44 -04004518 phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
4519 dev, port->phy_interface);
Grzegorz Jaszczyka531afd2019-08-15 18:08:42 -04004520
4521 /*
 4522	 * If the phy doesn't match any existing U-Boot driver, the
 4523	 * phy framework connects it to the generic driver, whose
 4524	 * uid == 0xffffffff. In that case act as if the phy were not
 4525	 * declared in the dts. Otherwise, for the 3310 (for which no
 4526	 * driver exists), the link would not be detected correctly.
 4527	 * Removing the phy entry from the dts is not an option,
 4528	 * because it is required for the phy_fw_down
 4529	 * procedure.
4530 */
4531 if (phy_dev &&
 4532		    phy_dev->drv->uid == 0xffffffff) { /* Generic phy */
Sean Andersonc7cbf092020-09-15 10:44:57 -04004533			dev_warn(phy_dev->dev,
4534 "Marking phy as invalid, link will not be checked\n");
Grzegorz Jaszczyka531afd2019-08-15 18:08:42 -04004535 /* set phy_addr to invalid value */
4536 port->phyaddr = PHY_MAX_ADDR;
4537 mvpp2_egress_enable(port);
4538 mvpp2_ingress_enable(port);
4539
4540 return;
4541 }
4542
Stefan Roese96c19042016-02-10 07:22:10 +01004543 port->phy_dev = phy_dev;
4544 if (!phy_dev) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004545			dev_err(dev, "cannot connect to phy\n");
Stefan Chulskib261d1b2019-08-15 18:08:41 -04004546 return;
Stefan Roese96c19042016-02-10 07:22:10 +01004547 }
4548 phy_dev->supported &= PHY_GBIT_FEATURES;
4549 phy_dev->advertising = phy_dev->supported;
4550
4552 port->link = 0;
4553 port->duplex = 0;
4554 port->speed = 0;
4555
4556 phy_config(phy_dev);
4557 phy_startup(phy_dev);
Stefan Chulskib261d1b2019-08-15 18:08:41 -04004558 if (!phy_dev->link)
Stefan Roese96c19042016-02-10 07:22:10 +01004559 printf("%s: No link\n", phy_dev->dev->name);
Stefan Chulskib261d1b2019-08-15 18:08:41 -04004560 else
4561 port->init = 1;
Stefan Roese96c19042016-02-10 07:22:10 +01004562 } else {
4563 mvpp2_egress_enable(port);
4564 mvpp2_ingress_enable(port);
4565 }
Stefan Roese96c19042016-02-10 07:22:10 +01004566}
4567
4568static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
4569{
4570 unsigned char mac_bcast[ETH_ALEN] = {
4571 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4572 int err;
4573
4574 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
4575 if (err) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004576 dev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004577 return err;
4578 }
4579 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
4580 port->dev_addr, true);
4581 if (err) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004582 dev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004583 return err;
4584 }
4585 err = mvpp2_prs_def_flow(port);
4586 if (err) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004587 dev_err(dev, "mvpp2_prs_def_flow failed\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004588 return err;
4589 }
4590
4591 /* Allocate the Rx/Tx queues */
4592 err = mvpp2_setup_rxqs(port);
4593 if (err) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004594 dev_err(port->phy_dev->dev, "cannot allocate Rx queues\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004595 return err;
4596 }
4597
4598 err = mvpp2_setup_txqs(port);
4599 if (err) {
Sean Andersonc7cbf092020-09-15 10:44:57 -04004600 dev_err(port->phy_dev->dev, "cannot allocate Tx queues\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004601 return err;
4602 }
4603
Nevo Hed5e975612019-08-15 18:08:44 -04004604 if (port->phyaddr < PHY_MAX_ADDR) {
Stefan Chulskib261d1b2019-08-15 18:08:41 -04004605 mvpp2_phy_connect(dev, port);
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004606 mvpp2_link_event(port);
4607 } else {
4608 mvpp2_egress_enable(port);
4609 mvpp2_ingress_enable(port);
4610 }
Stefan Roese96c19042016-02-10 07:22:10 +01004611
4612 mvpp2_start_dev(port);
4613
4614 return 0;
4615}
4616
4617/* No Device ops here in U-Boot */
4618
4619/* Driver initialization */
4620
4621static void mvpp2_port_power_up(struct mvpp2_port *port)
4622{
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01004623 struct mvpp2 *priv = port->priv;
4624
Stefan Roese40e749b2017-03-22 15:07:30 +01004625 /* On PPv2.2 the GoP / interface configuration has already been done */
4626 if (priv->hw_version == MVPP21)
4627 mvpp2_port_mii_set(port);
Stefan Roese96c19042016-02-10 07:22:10 +01004628 mvpp2_port_periodic_xon_disable(port);
Thomas Petazzonicc2445f2017-02-20 11:42:51 +01004629 if (priv->hw_version == MVPP21)
4630 mvpp2_port_fc_adv_enable(port);
Stefan Roese96c19042016-02-10 07:22:10 +01004631 mvpp2_port_reset(port);
4632}
4633
4634/* Initialize port HW */
4635static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
4636{
4637 struct mvpp2 *priv = port->priv;
4638 struct mvpp2_txq_pcpu *txq_pcpu;
4639 int queue, cpu, err;
4640
Thomas Petazzoni38a23282017-02-16 09:03:16 +01004641 if (port->first_rxq + rxq_number >
4642 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Stefan Roese96c19042016-02-10 07:22:10 +01004643 return -EINVAL;
4644
4645 /* Disable port */
4646 mvpp2_egress_disable(port);
Stefan Roese40e749b2017-03-22 15:07:30 +01004647 if (priv->hw_version == MVPP21)
4648 mvpp2_port_disable(port);
4649 else
4650 gop_port_enable(port, 0);
Stefan Roese96c19042016-02-10 07:22:10 +01004651
4652 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
4653 GFP_KERNEL);
4654 if (!port->txqs)
4655 return -ENOMEM;
4656
 4657	/* Associate physical Tx queues with this port and initialize them.
4658 * The mapping is predefined.
4659 */
4660 for (queue = 0; queue < txq_number; queue++) {
4661 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
4662 struct mvpp2_tx_queue *txq;
4663
4664 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
4665 if (!txq)
4666 return -ENOMEM;
4667
4668 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
4669 GFP_KERNEL);
4670 if (!txq->pcpu)
4671 return -ENOMEM;
4672
4673 txq->id = queue_phy_id;
4674 txq->log_id = queue;
4675 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
4676 for_each_present_cpu(cpu) {
4677 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4678 txq_pcpu->cpu = cpu;
4679 }
4680
4681 port->txqs[queue] = txq;
4682 }
4683
4684 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
4685 GFP_KERNEL);
4686 if (!port->rxqs)
4687 return -ENOMEM;
4688
 4689	/* Allocate and initialize Rx queues for this port */
4690 for (queue = 0; queue < rxq_number; queue++) {
4691 struct mvpp2_rx_queue *rxq;
4692
4693 /* Map physical Rx queue to port's logical Rx queue */
4694 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
4695 if (!rxq)
4696 return -ENOMEM;
4697 /* Map this Rx queue to a physical queue */
4698 rxq->id = port->first_rxq + queue;
4699 rxq->port = port->id;
4700 rxq->logic_rxq = queue;
4701
4702 port->rxqs[queue] = rxq;
4703 }
4704
Stefan Roese96c19042016-02-10 07:22:10 +01004705 /* Create Rx descriptor rings */
4706 for (queue = 0; queue < rxq_number; queue++) {
4707 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4708
4709 rxq->size = port->rx_ring_size;
4710 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
4711 rxq->time_coal = MVPP2_RX_COAL_USEC;
4712 }
4713
4714 mvpp2_ingress_disable(port);
4715
4716 /* Port default configuration */
4717 mvpp2_defaults_set(port);
4718
4719 /* Port's classifier configuration */
4720 mvpp2_cls_oversize_rxq_set(port);
4721 mvpp2_cls_port_config(port);
4722
4723 /* Provide an initial Rx packet size */
4724 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
4725
4726 /* Initialize pools for swf */
4727 err = mvpp2_swf_bm_pool_init(port);
4728 if (err)
4729 return err;
4730
4731 return 0;
4732}
4733
Stefan Roese8ba5f0b2017-03-22 14:11:16 +01004734static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
Stefan Roese96c19042016-02-10 07:22:10 +01004735{
Stefan Roese8ba5f0b2017-03-22 14:11:16 +01004736 int port_node = dev_of_offset(dev);
Baruch Siach4f42c862018-11-21 13:05:33 +02004737 int phy_node;
Stefan Roese96c19042016-02-10 07:22:10 +01004738 u32 id;
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004739 u32 phyaddr = 0;
Stefan Chulski4032d402021-05-03 08:08:52 +02004740 int fixed_link = 0;
Nevo Hed5e975612019-08-15 18:08:44 -04004741 int ret;
Baruch Siach3cf0f2e2018-11-21 13:05:34 +02004742
Stefan Roese96c19042016-02-10 07:22:10 +01004743 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
Stefan Chulski4032d402021-05-03 08:08:52 +02004744 fixed_link = fdt_subnode_offset(gd->fdt_blob, port_node, "fixed-link");
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004745
4746 if (phy_node > 0) {
Nevo Hed5e975612019-08-15 18:08:44 -04004747 int parent;
Stefan Chulski4032d402021-05-03 08:08:52 +02004748
4749 if (fixed_link != -FDT_ERR_NOTFOUND) {
4750 /* phy_addr is set to invalid value for fixed links */
4751 phyaddr = PHY_MAX_ADDR;
4752 } else {
4753 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node,
4754 "reg", 0);
4755 if (phyaddr < 0) {
4756 dev_err(dev, "could not find phy address\n");
4757 return -1;
4758 }
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004759 }
Nevo Hed5e975612019-08-15 18:08:44 -04004760 parent = fdt_parent_offset(gd->fdt_blob, phy_node);
4761 ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
4762 &port->mdio_dev);
4763 if (ret)
4764 return ret;
Stefan Chulskia27adcb2017-04-06 15:39:08 +02004765 } else {
Nevo Hed5e975612019-08-15 18:08:44 -04004766 /* phy_addr is set to invalid value */
4767 phyaddr = PHY_MAX_ADDR;
Stefan Roese96c19042016-02-10 07:22:10 +01004768 }
4769
Marek Behúnbc194772022-04-07 00:33:01 +02004770 port->phy_interface = dev_read_phy_mode(dev);
Marek Behún48631e42022-04-07 00:33:03 +02004771 if (port->phy_interface == PHY_INTERFACE_MODE_NA) {
Sean Anderson77a88792020-09-15 10:44:56 -04004772 dev_err(dev, "incorrect phy mode\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004773 return -EINVAL;
4774 }
4775
4776 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
4777 if (id == -1) {
Sean Anderson77a88792020-09-15 10:44:56 -04004778 dev_err(dev, "missing port-id value\n");
Stefan Roese96c19042016-02-10 07:22:10 +01004779 return -EINVAL;
4780 }
4781
Simon Glassfa4689a2019-12-06 21:41:35 -07004782#if CONFIG_IS_ENABLED(DM_GPIO)
Stefan Chulski0d65eb62017-08-09 10:37:43 +03004783 gpio_request_by_name(dev, "phy-reset-gpios", 0,
4784 &port->phy_reset_gpio, GPIOD_IS_OUT);
4785 gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
4786 &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
4787#endif
4788
Stefan Roese96c19042016-02-10 07:22:10 +01004789 port->id = id;
Stefan Roese8ba5f0b2017-03-22 14:11:16 +01004790 if (port->priv->hw_version == MVPP21)
Thomas Petazzoni38a23282017-02-16 09:03:16 +01004791 port->first_rxq = port->id * rxq_number;
4792 else
Stefan Roese8ba5f0b2017-03-22 14:11:16 +01004793 port->first_rxq = port->id * port->priv->max_port_rxqs;
Stefan Roese96c19042016-02-10 07:22:10 +01004794 port->phyaddr = phyaddr;
4795
Stefan Roese8ba5f0b2017-03-22 14:11:16 +01004796 return 0;
4797}
Thomas Petazzoni5555f072017-02-16 08:03:37 +01004798
Simon Glassfa4689a2019-12-06 21:41:35 -07004799#if CONFIG_IS_ENABLED(DM_GPIO)
Stefan Chulski0d65eb62017-08-09 10:37:43 +03004800/* Port GPIO initialization */
4801static void mvpp2_gpio_init(struct mvpp2_port *port)
4802{
4803 if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
Stefan Chulski0d65eb62017-08-09 10:37:43 +03004804 dm_gpio_set_value(&port->phy_reset_gpio, 1);
Baruch Siach67674e42018-10-15 13:16:48 +03004805 mdelay(10);
Baruch Siach187dced2018-10-15 13:16:47 +03004806 dm_gpio_set_value(&port->phy_reset_gpio, 0);
Stefan Chulski0d65eb62017-08-09 10:37:43 +03004807 }
4808
4809 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
4810 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
4811}
4812#endif

/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#if CONFIG_IS_ENABLED(DM_GPIO)
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
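
/*
 * A worked example of the window programming above (values are
 * illustrative, not taken from a real board): for a DRAM chip-select
 * with base 0x00000000, size 0x40000000 (1 GiB), mbus_attr 0x0e and
 * mbus_dram_target_id 0x0, window i is set up as:
 *
 *	MVPP2_WIN_BASE(i) = (0x00000000 & 0xffff0000) | (0x0e << 8) | 0x0
 *			  = 0x00000e00
 *	MVPP2_WIN_SIZE(i) = (0x40000000 - 1) & 0xffff0000 = 0x3fff0000
 *
 * and bit i of MVPP2_BASE_ADDR_ENABLE enables the window.
 */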

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
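
/*
 * Note: on MVPP22 the RX FIFO is carved up by port speed capability --
 * port 0 (the 10Gb/s-capable port) gets the largest data/attribute
 * slices, port 1 (up to 2.5Gb/s) a medium slice, and the remaining
 * ports a 1Gb/s slice. MVPP21 uses one uniform size for all ports.
 */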

/* Initialize Tx FIFOs */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports a 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}
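
/*
 * As with the RX FIFO, the large 10KB TX FIFO goes to port 0 and all
 * other ports get 3KB. The size value is masked with
 * MVPP22_TX_FIFO_SIZE_MASK before being written, since only the size
 * field of MVPP22_TX_FIFO_SIZE_REG is meant to be programmed here.
 */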

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
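
/*
 * Each AXI attribute/code value above is the OR of a cache code and a
 * domain code shifted into their register fields, i.e. conceptually:
 *
 *	attr = (cache_code << CACHE_OFFS) | (domain_code << DOMAIN_OFFS);
 *
 * Cacheable reads/writes in the outer-shareable domain are used for
 * the BM, descriptor and buffer-data traffic, while the "normal"
 * (non-snooped) code registers fall back to non-cacheable
 * system-domain accesses.
 */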

/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Check hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		mvpp2_axi_init(priv);
	} else {
		/* MBUS windows configuration */
		dram_target_info = mvebu_mbus_dram_info();
		if (dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snooping when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
5068
Stefan Roese96c19042016-02-10 07:22:10 +01005069static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
5070{
5071 struct mvpp2_port *port = dev_get_priv(dev);
5072 struct mvpp2_rx_desc *rx_desc;
5073 struct mvpp2_bm_pool *bm_pool;
Thomas Petazzonic49aff22017-02-20 10:27:51 +01005074 dma_addr_t dma_addr;
Stefan Roese96c19042016-02-10 07:22:10 +01005075 u32 bm, rx_status;
5076 int pool, rx_bytes, err;
5077 int rx_received;
5078 struct mvpp2_rx_queue *rxq;
Stefan Roese96c19042016-02-10 07:22:10 +01005079 u8 *data;
5080
Nevo Hed5e975612019-08-15 18:08:44 -04005081 if (port->phyaddr < PHY_MAX_ADDR)
Stefan Chulskib261d1b2019-08-15 18:08:41 -04005082 if (!port->phy_dev->link)
5083 return 0;
5084
Stefan Roese96c19042016-02-10 07:22:10 +01005085 /* Process RX packets */
Stefan Chulskib4025532017-08-09 10:37:49 +03005086 rxq = port->rxqs[0];
Stefan Roese96c19042016-02-10 07:22:10 +01005087
5088 /* Get number of received packets and clamp the to-do */
5089 rx_received = mvpp2_rxq_received(port, rxq->id);
5090
5091 /* Return if no packets are received */
5092 if (!rx_received)
5093 return 0;
5094
5095 rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzonifb3a7bb2017-02-15 15:35:00 +01005096 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5097 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5098 rx_bytes -= MVPP2_MH_SIZE;
5099 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
Stefan Roese96c19042016-02-10 07:22:10 +01005100
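	/*
	 * The BM cookie built from the RX descriptor encodes, among
	 * other things, which buffer-manager pool the buffer was drawn
	 * from; decode it here so the buffer can later be returned to
	 * the right pool.
	 */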
	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This release is controlled by the
	 * hardware, and the information about the buffer is carried
	 * in the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		dev_err(port->phy_dev->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Hand the packet to the stack, skipping the 2-byte Marvell
	 * header and the 32 bytes of headroom in front of the data
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation is needed here, since the rx buffers
	 * are located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
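	/*
	 * A worked example of the address split above, assuming
	 * MVPP2_TX_DESC_ALIGN is a low-bits mask such as 0x1f: for
	 * packet == 0x10000042, the descriptor DMA address field gets
	 * 0x10000040 and the offset field gets 0x02, and the hardware
	 * recombines them into the exact buffer start address.
	 */
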
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

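	/*
	 * Transmission completes in two stages: first wait for the
	 * descriptor to drain from the aggregated TXQ into the
	 * physical per-port TXQ, then wait for the hardware to report
	 * the descriptor as sent.
	 */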
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp2_port_power_up(port);
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

static int mvpp2_write_hwaddr(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	return mvpp2_prs_update_mac_da(port, port->dev_addr);
}

static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descriptors and rx_buffers. This
	 * is done only once for all interfaces, since only one interface
	 * can be active at a time. Make this area DMA-safe by disabling
	 * the D-cache.
	 */
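	/*
	 * The resulting bd_space layout is, schematically (actual sizes
	 * depend on the MVPP2_* constants):
	 *
	 *	aggregated TX descriptors
	 *	TX descriptors
	 *	RX descriptors
	 *	MVPP2_BM_POOLS_NUM BM pool areas
	 *	MVPP2_BM_LONG_BUF_NUM RX buffers of RX_BUFFER_SIZE each
	 */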

	if (!buffer_loc_init) {
		/* Align buffer area for descs and rx_buffers to 1MiB */
		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
		mmu_set_region_dcache_behaviour((unsigned long)bd_space,
						BD_SPACE, DCACHE_OFF);

		buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
		size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

		buffer_loc.tx_descs =
			(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
		size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

		buffer_loc.rx_descs =
			(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
		size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

		for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
			buffer_loc.bm_pool[i] =
				(unsigned long *)((unsigned long)bd_space + size);
			if (priv->hw_version == MVPP21)
				size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
			else
				size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
		}

		for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
			buffer_loc.rx_buffer[i] =
				(unsigned long *)((unsigned long)bd_space + size);
			size += RX_BUFFER_SIZE;
		}

		/* Clear the whole area so that all descriptors start zeroed */
		memset(bd_space, 0, size);

		buffer_loc_init = 1;
	}

	/* Save base addresses for later use */
	priv->base = devfdt_get_addr_index_ptr(dev, 0);
	if (!priv->base)
		return -EINVAL;

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devfdt_get_addr_index_ptr(dev, 1);
		if (!priv->lms_base)
			return -EINVAL;
	} else {
		priv->iface_base = devfdt_get_addr_index_ptr(dev, 1);
		if (!priv->iface_base)
			return -EINVAL;

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	return 0;
}

static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = priv;

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port-specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = devfdt_get_addr_index_ptr(
			dev->parent, priv_common_regs_num + port->id);
		if (!port->base)
			return -EINVAL;
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start = mvpp2_start,
	.send = mvpp2_send,
	.recv = mvpp2_recv,
	.stop = mvpp2_stop,
	.write_hwaddr = mvpp2_write_hwaddr,
};

static struct driver mvpp2_driver = {
	.name = "mvpp2",
	.id = UCLASS_ETH,
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.ops = &mvpp2_ops,
	.priv_auto = sizeof(struct mvpp2_port),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		if (!name) {
			free(plat);
			return -ENOMEM;
		}
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat,
			    offset_to_ofnode(subnode), &dev);
	}

	return 0;
}
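
/*
 * For illustration, a schematic device tree fragment that this bind
 * function would walk (node names and values are made up):
 *
 *	ethernet@0 {
 *		compatible = "marvell,armada-7k-pp22";
 *		eth0 { port-id = <0>; ... };
 *		eth1 { port-id = <1>; status = "disabled"; };
 *	};
 *
 * One UCLASS_ETH device named "mvpp2-<id>" is bound per enabled
 * subnode; base_id still advances across disabled subnodes so that
 * the ids stay stable when several controllers are present.
 */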

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name = "mvpp2_base",
	.id = UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind = mvpp2_base_bind,
	.priv_auto = sizeof(struct mvpp2),
};