/* Copyright 2016 MediaTek Inc.
 * Author: Carlos Huang <carlos.huang@mediatek.com>
 * Author: Harry Huang <harry.huang@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "raether.h"
#include "ra_ioctl.h"
#include "raether_qdma.h"

struct QDMA_txdesc *free_head;

/* ioctl */
/* skb->mark to queue mapping table */
unsigned int M2Q_table[64] = { 0 };
EXPORT_SYMBOL(M2Q_table);
unsigned int lan_wan_separate;
EXPORT_SYMBOL(lan_wan_separate);
struct sk_buff *magic_id = (struct sk_buff *)0xFFFFFFFF;

/* CONFIG_HW_SFQ */
unsigned int web_sfq_enable;
#define HW_SFQ_UP 3
#define HW_SFQ_DL 1

#define sfq_debug 0
struct SFQ_table *sfq0;
struct SFQ_table *sfq1;
struct SFQ_table *sfq2;
struct SFQ_table *sfq3;

#define KSEG1 0xa0000000
#define PHYS_TO_VIRT(x) phys_to_virt(x)
#define VIRT_TO_PHYS(x) virt_to_phys(x)
/* extern void set_fe_dma_glo_cfg(void); */
struct parse_result sfq_parse_result;

/**
 * @brief: get the TXD index from its address
 *
 * @param: cpu_ptr
 *
 * @return: TXD index
 */

/**
 * @brief calculate the number of TXDs needed for a buffer of the given size
 *
 * @param size
 *
 * @return frag_txd_num
 */
static inline unsigned int cal_frag_txd_num(unsigned int size)
{
    unsigned int frag_txd_num = 0;

    if (size == 0)
        return 0;
    while (size > 0) {
        if (size > MAX_QTXD_LEN) {
            frag_txd_num++;
            size -= MAX_QTXD_LEN;
        } else {
            frag_txd_num++;
            size = 0;
        }
    }
    return frag_txd_num;
}

/**
 * @brief take a free TXD index from the head of the per-ring free TXD list
 *
 * @param ei_local
 * @param ring_no
 *
 * @return index of the TXD removed from the free list
 */
static inline int get_free_txd(struct END_DEVICE *ei_local, int ring_no)
{
    unsigned int tmp_idx;

    tmp_idx = ei_local->free_txd_head[ring_no];
    ei_local->free_txd_head[ring_no] = ei_local->txd_pool_info[tmp_idx];
    atomic_sub(1, &ei_local->free_txd_num[ring_no]);
    return tmp_idx;
}

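/**
 * @brief translate a TXD pool index into its physical (DMA) address
 *
 * @param ei_local
 * @param idx TXD index within the coherent TXD pool
 *
 * @return physical address suitable for the QDMA pointer registers
 */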
static inline unsigned int get_phy_addr(struct END_DEVICE *ei_local,
                    unsigned int idx)
{
    return ei_local->phy_txd_pool + (idx * QTXD_LEN);
}

/**
 * @brief return a TXD to the tail of the free TXD list
 *
 * @param ei_local
 * @param free_txd_idx index of the TXD being released
 *
 * @return
 */
static inline void put_free_txd(struct END_DEVICE *ei_local, int free_txd_idx)
{
    ei_local->txd_pool_info[ei_local->free_txd_tail[0]] = free_txd_idx;
    ei_local->free_txd_tail[0] = free_txd_idx;
}

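/**
 * @brief build the per-ring free TXD lists for GMAC1 and GMAC2
 *
 * Splits the TXD pool into gmac1_txq_num + gmac2_txq_num rings and sets
 * each ring's free head/tail index and free TXD counter.
 *
 * @param ei_local
 */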
void init_pseudo_link_list(struct END_DEVICE *ei_local)
{
    int i;

    for (i = 0; i < gmac1_txq_num; i++) {
        atomic_set(&ei_local->free_txd_num[i], gmac1_txq_txd_num);
        ei_local->free_txd_head[i] = gmac1_txq_txd_num * i;
        ei_local->free_txd_tail[i] = gmac1_txq_txd_num * (i + 1) - 1;
    }
    for (i = 0; i < gmac2_txq_num; i++) {
        atomic_set(&ei_local->free_txd_num[i + gmac1_txq_num],
               gmac2_txq_txd_num);
        ei_local->free_txd_head[i + gmac1_txq_num] =
            gmac1_txd_num + gmac2_txq_txd_num * i;
        ei_local->free_txd_tail[i + gmac1_txq_num] =
            gmac1_txd_num + gmac2_txq_txd_num * (i + 1) - 1;
    }
}

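/**
 * @brief map a global TXD pool index back to the TX ring it belongs to
 *
 * @param txd_idx global TXD index
 *
 * @return ring number; 0 (with an error log) if the index is out of range
 */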
static inline int ring_no_mapping(int txd_idx)
{
    int i;

    if (txd_idx < gmac1_txd_num) {
        for (i = 0; i < gmac1_txq_num; i++) {
            if (txd_idx < (gmac1_txq_txd_num * (i + 1)))
                return i;
        }
    }

    txd_idx -= gmac1_txd_num;
    for (i = 0; i < gmac2_txq_num; i++) {
        if (txd_idx < (gmac2_txq_txd_num * (i + 1)))
            return (i + gmac1_txq_num);
    }
    pr_err("txd index out of range\n");
    return 0;
}

/* qdma initial alloc */
/**
 * @brief allocate the QDMA TXD pool and hook the initial null TXDs into
 *        the transmit and release queues
 *
 * @return 0: fail
 *         1: success
 */
bool qdma_tx_desc_alloc(void)
{
    struct net_device *dev = dev_raether;
    struct END_DEVICE *ei_local = netdev_priv(dev);
    unsigned int txd_idx;
    int i = 0;

    ei_local->txd_pool =
        dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                   QTXD_LEN * num_tx_desc,
                   &ei_local->phy_txd_pool, GFP_KERNEL);
    pr_err("txd_pool=%p phy_txd_pool=%p\n", ei_local->txd_pool,
           (void *)ei_local->phy_txd_pool);

    if (!ei_local->txd_pool) {
        pr_err("adapter->txd_pool allocation failed!\n");
        return 0;
    }
    pr_err("ei_local->skb_free start address is 0x%p.\n",
           ei_local->skb_free);
    /* clear skb_free and chain txd_pool_info into a free list */
    for (i = 0; i < num_tx_desc; i++) {
        ei_local->skb_free[i] = 0;
        ei_local->txd_pool_info[i] = i + 1;
        ei_local->txd_pool[i].txd_info3.LS = 1;
        ei_local->txd_pool[i].txd_info3.DDONE = 1;
    }

    init_pseudo_link_list(ei_local);

    /* get free txd from txd pool */
    txd_idx = get_free_txd(ei_local, 0);
    ei_local->tx_cpu_idx = txd_idx;
    /* add null TXD for transmit */
    sys_reg_write(QTX_CTX_PTR, get_phy_addr(ei_local, txd_idx));
    sys_reg_write(QTX_DTX_PTR, get_phy_addr(ei_local, txd_idx));

    /* get free txd from txd pool */
    txd_idx = get_free_txd(ei_local, 0);
    ei_local->rls_cpu_idx = txd_idx;
    /* add null TXD for release */
    sys_reg_write(QTX_CRX_PTR, get_phy_addr(ei_local, txd_idx));
    sys_reg_write(QTX_DRX_PTR, get_phy_addr(ei_local, txd_idx));

    /* Reserve 4 TXD for each physical queue */
    if (ei_local->chip_name == MT7623_FE || ei_local->chip_name == MT7621_FE ||
        ei_local->chip_name == LEOPARD_FE) {
        /* for (i = 0; i < NUM_PQ; i++) */
        for (i = 0; i < 16; i++)
            sys_reg_write(QTX_CFG_0 + QUEUE_OFFSET * i,
                      (NUM_PQ_RESV | (NUM_PQ_RESV << 8)));
    }

    sys_reg_write(QTX_SCH_1, 0x80000000);
#if 0
    if (ei_local->chip_name == MT7622_FE) {
        for (i = 0; i < NUM_PQ; i++) {
            if (i <= 15) {
                sys_reg_write(QDMA_PAGE, 0);
                sys_reg_write(QTX_CFG_0 + QUEUE_OFFSET * i,
                          (NUM_PQ_RESV |
                           (NUM_PQ_RESV << 8)));
            } else if (i > 15 && i <= 31) {
                sys_reg_write(QDMA_PAGE, 1);
                sys_reg_write(QTX_CFG_0 +
                          QUEUE_OFFSET * (i - 16),
                          (NUM_PQ_RESV |
                           (NUM_PQ_RESV << 8)));
            } else if (i > 31 && i <= 47) {
                sys_reg_write(QDMA_PAGE, 2);
                sys_reg_write(QTX_CFG_0 +
                          QUEUE_OFFSET * (i - 32),
                          (NUM_PQ_RESV |
                           (NUM_PQ_RESV << 8)));
            } else if (i > 47 && i <= 63) {
                sys_reg_write(QDMA_PAGE, 3);
                sys_reg_write(QTX_CFG_0 +
                          QUEUE_OFFSET * (i - 48),
                          (NUM_PQ_RESV |
                           (NUM_PQ_RESV << 8)));
            }
        }
        sys_reg_write(QDMA_PAGE, 0);
    }
#endif

    return 1;
}

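/**
 * @brief allocate and program the HW SFQ virtual-queue tables
 *
 * Allocates the per-VQ SFQ tables in coherent DMA memory, pre-fills the
 * head/tail pointers with a sentinel value and programs the VQTX table
 * bases, hash and binding registers. MT7622/LEOPARD use eight tables,
 * other chips four.
 *
 * @param dev
 *
 * @return 0 on success, 1 on allocation failure
 */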
bool sfq_init(struct net_device *dev)
{
    struct END_DEVICE *ei_local = netdev_priv(dev_raether);
    unsigned int reg_val;
    dma_addr_t sfq_phy0;
    dma_addr_t sfq_phy1;
    dma_addr_t sfq_phy2;
    dma_addr_t sfq_phy3;
    struct SFQ_table *sfq0 = NULL;
    struct SFQ_table *sfq1 = NULL;
    struct SFQ_table *sfq2 = NULL;
    struct SFQ_table *sfq3 = NULL;

    dma_addr_t sfq_phy4;
    dma_addr_t sfq_phy5;
    dma_addr_t sfq_phy6;
    dma_addr_t sfq_phy7;
    struct SFQ_table *sfq4 = NULL;
    struct SFQ_table *sfq5 = NULL;
    struct SFQ_table *sfq6 = NULL;
    struct SFQ_table *sfq7 = NULL;

    int i = 0;

    reg_val = sys_reg_read(VQTX_GLO);
    reg_val = reg_val | VQTX_MIB_EN;
    /* Virtual table extends to 32 bytes */
    sys_reg_write(VQTX_GLO, reg_val);
    reg_val = sys_reg_read(VQTX_GLO);
    if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
        sys_reg_write(VQTX_NUM,
                  (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) |
                  (VQTX_NUM_3) | (VQTX_NUM_4) | (VQTX_NUM_5) |
                  (VQTX_NUM_6) | (VQTX_NUM_7));
    } else {
        sys_reg_write(VQTX_NUM,
                  (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) |
                  (VQTX_NUM_3));
    }

    /* 10 s change hash algorithm */
    sys_reg_write(VQTX_HASH_CFG, 0xF002710);

    if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE)
        sys_reg_write(VQTX_VLD_CFG, 0xeca86420);
    else
        sys_reg_write(VQTX_VLD_CFG, 0xc840);
    sys_reg_write(VQTX_HASH_SD, 0x0D);
    sys_reg_write(QDMA_FC_THRES, 0x9b9b4444);
    sys_reg_write(QDMA_HRED1, 0);
    sys_reg_write(QDMA_HRED2, 0);
    sys_reg_write(QDMA_SRED1, 0);
    sys_reg_write(QDMA_SRED2, 0);
    if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
        sys_reg_write(VQTX_0_3_BIND_QID,
                  (VQTX_0_BIND_QID) | (VQTX_1_BIND_QID) |
                  (VQTX_2_BIND_QID) | (VQTX_3_BIND_QID));
        sys_reg_write(VQTX_4_7_BIND_QID,
                  (VQTX_4_BIND_QID) | (VQTX_5_BIND_QID) |
                  (VQTX_6_BIND_QID) | (VQTX_7_BIND_QID));
        pr_err("VQTX_0_3_BIND_QID =%x\n",
               sys_reg_read(VQTX_0_3_BIND_QID));
        pr_err("VQTX_4_7_BIND_QID =%x\n",
               sys_reg_read(VQTX_4_7_BIND_QID));
    }

    sfq0 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                  VQ_NUM0 * sizeof(struct SFQ_table), &sfq_phy0,
                  GFP_KERNEL);
    sfq1 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                  VQ_NUM1 * sizeof(struct SFQ_table), &sfq_phy1,
                  GFP_KERNEL);
    sfq2 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                  VQ_NUM2 * sizeof(struct SFQ_table), &sfq_phy2,
                  GFP_KERNEL);
    sfq3 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                  VQ_NUM3 * sizeof(struct SFQ_table), &sfq_phy3,
                  GFP_KERNEL);
    /* check the allocations before touching the tables */
    if (unlikely(!sfq0) || unlikely(!sfq1) ||
        unlikely(!sfq2) || unlikely(!sfq3)) {
        pr_err("QDMA SFQ0~3 VQ not available...\n");
        return 1;
    }
    memset(sfq0, 0x0, VQ_NUM0 * sizeof(struct SFQ_table));
    for (i = 0; i < VQ_NUM0; i++) {
        sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
        sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
    }
    memset(sfq1, 0x0, VQ_NUM1 * sizeof(struct SFQ_table));
    for (i = 0; i < VQ_NUM1; i++) {
        sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
        sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
    }
    memset(sfq2, 0x0, VQ_NUM2 * sizeof(struct SFQ_table));
    for (i = 0; i < VQ_NUM2; i++) {
        sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
        sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
    }
    memset(sfq3, 0x0, VQ_NUM3 * sizeof(struct SFQ_table));
    for (i = 0; i < VQ_NUM3; i++) {
        sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
        sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
    }
    if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
        sfq4 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                      VQ_NUM4 * sizeof(struct SFQ_table),
                      &sfq_phy4, GFP_KERNEL);
        sfq5 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                      VQ_NUM5 * sizeof(struct SFQ_table),
                      &sfq_phy5, GFP_KERNEL);
        sfq6 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                      VQ_NUM6 * sizeof(struct SFQ_table),
                      &sfq_phy6, GFP_KERNEL);
        sfq7 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                      VQ_NUM7 * sizeof(struct SFQ_table),
                      &sfq_phy7, GFP_KERNEL);
        if (unlikely(!sfq4) || unlikely(!sfq5) ||
            unlikely(!sfq6) || unlikely(!sfq7)) {
            pr_err("QDMA SFQ4~7 VQ not available...\n");
            return 1;
        }
        memset(sfq4, 0x0, VQ_NUM4 * sizeof(struct SFQ_table));
        for (i = 0; i < VQ_NUM4; i++) {
            sfq4[i].sfq_info1.VQHPTR = 0xdeadbeef;
            sfq4[i].sfq_info2.VQTPTR = 0xdeadbeef;
        }
        memset(sfq5, 0x0, VQ_NUM5 * sizeof(struct SFQ_table));
        for (i = 0; i < VQ_NUM5; i++) {
            sfq5[i].sfq_info1.VQHPTR = 0xdeadbeef;
            sfq5[i].sfq_info2.VQTPTR = 0xdeadbeef;
        }
        memset(sfq6, 0x0, VQ_NUM6 * sizeof(struct SFQ_table));
        for (i = 0; i < VQ_NUM6; i++) {
            sfq6[i].sfq_info1.VQHPTR = 0xdeadbeef;
            sfq6[i].sfq_info2.VQTPTR = 0xdeadbeef;
        }
        memset(sfq7, 0x0, VQ_NUM7 * sizeof(struct SFQ_table));
        for (i = 0; i < VQ_NUM7; i++) {
            sfq7[i].sfq_info1.VQHPTR = 0xdeadbeef;
            sfq7[i].sfq_info2.VQTPTR = 0xdeadbeef;
        }
    }

    pr_err("*****sfq_phy0 is 0x%p!!!*******\n", (void *)sfq_phy0);
    pr_err("*****sfq_phy1 is 0x%p!!!*******\n", (void *)sfq_phy1);
    pr_err("*****sfq_phy2 is 0x%p!!!*******\n", (void *)sfq_phy2);
    pr_err("*****sfq_phy3 is 0x%p!!!*******\n", (void *)sfq_phy3);
    pr_err("*****sfq_virt0 is 0x%p!!!*******\n", sfq0);
    pr_err("*****sfq_virt1 is 0x%p!!!*******\n", sfq1);
    pr_err("*****sfq_virt2 is 0x%p!!!*******\n", sfq2);
    pr_err("*****sfq_virt3 is 0x%p!!!*******\n", sfq3);
    if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
        pr_err("*****sfq_phy4 is 0x%p!!!*******\n", (void *)sfq_phy4);
        pr_err("*****sfq_phy5 is 0x%p!!!*******\n", (void *)sfq_phy5);
        pr_err("*****sfq_phy6 is 0x%p!!!*******\n", (void *)sfq_phy6);
        pr_err("*****sfq_phy7 is 0x%p!!!*******\n", (void *)sfq_phy7);
        pr_err("*****sfq_virt4 is 0x%p!!!*******\n", sfq4);
        pr_err("*****sfq_virt5 is 0x%p!!!*******\n", sfq5);
        pr_err("*****sfq_virt6 is 0x%p!!!*******\n", sfq6);
        pr_err("*****sfq_virt7 is 0x%p!!!*******\n", sfq7);
    }

    sys_reg_write(VQTX_TB_BASE0, (u32)sfq_phy0);
    sys_reg_write(VQTX_TB_BASE1, (u32)sfq_phy1);
    sys_reg_write(VQTX_TB_BASE2, (u32)sfq_phy2);
    sys_reg_write(VQTX_TB_BASE3, (u32)sfq_phy3);
    if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
        sys_reg_write(VQTX_TB_BASE4, (u32)sfq_phy4);
        sys_reg_write(VQTX_TB_BASE5, (u32)sfq_phy5);
        sys_reg_write(VQTX_TB_BASE6, (u32)sfq_phy6);
        sys_reg_write(VQTX_TB_BASE7, (u32)sfq_phy7);
    }

    return 0;
}

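/**
 * @brief set up the QDMA forwarding queue (FQ): descriptors plus data pages
 *
 * Allocates NUM_QDMA_PAGE descriptors and buffer pages, links them into a
 * chain and programs QDMA_FQ_HEAD/TAIL/CNT/BLEN.
 *
 * @param dev
 *
 * @return 0: fail
 *         1: success
 */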
bool fq_qdma_init(struct net_device *dev)
{
    struct END_DEVICE *ei_local = netdev_priv(dev);
    /* struct QDMA_txdesc *free_head = NULL; */
    dma_addr_t phy_free_head;
    dma_addr_t phy_free_tail;
    unsigned int *free_page_head = NULL;
    dma_addr_t phy_free_page_head;
    int i;

    free_head = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                       NUM_QDMA_PAGE *
                       QTXD_LEN, &phy_free_head, GFP_KERNEL);

    if (unlikely(!free_head)) {
        pr_err("QDMA FQ descriptor not available...\n");
        return 0;
    }
    memset(free_head, 0x0, QTXD_LEN * NUM_QDMA_PAGE);

    free_page_head =
        dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                   NUM_QDMA_PAGE * QDMA_PAGE_SIZE,
                   &phy_free_page_head, GFP_KERNEL);

    if (unlikely(!free_page_head)) {
        pr_err("QDMA FQ page not available...\n");
        return 0;
    }
    for (i = 0; i < NUM_QDMA_PAGE; i++) {
        free_head[i].txd_info1.SDP =
            (phy_free_page_head + (i * QDMA_PAGE_SIZE));
        if (i < (NUM_QDMA_PAGE - 1)) {
            free_head[i].txd_info2.NDP =
                (phy_free_head + ((i + 1) * QTXD_LEN));
        }
        free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
    }
    phy_free_tail =
        (phy_free_head + (u32)((NUM_QDMA_PAGE - 1) * QTXD_LEN));

    pr_err("phy_free_head is 0x%p!!!\n", (void *)phy_free_head);
    pr_err("phy_free_tail_phy is 0x%p!!!\n", (void *)phy_free_tail);
    sys_reg_write(QDMA_FQ_HEAD, (u32)phy_free_head);
    sys_reg_write(QDMA_FQ_TAIL, (u32)phy_free_tail);
    sys_reg_write(QDMA_FQ_CNT, ((num_tx_desc << 16) | NUM_QDMA_PAGE));
    sys_reg_write(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
    pr_info("gmac1_txd_num:%d; gmac2_txd_num:%d; num_tx_desc:%d\n",
        gmac1_txd_num, gmac2_txd_num, num_tx_desc);
    ei_local->free_head = free_head;
    ei_local->phy_free_head = phy_free_head;
    ei_local->free_page_head = free_page_head;
    ei_local->phy_free_page_head = phy_free_page_head;
    ei_local->tx_ring_full = 0;
    return 1;
}

int sfq_prot;

#if (sfq_debug)
int udp_source_port;
int tcp_source_port;
int ack_packt;
#endif
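/**
 * @brief parse L2/L3/L4 headers of a TX skb for the HW SFQ hash
 *
 * Fills sfq_parse_result (MAC addresses, VLAN gap, IP/TCP/UDP headers) and
 * sets sfq_prot (1: other IPv4, 2: IPV4_HNAPT, 3: IPV6_3T, 4: IPV6_5T).
 *
 * @param skb
 *
 * @return 0 on success, 1 for fragmented IPv4 packets
 */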
int sfq_parse_layer_info(struct sk_buff *skb)
{
    struct vlan_hdr *vh_sfq = NULL;
    struct ethhdr *eth_sfq = NULL;
    struct iphdr *iph_sfq = NULL;
    struct ipv6hdr *ip6h_sfq = NULL;
    struct tcphdr *th_sfq = NULL;
    struct udphdr *uh_sfq = NULL;

    memset(&sfq_parse_result, 0, sizeof(sfq_parse_result));
    eth_sfq = (struct ethhdr *)skb->data;
    ether_addr_copy(sfq_parse_result.dmac, eth_sfq->h_dest);
    ether_addr_copy(sfq_parse_result.smac, eth_sfq->h_source);
    /* memcpy(sfq_parse_result.dmac, eth_sfq->h_dest, ETH_ALEN); */
    /* memcpy(sfq_parse_result.smac, eth_sfq->h_source, ETH_ALEN); */
    sfq_parse_result.eth_type = eth_sfq->h_proto;

    if (sfq_parse_result.eth_type == htons(ETH_P_8021Q)) {
        sfq_parse_result.vlan1_gap = VLAN_HLEN;
        vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
        sfq_parse_result.eth_type = vh_sfq->h_vlan_encapsulated_proto;
    } else {
        sfq_parse_result.vlan1_gap = 0;
    }

    /* set layer4 start addr */
    if ((sfq_parse_result.eth_type == htons(ETH_P_IP)) ||
        (sfq_parse_result.eth_type == htons(ETH_P_PPP_SES) &&
         sfq_parse_result.ppp_tag == htons(PPP_IP))) {
        iph_sfq =
            (struct iphdr *)(skb->data + ETH_HLEN +
                     (sfq_parse_result.vlan1_gap));

        /* prepare layer3/layer4 info */
        memcpy(&sfq_parse_result.iph, iph_sfq, sizeof(struct iphdr));
        if (iph_sfq->protocol == IPPROTO_TCP) {
            th_sfq =
                (struct tcphdr *)(skb->data + ETH_HLEN +
                          (sfq_parse_result.vlan1_gap) +
                          (iph_sfq->ihl * 4));
            memcpy(&sfq_parse_result.th, th_sfq,
                   sizeof(struct tcphdr));
#if (sfq_debug)
            tcp_source_port = ntohs(sfq_parse_result.th.source);
            udp_source_port = 0;
            /* tcp ack packet */
            if (ntohl(sfq_parse_result.iph.saddr) == 0xa0a0a04)
                ack_packt = 1;
            else
                ack_packt = 0;
#endif
            sfq_prot = 2; /* IPV4_HNAPT */
            if (iph_sfq->frag_off & htons(IP_MF | IP_OFFSET))
                return 1;
        } else if (iph_sfq->protocol == IPPROTO_UDP) {
            uh_sfq =
                (struct udphdr *)(skb->data + ETH_HLEN +
                          (sfq_parse_result.vlan1_gap) +
                          iph_sfq->ihl * 4);
            memcpy(&sfq_parse_result.uh, uh_sfq,
                   sizeof(struct udphdr));
#if (sfq_debug)
            udp_source_port = ntohs(sfq_parse_result.uh.source);
            tcp_source_port = 0;
            ack_packt = 0;
#endif
            sfq_prot = 2; /* IPV4_HNAPT */
            if (iph_sfq->frag_off & htons(IP_MF | IP_OFFSET))
                return 1;
        } else {
            sfq_prot = 1;
        }
    } else if (sfq_parse_result.eth_type == htons(ETH_P_IPV6) ||
           (sfq_parse_result.eth_type == htons(ETH_P_PPP_SES) &&
            sfq_parse_result.ppp_tag == htons(PPP_IPV6))) {
        ip6h_sfq =
            (struct ipv6hdr *)(skb->data + ETH_HLEN +
                       (sfq_parse_result.vlan1_gap));
        if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
            sfq_prot = 4; /* IPV6_5T */
#if (sfq_debug)
            if (ntohl(sfq_parse_result.ip6h.saddr.s6_addr32[3]) ==
                8)
                ack_packt = 1;
            else
                ack_packt = 0;
#endif
        } else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
#if (sfq_debug)
            ack_packt = 0;
#endif
            sfq_prot = 4; /* IPV6_5T */
        } else {
            sfq_prot = 3; /* IPV6_3T */
        }
    }
    return 0;
}

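/**
 * @brief fill one QDMA TXD for a linear (non-TSO) skb and kick the CPU pointer
 *
 * Maps the payload, fills checksum/VLAN/QoS fields, chains a new null TXD
 * and advances QTX_CTX_PTR under page_lock.
 *
 * @param ei_local
 * @param dev
 * @param skb
 * @param gmac_no forward port (1 or 2)
 * @param ring_no TX ring/queue index
 *
 * @return length of the transmitted skb
 */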
int rt2880_qdma_eth_send(struct END_DEVICE *ei_local, struct net_device *dev,
             struct sk_buff *skb, int gmac_no, int ring_no)
{
    unsigned int length = skb->len;
    struct QDMA_txdesc *cpu_ptr, *prev_cpu_ptr;
    struct QDMA_txdesc dummy_desc;
    struct PSEUDO_ADAPTER *p_ad;
    unsigned long flags;
    unsigned int next_txd_idx, qidx;

    cpu_ptr = &dummy_desc;
    /* 2. prepare data */
    dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
                   virt_to_phys(skb->data),
                   skb->len, DMA_TO_DEVICE);
    /* cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data); */
    cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
    cpu_ptr->txd_info3.SDL = skb->len;
    if (ei_local->features & FE_HW_SFQ) {
        sfq_parse_layer_info(skb);
        cpu_ptr->txd_info5.VQID0 = 1; /* 1:HW hash 0:CPU */
        cpu_ptr->txd_info5.PROT = sfq_prot;
        /* no vlan */
        cpu_ptr->txd_info5.IPOFST = 14 + (sfq_parse_result.vlan1_gap);
    }
    cpu_ptr->txd_info4.FPORT = gmac_no;

    if (ei_local->features & FE_CSUM_OFFLOAD) {
        if (skb->ip_summed == CHECKSUM_PARTIAL)
            cpu_ptr->txd_info5.TUI_CO = 7;
        else
            cpu_ptr->txd_info5.TUI_CO = 0;
    }

    if (ei_local->features & FE_HW_VLAN_TX) {
        if (skb_vlan_tag_present(skb)) {
            cpu_ptr->txd_info6.INSV_1 = 1;
            cpu_ptr->txd_info6.VLAN_TAG_1 = skb_vlan_tag_get(skb);
            cpu_ptr->txd_info4.QID = skb_vlan_tag_get(skb);
        } else {
            cpu_ptr->txd_info4.QID = ring_no;
            cpu_ptr->txd_info6.INSV_1 = 0;
            cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
        }
    } else {
        cpu_ptr->txd_info6.INSV_1 = 0;
        cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
    }
    cpu_ptr->txd_info4.QID = 0;
    /* cpu_ptr->txd_info4.QID = ring_no; */

    if ((ei_local->features & QDMA_QOS_MARK) && (skb->mark != 0)) {
        if (skb->mark < 64) {
            qidx = M2Q_table[skb->mark];
            cpu_ptr->txd_info4.QID = ((qidx & 0x30) >> 4);
            cpu_ptr->txd_info4.QID = (qidx & 0x0f);
        } else {
            pr_debug("skb->mark out of range\n");
            cpu_ptr->txd_info4.QID = 0;
        }
    }
    /* QoS Web UI used */
    if ((ei_local->features & QDMA_QOS_WEB) && (lan_wan_separate == 1)) {
        if (web_sfq_enable == 1 && (skb->mark == 2)) {
            if (gmac_no == 1)
                cpu_ptr->txd_info4.QID = HW_SFQ_DL;
            else
                cpu_ptr->txd_info4.QID = HW_SFQ_UP;
        } else if (gmac_no == 2) {
            cpu_ptr->txd_info4.QID += 8;
        }
    }
#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
    if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
        if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
            if (ppe_hook_rx_eth) {
                cpu_ptr->txd_info4.FPORT = 3; /* PPE */
                FOE_MAGIC_TAG(skb) = 0;
            }
        }
    } else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
        if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
            if (ppe_hook_rx_eth) {
                cpu_ptr->txd_info4.FPORT = 3; /* PPE */
                FOE_MAGIC_TAG(skb) = 0;
            }
        }
    }
#endif

    /* dma_sync_single_for_device(NULL, virt_to_phys(skb->data), */
    /* skb->len, DMA_TO_DEVICE); */
    cpu_ptr->txd_info4.SWC = 1;

    /* 5. move CPU_PTR to new TXD */
    cpu_ptr->txd_info5.TSO = 0;
    cpu_ptr->txd_info3.LS = 1;
    cpu_ptr->txd_info3.DDONE = 0;
    next_txd_idx = get_free_txd(ei_local, ring_no);
    cpu_ptr->txd_info2.NDP = get_phy_addr(ei_local, next_txd_idx);
    spin_lock_irqsave(&ei_local->page_lock, flags);
    prev_cpu_ptr = ei_local->txd_pool + ei_local->tx_cpu_idx;
    /* update skb_free */
    ei_local->skb_free[ei_local->tx_cpu_idx] = skb;
    /* update tx cpu idx */
    ei_local->tx_cpu_idx = next_txd_idx;
    /* update txd info */
    prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
    prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
    prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
    prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
    prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
    prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
    prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
    /* NOTE: memory barrier so the descriptor is fully written to memory
     * before the DMA engine sees the updated CPU pointer
     */
    wmb();
    /* update CPU pointer */
    sys_reg_write(QTX_CTX_PTR,
              get_phy_addr(ei_local, ei_local->tx_cpu_idx));
    spin_unlock_irqrestore(&ei_local->page_lock, flags);

    if (ei_local->features & FE_GE2_SUPPORT) {
        if (gmac_no == 2) {
            if (ei_local->pseudo_dev) {
                p_ad = netdev_priv(ei_local->pseudo_dev);
                p_ad->stat.tx_packets++;
                p_ad->stat.tx_bytes += length;
            }
        } else {
            ei_local->stat.tx_packets++;
            ei_local->stat.tx_bytes += skb->len;
        }
    } else {
        ei_local->stat.tx_packets++;
        ei_local->stat.tx_bytes += skb->len;
    }
    if (ei_local->features & FE_INT_NAPI) {
        if (ei_local->tx_full == 1) {
            ei_local->tx_full = 0;
            netif_wake_queue(dev);
        }
    }

    return length;
}

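/**
 * @brief build a chain of QDMA TXDs for a (possibly fragmented) TSO skb
 *
 * Splits the linear part and every page fragment into MAX_QTXD_LEN sized
 * TXDs, enables TSO in the first descriptor when gso_segs > 1, then kicks
 * QTX_CTX_PTR under page_lock.
 *
 * @return length of the transmitted skb
 */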
int rt2880_qdma_eth_send_tso(struct END_DEVICE *ei_local,
                 struct net_device *dev, struct sk_buff *skb,
                 int gmac_no, int ring_no)
{
    unsigned int length = skb->len;
    struct QDMA_txdesc *cpu_ptr, *prev_cpu_ptr;
    struct QDMA_txdesc dummy_desc;
    struct QDMA_txdesc init_dummy_desc;
    int ctx_idx;
    struct iphdr *iph = NULL;
    struct QDMA_txdesc *init_cpu_ptr;
    struct tcphdr *th = NULL;
    skb_frag_t *frag;
    unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
    unsigned int len, size, frag_txd_num, qidx;
    dma_addr_t offset;
    unsigned long flags;
    int i;
    int init_qid, init_qid1;
    struct ipv6hdr *ip6h = NULL;
    struct PSEUDO_ADAPTER *p_ad;

    init_cpu_ptr = &init_dummy_desc;
    cpu_ptr = &init_dummy_desc;

    len = length - skb->data_len;
    dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
                   virt_to_phys(skb->data),
                   len,
                   DMA_TO_DEVICE);
    offset = virt_to_phys(skb->data);
    cpu_ptr->txd_info1.SDP = offset;
    if (len > MAX_QTXD_LEN) {
        cpu_ptr->txd_info3.SDL = MAX_QTXD_LEN;
        cpu_ptr->txd_info3.LS = 0;
        len -= MAX_QTXD_LEN;
        offset += MAX_QTXD_LEN;
    } else {
        cpu_ptr->txd_info3.SDL = len;
        cpu_ptr->txd_info3.LS = nr_frags ? 0 : 1;
        len = 0;
    }
    if (ei_local->features & FE_HW_SFQ) {
        sfq_parse_layer_info(skb);

        cpu_ptr->txd_info5.VQID0 = 1;
        cpu_ptr->txd_info5.PROT = sfq_prot;
        /* no vlan */
        cpu_ptr->txd_info5.IPOFST = 14 + (sfq_parse_result.vlan1_gap);
    }
    if (gmac_no == 1)
        cpu_ptr->txd_info4.FPORT = 1;
    else
        cpu_ptr->txd_info4.FPORT = 2;

    cpu_ptr->txd_info5.TSO = 0;
    cpu_ptr->txd_info4.QID = 0;
    /* cpu_ptr->txd_info4.QID = ring_no; */
    if ((ei_local->features & QDMA_QOS_MARK) && (skb->mark != 0)) {
        if (skb->mark < 64) {
            qidx = M2Q_table[skb->mark];
            cpu_ptr->txd_info4.QID = qidx;
        } else {
            pr_debug("skb->mark out of range\n");
            cpu_ptr->txd_info4.QID = 0;
        }
    }
    if (ei_local->features & FE_CSUM_OFFLOAD) {
        if (skb->ip_summed == CHECKSUM_PARTIAL)
            cpu_ptr->txd_info5.TUI_CO = 7;
        else
            cpu_ptr->txd_info5.TUI_CO = 0;
    }

    if (ei_local->features & FE_HW_VLAN_TX) {
        if (skb_vlan_tag_present(skb)) {
            cpu_ptr->txd_info6.INSV_1 = 1;
            cpu_ptr->txd_info6.VLAN_TAG_1 = skb_vlan_tag_get(skb);
            cpu_ptr->txd_info4.QID = skb_vlan_tag_get(skb);
        } else {
            cpu_ptr->txd_info4.QID = ring_no;
            cpu_ptr->txd_info6.INSV_1 = 0;
            cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
        }
    } else {
        cpu_ptr->txd_info6.INSV_1 = 0;
        cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
    }

    if ((ei_local->features & FE_GE2_SUPPORT) && (lan_wan_separate == 1)) {
        if (web_sfq_enable == 1 && (skb->mark == 2)) {
            if (gmac_no == 1)
                cpu_ptr->txd_info4.QID = HW_SFQ_DL;
            else
                cpu_ptr->txd_info4.QID = HW_SFQ_UP;
        } else if (gmac_no == 2) {
            cpu_ptr->txd_info4.QID += 8;
        }
    }
    /* debug multi tx queue */
    init_qid = cpu_ptr->txd_info4.QID;
    init_qid1 = cpu_ptr->txd_info4.QID;
#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
    if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
        if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
            if (ppe_hook_rx_eth) {
                cpu_ptr->txd_info4.FPORT = 3; /* PPE */
                FOE_MAGIC_TAG(skb) = 0;
            }
        }
    } else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
        if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
            if (ppe_hook_rx_eth) {
                cpu_ptr->txd_info4.FPORT = 3; /* PPE */
                FOE_MAGIC_TAG(skb) = 0;
            }
        }
    }
#endif

    cpu_ptr->txd_info4.SWC = 1;

    ctx_idx = get_free_txd(ei_local, ring_no);
    cpu_ptr->txd_info2.NDP = get_phy_addr(ei_local, ctx_idx);
    /* prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
     * prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
     * prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
     * prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
     */
    if (len > 0) {
        frag_txd_num = cal_frag_txd_num(len);
        for (; frag_txd_num > 0; frag_txd_num--) {
            if (len < MAX_QTXD_LEN)
                size = len;
            else
                size = MAX_QTXD_LEN;

            cpu_ptr = (ei_local->txd_pool + (ctx_idx));
            dummy_desc.txd_info1 = cpu_ptr->txd_info1;
            dummy_desc.txd_info2 = cpu_ptr->txd_info2;
            dummy_desc.txd_info3 = cpu_ptr->txd_info3;
            dummy_desc.txd_info4 = cpu_ptr->txd_info4;
            dummy_desc.txd_info5 = cpu_ptr->txd_info5;
            dummy_desc.txd_info6 = cpu_ptr->txd_info6;
            dummy_desc.txd_info7 = cpu_ptr->txd_info7;
            prev_cpu_ptr = cpu_ptr;
            cpu_ptr = &dummy_desc;
            cpu_ptr->txd_info4.QID = init_qid;
            cpu_ptr->txd_info4.QID = init_qid1;
            cpu_ptr->txd_info1.SDP = offset;
            cpu_ptr->txd_info3.SDL = size;
            if ((nr_frags == 0) && (frag_txd_num == 1))
                cpu_ptr->txd_info3.LS = 1;
            else
                cpu_ptr->txd_info3.LS = 0;
            cpu_ptr->txd_info3.DDONE = 0;
            cpu_ptr->txd_info4.SWC = 1;
            if (cpu_ptr->txd_info3.LS == 1)
                ei_local->skb_free[ctx_idx] = skb;
            else
                ei_local->skb_free[ctx_idx] = magic_id;
            ctx_idx = get_free_txd(ei_local, ring_no);
            cpu_ptr->txd_info2.NDP =
                get_phy_addr(ei_local, ctx_idx);
            prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
            prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
            prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
            prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
            prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
            prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
            prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
            offset += size;
            len -= size;
        }
    }

    for (i = 0; i < nr_frags; i++) {
        /* 1. set or get init value for current fragment */
        offset = 0;
        frag = &skb_shinfo(skb)->frags[i];
        len = skb_frag_size(frag);
        frag_txd_num = cal_frag_txd_num(len);
        for (; frag_txd_num > 0; frag_txd_num--) {
            /* 2. size will be assigned to SDL
             * and can't be larger than MAX_TXD_LEN
             */
            if (len < MAX_QTXD_LEN)
                size = len;
            else
                size = MAX_QTXD_LEN;

            /* 3. Update TXD info */
            cpu_ptr = (ei_local->txd_pool + (ctx_idx));
            dummy_desc.txd_info1 = cpu_ptr->txd_info1;
            dummy_desc.txd_info2 = cpu_ptr->txd_info2;
            dummy_desc.txd_info3 = cpu_ptr->txd_info3;
            dummy_desc.txd_info4 = cpu_ptr->txd_info4;
            dummy_desc.txd_info5 = cpu_ptr->txd_info5;
            dummy_desc.txd_info6 = cpu_ptr->txd_info6;
            dummy_desc.txd_info7 = cpu_ptr->txd_info7;
            prev_cpu_ptr = cpu_ptr;
            cpu_ptr = &dummy_desc;
            cpu_ptr->txd_info4.QID = init_qid;
            cpu_ptr->txd_info4.QID = init_qid1;
            cpu_ptr->txd_info1.SDP =
                skb_frag_dma_map(&ei_local->qdma_pdev->dev,
                         frag, offset, size,
                         DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error
                     (&ei_local->qdma_pdev->dev,
                      cpu_ptr->txd_info1.SDP)))
                pr_err("[%s]dma_map_page() failed...\n",
                       __func__);

            cpu_ptr->txd_info3.SDL = size;

            if ((i == (nr_frags - 1)) && (frag_txd_num == 1))
                cpu_ptr->txd_info3.LS = 1;
            else
                cpu_ptr->txd_info3.LS = 0;
            cpu_ptr->txd_info3.DDONE = 0;
            cpu_ptr->txd_info4.SWC = 1;
            /* 4. Update skb_free for housekeeping */
            if (cpu_ptr->txd_info3.LS == 1)
                ei_local->skb_free[ctx_idx] = skb;
            else
                ei_local->skb_free[ctx_idx] = magic_id;

            /* 5. Get next TXD */
            ctx_idx = get_free_txd(ei_local, ring_no);
            cpu_ptr->txd_info2.NDP =
                get_phy_addr(ei_local, ctx_idx);
            prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
            prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
            prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
            prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
            prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
            prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
            prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
            /* 6. Update offset and len. */
            offset += size;
            len -= size;
        }
    }

    if (skb_shinfo(skb)->gso_segs > 1) {
        /* TsoLenUpdate(skb->len); */

        /* TCP over IPv4 */
        iph = (struct iphdr *)skb_network_header(skb);
        if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
            th = (struct tcphdr *)skb_transport_header(skb);

            init_cpu_ptr->txd_info5.TSO = 1;

            th->check = htons(skb_shinfo(skb)->gso_size);

            dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
                           virt_to_phys(th),
                           sizeof(struct tcphdr),
                           DMA_TO_DEVICE);
        }
        if (ei_local->features & FE_TSO_V6) {
            ip6h = (struct ipv6hdr *)skb_network_header(skb);
            if ((ip6h->nexthdr == NEXTHDR_TCP) &&
                (ip6h->version == 6)) {
                th = (struct tcphdr *)skb_transport_header(skb);
                init_cpu_ptr->txd_info5.TSO = 1;
                th->check = htons(skb_shinfo(skb)->gso_size);
                dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
                               virt_to_phys(th),
                               sizeof(struct tcphdr),
                               DMA_TO_DEVICE);
            }
        }

        if (ei_local->features & FE_HW_SFQ) {
            init_cpu_ptr->txd_info5.VQID0 = 1;
            init_cpu_ptr->txd_info5.PROT = sfq_prot;
            /* no vlan */
            init_cpu_ptr->txd_info5.IPOFST =
                14 + (sfq_parse_result.vlan1_gap);
        }
    }
    /* dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); */

    init_cpu_ptr->txd_info3.DDONE = 0;
    spin_lock_irqsave(&ei_local->page_lock, flags);
    prev_cpu_ptr = ei_local->txd_pool + ei_local->tx_cpu_idx;
    ei_local->skb_free[ei_local->tx_cpu_idx] = magic_id;
    ei_local->tx_cpu_idx = ctx_idx;
    prev_cpu_ptr->txd_info1 = init_dummy_desc.txd_info1;
    prev_cpu_ptr->txd_info2 = init_dummy_desc.txd_info2;
    prev_cpu_ptr->txd_info4 = init_dummy_desc.txd_info4;
    prev_cpu_ptr->txd_info3 = init_dummy_desc.txd_info3;
    prev_cpu_ptr->txd_info5 = init_dummy_desc.txd_info5;
    prev_cpu_ptr->txd_info6 = init_dummy_desc.txd_info6;
    prev_cpu_ptr->txd_info7 = init_dummy_desc.txd_info7;

    /* NOTE: memory barrier so the descriptor chain is fully written to
     * memory before the DMA engine sees the updated CPU pointer
     */
    wmb();
    sys_reg_write(QTX_CTX_PTR,
              get_phy_addr(ei_local, ei_local->tx_cpu_idx));
    spin_unlock_irqrestore(&ei_local->page_lock, flags);

    if (ei_local->features & FE_GE2_SUPPORT) {
        if (gmac_no == 2) {
            if (ei_local->pseudo_dev) {
                p_ad = netdev_priv(ei_local->pseudo_dev);
                p_ad->stat.tx_packets++;
                p_ad->stat.tx_bytes += length;
            }
        } else {
            ei_local->stat.tx_packets++;
            ei_local->stat.tx_bytes += skb->len;
        }
    } else {
        ei_local->stat.tx_packets++;
        ei_local->stat.tx_bytes += skb->len;
    }
    if (ei_local->features & FE_INT_NAPI) {
        if (ei_local->tx_full == 1) {
            ei_local->tx_full = 0;
            netif_wake_queue(dev);
        }
    }

    return length;
}

/* QDMA functions */
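/**
 * @brief busy-wait until the QDMA RX/TX engines report idle
 *
 * @return 0 once both RX_DMA_BUSY and TX_DMA_BUSY are cleared
 */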
int fe_qdma_wait_dma_idle(void)
{
    unsigned int reg_val;

    while (1) {
        reg_val = sys_reg_read(QDMA_GLO_CFG);
        if ((reg_val & RX_DMA_BUSY)) {
            pr_err("\n RX_DMA_BUSY !!! ");
            continue;
        }
        if ((reg_val & TX_DMA_BUSY)) {
            pr_err("\n TX_DMA_BUSY !!! ");
            continue;
        }
        return 0;
    }

    return -1;
}

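/**
 * @brief allocate the QDMA RX ring and its skb data buffers
 *
 * @param dev
 *
 * @return 0 on success, -ENOMEM on allocation or mapping failure
 */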
int fe_qdma_rx_dma_init(struct net_device *dev)
{
    int i;
    struct END_DEVICE *ei_local = netdev_priv(dev);
    unsigned int skb_size;

    /* Initial QDMA RX Ring */
    skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    ei_local->qrx_ring =
        dma_alloc_coherent(&ei_local->qdma_pdev->dev,
                   NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
                   &ei_local->phy_qrx_ring,
                   GFP_ATOMIC | __GFP_ZERO);
    for (i = 0; i < NUM_QRX_DESC; i++) {
        ei_local->netrx0_skb_data[i] =
            raeth_alloc_skb_data(skb_size, GFP_KERNEL);
        if (!ei_local->netrx0_skb_data[i]) {
            pr_err("rx skbuff buffer allocation failed!");
            goto no_rx_mem;
        }

        memset(&ei_local->qrx_ring[i], 0, sizeof(struct PDMA_rxdesc));
        ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
        ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
        ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
        ei_local->qrx_ring[i].rxd_info1.PDP0 =
            dma_map_single(&ei_local->qdma_pdev->dev,
                       ei_local->netrx0_skb_data[i] +
                       NET_SKB_PAD,
                       MAX_RX_LENGTH,
                       DMA_FROM_DEVICE);
        if (unlikely
            (dma_mapping_error
             (&ei_local->qdma_pdev->dev,
              ei_local->qrx_ring[i].rxd_info1.PDP0))) {
            pr_err("[%s]dma_map_single() failed...\n", __func__);
            goto no_rx_mem;
        }
    }
    pr_err("\nphy_qrx_ring = 0x%p, qrx_ring = 0x%p\n",
           (void *)ei_local->phy_qrx_ring, ei_local->qrx_ring);

    /* Tell the adapter where the RX rings are located. */
    sys_reg_write(QRX_BASE_PTR_0,
              phys_to_bus((u32)ei_local->phy_qrx_ring));
    sys_reg_write(QRX_MAX_CNT_0, cpu_to_le32((u32)NUM_QRX_DESC));
    sys_reg_write(QRX_CRX_IDX_0, cpu_to_le32((u32)(NUM_QRX_DESC - 1)));

    sys_reg_write(QDMA_RST_CFG, PST_DRX_IDX0);
    ei_local->rx_ring[0] = ei_local->qrx_ring;

    return 0;

no_rx_mem:
    return -ENOMEM;
}

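/**
 * @brief TX-side QDMA init: optional SFQ setup, TXD pool and FQ allocation
 *
 * @param dev
 *
 * @return 0 on success, -1 on failure
 */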
int fe_qdma_tx_dma_init(struct net_device *dev)
{
    bool pass;
    struct END_DEVICE *ei_local = netdev_priv(dev_raether);

    if (ei_local->features & FE_HW_SFQ)
        sfq_init(dev);
    /* tx desc alloc, add a NULL TXD to HW */
    pass = qdma_tx_desc_alloc();
    if (!pass)
        return -1;

    pass = fq_qdma_init(dev);
    if (!pass)
        return -1;

    return 0;
}

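/**
 * @brief free the QDMA RX ring and its skb data buffers
 *
 * @param dev
 */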
void fe_qdma_rx_dma_deinit(struct net_device *dev)
{
    struct END_DEVICE *ei_local = netdev_priv(dev);
    int i;

    /* free RX Ring */
    dma_free_coherent(&ei_local->qdma_pdev->dev,
              NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
              ei_local->qrx_ring, ei_local->phy_qrx_ring);

    /* free RX skb */
    for (i = 0; i < NUM_QRX_DESC; i++) {
        raeth_free_skb_data(ei_local->netrx0_skb_data[i]);
        ei_local->netrx0_skb_data[i] = NULL;
    }
}

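/**
 * @brief free the TXD pool, the FQ descriptors/pages and any pending TX skbs
 *
 * @param dev
 */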
void fe_qdma_tx_dma_deinit(struct net_device *dev)
{
    struct END_DEVICE *ei_local = netdev_priv(dev);
    int i;

    /* free TX Ring */
    if (ei_local->txd_pool)
        dma_free_coherent(&ei_local->qdma_pdev->dev,
                  num_tx_desc * QTXD_LEN,
                  ei_local->txd_pool, ei_local->phy_txd_pool);
    if (ei_local->free_head)
        dma_free_coherent(&ei_local->qdma_pdev->dev,
                  NUM_QDMA_PAGE * QTXD_LEN,
                  ei_local->free_head, ei_local->phy_free_head);
    if (ei_local->free_page_head)
        dma_free_coherent(&ei_local->qdma_pdev->dev,
                  NUM_QDMA_PAGE * QDMA_PAGE_SIZE,
                  ei_local->free_page_head,
                  ei_local->phy_free_page_head);

    /* free TX data */
    for (i = 0; i < num_tx_desc; i++) {
        if ((ei_local->skb_free[i] != (struct sk_buff *)0xFFFFFFFF) &&
            (ei_local->skb_free[i] != 0))
            dev_kfree_skb_any(ei_local->skb_free[i]);
    }
}

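/**
 * @brief program QDMA_GLO_CFG and the flow-control thresholds, then enable
 *        TX/RX DMA with 32-byte descriptors and the 2-byte RX offset
 */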
void set_fe_qdma_glo_cfg(void)
{
    unsigned int reg_val;
    unsigned int dma_glo_cfg = 0;
    struct END_DEVICE *ei_local = netdev_priv(dev_raether);

    reg_val = sys_reg_read(QDMA_GLO_CFG);
    reg_val &= 0x000000FF;

    sys_reg_write(QDMA_GLO_CFG, reg_val);
    reg_val = sys_reg_read(QDMA_GLO_CFG);

    /* Enable random early drop and set drop threshold automatically */
    if (!(ei_local->features & FE_HW_SFQ))
        sys_reg_write(QDMA_FC_THRES, 0x4444);
    sys_reg_write(QDMA_HRED2, 0x0);

    dma_glo_cfg =
        (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS |
         PDMA_DESC_32B_E);
    dma_glo_cfg |= (RX_2B_OFFSET);
    sys_reg_write(QDMA_GLO_CFG, dma_glo_cfg);

    pr_err("Enable QDMA TX NDP coherence check and re-read mechanism\n");
    reg_val = sys_reg_read(QDMA_GLO_CFG);
    reg_val = reg_val | 0x400 | 0x100000;
    sys_reg_write(QDMA_GLO_CFG, reg_val);
    /* sys_reg_write(QDMA_GLO_CFG, 0x95404575); */
    sys_reg_write(QDMA_GLO_CFG, 0x95404475);
    pr_err("***********QDMA_GLO_CFG=%x\n", sys_reg_read(QDMA_GLO_CFG));
}

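/**
 * @brief QDMA transmit entry point: pick the ring, check free TXDs and
 *        dispatch to the linear or TSO send path
 *
 * @return 0; the skb is consumed (transmitted or dropped)
 */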
int ei_qdma_start_xmit(struct sk_buff *skb, struct net_device *dev, int gmac_no)
{
    struct END_DEVICE *ei_local = netdev_priv(dev);
    unsigned int num_of_txd = 0;
    unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
    skb_frag_t *frag;
    struct PSEUDO_ADAPTER *p_ad;
    int ring_no;

    ring_no = skb->queue_mapping + (gmac_no - 1) * gmac1_txq_num;

#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
    if (ppe_hook_tx_eth) {
        if (ppe_hook_tx_eth(skb, gmac_no) != 1) {
            dev_kfree_skb_any(skb);
            return 0;
        }
    }
#endif

    /* dev->trans_start = jiffies; save the timestamp */
    netif_trans_update(dev);
    /* spin_lock_irqsave(&ei_local->page_lock, flags); */

    /* check free_txd_num before calling rt288_eth_send() */
    if (ei_local->features & FE_TSO) {
        num_of_txd += cal_frag_txd_num(skb->len - skb->data_len);
        if (nr_frags != 0) {
            for (i = 0; i < nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                num_of_txd += cal_frag_txd_num(skb_frag_size(frag));
            }
        }
    } else {
        num_of_txd = 1;
    }

    /* if ((ei_local->free_txd_num > num_of_txd + 1)) { */
    if (likely(atomic_read(&ei_local->free_txd_num[ring_no]) >
           (num_of_txd + 1))) {
        if (num_of_txd == 1)
            rt2880_qdma_eth_send(ei_local, dev, skb,
                         gmac_no, ring_no);
        else
            rt2880_qdma_eth_send_tso(ei_local, dev, skb,
                         gmac_no, ring_no);
    } else {
        if (ei_local->features & FE_GE2_SUPPORT) {
            if (gmac_no == 2) {
                if (ei_local->pseudo_dev) {
                    p_ad =
                        netdev_priv(ei_local->pseudo_dev);
                    p_ad->stat.tx_dropped++;
                }
            } else {
                ei_local->stat.tx_dropped++;
            }
        } else {
            ei_local->stat.tx_dropped++;
        }
        /* kfree_skb(skb); */
        dev_kfree_skb_any(skb);
        /* spin_unlock_irqrestore(&ei_local->page_lock, flags); */
        return 0;
    }
    /* spin_unlock_irqrestore(&ei_local->page_lock, flags); */
    return 0;
}

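/**
 * @brief reclaim TXDs already released by hardware, free their skbs and
 *        advance QTX_CRX_PTR
 *
 * @param netdev
 * @param budget unused here; kept for the housekeeping API
 *
 * @return 0
 */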
int ei_qdma_xmit_housekeeping(struct net_device *netdev, int budget)
{
    struct END_DEVICE *ei_local = netdev_priv(netdev);
    dma_addr_t dma_ptr;
    struct QDMA_txdesc *cpu_ptr = NULL;
    dma_addr_t tmp_ptr;
    unsigned int ctx_offset = 0;
    unsigned int dtx_offset = 0;
    unsigned int rls_cnt[TOTAL_TXQ_NUM] = { 0 };
    int ring_no;
    int i;

    dma_ptr = (dma_addr_t)sys_reg_read(QTX_DRX_PTR);
    ctx_offset = ei_local->rls_cpu_idx;
    dtx_offset = (dma_ptr - ei_local->phy_txd_pool) / QTXD_LEN;
    cpu_ptr = (ei_local->txd_pool + (ctx_offset));
    while (ctx_offset != dtx_offset) {
        /* 1. keep cpu next TXD */
        tmp_ptr = (dma_addr_t)cpu_ptr->txd_info2.NDP;
        ring_no = ring_no_mapping(ctx_offset);
        rls_cnt[ring_no]++;
        /* 2. release TXD */
        ei_local->txd_pool_info[ei_local->free_txd_tail[ring_no]] =
            ctx_offset;
        ei_local->free_txd_tail[ring_no] = ctx_offset;
        /* atomic_add(1, &ei_local->free_txd_num[ring_no]); */
        /* 3. update ctx_offset and free skb memory */
        ctx_offset = (tmp_ptr - ei_local->phy_txd_pool) / QTXD_LEN;
        if (ei_local->features & FE_TSO) {
            if (ei_local->skb_free[ctx_offset] != magic_id) {
                dev_kfree_skb_any(ei_local->skb_free
                          [ctx_offset]);
            }
        } else {
            dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
        }
        ei_local->skb_free[ctx_offset] = 0;
        /* 4. update cpu_ptr */
        cpu_ptr = (ei_local->txd_pool + ctx_offset);
    }
    for (i = 0; i < TOTAL_TXQ_NUM; i++) {
        if (rls_cnt[i] > 0)
            atomic_add(rls_cnt[i], &ei_local->free_txd_num[i]);
    }
    /* atomic_add(rls_cnt, &ei_local->free_txd_num[0]); */
    ei_local->rls_cpu_idx = ctx_offset;
    netif_wake_queue(netdev);
    if (ei_local->features & FE_GE2_SUPPORT)
        netif_wake_queue(ei_local->pseudo_dev);
    ei_local->tx_ring_full = 0;
    sys_reg_write(QTX_CRX_PTR,
              (ei_local->phy_txd_pool + (ctx_offset * QTXD_LEN)));

    return 0;
}

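/**
 * @brief handle RAETH QDMA ioctls: HQoS register read/write (with MT7622
 *        page selection), skb->mark to queue mapping and SFQ web enable
 *
 * @return 0 on success, -EINVAL/-EFAULT on error, 1 for unknown commands
 */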
int ei_qdma_ioctl(struct net_device *dev, struct ifreq *ifr,
          struct qdma_ioctl_data *data)
{
    int ret = 0;
    struct END_DEVICE *ei_local = netdev_priv(dev);
    unsigned int cmd;

    cmd = data->cmd;

    switch (cmd) {
    case RAETH_QDMA_REG_READ:
        if (data->off > REG_HQOS_MAX) {
            ret = -EINVAL;
            break;
        }

        if (ei_local->chip_name == MT7622_FE) { /* harry */
            unsigned int page = 0;

            /* q16~q31: 0x100 <= data->off < 0x200
             * q32~q47: 0x200 <= data->off < 0x300
             * q48~q63: 0x300 <= data->off < 0x400
             */
            if (data->off >= 0x100 && data->off < 0x200) {
                page = 1;
                data->off = data->off - 0x100;
            } else if (data->off >= 0x200 && data->off < 0x300) {
                page = 2;
                data->off = data->off - 0x200;
            } else if (data->off >= 0x300 && data->off < 0x400) {
                page = 3;
                data->off = data->off - 0x300;
            } else {
                page = 0;
            }
            /* magic number for the ioctl to identify CR 0x1b101a14 */
            if (data->off == 0x777) {
                page = 0;
                data->off = 0x214;
            }

            sys_reg_write(QDMA_PAGE, page);
            /* pr_debug("page=%d, data->off =%x\n", page, data->off); */
        }

        data->val = sys_reg_read(QTX_CFG_0 + data->off);
        pr_info("read reg off:%x val:%x\n", data->off, data->val);
        ret = copy_to_user(ifr->ifr_data, data, sizeof(*data));
        sys_reg_write(QDMA_PAGE, 0);
        if (ret) {
            pr_info("ret=%d\n", ret);
            ret = -EFAULT;
        }
        break;
    case RAETH_QDMA_REG_WRITE:
        if (data->off > REG_HQOS_MAX) {
            ret = -EINVAL;
            break;
        }

        if (ei_local->chip_name == MT7622_FE) { /* harry */
            unsigned int page = 0;

            /* QoS must enable the QDMA drop packet policy */
            sys_reg_write(QDMA_FC_THRES, 0x83834444);
            /* q16~q31: 0x100 <= data->off < 0x200
             * q32~q47: 0x200 <= data->off < 0x300
             * q48~q63: 0x300 <= data->off < 0x400
             */
            if (data->off >= 0x100 && data->off < 0x200) {
                page = 1;
                data->off = data->off - 0x100;
            } else if (data->off >= 0x200 && data->off < 0x300) {
                page = 2;
                data->off = data->off - 0x200;
            } else if (data->off >= 0x300 && data->off < 0x400) {
                page = 3;
                data->off = data->off - 0x300;
            } else {
                page = 0;
            }
            /* magic number for the ioctl to identify CR 0x1b101a14 */
            if (data->off == 0x777) {
                page = 0;
                data->off = 0x214;
            }
            sys_reg_write(QDMA_PAGE, page);
            /* pr_info("data->val =%x\n", data->val); */
            sys_reg_write(QTX_CFG_0 + data->off, data->val);
            sys_reg_write(QDMA_PAGE, 0);
        } else {
            sys_reg_write(QTX_CFG_0 + data->off, data->val);
        }
        /* pr_info("write reg off:%x val:%x\n", data->off, data->val); */
        break;
    case RAETH_QDMA_QUEUE_MAPPING:
        if ((data->off & 0x100) == 0x100) {
            lan_wan_separate = 1;
            data->off &= 0xff;
        } else {
            lan_wan_separate = 0;
            data->off &= 0xff;
        }
        M2Q_table[data->off] = data->val;
        break;
    case RAETH_QDMA_SFQ_WEB_ENABLE:
        if (ei_local->features & FE_HW_SFQ) {
            if ((data->val) == 0x1)
                web_sfq_enable = 1;
            else
                web_sfq_enable = 0;
        } else {
            ret = -EINVAL;
        }
        break;
    default:
        ret = 1;
        break;
    }

    return ret;
}