/* Copyright 2016 MediaTek Inc.
 * Author: Nelson Chang <nelson.chang@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "raether.h"

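/* Poll PDMA_GLO_CFG until both the RX and TX DMA engines report idle.
 * Returns 0 once idle, or -1 if they are still busy after the poll limit.
 */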
int fe_pdma_wait_dma_idle(void)
{
	unsigned int reg_val;
	unsigned int loop_cnt = 0;

	while (1) {
		if (loop_cnt++ > 1000)
			break;
		reg_val = sys_reg_read(PDMA_GLO_CFG);
		if (reg_val & RX_DMA_BUSY) {
			pr_warn("\n RX_DMA_BUSY !!! ");
			continue;
		}
		if (reg_val & TX_DMA_BUSY) {
			pr_warn("\n TX_DMA_BUSY !!! ");
			continue;
		}
		return 0;
	}

	return -1;
}

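/* Allocate RX ring 0 as coherent DMA memory, attach a mapped receive
 * buffer to every descriptor, and program the ring base, size and CPU
 * index registers.
 */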
int fe_pdma_rx_dma_init(struct net_device *dev)
{
	int i;
	unsigned int skb_size;
	struct END_DEVICE *ei_local = netdev_priv(dev);
	dma_addr_t dma_addr;

	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Initialize RX Ring 0 */
	ei_local->rx_ring[0] = dma_alloc_coherent(dev->dev.parent,
						  num_rx_desc *
						  sizeof(struct PDMA_rxdesc),
						  &ei_local->phy_rx_ring[0],
						  GFP_ATOMIC | __GFP_ZERO);
	/* bail out if the descriptor ring itself could not be allocated */
	if (!ei_local->rx_ring[0]) {
		pr_err("rx ring allocation failed!\n");
		goto no_rx_mem;
	}
	pr_debug("\nphy_rx_ring[0] = 0x%08x, rx_ring[0] = 0x%p\n",
		 (unsigned int)ei_local->phy_rx_ring[0],
		 (void *)ei_local->rx_ring[0]);

	for (i = 0; i < num_rx_desc; i++) {
		ei_local->netrx_skb_data[0][i] =
			raeth_alloc_skb_data(skb_size, GFP_KERNEL);
		if (!ei_local->netrx_skb_data[0][i]) {
			pr_err("rx skbuff buffer allocation failed!");
			goto no_rx_mem;
		}

		memset(&ei_local->rx_ring[0][i], 0, sizeof(struct PDMA_rxdesc));
		ei_local->rx_ring[0][i].rxd_info2.DDONE_bit = 0;
		ei_local->rx_ring[0][i].rxd_info2.LS0 = 0;
		ei_local->rx_ring[0][i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
		dma_addr = dma_map_single(dev->dev.parent,
					  ei_local->netrx_skb_data[0][i] +
					  NET_SKB_PAD,
					  MAX_RX_LENGTH,
					  DMA_FROM_DEVICE);
		ei_local->rx_ring[0][i].rxd_info1.PDP0 = dma_addr;
		if (unlikely(dma_mapping_error(dev->dev.parent,
					       ei_local->rx_ring[0][i].rxd_info1.PDP0))) {
			pr_err("[%s]dma_map_single() failed...\n", __func__);
			goto no_rx_mem;
		}
	}

	/* Tell the adapter where the RX rings are located. */
	sys_reg_write(RX_BASE_PTR0, phys_to_bus((u32)ei_local->phy_rx_ring[0]));
	sys_reg_write(RX_MAX_CNT0, cpu_to_le32((u32)num_rx_desc));
	sys_reg_write(RX_CALC_IDX0, cpu_to_le32((u32)(num_rx_desc - 1)));

	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX0);

	return 0;

no_rx_mem:
	return -ENOMEM;
}

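/* Allocate TX ring 0 as coherent DMA memory, mark every descriptor as
 * done/last-segment so the CPU owns it, and program the TX ring registers.
 */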
int fe_pdma_tx_dma_init(struct net_device *dev)
{
	int i;
	struct END_DEVICE *ei_local = netdev_priv(dev);

	for (i = 0; i < num_tx_desc; i++)
		ei_local->skb_free[i] = 0;

	ei_local->tx_ring_full = 0;
	ei_local->free_idx = 0;
	ei_local->tx_ring0 =
		dma_alloc_coherent(dev->dev.parent,
				   num_tx_desc * sizeof(struct PDMA_txdesc),
				   &ei_local->phy_tx_ring0,
				   GFP_ATOMIC | __GFP_ZERO);
	/* bail out if the descriptor ring could not be allocated */
	if (!ei_local->tx_ring0) {
		pr_err("tx ring allocation failed!\n");
		return -ENOMEM;
	}
	pr_debug("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n",
		 (unsigned int)ei_local->phy_tx_ring0,
		 (void *)ei_local->tx_ring0);

	for (i = 0; i < num_tx_desc; i++) {
		memset(&ei_local->tx_ring0[i], 0, sizeof(struct PDMA_txdesc));
		ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
		ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1;
	}

	/* Tell the adapter where the TX rings are located. */
	sys_reg_write(TX_BASE_PTR0, phys_to_bus((u32)ei_local->phy_tx_ring0));
	sys_reg_write(TX_MAX_CNT0, cpu_to_le32((u32)num_tx_desc));
	sys_reg_write(TX_CTX_IDX0, 0);
#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
	ei_local->tx_cpu_owner_idx0 = 0;
#endif
	sys_reg_write(PDMA_RST_CFG, PST_DTX_IDX0);

	return 0;
}

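/* Release RX ring 0 and the per-descriptor receive buffers. */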
void fe_pdma_rx_dma_deinit(struct net_device *dev)
{
	struct END_DEVICE *ei_local = netdev_priv(dev);
	int i;

	/* free RX Ring */
	dma_free_coherent(dev->dev.parent,
			  num_rx_desc * sizeof(struct PDMA_rxdesc),
			  ei_local->rx_ring[0], ei_local->phy_rx_ring[0]);

	/* free RX data */
	for (i = 0; i < num_rx_desc; i++) {
		raeth_free_skb_data(ei_local->netrx_skb_data[0][i]);
		ei_local->netrx_skb_data[0][i] = NULL;
	}
}

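/* Release TX ring 0 and free any skbs still pending in the ring. */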
void fe_pdma_tx_dma_deinit(struct net_device *dev)
{
	struct END_DEVICE *ei_local = netdev_priv(dev);
	int i;

	/* free TX Ring */
	if (ei_local->tx_ring0)
		dma_free_coherent(dev->dev.parent,
				  num_tx_desc *
				  sizeof(struct PDMA_txdesc),
				  ei_local->tx_ring0,
				  ei_local->phy_tx_ring0);

	/* free TX data */
	for (i = 0; i < num_tx_desc; i++) {
		if ((ei_local->skb_free[i] != 0) &&
		    (ei_local->skb_free[i] != (struct sk_buff *)0xFFFFFFFF))
			dev_kfree_skb_any(ei_local->skb_free[i]);
	}
}

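/* Program the PDMA global configuration: enable TX/RX DMA, descriptor
 * write-back and the burst sizes used by this driver.
 */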
void set_fe_pdma_glo_cfg(void)
{
	unsigned int dma_glo_cfg = 0;

	dma_glo_cfg =
	    (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS |
	     MULTI_EN | ADMA_RX_BT_SIZE_32DWORDS);
	/* dma_glo_cfg |= (RX_2B_OFFSET); */

	sys_reg_write(PDMA_GLO_CFG, dma_glo_cfg);
}

/* @brief Calculate how many TX descriptor segments a buffer needs
 *
 * @param size	buffer length in bytes
 *
 * @return frag_txd_num	number of MAX_PTXD_LEN-sized segments
 */
static inline unsigned int pdma_cal_frag_txd_num(unsigned int size)
{
	unsigned int frag_txd_num = 0;

	if (size == 0)
		return 0;
	while (size > 0) {
		if (size > MAX_PTXD_LEN) {
			frag_txd_num++;
			size -= MAX_PTXD_LEN;
		} else {
			frag_txd_num++;
			size = 0;
		}
	}
	return frag_txd_num;
}

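/* Fill a single TX descriptor for a non-TSO packet and hand it to the
 * given GMAC port (or to the PPE when hardware NAT redirects the skb).
 */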
int fe_fill_tx_desc(struct net_device *dev,
		    unsigned long *tx_cpu_owner_idx,
		    struct sk_buff *skb,
		    int gmac_no)
{
	struct END_DEVICE *ei_local = netdev_priv(dev);
	struct PDMA_txdesc *tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
	struct PDMA_TXD_INFO2_T txd_info2_tmp;
	struct PDMA_TXD_INFO4_T txd_info4_tmp;

	tx_ring->txd_info1.SDP0 = virt_to_phys(skb->data);
	txd_info2_tmp.SDL0 = skb->len;
	txd_info4_tmp.FPORT = gmac_no;
	txd_info4_tmp.TSO = 0;

	if (ei_local->features & FE_CSUM_OFFLOAD) {
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			txd_info4_tmp.TUI_CO = 7;
		else
			txd_info4_tmp.TUI_CO = 0;
	}

	if (ei_local->features & FE_HW_VLAN_TX) {
		if (skb_vlan_tag_present(skb))
			txd_info4_tmp.VLAN_TAG =
				0x10000 | skb_vlan_tag_get(skb);
		else
			txd_info4_tmp.VLAN_TAG = 0;
	}
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
			if (ppe_hook_rx_eth) {
				/* PPE */
				txd_info4_tmp.FPORT = 4;
				FOE_MAGIC_TAG(skb) = 0;
			}
		}
	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
			if (ppe_hook_rx_eth) {
				/* PPE */
				txd_info4_tmp.FPORT = 4;
				FOE_MAGIC_TAG(skb) = 0;
			}
		}
	}
#endif

	txd_info2_tmp.LS0_bit = 1;
	txd_info2_tmp.DDONE_bit = 0;

	tx_ring->txd_info4 = txd_info4_tmp;
	tx_ring->txd_info2 = txd_info2_tmp;

	return 0;
}

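/* Split the linear part of a TSO packet across TX descriptors; each
 * descriptor carries up to two segments (SDP0/SDP1) of at most
 * MAX_PTXD_LEN bytes.
 */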
static int fe_fill_tx_tso_data(struct END_DEVICE *ei_local,
			       unsigned int frag_offset,
			       unsigned int frag_size,
			       unsigned long *tx_cpu_owner_idx,
			       unsigned int nr_frags,
			       int gmac_no)
{
	struct PSEUDO_ADAPTER *p_ad;
	unsigned int size;
	unsigned int frag_txd_num;
	struct PDMA_txdesc *tx_ring;

	frag_txd_num = pdma_cal_frag_txd_num(frag_size);
	tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];

	while (frag_txd_num > 0) {
		if (frag_size < MAX_PTXD_LEN)
			size = frag_size;
		else
			size = MAX_PTXD_LEN;

		if (ei_local->skb_txd_num % 2 == 0) {
			*tx_cpu_owner_idx =
				(*tx_cpu_owner_idx + 1) % num_tx_desc;
			tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];

			while (tx_ring->txd_info2.DDONE_bit == 0) {
				if (gmac_no == 2) {
					p_ad =
					    netdev_priv(ei_local->pseudo_dev);
					p_ad->stat.tx_errors++;
				} else {
					ei_local->stat.tx_errors++;
				}
			}
			tx_ring->txd_info1.SDP0 = frag_offset;
			tx_ring->txd_info2.SDL0 = size;
			if ((nr_frags == 0) && (frag_txd_num == 1))
				tx_ring->txd_info2.LS0_bit = 1;
			else
				tx_ring->txd_info2.LS0_bit = 0;
			tx_ring->txd_info2.DDONE_bit = 0;
			tx_ring->txd_info4.FPORT = gmac_no;
		} else {
			tx_ring->txd_info3.SDP1 = frag_offset;
			tx_ring->txd_info2.SDL1 = size;
			if ((nr_frags == 0) && (frag_txd_num == 1))
				tx_ring->txd_info2.LS1_bit = 1;
			else
				tx_ring->txd_info2.LS1_bit = 0;
		}
		frag_offset += size;
		frag_size -= size;
		frag_txd_num--;
		ei_local->skb_txd_num++;
	}

	return 0;
}

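/* Map each page fragment of a TSO skb and spread it across TX
 * descriptors; on a DMA mapping error, unwind the mappings done so far.
 */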
static int fe_fill_tx_tso_frag(struct net_device *netdev,
			       struct sk_buff *skb,
			       unsigned long *tx_cpu_owner_idx,
			       int gmac_no)
{
	struct END_DEVICE *ei_local = netdev_priv(netdev);
	struct PSEUDO_ADAPTER *p_ad;
	unsigned int size;
	unsigned int frag_txd_num;
	skb_frag_t *frag;
	unsigned int nr_frags;
	unsigned int frag_offset, frag_size;
	struct PDMA_txdesc *tx_ring;
	int i = 0, j = 0, unmap_idx = 0;

	nr_frags = skb_shinfo(skb)->nr_frags;
	tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		frag_offset = 0;
		frag_size = skb_frag_size(frag);
		frag_txd_num = pdma_cal_frag_txd_num(frag_size);

		while (frag_txd_num > 0) {
			if (frag_size < MAX_PTXD_LEN)
				size = frag_size;
			else
				size = MAX_PTXD_LEN;

			if (ei_local->skb_txd_num % 2 == 0) {
				*tx_cpu_owner_idx =
					(*tx_cpu_owner_idx + 1) % num_tx_desc;
				tx_ring =
					&ei_local->tx_ring0[*tx_cpu_owner_idx];

				while (tx_ring->txd_info2.DDONE_bit == 0) {
					if (gmac_no == 2) {
						p_ad =
						    netdev_priv
						    (ei_local->pseudo_dev);
						p_ad->stat.tx_errors++;
					} else {
						ei_local->stat.tx_errors++;
					}
				}

				tx_ring->txd_info1.SDP0 =
					skb_frag_dma_map(netdev->dev.parent,
							 frag, frag_offset,
							 size, DMA_TO_DEVICE);

				if (unlikely
				    (dma_mapping_error
				     (netdev->dev.parent,
				      tx_ring->txd_info1.SDP0))) {
					pr_err("[%s]dma_map_page() failed\n",
					       __func__);
					goto err_dma;
				}

				tx_ring->txd_info2.SDL0 = size;

				if ((frag_txd_num == 1) &&
				    (i == (nr_frags - 1)))
					tx_ring->txd_info2.LS0_bit = 1;
				else
					tx_ring->txd_info2.LS0_bit = 0;
				tx_ring->txd_info2.DDONE_bit = 0;
				tx_ring->txd_info4.FPORT = gmac_no;
			} else {
				tx_ring->txd_info3.SDP1 =
					skb_frag_dma_map(netdev->dev.parent,
							 frag, frag_offset,
							 size, DMA_TO_DEVICE);

				if (unlikely
				    (dma_mapping_error
				     (netdev->dev.parent,
				      tx_ring->txd_info3.SDP1))) {
					pr_err("[%s]dma_map_page() failed\n",
					       __func__);
					goto err_dma;
				}
				tx_ring->txd_info2.SDL1 = size;
				if ((frag_txd_num == 1) &&
				    (i == (nr_frags - 1)))
					tx_ring->txd_info2.LS1_bit = 1;
				else
					tx_ring->txd_info2.LS1_bit = 0;
			}
			frag_offset += size;
			frag_size -= size;
			frag_txd_num--;
			ei_local->skb_txd_num++;
		}
	}

	return 0;

err_dma:
	/* unmap dma */
	j = *tx_cpu_owner_idx;
	unmap_idx = i;
	for (i = 0; i < unmap_idx; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);
		frag_txd_num = pdma_cal_frag_txd_num(frag_size);

		while (frag_txd_num > 0) {
			if (frag_size < MAX_PTXD_LEN)
				size = frag_size;
			else
				size = MAX_PTXD_LEN;
			if (ei_local->skb_txd_num % 2 == 0) {
				j = (j + 1) % num_tx_desc;
				dma_unmap_page(netdev->dev.parent,
					       ei_local->tx_ring0[j].
					       txd_info1.SDP0,
					       ei_local->tx_ring0[j].
					       txd_info2.SDL0, DMA_TO_DEVICE);
				/* reinit txd */
				ei_local->tx_ring0[j].txd_info2.LS0_bit = 1;
				ei_local->tx_ring0[j].txd_info2.DDONE_bit = 1;
			} else {
				dma_unmap_page(netdev->dev.parent,
					       ei_local->tx_ring0[j].
					       txd_info3.SDP1,
					       ei_local->tx_ring0[j].
					       txd_info2.SDL1, DMA_TO_DEVICE);
				/* reinit txd */
				ei_local->tx_ring0[j].txd_info2.LS1_bit = 1;
			}
			frag_size -= size;
			frag_txd_num--;
			ei_local->skb_txd_num++;
		}
	}

	return -1;
}

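/* Build the descriptor chain for a TSO/scatter-gather packet: linear
 * data first, then the page fragments, and store the MSS in the TCP
 * checksum field so the hardware can segment the payload.
 */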
int fe_fill_tx_desc_tso(struct net_device *dev,
			unsigned long *tx_cpu_owner_idx,
			struct sk_buff *skb,
			int gmac_no)
{
	struct END_DEVICE *ei_local = netdev_priv(dev);
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct tcphdr *th = NULL;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int len, offset;
	int err;
	struct PDMA_txdesc *tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];

	tx_ring->txd_info4.FPORT = gmac_no;
	tx_ring->txd_info4.TSO = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ring->txd_info4.TUI_CO = 7;
	else
		tx_ring->txd_info4.TUI_CO = 0;

	if (ei_local->features & FE_HW_VLAN_TX) {
		if (skb_vlan_tag_present(skb))
			tx_ring->txd_info4.VLAN_TAG =
				0x10000 | skb_vlan_tag_get(skb);
		else
			tx_ring->txd_info4.VLAN_TAG = 0;
	}
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
			if (ppe_hook_rx_eth) {
				/* PPE */
				tx_ring->txd_info4.FPORT = 4;
				FOE_MAGIC_TAG(skb) = 0;
			}
		}
	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
			if (ppe_hook_rx_eth) {
				/* PPE */
				tx_ring->txd_info4.FPORT = 4;
				FOE_MAGIC_TAG(skb) = 0;
			}
		}
	}
#endif
	ei_local->skb_txd_num = 1;

	/* skb data handle */
	len = skb->len - skb->data_len;
	offset = virt_to_phys(skb->data);
	tx_ring->txd_info1.SDP0 = offset;
	if (len < MAX_PTXD_LEN) {
		tx_ring->txd_info2.SDL0 = len;
		tx_ring->txd_info2.LS0_bit = nr_frags ? 0 : 1;
		len = 0;
	} else {
		tx_ring->txd_info2.SDL0 = MAX_PTXD_LEN;
		tx_ring->txd_info2.LS0_bit = 0;
		len -= MAX_PTXD_LEN;
		offset += MAX_PTXD_LEN;
	}

	if (len > 0)
		fe_fill_tx_tso_data(ei_local, offset, len,
				    tx_cpu_owner_idx, nr_frags, gmac_no);

	/* skb fragments handle */
	if (nr_frags > 0) {
		err = fe_fill_tx_tso_frag(dev, skb, tx_cpu_owner_idx, gmac_no);
		if (unlikely(err))
			return err;
	}

	/* fill in MSS info in tcp checksum field */
	if (skb_shinfo(skb)->gso_segs > 1) {
		/* TCP over IPv4 */
		iph = (struct iphdr *)skb_network_header(skb);
		if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
			th = (struct tcphdr *)skb_transport_header(skb);
			tx_ring->txd_info4.TSO = 1;
			th->check = htons(skb_shinfo(skb)->gso_size);
			dma_sync_single_for_device(dev->dev.parent,
						   virt_to_phys(th),
						   sizeof(struct tcphdr),
						   DMA_TO_DEVICE);
		}

		/* TCP over IPv6 */
		if (ei_local->features & FE_TSO_V6) {
			ip6h = (struct ipv6hdr *)skb_network_header(skb);
			if ((ip6h->nexthdr == NEXTHDR_TCP) &&
			    (ip6h->version == 6)) {
				th = (struct tcphdr *)skb_transport_header(skb);
				tx_ring->txd_info4.TSO = 1;
				th->check = htons(skb_shinfo(skb)->gso_size);
				dma_sync_single_for_device(dev->dev.parent,
							   virt_to_phys(th),
							   sizeof(struct tcphdr),
							   DMA_TO_DEVICE);
			}
		}
	}
	tx_ring->txd_info2.DDONE_bit = 0;

	return 0;
}

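/* Push one skb into TX ring 0, advance the CPU owner index, and update
 * the TX statistics; returns the transmitted length or a negative error.
 */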
static inline int rt2880_pdma_eth_send(struct net_device *dev,
				       struct sk_buff *skb, int gmac_no,
				       unsigned int num_of_frag)
{
	unsigned int length = skb->len;
	struct END_DEVICE *ei_local = netdev_priv(dev);
#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
	unsigned long tx_cpu_owner_idx0 = ei_local->tx_cpu_owner_idx0;
#else
	unsigned long tx_cpu_owner_idx0 = sys_reg_read(TX_CTX_IDX0);
#endif
	struct PSEUDO_ADAPTER *p_ad;
	int err;

	while (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) {
		if (gmac_no == 2) {
			if (ei_local->pseudo_dev) {
				p_ad = netdev_priv(ei_local->pseudo_dev);
				p_ad->stat.tx_errors++;
			} else {
				pr_err("pseudo_dev is not initialized yet, but a packet from GMAC2 was received\n");
			}
		} else {
			ei_local->stat.tx_errors++;
		}
	}

	if (num_of_frag > 1)
		err = fe_fill_tx_desc_tso(dev, &tx_cpu_owner_idx0,
					  skb, gmac_no);
	else
		err = fe_fill_tx_desc(dev, &tx_cpu_owner_idx0, skb, gmac_no);
	if (err)
		return err;

	tx_cpu_owner_idx0 = (tx_cpu_owner_idx0 + 1) % num_tx_desc;
	while (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) {
		if (gmac_no == 2) {
			p_ad = netdev_priv(ei_local->pseudo_dev);
			p_ad->stat.tx_errors++;
		} else {
			ei_local->stat.tx_errors++;
		}
	}
#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
	ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx0;
#endif
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	sys_reg_write(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0));

	if (gmac_no == 2) {
		p_ad = netdev_priv(ei_local->pseudo_dev);
		p_ad->stat.tx_packets++;
		p_ad->stat.tx_bytes += length;
	} else {
		ei_local->stat.tx_packets++;
		ei_local->stat.tx_bytes += length;
	}

	return length;
}

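/* Transmit entry point: compute how many descriptors the skb needs,
 * send it if the ring has room, and remember the skb so housekeeping
 * can free it once the hardware sets DDONE.
 */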
int ei_pdma_start_xmit(struct sk_buff *skb, struct net_device *dev, int gmac_no)
{
	struct END_DEVICE *ei_local = netdev_priv(dev);
	unsigned long tx_cpu_owner_idx;
	unsigned int tx_cpu_owner_idx_next, tx_cpu_owner_idx_next2;
	unsigned int num_of_txd, num_of_frag;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
	skb_frag_t *frag;
	struct PSEUDO_ADAPTER *p_ad;
	unsigned int tx_cpu_cal_idx;

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	if (ppe_hook_tx_eth) {
		if (FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE)
			if (ppe_hook_tx_eth(skb, gmac_no) != 1) {
				dev_kfree_skb_any(skb);
				return 0;
			}
	}
#endif

	netif_trans_update(dev);	/* save the timestamp */
	spin_lock(&ei_local->page_lock);
	dma_sync_single_for_device(dev->dev.parent, virt_to_phys(skb->data),
				   skb->len, DMA_TO_DEVICE);

#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
	tx_cpu_owner_idx = ei_local->tx_cpu_owner_idx0;
#else
	tx_cpu_owner_idx = sys_reg_read(TX_CTX_IDX0);
#endif

	if (ei_local->features & FE_TSO) {
		num_of_txd = pdma_cal_frag_txd_num(skb->len - skb->data_len);
		if (nr_frags != 0) {
			for (i = 0; i < nr_frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				num_of_txd +=
				    pdma_cal_frag_txd_num(skb_frag_size(frag));
			}
		}
		num_of_frag = num_of_txd;
		num_of_txd = (num_of_txd + 1) >> 1;
	} else {
		num_of_frag = 1;
		num_of_txd = 1;
	}

	tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % num_tx_desc;

	if ((ei_local->skb_free[tx_cpu_owner_idx_next] == 0) &&
	    (ei_local->skb_free[tx_cpu_owner_idx] == 0)) {
		if (rt2880_pdma_eth_send(dev, skb, gmac_no, num_of_frag) < 0) {
			dev_kfree_skb_any(skb);
			if (gmac_no == 2) {
				p_ad = netdev_priv(ei_local->pseudo_dev);
				p_ad->stat.tx_dropped++;
			} else {
				ei_local->stat.tx_dropped++;
			}
			goto tx_err;
		}

		tx_cpu_owner_idx_next2 =
			(tx_cpu_owner_idx_next + 1) % num_tx_desc;

		if (ei_local->skb_free[tx_cpu_owner_idx_next2] != 0)
			ei_local->tx_ring_full = 1;
	} else {
		if (gmac_no == 2) {
			p_ad = netdev_priv(ei_local->pseudo_dev);
			p_ad->stat.tx_dropped++;
		} else {
			ei_local->stat.tx_dropped++;
		}

		dev_kfree_skb_any(skb);
		spin_unlock(&ei_local->page_lock);
		return NETDEV_TX_OK;
	}

	/* SG: use multiple TXD to send the packet (only have one skb) */
	tx_cpu_cal_idx = (tx_cpu_owner_idx + num_of_txd - 1) % num_tx_desc;
	ei_local->skb_free[tx_cpu_cal_idx] = skb;
	while (--num_of_txd)
		/* MAGIC ID */
		ei_local->skb_free[(--tx_cpu_cal_idx) % num_tx_desc] =
			(struct sk_buff *)0xFFFFFFFF;

tx_err:
	spin_unlock(&ei_local->page_lock);
	return NETDEV_TX_OK;
}

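/* Reclaim completed TX descriptors: free the skbs whose DDONE bit is
 * set, up to the given budget, and clear the ring-full flag.
 */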
int ei_pdma_xmit_housekeeping(struct net_device *netdev, int budget)
{
	struct END_DEVICE *ei_local = netdev_priv(netdev);
	struct PDMA_txdesc *tx_desc;
	unsigned long skb_free_idx;
	int tx_processed = 0;

	tx_desc = ei_local->tx_ring0;
	skb_free_idx = ei_local->free_idx;

	while (budget &&
	       (ei_local->skb_free[skb_free_idx] != 0) &&
	       (tx_desc[skb_free_idx].txd_info2.DDONE_bit == 1)) {
		if (ei_local->skb_free[skb_free_idx] !=
		    (struct sk_buff *)0xFFFFFFFF)
			dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);

		ei_local->skb_free[skb_free_idx] = 0;
		skb_free_idx = (skb_free_idx + 1) % num_tx_desc;
		budget--;
		tx_processed++;
	}

	ei_local->tx_ring_full = 0;
	ei_local->free_idx = skb_free_idx;

	return tx_processed;
}