// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
 */

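/* Bit offset of the flow count field in the RCHAN receive flow range register */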
#define UDMA_RCHAN_RFLOW_RNG_FLOWID_CNT_SHIFT	(16)

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

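/*
 * Default tag selection for the flow's RFC register: leave the source tag
 * high byte untouched, take the source tag low byte from the source tag, and
 * take the destination tag high/low bytes from the destination tag.
 */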
#define UDMA_RFLOW_RFC_DEFAULT	\
	((UDMA_RFLOW_SRCTAG_NONE << UDMA_RFLOW_RFC_SRC_TAG_HI_SEL_SHIFT) | \
	 (UDMA_RFLOW_SRCTAG_SRC_TAG << UDMA_RFLOW_RFC_SRC_TAG_LO_SEL_SHIFT) | \
	 (UDMA_RFLOW_DSTTAG_DST_TAG_HI << UDMA_RFLOW_RFC_DST_TAG_HI_SEL_SHIFT) | \
	 (UDMA_RFLOW_DSTTAG_DST_TAG_LO << UDMA_RFLOW_RFC_DST_TAG_LO_SE_SHIFT))

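/* Offset of the upper free descriptor queue number within the RFLOW RFx registers */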
#define UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT	(16)

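/*
 * Per-resource register accessors: thin wrappers around udma_read(),
 * udma_write() and udma_update_bits() that silently ignore accesses on a
 * NULL (unallocated) channel or flow.
 */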
/* TCHAN */
static inline u32 udma_tchan_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_chan, reg);
}

static inline void udma_tchan_write(struct udma_tchan *tchan, int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_chan, reg, val);
}

static inline void udma_tchan_update_bits(struct udma_tchan *tchan, int reg,
					  u32 mask, u32 val)
{
	if (!tchan)
		return;
	udma_update_bits(tchan->reg_chan, reg, mask, val);
}

/* RCHAN */
static inline u32 udma_rchan_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_chan, reg);
}

static inline void udma_rchan_write(struct udma_rchan *rchan, int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_chan, reg, val);
}

static inline void udma_rchan_update_bits(struct udma_rchan *rchan, int reg,
					  u32 mask, u32 val)
{
	if (!rchan)
		return;
	udma_update_bits(rchan->reg_chan, reg, mask, val);
}

/* RFLOW */
static inline u32 udma_rflow_read(struct udma_rflow *rflow, int reg)
{
	if (!rflow)
		return 0;
	return udma_read(rflow->reg_rflow, reg);
}

static inline void udma_rflow_write(struct udma_rflow *rflow, int reg, u32 val)
{
	if (!rflow)
		return;
	udma_write(rflow->reg_rflow, reg, val);
}

static inline void udma_rflow_update_bits(struct udma_rflow *rflow, int reg,
					  u32 mask, u32 val)
{
	if (!rflow)
		return;
	udma_update_bits(rflow->reg_rflow, reg, mask, val);
}

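/*
 * udma_alloc_tchan_raw - directly program a TX channel's configuration:
 * channel type (packet vs. block-copy mode per uc->config.pkt_mode),
 * descriptor fetch size and the TX completion queue ring.
 */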
static void udma_alloc_tchan_raw(struct udma_chan *uc)
{
	u32 mode, fetch_size;

	if (uc->config.pkt_mode)
		mode = UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR;
	else
		mode = UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR;

	udma_tchan_update_bits(uc->tchan, UDMA_TCHAN_TCFG_REG,
			       UDMA_CHAN_CFG_CHAN_TYPE_MASK, mode);

	if (uc->config.dir == DMA_MEM_TO_MEM)
		fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0) >> 2;

	udma_tchan_update_bits(uc->tchan, UDMA_TCHAN_TCFG_REG,
			       UDMA_CHAN_CFG_FETCH_SIZE_MASK, fetch_size);
	udma_tchan_write(uc->tchan, UDMA_TCHAN_TCQ_REG,
			 k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring));
}

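/*
 * udma_alloc_rchan_raw - directly program an RX channel and its receive flow:
 * channel type, descriptor fetch size, completion queue, flow range, and the
 * flow's EPIB/PS-data presence, tag selection and free descriptor queues.
 */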
static void udma_alloc_rchan_raw(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	u32 rx_einfo_present = 0, rx_psinfo_present = 0;
	u32 mode, fetch_size, rxcq_num;

	if (uc->config.pkt_mode)
		mode = UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR;
	else
		mode = UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR;

	udma_rchan_update_bits(uc->rchan, UDMA_RCHAN_RCFG_REG,
			       UDMA_CHAN_CFG_CHAN_TYPE_MASK, mode);

	if (uc->config.dir == DMA_MEM_TO_MEM) {
		fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		rxcq_num = tc_ring;
	} else {
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0) >> 2;
		rxcq_num = rx_ring;
	}

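	/* Program the descriptor fetch size and the default RX completion queue */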
	udma_rchan_update_bits(uc->rchan, UDMA_RCHAN_RCFG_REG,
			       UDMA_CHAN_CFG_FETCH_SIZE_MASK, fetch_size);
	udma_rchan_write(uc->rchan, UDMA_RCHAN_RCQ_REG, rxcq_num);

	if (uc->config.dir == DMA_MEM_TO_MEM)
		return;

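	/*
	 * On UDMA, when the flow is not the channel's default one, point the
	 * channel's flow range at this single flow.
	 */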
	if (ud->match_data->type == DMA_TYPE_UDMA &&
	    uc->rflow->id != uc->rchan->id &&
	    uc->config.dir != DMA_MEM_TO_MEM)
		udma_rchan_write(uc->rchan, UDMA_RCHAN_RFLOW_RNG_REG, uc->rflow->id |
				 1 << UDMA_RCHAN_RFLOW_RNG_FLOWID_CNT_SHIFT);

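	/*
	 * Configure the receive flow: EPIB/PS-data presence, completion queue,
	 * default tag selection and the free descriptor queue rings.
	 */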
	if (uc->config.needs_epib)
		rx_einfo_present = UDMA_RFLOW_RFA_EINFO;

	if (uc->config.psd_size)
		rx_psinfo_present = UDMA_RFLOW_RFA_PSINFO;

	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(A),
			 rx_einfo_present | rx_psinfo_present | rxcq_num);

	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(C), UDMA_RFLOW_RFC_DEFAULT);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(D),
			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(E),
			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(G), fd_ring);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(H),
			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
}