// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/dma/bcm63xx-iudma.c:
 *	Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
 *
 * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
 *	Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
 *	Copyright (C) 2000-2010 Broadcom Corporation
 *
 * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
 *	Copyright (C) 2010 Broadcom Corporation
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dma-uclass.h>
#include <malloc.h>
#include <memalign.h>
#include <reset.h>
#include <asm/io.h>

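/*
 * Ring depths per channel direction: RX keeps a small pool of posted
 * buffers so back-to-back frames are not dropped, while a single TX
 * descriptor suffices because bcm6348_iudma_send() below busy-waits for
 * completion before returning.
 */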
#define DMA_RX_DESC	6
#define DMA_TX_DESC	1

/* DMA Channels */
#define DMA_CHAN_FLOWC(x)		((x) >> 1)
#define DMA_CHAN_MAX			16
#define DMA_CHAN_SIZE			0x10
#define DMA_CHAN_TOUT			500

/* DMA Global Configuration register */
#define DMA_CFG_REG			0x00
#define DMA_CFG_ENABLE_SHIFT		0
#define DMA_CFG_ENABLE_MASK		(1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x)		BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT		24
#define DMA_CFG_NCHANS_MASK		(0xf << DMA_CFG_NCHANS_SHIFT)

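/*
 * Channels come in RX/TX pairs (even ids are RX, odd ids are TX) and each
 * pair shares one set of flow control registers, hence the x / 2 mapping
 * in DMA_CHAN_FLOWC(). The low/high thresholds and the buffer allocation
 * counter let the hardware throttle RX when few buffers remain posted.
 */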
/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x)		(0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x)		(0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x)		(0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT	31
#define DMA_FLOWC_ALLOC_FORCE_MASK	(1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG			0x34
#define DMA_RST_CHAN_SHIFT		0
#define DMA_RST_CHAN_MASK(x)		(1 << (x))

/* DMA Channel Configuration register */
#define DMAC_CFG_REG(x)			(DMA_CHAN_SIZE * (x) + 0x00)
#define DMAC_CFG_ENABLE_SHIFT		0
#define DMAC_CFG_ENABLE_MASK		(1 << DMAC_CFG_ENABLE_SHIFT)
#define DMAC_CFG_PKT_HALT_SHIFT		1
#define DMAC_CFG_PKT_HALT_MASK		(1 << DMAC_CFG_PKT_HALT_SHIFT)
#define DMAC_CFG_BRST_HALT_SHIFT	2
#define DMAC_CFG_BRST_HALT_MASK		(1 << DMAC_CFG_BRST_HALT_SHIFT)

/* DMA Channel Max Burst Length register */
#define DMAC_BURST_REG(x)		(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA SRAM Descriptor Ring Start register */
#define DMAS_RSTART_REG(x)		(DMA_CHAN_SIZE * (x) + 0x00)

/* DMA SRAM State/Bytes done/ring offset register */
#define DMAS_STATE_DATA_REG(x)		(DMA_CHAN_SIZE * (x) + 0x04)

/* DMA SRAM Buffer Descriptor status and length register */
#define DMAS_DESC_LEN_STATUS_REG(x)	(DMA_CHAN_SIZE * (x) + 0x08)

/* DMA SRAM Buffer Descriptor base address register */
#define DMAS_DESC_BASE_BUFPTR_REG(x)	(DMA_CHAN_SIZE * (x) + 0x0c)

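/*
 * Descriptor handshake: setting OWN hands a descriptor to the DMA engine,
 * which clears it again on completion; SOP/EOP delimit a packet (this
 * driver always uses single-descriptor packets); WRAP marks the last ring
 * entry so the engine wraps around; CRC is set on TX in
 * bcm6348_iudma_send(), presumably to have the hardware append a CRC.
 * The DMAD6348_* bits below are BCM6348-specific RX error flags.
 */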
/* DMA Descriptor Status */
#define DMAD_ST_CRC_SHIFT		8
#define DMAD_ST_CRC_MASK		(1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT		12
#define DMAD_ST_WRAP_MASK		(1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT		13
#define DMAD_ST_SOP_MASK		(1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT		14
#define DMAD_ST_EOP_MASK		(1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT		15
#define DMAD_ST_OWN_MASK		(1 << DMAD_ST_OWN_SHIFT)

#define DMAD6348_ST_OV_ERR_SHIFT	0
#define DMAD6348_ST_OV_ERR_MASK		(1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT	1
#define DMAD6348_ST_CRC_ERR_MASK	(1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT	2
#define DMAD6348_ST_RX_ERR_MASK		(1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT	4
#define DMAD6348_ST_OS_ERR_MASK		(1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT	9
#define DMAD6348_ST_UN_ERR_MASK		(1 << DMAD6348_ST_UN_ERR_SHIFT)

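/*
 * Buffer descriptor layout as consumed by the IUDMA engine. Descriptors
 * live in cacheable DRAM shared with the hardware, so every handoff below
 * is paired with an explicit cache flush or invalidate.
 */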
struct bcm6348_dma_desc {
	uint16_t length;
	uint16_t status;
	uint32_t address;
};

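/*
 * Per-channel software state: desc_id is the next ring slot to use and
 * desc_cnt the number of armed descriptors (it grows as RX buffers are
 * added); busy_desc tracks RX slots handed to the caller and not yet
 * recycled.
 */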
struct bcm6348_chan_priv {
	void __iomem *dma_ring;
	uint8_t dma_ring_size;
	uint8_t desc_id;
	uint8_t desc_cnt;
	bool *busy_desc;
	bool running;
};

struct bcm6348_iudma_hw {
	uint16_t err_mask;
};

struct bcm6348_iudma_priv {
	const struct bcm6348_iudma_hw *hw;
	void __iomem *base;
	void __iomem *chan;
	void __iomem *sram;
	struct bcm6348_chan_priv **ch_priv;
	uint8_t n_channels;
};

static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
	return !(ch & 1);
}

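/*
 * Cache maintenance helpers: bcm6348_iudma_fdc() writes dirty lines back
 * before the engine reads them, bcm6348_iudma_idc() drops stale lines
 * before the CPU reads data the engine wrote.
 */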
static inline void bcm6348_iudma_fdc(void *ptr, ulong size)
{
	ulong start = (ulong) ptr;

	flush_dcache_range(start, start + size);
}

static inline void bcm6348_iudma_idc(void *ptr, ulong size)
{
	ulong start = (ulong) ptr;

	invalidate_dcache_range(start, start + size);
}

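/*
 * Channel stop is a two-stage halt: for the first half of the timeout ask
 * the engine to stop at a packet boundary, then fall back to halting at a
 * burst boundary, and finally pulse the per-channel reset bit regardless.
 */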
static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
				    uint8_t ch)
{
	unsigned int timeout = DMA_CHAN_TOUT;

	do {
		uint32_t cfg, halt;

		if (timeout > DMA_CHAN_TOUT / 2)
			halt = DMAC_CFG_PKT_HALT_MASK;
		else
			halt = DMAC_CFG_BRST_HALT_MASK;

		/* try to stop dma channel */
		writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
		mb();

		/* check if channel was stopped */
		cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
		if (!(cfg & DMAC_CFG_ENABLE_MASK))
			break;

		udelay(1);
	} while (--timeout);

	if (!timeout)
		pr_err("unable to stop channel %u\n", ch);

	/* reset dma channel */
	setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
	mb();
	clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
}

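/*
 * dma_ops.disable: halt and reset the channel, force-release flow control
 * buffers on RX, and rewind the software ring state so a later enable
 * restarts from descriptor 0.
 */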
static int bcm6348_iudma_disable(struct dma *dma)
{
	struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* stop dma channel */
	bcm6348_iudma_chan_stop(priv, dma->id);

	/* dma flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
			  priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->desc_cnt = 0;
	else
		ch_priv->desc_cnt = ch_priv->dma_ring_size;

	return 0;
}

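/*
 * dma_ops.enable: arm the descriptor ring (RX slots get OWN set, TX slots
 * are cleared), flush it to memory, point the channel's SRAM state at the
 * ring, program the flow control thresholds at 1/3 and 2/3 of the ring
 * depth, set the maximum burst length and kick RX.
 */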
static int bcm6348_iudma_enable(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint8_t i;

	/* dma ring init */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (bcm6348_iudma_chan_is_rx(dma->id)) {
			ch_priv->busy_desc[i] = false;
			dma_desc->status |= DMAD_ST_OWN_MASK;
		} else {
			dma_desc->status = 0;
			dma_desc->length = 0;
			dma_desc->address = 0;
		}

		if (i == ch_priv->desc_cnt - 1)
			dma_desc->status |= DMAD_ST_WRAP_MASK;

		dma_desc++;
	}

	/* init to first descriptor */
	ch_priv->desc_id = 0;

	/* force cache writeback */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* clear sram */
	writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));

	/* set dma ring start */
	writel_be(virt_to_phys(ch_priv->dma_ring),
		  priv->sram + DMAS_RSTART_REG(dma->id));

	/* set flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		u32 val;

		setbits_be32(priv->base + DMA_CFG_REG,
			     DMA_CFG_FLOWC_ENABLE(dma->id));

		val = ch_priv->desc_cnt / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

		val = (ch_priv->desc_cnt * 2) / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
	}

	/* set dma max burst */
	writel_be(ch_priv->desc_cnt,
		  priv->chan + DMAC_BURST_REG(dma->id));

	/* kick rx dma channel */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	/* channel is now enabled */
	ch_priv->running = true;

	return 0;
}

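/*
 * dma_ops.request: allocate per-channel state and a cache-aligned ring.
 * RX rings start empty and are filled through prepare_rcv_buf() before
 * enable; TX rings start with every descriptor available.
 */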
static int bcm6348_iudma_request(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv;

	/* check if channel is valid */
	if (dma->id >= priv->n_channels)
		return -ENODEV;

	/* alloc channel private data */
	priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
	if (!priv->ch_priv[dma->id])
		return -ENOMEM;
	ch_priv = priv->ch_priv[dma->id];

	/* alloc dma ring */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->dma_ring_size = DMA_RX_DESC;
	else
		ch_priv->dma_ring_size = DMA_TX_DESC;

	ch_priv->dma_ring =
		malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
				     ch_priv->dma_ring_size);
	if (!ch_priv->dma_ring)
		return -ENOMEM;

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		ch_priv->desc_cnt = 0;
		/* one busy flag per ring slot, not per armed descriptor */
		ch_priv->busy_desc = calloc(ch_priv->dma_ring_size,
					    sizeof(bool));
		if (!ch_priv->busy_desc)
			return -ENOMEM;
	} else {
		ch_priv->desc_cnt = ch_priv->dma_ring_size;
		ch_priv->busy_desc = NULL;
	}

	return 0;
}

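/*
 * dma_ops.receive: poll the next ring slot. Returns -EAGAIN while the
 * engine still owns the descriptor or when the frame is invalid (missing
 * SOP/EOP or any bit from hw->err_mask); otherwise returns the packet
 * length and points *dst at the buffer, which stays busy until the caller
 * recycles it via prepare_rcv_buf().
 */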
static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	const struct bcm6348_iudma_hw *hw = priv->hw;
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	int ret;

	if (!ch_priv->running)
		return -EINVAL;

	/* get dma ring descriptor address */
	dma_desc += ch_priv->desc_id;

	/* invalidate cache data */
	bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

	/* check dma own */
	if (dma_desc->status & DMAD_ST_OWN_MASK)
		return -EAGAIN;

	/* check pkt */
	if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
	    !(dma_desc->status & DMAD_ST_SOP_MASK) ||
	    (dma_desc->status & hw->err_mask)) {
		pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n",
		       dma->id, ch_priv->desc_id, dma_desc->status);
		ret = -EAGAIN;
	} else {
		/* set dma buffer address */
		*dst = phys_to_virt(dma_desc->address);

		/* invalidate cache data */
		bcm6348_iudma_idc(*dst, dma_desc->length);

		/* return packet length */
		ret = dma_desc->length;
	}

	/* busy dma descriptor */
	ch_priv->busy_desc[ch_priv->desc_id] = true;

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return ret;
}

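/*
 * dma_ops.send: synchronous transmit. The buffer is flushed and described
 * by a single OWN|SOP|EOP|CRC descriptor, the channel is kicked, and the
 * function then spins, re-invalidating the descriptor, until the engine
 * clears OWN.
 */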
static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
			      void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc;
	uint16_t status;

	if (!ch_priv->running)
		return -EINVAL;

	/* flush cache */
	bcm6348_iudma_fdc(src, len);

	/* get dma ring descriptor address */
	dma_desc = ch_priv->dma_ring;
	dma_desc += ch_priv->desc_id;

	/* config dma descriptor */
	status = (DMAD_ST_OWN_MASK |
		  DMAD_ST_EOP_MASK |
		  DMAD_ST_CRC_MASK |
		  DMAD_ST_SOP_MASK);
	if (ch_priv->desc_id == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	/* set dma descriptor */
	dma_desc->address = virt_to_phys(src);
	dma_desc->length = len;
	dma_desc->status = status;

	/* flush cache */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* kick tx dma channel */
	setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);

	/* poll dma status */
	do {
		/* invalidate cache */
		bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

		if (!(dma_desc->status & DMAD_ST_OWN_MASK))
			break;
	} while (1);

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return 0;
}

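/*
 * Recycle an RX buffer the consumer is done with: locate its descriptor,
 * hand the slot back to the engine (OWN, plus WRAP on the last slot),
 * report one freed buffer to the flow control allocator, and restart the
 * channel if flow control had stopped it.
 */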
static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint16_t status;
	uint8_t i;
	u32 cfg;

	/* get dirty dma descriptor */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (phys_to_virt(dma_desc->address) == dst)
			break;

		dma_desc++;
	}

	/* dma descriptor not found */
	if (i == ch_priv->desc_cnt) {
		pr_err("dirty dma descriptor not found\n");
		return -ENOENT;
	}

	/* invalidate cache */
	bcm6348_iudma_idc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* free dma descriptor */
	ch_priv->busy_desc[i] = false;

	status = DMAD_ST_OWN_MASK;
	if (i == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	dma_desc->status |= status;
	dma_desc->length = PKTSIZE_ALIGN;

	/* tell dma we allocated one buffer */
	writel_be(1, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

	/* flush cache */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* kick rx dma channel if disabled */
	cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id));
	if (!(cfg & DMAC_CFG_ENABLE_MASK))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	return 0;
}

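/*
 * Append a fresh RX buffer while the channel is still stopped; the
 * descriptor is left unowned here and armed later by
 * bcm6348_iudma_enable().
 */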
static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;

	/* no more dma descriptors available */
	if (ch_priv->desc_cnt == ch_priv->dma_ring_size) {
		pr_err("max number of buffers reached\n");
		return -EINVAL;
	}

	/* get next dma descriptor */
	dma_desc += ch_priv->desc_cnt;

	/* init dma descriptor */
	dma_desc->address = virt_to_phys(dst);
	dma_desc->length = size;
	dma_desc->status = 0;

	/* flush cache */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* increment dma descriptors */
	ch_priv->desc_cnt++;

	return 0;
}

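/*
 * dma_ops.prepare_rcv_buf: before the channel is enabled this grows the
 * ring with new buffers; once the channel is running the same hook
 * recycles buffers the consumer hands back.
 */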
static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst,
					 size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* recycle returned buffers on a running channel, add new ones otherwise */
	if (ch_priv->running)
		return bcm6348_iudma_free_rcv_buf(dma, dst, size);
	else
		return bcm6348_iudma_add_rcv_buf(dma, dst, size);
}

static const struct dma_ops bcm6348_iudma_ops = {
	.disable = bcm6348_iudma_disable,
	.enable = bcm6348_iudma_enable,
	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
	.request = bcm6348_iudma_request,
	.receive = bcm6348_iudma_receive,
	.send = bcm6348_iudma_send,
};

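/*
 * Per-SoC descriptor error mask: BCM6348 reports RX errors in the
 * descriptor status word; for BCM6368 the driver has no error bits to
 * check.
 */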
static const struct bcm6348_iudma_hw bcm6348_hw = {
	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
		     DMAD6348_ST_CRC_ERR_MASK |
		     DMAD6348_ST_RX_ERR_MASK |
		     DMAD6348_ST_OS_ERR_MASK |
		     DMAD6348_ST_UN_ERR_MASK),
};

static const struct bcm6348_iudma_hw bcm6368_hw = {
	.err_mask = 0,
};

static const struct udevice_id bcm6348_iudma_ids[] = {
	{
		.compatible = "brcm,bcm6348-iudma",
		.data = (ulong)&bcm6348_hw,
	}, {
		.compatible = "brcm,bcm6368-iudma",
		.data = (ulong)&bcm6368_hw,
	}, { /* sentinel */ }
};

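/*
 * Probe: map the "dma", "dma-channels" and "dma-sram" register ranges,
 * enable any clocks and deassert any resets listed in the device tree,
 * then bring the controller up with every channel stopped.
 */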
static int bcm6348_iudma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
	const struct bcm6348_iudma_hw *hw =
		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
	uint8_t ch;
	int i;

	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
			      DMA_SUPPORTS_MEM_TO_DEV);
	priv->hw = hw;

	/* dma global base address */
	priv->base = dev_remap_addr_name(dev, "dma");
	if (!priv->base)
		return -EINVAL;

	/* dma channels base address */
	priv->chan = dev_remap_addr_name(dev, "dma-channels");
	if (!priv->chan)
		return -EINVAL;

	/* dma sram base address */
	priv->sram = dev_remap_addr_name(dev, "dma-sram");
	if (!priv->sram)
		return -EINVAL;

	/* get number of channels */
	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
	if (priv->n_channels > DMA_CHAN_MAX)
		return -EINVAL;

	/* try to enable clocks */
	for (i = 0; ; i++) {
		struct clk clk;
		int ret;

		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0)
			break;

		ret = clk_enable(&clk);
		if (ret < 0) {
			pr_err("error enabling clock %d\n", i);
			return ret;
		}

		ret = clk_free(&clk);
		if (ret < 0) {
			pr_err("error freeing clock %d\n", i);
			return ret;
		}
	}

	/* try to perform resets */
	for (i = 0; ; i++) {
		struct reset_ctl reset;
		int ret;

		ret = reset_get_by_index(dev, i, &reset);
		if (ret < 0)
			break;

		ret = reset_deassert(&reset);
		if (ret < 0) {
			pr_err("error deasserting reset %d\n", i);
			return ret;
		}

		ret = reset_free(&reset);
		if (ret < 0) {
			pr_err("error freeing reset %d\n", i);
			return ret;
		}
	}

	/* disable dma controller */
	clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	/* alloc channel private data pointers */
	priv->ch_priv = calloc(priv->n_channels,
			       sizeof(struct bcm6348_chan_priv *));
	if (!priv->ch_priv)
		return -ENOMEM;

	/* stop dma channels */
	for (ch = 0; ch < priv->n_channels; ch++)
		bcm6348_iudma_chan_stop(priv, ch);

	/* enable dma controller */
	setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	return 0;
}

U_BOOT_DRIVER(bcm6348_iudma) = {
	.name = "bcm6348_iudma",
	.id = UCLASS_DMA,
	.of_match = bcm6348_iudma_ids,
	.ops = &bcm6348_iudma_ops,
	.priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv),
	.probe = bcm6348_iudma_probe,
};