// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/arch-tegra/ivc.h>
#include <linux/bug.h>
#include <linux/printk.h>

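/*
 * Counters and frames are aligned to this boundary so that each lands in
 * its own cache line (64 bytes, presumably the cache line size on the SoCs
 * this targets); see the struct tegra_ivc_channel_header comment below.
 */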
#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	ivc_state_established = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack
};

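/*
 * For illustration, one possible interleaving of a full reset handshake
 * between ends A and B (see the state transition table ahead of
 * tegra_ivc_channel_notified() below):
 *
 *	A: SYNC   B: EST	A requests a reset
 *	A: SYNC   B: ACK	B sees SYNC, clears its counters, notifies
 *	A: EST    B: ACK	A sees ACK, clears its counters, notifies
 *	A: EST    B: EST	B sees EST; the channel is established
 */
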
/*
 * This structure is divided into two cache-aligned parts, the first of which
 * is only written through the tx_channel pointer, while the second is only
 * written through the rx_channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in non-cache
 * coherent implementations.
 */
struct tegra_ivc_channel_header {
	union {
		/* fields owned by the transmitting end */
		struct {
			uint32_t w_count;
			uint32_t state;
		};
		uint8_t w_align[TEGRA_IVC_ALIGN];
	};
	union {
		/* fields owned by the receiving end */
		uint32_t r_count;
		uint8_t r_align[TEGRA_IVC_ALIGN];
	};
};

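/*
 * Each queue in shared memory therefore has the following layout, with the
 * header immediately followed by the frames (see tegra_ivc_frame_addr()):
 *
 *	+--------------------------------+  <- queue base (64-byte aligned)
 *	| w_count, state  (tx-owned)     |  TEGRA_IVC_ALIGN bytes
 *	+--------------------------------+
 *	| r_count         (rx-owned)     |  TEGRA_IVC_ALIGN bytes
 *	+--------------------------------+
 *	| frame 0                        |  frame_size bytes each
 *	| ...                            |
 *	| frame nframes - 1              |
 *	+--------------------------------+
 */
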
static inline void tegra_ivc_invalidate_counter(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					ulong offset)
{
	ulong base = ((ulong)h) + offset;
	invalidate_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline void tegra_ivc_flush_counter(struct tegra_ivc *ivc,
					   struct tegra_ivc_channel_header *h,
					   ulong offset)
{
	ulong base = ((ulong)h) + offset;
	flush_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline ulong tegra_ivc_frame_addr(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 uint32_t frame)
{
	BUG_ON(frame >= ivc->nframes);

	return ((ulong)h) + sizeof(struct tegra_ivc_channel_header) +
	       (ivc->frame_size * frame);
}

static inline void *tegra_ivc_frame_pointer(struct tegra_ivc *ivc,
					    struct tegra_ivc_channel_header *ch,
					    uint32_t frame)
{
	return (void *)tegra_ivc_frame_addr(ivc, ch, frame);
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	invalidate_dcache_range(base, base + ivc->frame_size);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	flush_dcache_range(base, base + ivc->frame_size);
}

static inline int tegra_ivc_channel_empty(struct tegra_ivc *ivc,
					  struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	uint32_t w_count = READ_ONCE(ch->w_count);
	uint32_t r_count = READ_ONCE(ch->r_count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
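	/*
	 * For illustration: with nframes == 4, a well-behaved peer never
	 * lets w_count - r_count exceed 4 (full). A snapshot such as
	 * w_count == 9, r_count == 2 (difference 7) can only arise from a
	 * broken or malicious peer, so it is reported as empty instead.
	 */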
	if (w_count - r_count > ivc->nframes)
		return 1;

	return w_count == r_count;
}

static inline int tegra_ivc_channel_full(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *ch)
{
	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return (READ_ONCE(ch->w_count) - READ_ONCE(ch->r_count)) >=
	       ivc->nframes;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx_channel->r_count,
		   READ_ONCE(ivc->rx_channel->r_count) + 1);

	if (ivc->r_pos == ivc->nframes - 1)
		ivc->r_pos = 0;
	else
		ivc->r_pos++;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx_channel->w_count,
		   READ_ONCE(ivc->tx_channel->w_count) + 1);

	if (ivc->w_pos == ivc->nframes - 1)
		ivc->w_pos = 0;
	else
		ivc->w_pos++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	ulong offset;

	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is not
	 * allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses to
	 * an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate empty
	 * or full.
	 */
	if (!tegra_ivc_channel_empty(ivc, ivc->rx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	return tegra_ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	ulong offset;

	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	if (!tegra_ivc_channel_full(ivc, ivc->tx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);
	return tegra_ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}

static inline uint32_t tegra_ivc_channel_avail_count(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_channel_empty() for an explanation about
	 * special over-full considerations.
	 */
	return READ_ONCE(ch->w_count) - READ_ONCE(ch->r_count);
}

int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_read(ivc);
	if (result < 0)
		return result;

	/*
	 * Order observation of w_count potentially indicating new data before
	 * data read.
	 */
	mb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx_channel, ivc->r_pos);
	*frame = tegra_ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);

	return 0;
}

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	result = tegra_ivc_check_read(ivc);
	if (result)
		return result;

	tegra_ivc_advance_rx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_flush_counter(ivc, ivc->rx_channel, offset);

	/*
	 * Ensure our write to r_count occurs before our read from w_count.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);

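	/*
	 * An available count of nframes - 1 after the advance above means
	 * the queue was completely full before this read freed a frame:
	 * the writer may be waiting for space, so kick it.
	 */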
	if (tegra_ivc_channel_avail_count(ivc, ivc->rx_channel) ==
	    ivc->nframes - 1)
		ivc->notify(ivc);

	return 0;
}

int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	*frame = tegra_ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	return 0;
}

int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	tegra_ivc_flush_frame(ivc, ivc->tx_channel, ivc->w_pos);

	/*
	 * Order any possible stores to the frame before update of w_count.
	 */
	mb();

	tegra_ivc_advance_tx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

	/*
	 * Ensure our write to w_count occurs before our read from r_count.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);

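	/*
	 * An available count of 1 after the advance above means the queue
	 * was empty before this write: the reader may be waiting for data,
	 * so kick it.
	 */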
	if (tegra_ivc_channel_avail_count(ivc, ivc->tx_channel) == 1)
		ivc->notify(ivc);

	return 0;
}

/*
 * ===============================================================
 * IVC State Transition Table - see tegra_ivc_channel_notified()
 * ===============================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int tegra_ivc_channel_notified(struct tegra_ivc *ivc)
{
	ulong offset;
	enum ivc_state peer_state;

	/* Copy the remote end's state out of shared memory. */
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	peer_state = READ_ONCE(ivc->rx_channel->state);

	if (peer_state == ivc_state_sync) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_sync &&
		   peer_state == ivc_state_ack) {
		/*
		 * Order observation of ivc_state_ack before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_ack) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx_channel->state != ivc_state_established)
		return -EAGAIN;

	return 0;
}

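/*
 * Start the reset protocol from our side: enter the SYNC state and kick the
 * remote end. The channel remains unusable until a subsequent call (or
 * calls) to tegra_ivc_channel_notified() returns 0, i.e. until both ends
 * have reached the established state.
 */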
void tegra_ivc_channel_reset(struct tegra_ivc *ivc)
{
	ulong offset;

	ivc->tx_channel->state = ivc_state_sync;
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
	ivc->notify(ivc);
}

static int check_ivc_params(ulong qbase1, ulong qbase2, uint32_t nframes,
			    uint32_t frame_size)
{
	int ret = 0;

	BUG_ON(offsetof(struct tegra_ivc_channel_header, w_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(offsetof(struct tegra_ivc_channel_header, r_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(sizeof(struct tegra_ivc_channel_header) &
	       (TEGRA_IVC_ALIGN - 1));

	if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) {
		pr_err("tegra_ivc: nframes * frame_size overflows\n");
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if ((qbase1 & (TEGRA_IVC_ALIGN - 1)) ||
	    (qbase2 & (TEGRA_IVC_ALIGN - 1))) {
		pr_err("tegra_ivc: channel start not aligned\n");
		return -EINVAL;
	}

	if (frame_size & (TEGRA_IVC_ALIGN - 1)) {
		pr_err("tegra_ivc: frame size not adequately aligned\n");
		return -EINVAL;
	}

	if (qbase1 < qbase2) {
		if (qbase1 + frame_size * nframes > qbase2)
			ret = -EINVAL;
	} else {
		if (qbase2 + frame_size * nframes > qbase1)
			ret = -EINVAL;
	}

	if (ret) {
		pr_err("tegra_ivc: queue regions overlap\n");
		return ret;
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, ulong rx_base, ulong tx_base,
		   uint32_t nframes, uint32_t frame_size,
		   void (*notify)(struct tegra_ivc *))
{
	int ret;

	if (!ivc)
		return -EINVAL;

	ret = check_ivc_params(rx_base, tx_base, nframes, frame_size);
	if (ret)
		return ret;

	ivc->rx_channel = (struct tegra_ivc_channel_header *)rx_base;
	ivc->tx_channel = (struct tegra_ivc_channel_header *)tx_base;
	ivc->w_pos = 0;
	ivc->r_pos = 0;
	ivc->nframes = nframes;
	ivc->frame_size = frame_size;
	ivc->notify = notify;

	return 0;
}
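
/*
 * For reference, a minimal (hypothetical) sender could drive this API as
 * sketched below. The notify hook, doorbell register, queue addresses,
 * sizes and payload are placeholders that a real caller would supply:
 *
 *	static void my_notify(struct tegra_ivc *ivc)
 *	{
 *		writel(1, MY_DOORBELL_REG);	// placeholder doorbell
 *	}
 *
 *	struct tegra_ivc ivc;
 *	void *frame;
 *
 *	if (tegra_ivc_init(&ivc, rx_base, tx_base, 16, 128, my_notify))
 *		return;
 *	tegra_ivc_channel_reset(&ivc);
 *	while (tegra_ivc_channel_notified(&ivc) == -EAGAIN)
 *		;	// poll (or wait for the remote end's notifications)
 *
 *	if (!tegra_ivc_write_get_next_frame(&ivc, &frame)) {
 *		memcpy(frame, buf, 128);
 *		tegra_ivc_write_advance(&ivc);
 *	}
 */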