/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

/**
 * Atomically adds a signed value to a 64-bit (aligned) memory location
 * and returns the previous value.
 *
 * This version does not perform any 'sync' operations to enforce memory
 * ordering. It should only be used when there are no memory operation
 * ordering constraints. (It should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr address in memory to add incr to
 * @param incr amount to increment memory location by (signed)
 *
 * Return: Value of memory location before increment
 */
static inline s64 atomic_fetch_and_add64_nosync(s64 *ptr, s64 incr)
{
	s64 result;
	/* Atomic add with no ordering */
	asm volatile("ldadd %x[i], %x[r], [%[b]]"
		     : [r] "=r" (result), "+m" (*ptr)
		     : [i] "r" (incr), [b] "r" (ptr)
		     : "memory");
	return result;
}
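
/*
 * Illustrative sketch, not part of the original driver: bump a plain
 * statistics counter and fetch its previous value.  Simple counters
 * have no ordering requirements, so the 'nosync' variant is suitable.
 * The struct and field names here are hypothetical, for illustration
 * only.
 */
struct nix_stats_example {
	s64 tx_pkts;
};

static inline s64 nix_stats_count_tx_example(struct nix_stats_example *st)
{
	/* Returns the packet count as it was before this increment */
	return atomic_fetch_and_add64_nosync(&st->tx_pkts, 1);
}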
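
/**
 * Cancel a pending LMT store operation for this NIX LF by writing to
 * the LMTCANCEL register.
 *
 * @param nix NIX LF context
 */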
static inline void lmt_cancel(const struct nix *nix)
{
	writeq(0, nix->lmt_base + LMT_LF_LMTCANCEL());
}
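
/**
 * Get a pointer to the first LMT line (LMTLINEX(0)) of this NIX LF's
 * LMT region, where command words are staged before submission.
 *
 * @param nix NIX LF context
 *
 * Return: pointer to the LMT line
 */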
static inline u64 *lmt_store_ptr(struct nix *nix)
{
	return (u64 *)((u8 *)(nix->lmt_base) +
		       LMT_LF_LMTLINEX(0));
}
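
/**
 * Submit a staged LMT store by issuing an LDEOR atomic to the given
 * device I/O address.
 *
 * @param io_address I/O address to submit the operation to
 *
 * Return: result value reported by the device
 */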
static inline s64 lmt_submit(u64 io_address)
{
	s64 result = 0;

	asm volatile("ldeor xzr, %x[rf],[%[rs]]"
		     : [rf] "=r"(result) : [rs] "r"(io_address));
	return result;
}
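
/*
 * Illustrative sketch, not part of the original file: a typical LMT
 * transmit path stages the command words in the LMT line returned by
 * lmt_store_ptr() and then calls lmt_submit() on the LF's I/O address
 * to trigger the store.  'cmd', 'num_words' and 'io_addr' are
 * hypothetical parameters used only for this example.
 */
static inline s64 lmt_send_example(struct nix *nix, const u64 *cmd,
				   int num_words, u64 io_addr)
{
	u64 *lmt_line = lmt_store_ptr(nix);
	int i;

	/* Stage the command words in the LMT line */
	for (i = 0; i < num_words; i++)
		lmt_line[i] = cmd[i];

	/* The LDEOR in lmt_submit() triggers the actual LMT store */
	return lmt_submit(io_addr);
}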