/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2014 Freescale Semiconductor
 */

#include "qbman_private.h"
#include <fsl-mc/fsl_qbman_portal.h>
#include <fsl-mc/fsl_dpaa_fd.h>

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((uint32_t)0x80)

/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0

#define QBMAN_VER_4_0_DQRR_SIZE 4
#define QBMAN_VER_4_1_DQRR_SIZE 8


/* --------------------- */
/* portal data structure */
/* --------------------- */

struct qbman_swp {
	const struct qbman_swp_desc *desc;
	/* The qbman_sys (i.e. arch/OS-specific) support code can put anything
	 * it needs in here. */
	struct qbman_swp_sys sys;
	/* Management commands */
	struct {
#ifdef QBMAN_CHECKING
		enum swp_mc_check {
			swp_mc_can_start, /* call __qbman_swp_mc_start() */
			swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
			swp_mc_can_poll, /* call __qbman_swp_mc_result() */
		} check;
#endif
		uint32_t valid_bit; /* 0x00 or 0x80 */
	} mc;
	/* Push dequeues */
	uint32_t sdq;
	/* Volatile dequeues */
	struct {
		/* VDQCR supports a "1 deep pipeline", meaning that if you know
		 * the last-submitted command is already executing in the
		 * hardware (as evidenced by at least 1 valid dequeue result),
		 * you can write another dequeue command to the register and
		 * the hardware will start executing it as soon as the
		 * already-executing command terminates. (This minimises
		 * latency and stalls.) With that in mind, this "busy" variable
		 * refers to whether or not a command can be submitted, not
		 * whether or not a previously-submitted command is still
		 * executing. In other words, once proof is seen that the
		 * previously-submitted command is executing, "vdq" is no
		 * longer "busy".
		 */
		atomic_t busy;
		uint32_t valid_bit; /* 0x00 or 0x80 */
		/* We need to determine when vdq is no longer busy. This
		 * depends on whether the "busy" (last-submitted) dequeue
		 * command is targeting DQRR or main-memory, and detection is
		 * based on the presence of the dequeue command's "token"
		 * showing up in dequeue entries in DQRR or main-memory
		 * (respectively). Debug builds will, when submitting vdq
		 * commands, verify that the dequeue result location is not
		 * already equal to the command's token value. */
		struct ldpaa_dq *storage; /* NULL if DQRR */
		uint32_t token;
	} vdq;
	/* DQRR */
	struct {
		uint32_t next_idx;
		uint32_t valid_bit;
		uint8_t dqrr_size;
	} dqrr;
};
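
/*
 * An illustrative sketch (not copied from the driver; the atomic helpers and
 * exact control flow shown here are assumptions) of how the "vdq" state above
 * is intended to be used when issuing a volatile dequeue on a portal "s":
 *
 *	if (!atomic_dec_and_test(&s->vdq.busy)) {
 *		atomic_inc(&s->vdq.busy);
 *		return -EBUSY;	(no proof yet that the prior command executes)
 *	}
 *	(format the VDQCR command, embedding s->vdq.token and flipping
 *	 s->vdq.valid_bit, then write it to the portal register)
 *
 *	(later, once a dequeue entry carrying s->vdq.token is seen in DQRR or
 *	 in s->vdq.storage, the one-deep pipeline slot is free again:)
 *	atomic_inc(&s->vdq.busy);
 */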

/* -------------------------- */
/* portal management commands */
/* -------------------------- */

/* Different management commands all use this common base layer of code to
 * issue commands and poll for results. The first function returns a pointer
 * to where the caller should fill in their MC command (though they should
 * ignore the verb byte), the second function merges in the caller-supplied
 * command verb (which should not include the valid-bit) and submits the
 * command to hardware, and the third function checks for a completed response
 * (returns non-NULL only if the response is complete). */
void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  uint32_t cmd_verb)
{
	int loopvar;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);
	DBG_POLL_START(loopvar);
	do {
		DBG_POLL_CHECK(loopvar);
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd);
	return cmd;
}
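
/*
 * A minimal usage sketch of the base layer above; the verb value, payload
 * layout and error handling shown here are hypothetical placeholders, not
 * part of this API:
 *
 *	uint32_t *cmd = qbman_swp_mc_start(swp);
 *	uint32_t *rsp;
 *
 *	if (!cmd)
 *		return -EBUSY;
 *	(fill in the command's payload words, leaving the verb byte alone)
 *	rsp = qbman_swp_mc_complete(swp, cmd, MY_CMD_VERB);
 *	(check the response's result byte against QBMAN_MC_RSLT_OK)
 */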

/* ------------ */
/* qb_attr_code */
/* ------------ */

/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
 * is either serving as a configuration command or a query result. The
 * representation is inherently little-endian, as the indexing of the words is
 * itself little-endian in nature and Layerscape is little-endian for anything
 * that crosses a word boundary too (64-bit fields are the obvious examples).
 */
struct qb_attr_code {
	unsigned int word; /* which uint32_t[] array member encodes the field */
	unsigned int lsoffset; /* encoding offset from ls-bit */
	unsigned int width; /* encoding width. (bool must be 1.) */
};

/* Macros to define codes */
#define QB_CODE(a, b, c) { a, b, c}

/* decode a field from a cacheline */
static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
					   const uint32_t *cacheline)
{
	return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
}


/* encode a field to a cacheline */
static inline void qb_attr_code_encode(const struct qb_attr_code *code,
				       uint32_t *cacheline, uint32_t val)
{
	cacheline[code->word] =
		r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
		| e32_uint32_t(code->lsoffset, code->width, val);
}
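
/*
 * Example of the attribute-code helpers (illustrative only; the field shown
 * here is made up): a code of QB_CODE(2, 16, 8) names an 8-bit field living
 * at bits 16..23 of word 2 of a command/result cacheline, which can then be
 * read and written like so:
 *
 *	static const struct qb_attr_code code_example = QB_CODE(2, 16, 8);
 *
 *	uint32_t v = qb_attr_code_decode(&code_example, cl);
 *	qb_attr_code_encode(&code_example, cl, v + 1);
 *
 * where "cl" is a pointer to the 32-bit words of the cacheline (see the
 * qb_cl() macro below).
 */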

static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
					  uint64_t *cacheline, uint64_t val)
{
	cacheline[code->word / 2] = val;
}

/* ---------------------- */
/* Descriptors/cachelines */
/* ---------------------- */

/* To avoid needless dynamic allocation, the driver API often gives the caller
 * a "descriptor" type that the caller can instantiate however they like.
 * Ultimately though, it is just a cacheline of binary storage (or something
 * smaller when it is known that the descriptor doesn't need all 64 bytes) for
 * holding pre-formatted pieces of hardware commands. The performance-critical
 * code can then copy these descriptors directly into hardware command
 * registers more efficiently than trying to construct/format commands
 * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
 * order for the compiler to know its size, but the internal details are not
 * exposed. The following macro is used within the driver for converting *any*
 * descriptor pointer to a usable array pointer. The use of a macro (instead of
 * an inline) is necessary to work with different descriptor types and to work
 * correctly with const and non-const inputs (and similarly-qualified outputs).
 */
#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
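
/*
 * For example (a sketch; this descriptor type is hypothetical), a descriptor
 * exposed to the API user as
 *
 *	struct qbman_foo_desc {
 *		uint32_t dont_manipulate_directly[16];
 *	};
 *
 * can be handed to the encode/decode helpers above via qb_cl():
 *
 *	struct qbman_foo_desc d;
 *	uint32_t *cl = qb_cl(&d);
 *
 *	qb_attr_code_encode(&code_example, cl, 1);
 *
 * (code_example here refers to the illustrative attribute code defined in the
 * earlier example.)
 */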