/*
 * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <scu.h>
#include <mcucfg.h>
#include <drivers/delay_timer.h>
#include <mcsi/mcsi.h>

#define MAX_CLUSTERS		5

static unsigned long cci_base_addr;
static unsigned int cci_cluster_ix_to_iface[MAX_CLUSTERS];

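/*
 * Record the MCSI base address and pre-compute the slave interface
 * offset for each cluster, so that later lookups by MPIDR are a simple
 * table access. Expected to be called once during early platform
 * initialisation (assumption based on how the cached state is used).
 */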
void mcsi_init(unsigned long cci_base,
		unsigned int num_cci_masters)
{
	int i;

	assert(cci_base);
	assert(num_cci_masters < MAX_CLUSTERS);

	cci_base_addr = cci_base;

	for (i = 0; i < num_cci_masters; i++)
		cci_cluster_ix_to_iface[i] = SLAVE_IFACE_OFFSET(i);
}

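/*
 * Flush the snoop filter (SF) contents and wait up to ~10ms for the
 * flush to complete. BIU DCM is disabled while the flush is in
 * progress and re-enabled afterwards.
 */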
void mcsi_cache_flush(void)
{
	/* timeout is 10ms */
	int timeout = 10000;

	/* to make the flush by SF safe, BIU DCM needs to be disabled */
	mmio_clrbits_32(CCI_CLK_CTRL, 1 << 8);
	mmio_write_32(cci_base_addr + FLUSH_SF, 0x1);

	for (; timeout; timeout--, udelay(1)) {
		if ((mmio_read_32(cci_base_addr + FLUSH_SF) & 0x1) == 0x0)
			break;
	}

	if (!timeout) {
		INFO("SF flush timeout\n");
		return;
	}

	/* re-enable BIU DCM as it was before */
	mmio_setbits_32(CCI_CLK_CTRL, 1 << 8);
}

static inline unsigned long get_slave_iface_base(unsigned long mpidr)
{
	/*
	 * We assume the TF topology code allocates affinity instances
	 * consecutively from zero.
	 * It is a programming error if this is called without initializing
	 * the slave interface to use for this cluster.
	 */
	unsigned int cluster_id =
		(mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;

	assert(cluster_id < MAX_CLUSTERS);
	assert(cci_cluster_ix_to_iface[cluster_id] != 0);

	return cci_base_addr + cci_cluster_ix_to_iface[cluster_id];
}

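/*
 * Enable coherency for the cluster identified by mpidr: wait for any
 * pending snoop transactions to drain, then turn on snoop and DVM
 * message handling on that cluster's slave interface according to the
 * support bits it reports, and wait again for pending snoops to clear.
 */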
void cci_enable_cluster_coherency(unsigned long mpidr)
{
	unsigned long slave_base;
	unsigned int support_ability;
	unsigned int config = 0;

	assert(cci_base_addr);
	slave_base = get_slave_iface_base(mpidr);
	support_ability = mmio_read_32(slave_base);

	/* Wait for any pending snoop transactions to drain */
	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;

	if (support_ability & SNP_SUPPORT)
		config |= SNOOP_EN_BIT;
	if (support_ability & DVM_SUPPORT)
		config |= DVM_EN_BIT;

	mmio_write_32(slave_base, support_ability | config);

	/* Wait for the dust to settle down */
	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;
}

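/*
 * With the MCSI-B software erratum workaround enabled, the workaround
 * hook is declared weak, presumably so that a platform-specific
 * implementation can override it.
 */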
#if ERRATA_MCSIB_SW
#pragma weak mcsib_sw_workaround_main
#endif

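/*
 * Disable coherency for the cluster identified by mpidr: drain pending
 * snoop transactions, clear the snoop and DVM enable bits on the
 * cluster's slave interface, run the erratum workaround if it is built
 * in, and drain pending snoops once more.
 */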
void cci_disable_cluster_coherency(unsigned long mpidr)
{
	unsigned long slave_base;
	unsigned int config = 0;

	assert(cci_base_addr);
	slave_base = get_slave_iface_base(mpidr);

	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;

	config = mmio_read_32(slave_base);
	config &= ~(DVM_EN_BIT | SNOOP_EN_BIT);

	/* Disable Snoops and DVM messages */
	mmio_write_32(slave_base, config);

#if ERRATA_MCSIB_SW
	mcsib_sw_workaround_main();
#endif

	/* Wait for the dust to settle down */
	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;
}

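/*
 * Toggle the SECURE_ACC_EN bit in the central control register: set it
 * when status == NS_ACC, clear it otherwise.
 */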
void cci_secure_switch(unsigned int status)
{
	unsigned int config;

	config = mmio_read_32(cci_base_addr + CENTRAL_CTRL_REG);
	if (status == NS_ACC)
		config |= SECURE_ACC_EN;
	else
		config &= ~SECURE_ACC_EN;
	mmio_write_32(cci_base_addr + CENTRAL_CTRL_REG, config);
}

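/*
 * Same as cci_secure_switch(), but for the PMU access control bit
 * (PMU_SECURE_ACC_EN).
 */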
void cci_pmu_secure_switch(unsigned int status)
{
	unsigned int config;

	config = mmio_read_32(cci_base_addr + CENTRAL_CTRL_REG);
	if (status == NS_ACC)
		config |= PMU_SECURE_ACC_EN;
	else
		config &= ~PMU_SECURE_ACC_EN;
	mmio_write_32(cci_base_addr + CENTRAL_CTRL_REG, config);
}

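/*
 * Initialise both snoop filters: drain pending snoops, then trigger
 * SF1 and SF2 initialisation in turn, polling until the hardware
 * clears each trigger bit and reports the corresponding init-done
 * status.
 */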
void cci_init_sf(void)
{
	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;
	/* init sf1 */
	mmio_write_32(cci_base_addr + SF_INIT_REG, TRIG_SF1_INIT);
	while (mmio_read_32(cci_base_addr + SF_INIT_REG) & TRIG_SF1_INIT)
		;
	while (!(mmio_read_32(cci_base_addr + SF_INIT_REG) & SF1_INIT_DONE))
		;
	/* init sf2 */
	mmio_write_32(cci_base_addr + SF_INIT_REG, TRIG_SF2_INIT);
	while (mmio_read_32(cci_base_addr + SF_INIT_REG) & TRIG_SF2_INIT)
		;
	while (!(mmio_read_32(cci_base_addr + SF_INIT_REG) & SF2_INIT_DONE))
		;
}

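/* Enable MCSI interrupt generation via the central control register. */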
void cci_interrupt_en(void)
{
	mmio_setbits_32(cci_base_addr + CENTRAL_CTRL_REG, INT_EN);
}

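/*
 * Generic accessor for MCSI registers: perform a read, write, set-bits
 * or clear-bits operation at the given offset. Panics if the driver
 * has not been initialised or the offset falls outside the MCSI
 * register space.
 */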
unsigned long cci_reg_access(unsigned int op, unsigned long offset,
			     unsigned long val)
{
	unsigned long ret = 0;

	if ((cci_base_addr == 0) || (offset > MSCI_MEMORY_SZ))
		panic();

	switch (op) {
	case MCSI_REG_ACCESS_READ:
		ret = mmio_read_32(cci_base_addr + offset);
		break;
	case MCSI_REG_ACCESS_WRITE:
		mmio_write_32(cci_base_addr + offset, val);
		dsb();
		break;
	case MCSI_REG_ACCESS_SET_BITMASK:
		mmio_setbits_32(cci_base_addr + offset, val);
		dsb();
		break;
	case MCSI_REG_ACCESS_CLEAR_BITMASK:
		mmio_clrbits_32(cci_base_addr + offset, val);
		dsb();
		break;
	default:
		break;
	}
	return ret;
}