// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <asm/global_data.h>
#include <linux/ctype.h>
#include <linux/list.h>

#ifdef CONFIG_NEEDS_MANUAL_RELOC
DECLARE_GLOBAL_DATA_PTR;
#endif

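/**
 * struct block_cache_node - one cached range of a block device
 * @lh:		list node; the cache list is kept in MRU order, head first
 * @iftype:	interface type of the backing block device
 * @devnum:	device number within that interface type
 * @start:	first cached block number
 * @blkcnt:	number of blocks held in @cache
 * @blksz:	size of one block in bytes
 * @cache:	heap buffer holding @blkcnt blocks of data
 */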
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

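/* Cache entries in most-recently-used order: MRU at the head, LRU at the tail */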
static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};

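/*
 * With manual relocation, the pointers inside the statically initialized
 * list head still refer to the pre-relocation image, so fix them up once
 * after relocation.
 */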
#ifdef CONFIG_NEEDS_MANUAL_RELOC
int blkcache_init(void)
{
	struct list_head *head = &block_cache;

	head->next = (struct list_head *)((uintptr_t)head->next +
					  gd->reloc_off);
	head->prev = (struct list_head *)((uintptr_t)head->prev +
					  gd->reloc_off);

	return 0;
}
#endif

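/**
 * cache_find() - find a cache entry covering a block range
 * @iftype:	interface type of the block device
 * @devnum:	device number within the interface
 * @start:	first block of the requested range
 * @blkcnt:	number of blocks requested
 * @blksz:	block size in bytes
 *
 * An entry matches only if it covers the requested range completely.
 * A hit is moved to the head of the list to maintain MRU ordering.
 *
 * Return: the matching entry, or NULL on a cache miss
 */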
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}

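/**
 * blkcache_read() - attempt to satisfy a read from the cache
 * @iftype:	interface type of the block device
 * @devnum:	device number within the interface
 * @start:	first block to read
 * @blkcnt:	number of blocks to read
 * @blksz:	block size in bytes
 * @buffer:	destination for @blkcnt blocks of data
 *
 * Return: 1 if the request was satisfied from the cache, 0 otherwise
 */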
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}

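/**
 * blkcache_fill() - store the result of a device read in the cache
 * @iftype:	interface type of the block device
 * @devnum:	device number within the interface
 * @start:	first block that was read
 * @blkcnt:	number of blocks read
 * @blksz:	block size in bytes
 * @buffer:	data that was read from the device
 *
 * Ranges larger than max_blocks_per_entry are not cached. When the cache
 * is full, the least recently used entry (the list tail) is evicted; its
 * data buffer is reused if it is large enough for the new range and
 * reallocated otherwise.
 */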
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}

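/**
 * blkcache_invalidate() - drop cache entries for a device
 * @iftype:	interface type, or -1 to drop every entry
 * @devnum:	device number (ignored when @iftype is -1)
 */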
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if (iftype == -1 ||
		    (node->iftype == iftype && node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}

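/**
 * blkcache_configure() - set the cache limits
 * @blocks:	maximum number of blocks per entry
 * @entries:	maximum number of cache entries
 *
 * The whole cache is invalidated if either limit changes; the hit and
 * miss counters are always reset.
 */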
void blkcache_configure(unsigned blocks, unsigned entries)
{
	/* invalidate cache if there is a change */
	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries))
		blkcache_invalidate(-1, 0);

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}

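/**
 * blkcache_stats() - copy out the cache statistics
 * @stats:	destination for the current counters and limits
 *
 * Reading the statistics resets the hit and miss counters.
 */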
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}

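/* Release every cache entry along with its data buffer */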
void blkcache_free(void)
{
	blkcache_invalidate(-1, 0);
}