// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

#ifdef CONFIG_NEEDS_MANUAL_RELOC
DECLARE_GLOBAL_DATA_PTR;
#endif

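/*
 * One cache entry: a contiguous range of blocks read from a single
 * device. Entries are linked on the block_cache list in MRU order,
 * with the most recently used entry at the head.
 */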
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

static LIST_HEAD(block_cache);

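/*
 * Cache limits and hit/miss counters. By default an entry may hold up
 * to 8 blocks and at most 32 entries are kept; both limits can be
 * changed at run time with blkcache_configure().
 */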
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};

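/*
 * With manual relocation, the next/prev pointers of the statically
 * initialized list head still reference the pre-relocation image, so
 * adjust them by gd->reloc_off after relocation.
 */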
#ifdef CONFIG_NEEDS_MANUAL_RELOC
int blkcache_init(void)
{
	struct list_head *head = &block_cache;

	head->next = (struct list_head *)((uintptr_t)head->next + gd->reloc_off);
	head->prev = (struct list_head *)((uintptr_t)head->prev + gd->reloc_off);

	return 0;
}
#endif

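/*
 * Find a cache entry on the given device that fully covers the
 * requested block range. A hit is moved to the head of the list to
 * preserve MRU ordering; returns the node, or 0 if nothing matches.
 */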
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return 0;
}

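/*
 * Try to satisfy a block read from the cache. On a hit the requested
 * blocks are copied into buffer and 1 is returned; on a miss 0 is
 * returned so the caller can read from the device itself.
 */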
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;
		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}

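/*
 * Insert a block range that was just read from a device. Ranges larger
 * than max_blocks_per_entry are not cached. When the cache is full, the
 * least recently used entry (the list tail) is evicted and its node is
 * reused; its data buffer is also reused if it is large enough.
 */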
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = 0;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = 0;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}

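/*
 * Remove and free every cache entry that belongs to the given device.
 */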
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}

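/*
 * Apply new cache limits. If either limit changes, all existing entries
 * are dropped; the hit/miss counters are reset either way.
 */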
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;
	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = (struct block_cache_node *)block_cache.next;
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}

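/*
 * Copy the current statistics to the caller and reset the hit and miss
 * counters.
 */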
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}