blob: 0e69160249c7f12e7c8484f3c4dec620836c2df4 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Eric Nelsonfaf4f052016-03-28 10:05:44 -07002/*
3 * Copyright (C) Nelson Integration, LLC 2016
4 * Author: Eric Nelson<eric@nelint.com>
5 *
Eric Nelsonfaf4f052016-03-28 10:05:44 -07006 */
Simon Glass655306c2020-05-10 11:39:58 -06007#include <blk.h>
Simon Glass0f2af882020-05-10 11:40:05 -06008#include <log.h>
Eric Nelsonfaf4f052016-03-28 10:05:44 -07009#include <malloc.h>
10#include <part.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060011#include <asm/global_data.h>
Eric Nelsonfaf4f052016-03-28 10:05:44 -070012#include <linux/ctype.h>
13#include <linux/list.h>
14
/*
 * One cached run of consecutive blocks read from a single block device.
 * Nodes are kept on the global 'block_cache' list in MRU order.
 */
struct block_cache_node {
	struct list_head lh;	/* list linkage; code below casts list
				 * entries back to this struct, so this
				 * member must remain first */
	int iftype;		/* interface type the data came from */
	int devnum;		/* device number on that interface */
	lbaint_t start;		/* first block number held in 'cache' */
	lbaint_t blkcnt;	/* number of blocks held in 'cache' */
	unsigned long blksz;	/* block size, in bytes */
	char *cache;		/* malloc'd buffer of blkcnt * blksz bytes */
};
24
Angelo Durgehello2c54cf52020-01-26 19:31:22 +010025static LIST_HEAD(block_cache);
Eric Nelsonfaf4f052016-03-28 10:05:44 -070026
/*
 * Cache limits and hit/miss counters. Defaults (8 blocks per entry,
 * 32 entries) can be changed at runtime via blkcache_configure().
 */
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
31
Eric Nelsonfaf4f052016-03-28 10:05:44 -070032static struct block_cache_node *cache_find(int iftype, int devnum,
33 lbaint_t start, lbaint_t blkcnt,
34 unsigned long blksz)
35{
36 struct block_cache_node *node;
37
38 list_for_each_entry(node, &block_cache, lh)
39 if ((node->iftype == iftype) &&
40 (node->devnum == devnum) &&
41 (node->blksz == blksz) &&
42 (node->start <= start) &&
43 (node->start + node->blkcnt >= start + blkcnt)) {
44 if (block_cache.next != &node->lh) {
45 /* maintain MRU ordering */
46 list_del(&node->lh);
47 list_add(&node->lh, &block_cache);
48 }
49 return node;
50 }
51 return 0;
52}
53
54int blkcache_read(int iftype, int devnum,
55 lbaint_t start, lbaint_t blkcnt,
56 unsigned long blksz, void *buffer)
57{
58 struct block_cache_node *node = cache_find(iftype, devnum, start,
59 blkcnt, blksz);
60 if (node) {
61 const char *src = node->cache + (start - node->start) * blksz;
62 memcpy(buffer, src, blksz * blkcnt);
63 debug("hit: start " LBAF ", count " LBAFU "\n",
64 start, blkcnt);
65 ++_stats.hits;
66 return 1;
67 }
68
69 debug("miss: start " LBAF ", count " LBAFU "\n",
70 start, blkcnt);
71 ++_stats.misses;
72 return 0;
73}
74
75void blkcache_fill(int iftype, int devnum,
76 lbaint_t start, lbaint_t blkcnt,
77 unsigned long blksz, void const *buffer)
78{
79 lbaint_t bytes;
80 struct block_cache_node *node;
81
82 /* don't cache big stuff */
83 if (blkcnt > _stats.max_blocks_per_entry)
84 return;
85
86 if (_stats.max_entries == 0)
87 return;
88
89 bytes = blksz * blkcnt;
90 if (_stats.max_entries <= _stats.entries) {
91 /* pop LRU */
92 node = (struct block_cache_node *)block_cache.prev;
93 list_del(&node->lh);
94 _stats.entries--;
95 debug("drop: start " LBAF ", count " LBAFU "\n",
96 node->start, node->blkcnt);
97 if (node->blkcnt * node->blksz < bytes) {
98 free(node->cache);
99 node->cache = 0;
100 }
101 } else {
102 node = malloc(sizeof(*node));
103 if (!node)
104 return;
105 node->cache = 0;
106 }
107
108 if (!node->cache) {
109 node->cache = malloc(bytes);
110 if (!node->cache) {
111 free(node);
112 return;
113 }
114 }
115
116 debug("fill: start " LBAF ", count " LBAFU "\n",
117 start, blkcnt);
118
119 node->iftype = iftype;
120 node->devnum = devnum;
121 node->start = start;
122 node->blkcnt = blkcnt;
123 node->blksz = blksz;
124 memcpy(node->cache, buffer, bytes);
125 list_add(&node->lh, &block_cache);
126 _stats.entries++;
127}
128
129void blkcache_invalidate(int iftype, int devnum)
130{
131 struct list_head *entry, *n;
132 struct block_cache_node *node;
133
134 list_for_each_safe(entry, n, &block_cache) {
135 node = (struct block_cache_node *)entry;
Simon Glass76c62692022-10-29 19:47:08 -0600136 if (iftype == -1 ||
137 (node->iftype == iftype && node->devnum == devnum)) {
Eric Nelsonfaf4f052016-03-28 10:05:44 -0700138 list_del(entry);
139 free(node->cache);
140 free(node);
141 --_stats.entries;
142 }
143 }
144}
145
146void blkcache_configure(unsigned blocks, unsigned entries)
147{
Simon Glass76c62692022-10-29 19:47:08 -0600148 /* invalidate cache if there is a change */
Eric Nelsonfaf4f052016-03-28 10:05:44 -0700149 if ((blocks != _stats.max_blocks_per_entry) ||
Simon Glass76c62692022-10-29 19:47:08 -0600150 (entries != _stats.max_entries))
151 blkcache_invalidate(-1, 0);
Eric Nelsonfaf4f052016-03-28 10:05:44 -0700152
153 _stats.max_blocks_per_entry = blocks;
154 _stats.max_entries = entries;
155
156 _stats.hits = 0;
157 _stats.misses = 0;
158}
159
160void blkcache_stats(struct block_cache_stats *stats)
161{
162 memcpy(stats, &_stats, sizeof(*stats));
163 _stats.hits = 0;
164 _stats.misses = 0;
165}
Simon Glass76c62692022-10-29 19:47:08 -0600166
/**
 * blkcache_free() - release every cache entry and its buffer
 *
 * Equivalent to invalidating all devices on all interface types.
 */
void blkcache_free(void)
{
	blkcache_invalidate(-1, 0);
}