// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 *
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>
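/*
 * One cache entry: a contiguous run of @blkcnt blocks of @blksz bytes,
 * starting at LBA @start on device (@iftype, @devnum). Entries are kept
 * on a list in MRU order, so the tail is always the LRU entry. @lh must
 * stay the first member so list pointers can be cast back to the node.
 */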
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};
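/*
 * The cache list is statically initialized, except on M68K where it is
 * set up at runtime by blkcache_init() below.
 */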
#ifndef CONFIG_M68K
static LIST_HEAD(block_cache);
#else
static struct list_head block_cache;
#endif
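/* Cache limits and hit/miss counters, tunable via blkcache_configure() */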
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};

#ifdef CONFIG_M68K
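/*
 * blkcache_init() - runtime list initialization for M68K, which does
 * not use the static LIST_HEAD initializer above.
 */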
int blkcache_init(void)
{
	INIT_LIST_HEAD(&block_cache);

	return 0;
}
#endif
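/*
 * cache_find() - find an entry that fully contains the requested block
 * range on the given device, promoting it to the list head (MRU) on a
 * hit. Returns NULL when no entry covers the range.
 */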
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}
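/*
 * blkcache_read() - copy cached data into @buffer if the requested
 * range is present. Returns 1 on a cache hit, 0 on a miss; the hit and
 * miss counters are updated either way.
 */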
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
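/*
 * blkcache_fill() - insert data just read from a device into the cache.
 * Ranges larger than max_blocks_per_entry are not cached. When the
 * cache is full, the LRU entry at the list tail is evicted and its
 * buffer reused if it is large enough for the new data.
 */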
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU: the cast is safe since lh is the first member */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* reuse the evicted entry's buffer unless it is too small */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
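/*
 * blkcache_invalidate() - drop all cache entries belonging to one
 * device, e.g. after a write has made the cached data stale.
 */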
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
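/*
 * blkcache_configure() - set new cache limits. Changing either limit
 * empties the cache; the hit/miss counters are reset in any case.
 */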
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = (struct block_cache_node *)block_cache.next;
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
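/*
 * blkcache_stats() - copy the current statistics to @stats and reset
 * the hit/miss counters.
 */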
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}
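
/*
 * Typical use from a block driver's read path (an illustrative sketch,
 * not code from this file; raw_read() stands in for a hypothetical
 * driver-specific transfer routine): try the cache first, fall back to
 * the device, then populate the cache on success:
 *
 *	if (blkcache_read(iftype, devnum, start, blkcnt, blksz, buffer))
 *		return blkcnt;
 *	n = raw_read(dev, start, blkcnt, buffer);
 *	if (n == blkcnt)
 *		blkcache_fill(iftype, devnum, start, blkcnt, blksz, buffer);
 *	return n;
 */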