drivers: block: add block device cache
Add a block device cache to speed up repeated reads of block devices by
various filesystems.
This small amount of cache can dramatically speed up filesystem
operations by skipping repeated reads of common areas of a block
device (typically directory structures).
This has been shown to provide some benefit for FAT filesystem operations
when loading a kernel and RAM disk, and more dramatic benefits on ext4
filesystems when the kernel and/or RAM disk are spread across multiple
extent header structures, as described in commit fc0fc50.
The cache is implemented as a minimal list (block_cache) maintained in
most-recently-used order, together with a count of the current number of
entries (_stats.entries). A per-entry maximum block count prevents copies
of large block reads, and a maximum entry count places an upper bound on
the number of cached areas.
The maximum number of entries in the cache defaults to 32 and the maximum
number of blocks per cache entry defaults to 2, a combination which has
been shown to produce the best results in testing on ext4 and FAT
filesystems.
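For reference, this bookkeeping is carried in a small configuration and
statistics structure. Its declaration lives in a shared header that is not
part of this excerpt; based on the fields used by blkcache.c below, it is
presumably along these lines:

struct block_cache_stats {
	unsigned hits;
	unsigned misses;
	unsigned entries;		/* current number of cached areas */
	unsigned max_blocks_per_entry;	/* default 2: larger reads are not cached */
	unsigned max_entries;		/* default 32: bound on cached areas */
};

With those defaults and 512-byte blocks, the cached payload is bounded by
32 entries x 2 blocks x 512 bytes = 32 KiB, plus the per-entry bookkeeping.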
The 'blkcache' command (enabled through CONFIG_CMD_BLOCK_CACHE) allows
changing these values and can be used to tune for a particular filesystem
layout.
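The command implementation is not included in this patch; assuming it is a
thin wrapper around blkcache_stats() and blkcache_configure() below, its use
at the U-Boot prompt would look roughly like this (subcommand names and
arguments are illustrative):

  => blkcache show
     show and reset the hit/miss counters (via blkcache_stats())
  => blkcache configure 8 16
     allow up to 8 blocks per entry and at most 16 entries (via blkcache_configure())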
Signed-off-by: Eric Nelson <eric@nelint.com>
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index f35c4d4..fcc9ccd 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -18,3 +18,12 @@
types can use this, such as AHCI/SATA. It does not provide any standard
operations at present. The block device interface has not been converted
to driver model.
+
+config BLOCK_CACHE
+ bool "Use block device cache"
+ default n
+ help
+ This option enables a disk-block cache for all block devices.
+ This is most useful when accessing filesystems under U-Boot since
+ it will prevent repeated reads from directory structures and other
+ filesystem data structures.
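(Boards opt in by setting CONFIG_BLOCK_CACHE=y in their defconfig or through
menuconfig; the option defaults to n.)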
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index b5c7ae1..b4cbb09 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -24,3 +24,4 @@
obj-$(CONFIG_SANDBOX) += sandbox.o
obj-$(CONFIG_SCSI_SYM53C8XX) += sym53c8xx.o
obj-$(CONFIG_SYSTEMACE) += systemace.o
+obj-$(CONFIG_BLOCK_CACHE) += blkcache.o
diff --git a/drivers/block/blk-uclass.c b/drivers/block/blk-uclass.c
index 49df2a6..617db22 100644
--- a/drivers/block/blk-uclass.c
+++ b/drivers/block/blk-uclass.c
@@ -80,11 +80,20 @@
{
struct udevice *dev = block_dev->bdev;
const struct blk_ops *ops = blk_get_ops(dev);
+ ulong blks_read;
if (!ops->read)
return -ENOSYS;
- return ops->read(dev, start, blkcnt, buffer);
+ if (blkcache_read(block_dev->if_type, block_dev->devnum,
+ start, blkcnt, block_dev->blksz, buffer))
+ return blkcnt;
+ blks_read = ops->read(dev, start, blkcnt, buffer);
+ if (blks_read == blkcnt)
+ blkcache_fill(block_dev->if_type, block_dev->devnum,
+ start, blkcnt, block_dev->blksz, buffer);
+
+ return blks_read;
}
unsigned long blk_dwrite(struct blk_desc *block_dev, lbaint_t start,
@@ -96,6 +105,7 @@
if (!ops->write)
return -ENOSYS;
+ blkcache_invalidate(block_dev->if_type, block_dev->devnum);
return ops->write(dev, start, blkcnt, buffer);
}
@@ -108,6 +118,7 @@
if (!ops->erase)
return -ENOSYS;
+ blkcache_invalidate(block_dev->if_type, block_dev->devnum);
return ops->erase(dev, start, blkcnt);
}
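To make the effect of these hooks concrete, here is a caller-side sketch
(the function name and block numbers are invented for illustration): the
first blk_dread() of a small range misses and is cached via blkcache_fill(),
the identical second read is satisfied by blkcache_read() without touching
the driver, and a subsequent blk_dwrite() on the same device invalidates the
cached ranges.

/* Illustrative sketch only: exercising the cache through the generic block API */
static int reread_example(struct blk_desc *desc, void *buf)
{
	/* first read: cache miss, forwarded to ops->read() and then cached */
	if (blk_dread(desc, 32, 2, buf) != 2)
		return -EIO;

	/* same range again: copied out of the cache, no driver access */
	if (blk_dread(desc, 32, 2, buf) != 2)
		return -EIO;

	/* any write to this device drops all of its cached ranges */
	if (blk_dwrite(desc, 32, 2, buf) != 2)
		return -EIO;

	return 0;
}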
diff --git a/drivers/block/blkcache.c b/drivers/block/blkcache.c
new file mode 100644
index 0000000..46a6059
--- /dev/null
+++ b/drivers/block/blkcache.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) Nelson Integration, LLC 2016
+ * Author: Eric Nelson <eric@nelint.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ *
+ */
+#include <config.h>
+#include <common.h>
+#include <malloc.h>
+#include <part.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+
+struct block_cache_node {
+ struct list_head lh;
+ int iftype;
+ int devnum;
+ lbaint_t start;
+ lbaint_t blkcnt;
+ unsigned long blksz;
+ char *cache;
+};
+
+static LIST_HEAD(block_cache);
+
+static struct block_cache_stats _stats = {
+ .max_blocks_per_entry = 2,
+ .max_entries = 32
+};
+
+static struct block_cache_node *cache_find(int iftype, int devnum,
+ lbaint_t start, lbaint_t blkcnt,
+ unsigned long blksz)
+{
+ struct block_cache_node *node;
+
+ list_for_each_entry(node, &block_cache, lh)
+ if ((node->iftype == iftype) &&
+ (node->devnum == devnum) &&
+ (node->blksz == blksz) &&
+ (node->start <= start) &&
+ (node->start + node->blkcnt >= start + blkcnt)) {
+ if (block_cache.next != &node->lh) {
+ /* maintain MRU ordering */
+ list_del(&node->lh);
+ list_add(&node->lh, &block_cache);
+ }
+ return node;
+ }
+ return NULL;
+}
+
+int blkcache_read(int iftype, int devnum,
+ lbaint_t start, lbaint_t blkcnt,
+ unsigned long blksz, void *buffer)
+{
+ struct block_cache_node *node = cache_find(iftype, devnum, start,
+ blkcnt, blksz);
+ if (node) {
+ const char *src = node->cache + (start - node->start) * blksz;
+ memcpy(buffer, src, blksz * blkcnt);
+ debug("hit: start " LBAF ", count " LBAFU "\n",
+ start, blkcnt);
+ ++_stats.hits;
+ return 1;
+ }
+
+ debug("miss: start " LBAF ", count " LBAFU "\n",
+ start, blkcnt);
+ ++_stats.misses;
+ return 0;
+}
+
+void blkcache_fill(int iftype, int devnum,
+ lbaint_t start, lbaint_t blkcnt,
+ unsigned long blksz, void const *buffer)
+{
+ lbaint_t bytes;
+ struct block_cache_node *node;
+
+ /* don't cache big stuff */
+ if (blkcnt > _stats.max_blocks_per_entry)
+ return;
+
+ if (_stats.max_entries == 0)
+ return;
+
+ bytes = blksz * blkcnt;
+ if (_stats.max_entries <= _stats.entries) {
+ /* pop LRU */
+ node = list_entry(block_cache.prev, struct block_cache_node, lh);
+ list_del(&node->lh);
+ _stats.entries--;
+ debug("drop: start " LBAF ", count " LBAFU "\n",
+ node->start, node->blkcnt);
+ if (node->blkcnt * node->blksz < bytes) {
+ free(node->cache);
+ node->cache = 0;
+ }
+ } else {
+ node = malloc(sizeof(*node));
+ if (!node)
+ return;
+ node->cache = 0;
+ }
+
+ if (!node->cache) {
+ node->cache = malloc(bytes);
+ if (!node->cache) {
+ free(node);
+ return;
+ }
+ }
+
+ debug("fill: start " LBAF ", count " LBAFU "\n",
+ start, blkcnt);
+
+ node->iftype = iftype;
+ node->devnum = devnum;
+ node->start = start;
+ node->blkcnt = blkcnt;
+ node->blksz = blksz;
+ memcpy(node->cache, buffer, bytes);
+ list_add(&node->lh, &block_cache);
+ _stats.entries++;
+}
+
+void blkcache_invalidate(int iftype, int devnum)
+{
+ struct list_head *entry, *n;
+ struct block_cache_node *node;
+
+ list_for_each_safe(entry, n, &block_cache) {
+ node = list_entry(entry, struct block_cache_node, lh);
+ if ((node->iftype == iftype) &&
+ (node->devnum == devnum)) {
+ list_del(entry);
+ free(node->cache);
+ free(node);
+ --_stats.entries;
+ }
+ }
+}
+
+void blkcache_configure(unsigned blocks, unsigned entries)
+{
+ struct block_cache_node *node;
+ if ((blocks != _stats.max_blocks_per_entry) ||
+ (entries != _stats.max_entries)) {
+ /* invalidate cache */
+ while (!list_empty(&block_cache)) {
+ node = list_entry(block_cache.next, struct block_cache_node, lh);
+ list_del(&node->lh);
+ free(node->cache);
+ free(node);
+ }
+ _stats.entries = 0;
+ }
+
+ _stats.max_blocks_per_entry = blocks;
+ _stats.max_entries = entries;
+
+ _stats.hits = 0;
+ _stats.misses = 0;
+}
+
+void blkcache_stats(struct block_cache_stats *stats)
+{
+ memcpy(stats, &_stats, sizeof(*stats));
+ _stats.hits = 0;
+ _stats.misses = 0;
+}
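Not shown in this excerpt is the accompanying header change that declares the
interface used by blk-uclass.c above. Based on the definitions in blkcache.c,
the declarations are presumably along these lines, with no-op stubs when
CONFIG_BLOCK_CACHE is disabled so the generic block code still links:

#ifdef CONFIG_BLOCK_CACHE
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer);
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer);
void blkcache_invalidate(int iftype, int devnum);
void blkcache_configure(unsigned blocks, unsigned entries);
void blkcache_stats(struct block_cache_stats *stats);
#else
/* with the cache disabled, reads always miss and fills/invalidates are no-ops */
static inline int blkcache_read(int iftype, int devnum,
				lbaint_t start, lbaint_t blkcnt,
				unsigned long blksz, void *buffer)
{
	return 0;
}
static inline void blkcache_fill(int iftype, int devnum,
				 lbaint_t start, lbaint_t blkcnt,
				 unsigned long blksz, void const *buffer) {}
static inline void blkcache_invalidate(int iftype, int devnum) {}
#endif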