xv6-lab/kernel/bio.c

// Buffer cache.
//
// The buffer cache is a linked list of buf structures holding
// cached copies of disk block contents. Caching disk blocks
// in memory reduces the number of disk reads and also provides
// a synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
// so do not keep them longer than necessary.
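//
// A minimal usage sketch (illustrative only; dev, blockno and src are
// hypothetical values supplied by a caller such as the fs layer):
//
//   struct buf *b = bread(dev, blockno);  // fetch and lock the block
//   memmove(b->data, src, BSIZE);         // modify the cached copy
//   bwrite(b);                            // write the change to disk
//   brelse(b);                            // release the buffer
//
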
#include "types.h"
#include "param.h"
#include "spinlock.h"
#include "sleeplock.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"
#include "buf.h"

struct {
  struct spinlock lock;
  struct buf buf[NBUF];

  // Linked list of all buffers, through prev/next.
  // Sorted by how recently the buffer was used.
  // head.next is most recent, head.prev is least.
  struct buf head;
} bcache;
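
// With the hash-bucket scheme below, block lookup and eviction go through
// hash_entry[]/hash_tbls[] rather than this prev/next list; binit() still
// links the list, but nothing traverses it any more.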
#define NLOCK_BUCKETS 13
// #define ORIGINAL
struct spinlock hash_locks[NLOCK_BUCKETS];
static char lock_names[NLOCK_BUCKETS][20];

struct hash_tbl {
  struct buf *buf;
  struct hash_tbl *next;
} hash_tbl;

struct hash_tbl *hash_entry[NLOCK_BUCKETS];
struct hash_tbl hash_tbls[NBUF];
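// Each cached block hashes to bucket blockno % NLOCK_BUCKETS; hash_entry[i]
// heads bucket i's singly linked chain, hash_tbls[j] is the chain node
// permanently paired with bcache.buf[j], and hash_locks[i] guards bucket i.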

void
binit(void)
{
  struct buf *b;

  initlock(&bcache.lock, "bcache");

  for (int i = 0; i < NLOCK_BUCKETS; ++i) {
    snprintf(lock_names[i], 20, "bcache.lk%d", i);
    initlock(&hash_locks[i], lock_names[i]);
    hash_entry[i] = 0;
  }
  for (int i = 0; i < NBUF; ++i) {
    hash_tbls[i].buf = &bcache.buf[i];
    hash_tbls[i].next = 0;
  }

  // Create linked list of buffers
  bcache.head.prev = &bcache.head;
  bcache.head.next = &bcache.head;
  for(b = bcache.buf; b < bcache.buf+NBUF; b++){
    b->next = bcache.head.next;
    b->prev = &bcache.head;
    initsleeplock(&b->lock, "buffer");
    bcache.head.next->prev = b;
    bcache.head.next = b;
  }
}

// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
// In either case, return locked buffer.
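//
// Locking (bucket scheme): first search bucket blockno % NLOCK_BUCKETS under
// hash_locks[bucket] alone; on a miss, drop that lock, take bcache.lock to
// serialize eviction, and re-take each candidate's bucket lock before testing
// its refcnt, so refcnt is never touched without its bucket lock held.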
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b = 0;

  // acquire(&bcache.lock);
#ifndef ORIGINAL
  int bucket = blockno % NLOCK_BUCKETS;

  // Fast path: is the block already cached in its bucket?
  acquire(&hash_locks[bucket]);
  struct hash_tbl *h = hash_entry[bucket];
  struct hash_tbl *ph = 0;
  while (h != 0) {
    if (h->buf->dev == dev && h->buf->blockno == blockno) {
      b = h->buf;
      b->refcnt++;
      release(&hash_locks[bucket]);
      // release(&bcache.lock);
      acquiresleep(&b->lock);
      return b;
    }
    h = h->next;
  }
  // Release the missed bucket before taking bcache.lock; the lock order
  // "bucket lock, then bcache.lock" would deadlock against the eviction
  // path below, which takes bcache.lock and then bucket locks.
  release(&hash_locks[bucket]);

  // Not cached.  Once bcache.lock is held, eviction elsewhere is excluded,
  // but b->refcnt can still change under the bucket locks, so acquire
  // hash_locks[victim_bucket] before testing refcnt.
  acquire(&bcache.lock);
  for(int i = 0; i < NBUF; ++i){
    b = &bcache.buf[i];
    int victim_bucket = b->blockno % NLOCK_BUCKETS;
    acquire(&hash_locks[victim_bucket]);
    if(b->refcnt == 0) {
      b->dev = dev;
      b->blockno = blockno;
      b->valid = 0;
      b->refcnt = 1;
      // Unlink b's own hash entry from its old bucket, if present (the
      // comparison matches it because b's fields were just overwritten).
      h = hash_entry[victim_bucket];
      ph = 0;
      while (h != 0) {
        if (h->buf->dev == b->dev && h->buf->blockno == b->blockno) {
          if (ph) ph->next = h->next;
          else hash_entry[victim_bucket] = h->next;
          break;
        }
        ph = h;
        h = h->next;
      }
      release(&hash_locks[victim_bucket]);
      release(&bcache.lock);
      // Releasing bcache.lock before inserting into the new bucket is safe:
      // b is already claimed (refcnt == 1, fields set), so the hash chain,
      // guarded by hash_locks[bucket], is the only shared state left to touch.
      h = &hash_tbls[i];
      acquire(&hash_locks[bucket]);
      h->next = hash_entry[bucket];
      hash_entry[bucket] = h;
      release(&hash_locks[bucket]);
      acquiresleep(&b->lock);
      return b;
    }
    release(&hash_locks[victim_bucket]);
  }
#else
  // Is the block already cached?
  // for(b = bcache.head.next; b != &bcache.head; b = b->next){
  for (b = &bcache.buf[0]; b < &bcache.buf[NBUF]; ++b) {
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache.lock);
      acquiresleep(&b->lock);
      return b;
    }
  }

  // Not cached.
  // Recycle the least recently used (LRU) unused buffer.
  // for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
  for(b = &bcache.buf[0]; b < &bcache.buf[NBUF]; ++b){
    if(b->refcnt == 0) {
      b->dev = dev;
      b->blockno = blockno;
      b->valid = 0;
      b->refcnt = 1;
      release(&bcache.lock);
      acquiresleep(&b->lock);
      return b;
    }
  }
#endif
  panic("bget: no buffers");
}

// Return a locked buf with the contents of the indicated block.
struct buf*
bread(uint dev, uint blockno)
{
  struct buf *b;

  b = bget(dev, blockno);
  if(!b->valid) {
    virtio_disk_rw(b, 0);
    b->valid = 1;
  }
  return b;
}

// Write b's contents to disk. Must be locked.
void
bwrite(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("bwrite");
  virtio_disk_rw(b, 1);
}

// Release a locked buffer and drop one reference.
// (The original move to the head of the MRU list is no longer done; the
// commented-out code below is kept for reference.)
void
brelse(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("brelse");

  releasesleep(&b->lock);

  // Taking bcache.lock here makes the lab's test0 fail outright.  The bucket
  // lock is enough because b->refcnt is only read or written while
  // hash_locks[b->blockno % NLOCK_BUCKETS] is held, and b->blockno cannot
  // change under us: it is only rewritten when refcnt == 0, and our
  // reference has not been dropped yet, so refcnt is still non-zero.
  int bucket = b->blockno % NLOCK_BUCKETS;
  acquire(&hash_locks[bucket]);
  b->refcnt--;
  // if (b->refcnt == 0) {
  //   // no one is waiting for it.
  //   b->next->prev = b->prev;
  //   b->prev->next = b->next;
  //   b->next = bcache.head.next;
  //   b->prev = &bcache.head;
  //   bcache.head.next->prev = b;
  //   bcache.head.next = b;
  // }
  release(&hash_locks[bucket]);
}

// Pin a buffer: bump refcnt so it cannot be recycled.
void
bpin(struct buf *b) {
  // Use the bucket lock, not bcache.lock, so refcnt is always protected by
  // the same lock as in bget()/brelse().  b->blockno is stable here because
  // the caller already holds a reference (refcnt > 0).
  int bucket = b->blockno % NLOCK_BUCKETS;
  acquire(&hash_locks[bucket]);
  b->refcnt++;
  release(&hash_locks[bucket]);
}

void
bunpin(struct buf *b) {
  int bucket = b->blockno % NLOCK_BUCKETS;
  acquire(&hash_locks[bucket]);
  b->refcnt--;
  release(&hash_locks[bucket]);
}