# HG changeset patch
# User Josef 'Jeff' Sipek
# Date 1706806648 18000
# Node ID e74729772b7511853098da1c8bcf2f8b12cc0314
# Parent  b3e0798761b7341be80420f958b9f68ffd28eeee
objstore: add a per-page lock

Aside from just adding the lock to the structure, this commit also adds
the appropriate page_{lock,unlock} calls to the cache code and replaces
an open-coded free with a call to free_page.

Signed-off-by: Josef 'Jeff' Sipek

diff -r b3e0798761b7 -r e74729772b75 src/objstore/cache.c
--- a/src/objstore/cache.c	Thu Feb 01 11:54:09 2024 -0500
+++ b/src/objstore/cache.c	Thu Feb 01 11:57:28 2024 -0500
@@ -25,7 +25,21 @@
 
 #include "objstore_impl.h"
 
+/*
+ * In general, the lock ordering is:
+ *
+ * object ver read/write:
+ *   obj lock -> page_cache_lock (allocate a new page)
+ *   obj lock -> page_cache_lock (free new page on error)
+ *   obj lock -> page lock (init newly allocated page)
+ *   obj lock -> page lock (lock existing page)
+ *
+ * objver inval range:
+ *   obj lock -> page lock
+ *   obj lock -> page_cache_lock (free the page)
+ */
 static LOCK_CLASS(page_cache_lc);
+static LOCK_CLASS(page_lc);
 
 /* constant for the lifetime of the process, no locking necessary */
 static size_t npages;
@@ -53,8 +67,10 @@
 	list_create(&unallocated_pages, sizeof(struct page),
 		    offsetof(struct page, pages));
 
-	for (i = 0; i < npages; i++)
+	for (i = 0; i < npages; i++) {
+		MXINIT(&pages[i].lock, &page_lc);
 		list_insert_tail(&unallocated_pages, &pages[i]);
+	}
 
 	return 0;
 }
@@ -67,6 +83,8 @@
 		ASSERT(!pages[i].inuse);
 
 		free(pages[i].ptr);
+
+		MXDESTROY(&pages[i].lock);
 	}
 
 	free(pages);
@@ -99,6 +117,7 @@
 	rb_destroy(&ver->pages);
 }
 
+/* returns an unlocked page */
 static struct page *get_free_page(void)
 {
 	struct page *page;
@@ -130,8 +149,12 @@
 	return page;
 }
 
+/* frees a locked page */
 static void free_page(struct page *page)
 {
+	page->inuse = false;
+	page_unlock(page);
+
 	MXLOCK(&page_cache_lock);
 
 	if (page->ptr)
@@ -164,6 +187,8 @@
 	if (IS_ERR(page))
 		return page;
 
+	page_lock(page);
+
 	page->objver = ver;
 	page->pgno = pgno;
 	VERIFY(page->ptr);
@@ -244,8 +269,16 @@
 	for (;;) {
 		struct page *next;
 
-		if (!page || (page->pgno > last_pgno))
+		if (!page)
+			break; /* no pages beyond this point */
+
+		page_lock(page);
+
+		if (page->pgno > last_pgno) {
+			/* page beyond inval range */
+			page_unlock(page);
 			break;
+		}
 
 		ASSERT(page->inuse);
 		ASSERT(!page->dirty); /* catch data loss */
@@ -255,9 +288,7 @@
 
 		rb_remove(&ver->pages, page);
 
-		page->inuse = false;
-
-		list_insert_head(&free_pages, page);
+		free_page(page);
 
 		page = next;
 	}
diff -r b3e0798761b7 -r e74729772b75 src/objstore/objstore_impl.h
--- a/src/objstore/objstore_impl.h	Thu Feb 01 11:54:09 2024 -0500
+++ b/src/objstore/objstore_impl.h	Thu Feb 01 11:57:28 2024 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020,2022 Josef 'Jeff' Sipek
+ * Copyright (c) 2015-2020,2022,2024 Josef 'Jeff' Sipek
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -46,9 +46,11 @@
 		struct rb_node node;	/* in-use: objver pages tree */
 		struct list pages;	/* free: internal page list */
 	};
+	uint8_t *ptr;		/* the data buffer */
+
+	struct lock lock;	/* protects everything below */
 	struct objver *objver;
 	uint64_t pgno;
-	uint8_t *ptr;
 	bool inuse:1;		/* used by an object version */
 	bool filled:1;		/* contains data */
 	bool dirty:1;		/* must be written back */
@@ -180,9 +182,8 @@
 extern struct page *page_lookup(struct objver *ver, uint64_t pgno, int flags);
 extern void page_inval_range(struct objver *ver, uint64_t pgno,
 			     size_t pgcnt);
 
-/* for now, we rely on the obj lock */
-#define page_lock(page)		do { } while (0)
-#define page_unlock(page)	do { } while (0)
+#define page_lock(page)		MXLOCK(&page->lock)
+#define page_unlock(page)	MXUNLOCK(&page->lock)
 
 REFCNT_INLINE_FXNS(struct obj, obj, refcnt, obj_free, NULL);
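
For readers following along, below is a minimal standalone sketch of the
locking pattern this patch introduces.  It is illustrative only, not the
project's code: pthread mutexes stand in for the MXINIT/MXDESTROY/
MXLOCK/MXUNLOCK wrappers and the LOCK_CLASS machinery, struct page is
pared down to the two fields the pattern needs, and the free-list
manipulation is elided.  The point to notice is that free_page()
consumes a locked page: it clears inuse and drops the page lock before
taking page_cache_lock, so the page lock never nests around the cache
lock and the ordering documented in cache.c is preserved.

/*
 * Sketch only -- not the project's code.  Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	pthread_mutex_t lock;	/* per-page lock (added by this patch) */
	bool inuse;		/* used by an object version */
};

static pthread_mutex_t page_cache_lock = PTHREAD_MUTEX_INITIALIZER;

#define page_lock(page)		pthread_mutex_lock(&(page)->lock)
#define page_unlock(page)	pthread_mutex_unlock(&(page)->lock)

/* frees a locked page; mirrors the new free_page() in cache.c */
static void free_page(struct page *page)
{
	page->inuse = false;
	page_unlock(page);	/* drop the page lock first... */

	pthread_mutex_lock(&page_cache_lock);	/* ...then take the cache lock */
	/* return the page to the free list here */
	pthread_mutex_unlock(&page_cache_lock);
}

int main(void)
{
	struct page pg = { .inuse = true };

	pthread_mutex_init(&pg.lock, NULL);	/* MXINIT() in the patch */

	page_lock(&pg);		/* caller locks the page (as in the inval path)... */
	free_page(&pg);		/* ...and free_page() consumes the lock */

	pthread_mutex_destroy(&pg.lock);	/* MXDESTROY() in the patch */
	printf("inuse after free: %d\n", pg.inuse);
	return 0;
}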