changeset 503:4e44881e27de

cp: implement mutex dependency tracking Inspired by the Linux kernel's lockdep code. Currently, ldep is completely oblivious when it comes to spinlocks. Additionally, it does not actually check for circular/recursive locking. Signed-off-by: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
author Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
date Fri, 22 Apr 2011 15:42:29 -0400
parents d11407b235c3
children bb72b65e387c
files cp/fs/edf.c cp/include/ldep.h cp/include/mutex.h cp/include/sched.h cp/nucleus/Makefile cp/nucleus/ldep.c cp/nucleus/mutex.c cp/nucleus/sched.c cp/shell/init.c
diffstat 9 files changed, 223 insertions(+), 16 deletions(-) [+]
line wrap: on
line diff
--- a/cp/fs/edf.c	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/fs/edf.c	Fri Apr 22 15:42:29 2011 -0400
@@ -13,6 +13,9 @@
 #include <ebcdic.h>
 #include <edf.h>
 
+static LOCK_CLASS(edf_fs);
+static LOCK_CLASS(edf_file);
+
 struct fs *edf_mount(struct device *dev, int nosched)
 {
 	struct page *page;
@@ -37,7 +40,7 @@
 	if (ret)
 		goto out_free;
 
-	mutex_init(&fs->lock);
+	mutex_init(&fs->lock, &edf_fs);
 	INIT_LIST_HEAD(&fs->files);
 	fs->dev = dev;
 	fs->tmp_buf = tmp;
@@ -125,7 +128,7 @@
 		goto out_unlock;
 	}
 
-	mutex_init(&file->lock);
+	mutex_init(&file->lock, &edf_file);
 	list_add_tail(&file->files, &fs->files);
 
 	mutex_unlock(&fs->lock);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cp/include/ldep.h	Fri Apr 22 15:42:29 2011 -0400
@@ -0,0 +1,40 @@
+/*
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ *
+ * This file is released under the GPLv2.  See the COPYING file for more
+ * details.
+ */
+
+#ifndef __LDEP_H
+#define __LDEP_H
+
+#define LDEP_STACK_SIZE 10	/* max locks tracked per task */
+
+struct held_lock {
+	void *ra;		/* return address */
+	void *lock;		/* the lock */
+	char *lockname;		/* name of this lock */
+};
+
+struct lock_class {
+	char *name;		/* class name (set by LOCK_CLASS) */
+	void *ra;		/* reserved; not yet set by ldep.c */
+	int ndeps;		/* reserved for dependency checking */
+	struct lock_class **deps;	/* reserved for dependency checking */
+};
+
+#define LOCK_CLASS(cname)	\
+		struct lock_class cname = { \
+			.name = #cname, \
+			.ra = NULL, \
+			.ndeps = 0, \
+			.deps = NULL, \
+		}
+
+extern void ldep_on(void);
+
+extern void ldep_lock(void *lock, struct lock_class *c, char *lockname);
+extern void ldep_unlock(void *lock, char *lockname);
+extern void ldep_no_locks(void);
+
+#endif
--- a/cp/include/mutex.h	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/include/mutex.h	Fri Apr 22 15:42:29 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * (C) Copyright 2007-2010  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  *
  * This file is released under the GPLv2.  See the COPYING file for more
  * details.
@@ -13,29 +13,34 @@
 #include <spinlock.h>
 #include <sched.h>
 #include <interrupt.h>
+#include <ldep.h>
 
 typedef struct {
 	atomic_t state;
 	spinlock_t queue_lock;
 	struct list_head queue;
+	struct lock_class *lclass;
 } mutex_t;
 
-#define UNLOCKED_MUTEX(name)	mutex_t name = { \
+#define UNLOCKED_MUTEX(name, lc)	\
+		mutex_t name = { \
 			.state = ATOMIC_INIT(1), \
 			.queue = LIST_HEAD_INIT(name.queue), \
 			.queue_lock = SPIN_LOCK_UNLOCKED, \
+			.lclass = (lc), \
 		}
 
-static inline void mutex_init(mutex_t *lock)
+static inline void mutex_init(mutex_t *lock, struct lock_class *lc)
 {
 	atomic_set(&lock->state, 1);
 	INIT_LIST_HEAD(&lock->queue);
 	lock->queue_lock = SPIN_LOCK_UNLOCKED;
+	lock->lclass = lc;
 }
 
-extern void __mutex_lock(mutex_t *lock);
+extern void __mutex_lock_slow(mutex_t *lock);
 
-static inline void mutex_lock(mutex_t *lock)
+static inline void __mutex_lock(mutex_t *lock, char *lname)
 {
 	/*
 	 * if we are not interruptable, we shouldn't call any functions that
@@ -43,14 +48,20 @@
 	 */
 	BUG_ON(!interruptable());
 
+	ldep_lock(lock, lock->lclass, lname);
+
 	if (unlikely(atomic_add_unless(&lock->state, -1, 0) == 0))
-		__mutex_lock(lock); /* the slow-path */
+		__mutex_lock_slow(lock); /* the slow-path */
 }
 
-static inline void mutex_unlock(mutex_t *lock)
+#define mutex_lock(l)	__mutex_lock((l), #l)
+
+static inline void __mutex_unlock(mutex_t *lock, char *lname)
 {
 	struct task *task;
 
+	ldep_unlock(lock, lname);
+
 	spin_lock(&lock->queue_lock);
 
 	if (likely(list_empty(&lock->queue))) {
@@ -71,4 +82,6 @@
 	make_runnable(task);
 }
 
+#define mutex_unlock(l)	__mutex_unlock((l), #l)
+
 #endif
--- a/cp/include/sched.h	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/include/sched.h	Fri Apr 22 15:42:29 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * (C) Copyright 2007-2010  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  *
  * This file is released under the GPLv2.  See the COPYING file for more
  * details.
@@ -13,6 +13,7 @@
 #include <dat.h>
 #include <clock.h>
 #include <interrupt.h>
+#include <ldep.h>
 
 #define CAN_SLEEP		1	/* safe to sleep */
 #define CAN_LOOP		2	/* safe to busy-wait */
@@ -115,6 +116,10 @@
 	int state;			/* state */
 
 	char name[TASK_NAME_LEN+1];	/* task name */
+
+	/* lock dependency tracking */
+	int nr_locks;
+	struct held_lock lock_stack[LDEP_STACK_SIZE];
 };
 
 struct virt_sys {
--- a/cp/nucleus/Makefile	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/nucleus/Makefile	Fri Apr 22 15:42:29 2011 -0400
@@ -1,2 +1,2 @@
 objs-nucleus := init.o io.o printf.o int.o ext.o svc.o pgm.o spinlock.o \
-	mutex.o sched.o config.o
+	mutex.o sched.o config.o ldep.o
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cp/nucleus/ldep.c	Fri Apr 22 15:42:29 2011 -0400
@@ -0,0 +1,143 @@
+/*
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ *
+ * This file is released under the GPLv2.  See the COPYING file for more
+ * details.
+ */
+
+#include <sched.h>
+#include <list.h>
+#include <spinlock.h>
+#include <ldep.h>
+
+static spinlock_t __lock = SPIN_LOCK_UNLOCKED;
+static int ldep_enabled;
+
+void ldep_on()
+{
+	unsigned long flags;
+
+	spin_lock_intsave(&__lock, &flags);
+	ldep_enabled = 1;	/* globally enable lock tracking */
+	spin_unlock_intrestore(&__lock, flags);
+}
+
+static int __get_stack_slot(void)
+{
+	if (current->nr_locks == LDEP_STACK_SIZE) {
+		con_printf(NULL, "task '%s' exceeded the number of tracked "
+			   "locks (%d)! disabling ldep!\n", current->name,
+			   LDEP_STACK_SIZE);
+		ldep_enabled = 0;	/* actually disable, as the message says */
+		return 1;
+	}
+
+	current->nr_locks++;	/* claim slot lock_stack[nr_locks-1] */
+	return 0;
+}
+
+static void print_held_locks()
+{
+	struct held_lock *h;
+	int idx;
+
+	con_printf(NULL, "\nlocks currently held:\n");
+	for (idx = 0; idx < current->nr_locks; idx++) {
+		h = &current->lock_stack[idx];
+		con_printf(NULL, " #%d:   (%s), at %p\n", idx,
+			   h->lockname, h->ra);
+	}
+}
+
+void ldep_lock(void *lock, struct lock_class *c, char *lockname)
+{
+	void *ra = __builtin_return_address(0);
+	struct held_lock *cur;
+	unsigned long mask;
+
+	/* all ldep state is protected by __lock */
+	spin_lock_intsave(&__lock, &mask);
+
+	if (!ldep_enabled)
+		goto out;
+
+	/* record the lock being acquired on this task's held-lock stack */
+	if (__get_stack_slot())
+		goto out;
+
+	cur = &current->lock_stack[current->nr_locks-1];
+	cur->ra = ra;
+	cur->lock = lock;
+	cur->lockname = lockname;
+
+out:
+	/* done touching ldep state */
+	spin_unlock_intrestore(&__lock, mask);
+}
+
+void ldep_unlock(void *lock, char *lockname)
+{
+	void *ra = __builtin_return_address(0);
+	struct held_lock *cur;
+	unsigned long mask;
+	int i;
+
+	/* all ldep state is protected by __lock */
+	spin_lock_intsave(&__lock, &mask);
+
+	if (!ldep_enabled)
+		goto out;
+
+	for(i=0; i<current->nr_locks; i++) {
+		cur = &current->lock_stack[i];
+		if (cur->lock == lock)
+			goto found;
+	}
+
+	con_printf(NULL, "task '%s' is trying to release lock it doesn't have:\n",
+		   current->name);
+	con_printf(NULL, " (%s), at %p\n", lockname, ra);
+	print_held_locks();
+
+	ldep_enabled = 0;
+
+	goto out;
+
+found:
+	if (i != current->nr_locks-1)
+		memmove(&current->lock_stack[i],
+			&current->lock_stack[i+1],
+			(current->nr_locks-i-1) * sizeof(struct held_lock));
+
+	current->nr_locks--;
+
+out:
+	/* done touching ldep state */
+	spin_unlock_intrestore(&__lock, mask);
+}
+
+void ldep_no_locks()
+{
+	void *ra = __builtin_return_address(0);
+	unsigned long mask;
+
+	/* all ldep state is protected by __lock */
+	spin_lock_intsave(&__lock, &mask);
+
+	if (!ldep_enabled)
+		goto out;
+
+	if (!current->nr_locks)
+		goto out;
+
+	con_printf(NULL, "task '%s' is holding a lock when it shouldn't have:\n",
+		   current->name);
+	con_printf(NULL, " at %p\n", ra);
+	print_held_locks();
+
+	ldep_enabled = 0;
+
+out:
+	/* done touching ldep state */
+	spin_unlock_intrestore(&__lock, mask);
+}
--- a/cp/nucleus/mutex.c	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/nucleus/mutex.c	Fri Apr 22 15:42:29 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * (C) Copyright 2007-2010  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  *
  * This file is released under the GPLv2.  See the COPYING file for more
  * details.
@@ -8,7 +8,7 @@
 #include <mutex.h>
 
 /* slow-path mutex locking */
-void __mutex_lock(mutex_t *lock)
+void __mutex_lock_slow(mutex_t *lock)
 {
 	spin_lock(&lock->queue_lock);
 
--- a/cp/nucleus/sched.c	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/nucleus/sched.c	Fri Apr 22 15:42:29 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * (C) Copyright 2007-2010  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  *
  * This file is released under the GPLv2.  See the COPYING file for more
  * details.
@@ -67,6 +67,8 @@
 	task->regs.gpr[15] = ((u64) stack) + PAGE_SIZE - STACK_FRAME_SIZE;
 
 	task->state = TASK_SLEEPING;
+
+	task->nr_locks = 0;
 }
 
 /**
--- a/cp/shell/init.c	Fri Apr 22 13:19:40 2011 -0400
+++ b/cp/shell/init.c	Fri Apr 22 15:42:29 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * (C) Copyright 2007-2010  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+ * (C) Copyright 2007-2011  Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  *
  * This file is released under the GPLv2.  See the COPYING file for more
  * details.
@@ -25,8 +25,9 @@
  */
 struct console *oper_con;
 
+static LOCK_CLASS(online_users_lc);
 static LIST_HEAD(online_users);
-static UNLOCKED_MUTEX(online_users_lock);
+static UNLOCKED_MUTEX(online_users_lock, &online_users_lc);
 
 static int __alloc_guest_devices(struct virt_sys *sys)
 {