[patch 4/4] increase traffic on linux-kernel

Andrew Morton (akpm@digeo.com)
Wed, 25 Sep 2002 21:09:08 -0700


[This has four scalps already. Thomas Molina has agreed
to track things as they are identified.]

Infrastructure to detect sleep-inside-spinlock bugs. It is really only
useful if compiled with CONFIG_PREEMPT=y. It prints out a whiny
message and a stack backtrace if someone calls a function which might
sleep from within an atomic region.
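
As a rough illustration (not part of this patch; my_lock and buggy() are
made-up names), this is the kind of caller the check is meant to catch
when CONFIG_PREEMPT=y and CONFIG_DEBUG_KERNEL=y:

	#include <linux/spinlock.h>
	#include <linux/slab.h>

	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical lock */

	static void buggy(void)
	{
		char *buf;

		spin_lock(&my_lock);			/* atomic region under CONFIG_PREEMPT */
		buf = kmalloc(128, GFP_KERNEL);		/* __GFP_WAIT allocation may sleep */
		kfree(buf);
		spin_unlock(&my_lock);
	}

Here the kmalloc() call would produce the "Sleeping function called from
illegal context" message plus a backtrace, ratelimited to once a second.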

This patch generates a storm of output at boot, due to
drivers/ide/ide-probe.c:init_irq() calling lots of things which it
shouldn't under ide_lock.

It'll find other bugs too.

 include/asm-i386/semaphore.h |    4 ++--
 include/linux/kernel.h       |    7 +++++++
 include/linux/rwsem.h        |    2 ++
 kernel/ksyms.c               |    4 +++-
 kernel/sched.c               |   17 +++++++++++++++++
 mm/page_alloc.c              |    3 +++
 mm/slab.c                    |    3 +++
 7 files changed, 37 insertions(+), 3 deletions(-)

--- 2.5.38/include/asm-i386/semaphore.h~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/include/asm-i386/semaphore.h Wed Sep 25 20:15:27 2002
@@ -116,7 +116,7 @@ static inline void down(struct semaphore
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"# atomic down operation\n\t"
LOCK "decl %0\n\t" /* --sem->count */
@@ -142,7 +142,7 @@ static inline int down_interruptible(str
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
LOCK "decl %1\n\t" /* --sem->count */
--- 2.5.38/include/linux/kernel.h~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/include/linux/kernel.h Wed Sep 25 20:15:27 2002
@@ -40,6 +40,13 @@

struct completion;

+#ifdef CONFIG_DEBUG_KERNEL
+void __might_sleep(char *file, int line);
+#define might_sleep() __might_sleep(__FILE__, __LINE__)
+#else
+#define might_sleep() do {} while(0)
+#endif
+
extern struct notifier_block *panic_notifier_list;
NORET_TYPE void panic(const char * fmt, ...)
__attribute__ ((NORET_AND format (printf, 1, 2)));
--- 2.5.38/include/linux/rwsem.h~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/include/linux/rwsem.h Wed Sep 25 20:15:27 2002
@@ -40,6 +40,7 @@ extern void FASTCALL(rwsemtrace(struct r
*/
static inline void down_read(struct rw_semaphore *sem)
{
+ might_sleep();
rwsemtrace(sem,"Entering down_read");
__down_read(sem);
rwsemtrace(sem,"Leaving down_read");
@@ -62,6 +63,7 @@ static inline int down_read_trylock(stru
*/
static inline void down_write(struct rw_semaphore *sem)
{
+ might_sleep();
rwsemtrace(sem,"Entering down_write");
__down_write(sem);
rwsemtrace(sem,"Leaving down_write");
--- 2.5.38/kernel/ksyms.c~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/kernel/ksyms.c Wed Sep 25 20:15:27 2002
@@ -497,7 +497,9 @@ EXPORT_SYMBOL(jiffies_64);
EXPORT_SYMBOL(xtime);
EXPORT_SYMBOL(do_gettimeofday);
EXPORT_SYMBOL(do_settimeofday);
-
+#ifdef CONFIG_DEBUG_KERNEL
+EXPORT_SYMBOL(__might_sleep);
+#endif
#if !defined(__ia64__)
EXPORT_SYMBOL(loops_per_jiffy);
#endif
--- 2.5.38/kernel/sched.c~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/kernel/sched.c Wed Sep 25 20:15:28 2002
@@ -2150,3 +2150,20 @@ void __init sched_init(void)
enter_lazy_tlb(&init_mm, current, smp_processor_id());
}

+#ifdef CONFIG_DEBUG_KERNEL
+void __might_sleep(char *file, int line)
+{
+#if defined(in_atomic)
+ static unsigned long prev_jiffy; /* ratelimiting */
+
+ if (in_atomic()) {
+ if (time_before(jiffies, prev_jiffy + HZ))
+ return;
+ prev_jiffy = jiffies;
+ printk("Sleeping function called from illegal"
+ " context at %s:%d\n", file, line);
+ dump_stack();
+ }
+#endif
+}
+#endif
--- 2.5.38/mm/page_alloc.c~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/mm/page_alloc.c Wed Sep 25 20:15:28 2002
@@ -321,6 +321,9 @@ __alloc_pages(unsigned int gfp_mask, uns
struct page * page;
int freed, i;

+ if (gfp_mask & __GFP_WAIT)
+ might_sleep();
+
KERNEL_STAT_ADD(pgalloc, 1<<order);

zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
--- 2.5.38/mm/slab.c~might_sleep Wed Sep 25 20:15:27 2002
+++ 2.5.38-akpm/mm/slab.c Wed Sep 25 20:15:28 2002
@@ -1370,6 +1370,9 @@ static inline void * __kmem_cache_alloc
unsigned long save_flags;
void* objp;

+ if (flags & __GFP_WAIT)
+ might_sleep();
+
kmem_cache_alloc_head(cachep, flags);
try_again:
local_irq_save(save_flags);
