diff -rup linux-2.4.24.orig/fs/inode.c linux-2.4.24.new/fs/inode.c
--- linux-2.4.24.orig/fs/inode.c	Fri Nov 28 21:26:21 2003
+++ linux-2.4.24.new/fs/inode.c	Tue Mar 16 13:24:35 2004
@@ -471,7 +471,7 @@ void sync_inodes(kdev_t dev)
 	}
 }
 
-static void try_to_sync_unused_inodes(void * arg)
+static void try_to_sync_unused_inodes(void)
 {
 	struct super_block * sb;
 	int nr_inodes = inodes_stat.nr_unused;
@@ -490,7 +490,8 @@ static void try_to_sync_unused_inodes(vo
 	spin_unlock(&inode_lock);
 }
 
-static struct tq_struct unused_inodes_flush_task;
+static DECLARE_WAIT_QUEUE_HEAD(kinoded_wait);
+static atomic_t kinoded_goal = ATOMIC_INIT(0);
 
 /**
  *	write_inode_now	-	write an inode to disk
@@ -753,7 +754,7 @@ int invalidate_device(kdev_t dev, int do
 	 !inode_has_buffers(inode))
 #define INODE(entry)	(list_entry(entry, struct inode, i_list))
 
-void prune_icache(int goal)
+static void _prune_icache(int goal)
 {
 	LIST_HEAD(list);
 	struct list_head *entry, *freeable = &list;
@@ -787,35 +788,24 @@ void prune_icache(int goal)
 	spin_unlock(&inode_lock);
 
 	dispose_list(freeable);
-
-	/* 
-	 * If we didn't freed enough clean inodes schedule
-	 * a sync of the dirty inodes, we cannot do it
-	 * from here or we're either synchronously dogslow
-	 * or we deadlock with oom.
-	 */
+	kmem_cache_shrink(inode_cachep);
 	if (goal)
-		schedule_task(&unused_inodes_flush_task);
+		try_to_sync_unused_inodes();
+}
+
+void prune_icache(int goal)
+{
+	atomic_add(goal, &kinoded_goal);
+	if (atomic_read(&kinoded_goal) > 16)
+		wake_up_interruptible(&kinoded_wait);
 }
 
 int shrink_icache_memory(int priority, int gfp_mask)
 {
 	int count = 0;
-
-	/*
-	 * Nasty deadlock avoidance..
-	 *
-	 * We may hold various FS locks, and we don't
-	 * want to recurse into the FS that called us
-	 * in clear_inode() and friends..
-	 */
-	if (!(gfp_mask & __GFP_FS))
-		return 0;
-
 	count = inodes_stat.nr_unused / priority;
-
 	prune_icache(count);
-	return kmem_cache_shrink(inode_cachep);
+	return 0;
 }
 
 /*
@@ -1198,6 +1188,35 @@ int bmap(struct inode * inode, int block
 	return res;
 }
 
+int kinoded(void *startup)
+{
+	struct task_struct *tsk = current;
+	int goal;
+
+	daemonize();
+	strcpy(tsk->comm, "kinoded");
+
+	/* avoid getting signals */
+	spin_lock_irq(&tsk->sigmask_lock);
+	flush_signals(tsk);
+	sigfillset(&tsk->blocked);
+	recalc_sigpending(tsk);
+	spin_unlock_irq(&tsk->sigmask_lock);
+
+	printk(KERN_INFO "kinoded started\n");
+	complete((struct completion *)startup);
+	while (1) {
+		wait_event_interruptible(kinoded_wait,
+					 atomic_read(&kinoded_goal));
+		while ((goal = atomic_read(&kinoded_goal))) {
+			_prune_icache(goal);
+			atomic_sub(goal, &kinoded_goal);
+			if (current->need_resched)
+				schedule();
+		}
+	}
+}
+
 /*
  * Initialize the hash tables.
  */
@@ -1249,8 +1268,17 @@ void __init inode_init(unsigned long mem
 					 NULL);
 	if (!inode_cachep)
 		panic("cannot create inode slab cache");
+}
 
-	unused_inodes_flush_task.routine = try_to_sync_unused_inodes;
+/* we need to start a thread, and inode_init happens too early for that
+** to work.  So, add a second init func through module_init
+*/
+static int __init inode_mod_init(void)
+{
+	static struct completion startup __initdata = COMPLETION_INITIALIZER(startup);
+	kernel_thread(kinoded, &startup, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+	wait_for_completion(&startup);
+	return 0;
 }
 
 /**
@@ -1344,3 +1372,5 @@ void remove_dquot_ref(struct super_block
 }
 
 #endif
+
+module_init(inode_mod_init);
