
 fs/reiser4/as_ops.c                       |   46 --
 fs/reiser4/block_alloc.c                  |  106 ++---
 fs/reiser4/blocknrset.c                   |    2 
 fs/reiser4/carry.c                        |   25 -
 fs/reiser4/carry_ops.c                    |   53 +-
 fs/reiser4/context.c                      |    2 
 fs/reiser4/context.h                      |    1 
 fs/reiser4/coord.c                        |    2 
 fs/reiser4/debug.c                        |   11 
 fs/reiser4/debug.h                        |   14 
 fs/reiser4/emergency_flush.c              |  111 +++---
 fs/reiser4/emergency_flush.h              |    2 
 fs/reiser4/entd.c                         |    3 
 fs/reiser4/eottl.c                        |   29 -
 fs/reiser4/flush.c                        |  103 +++--
 fs/reiser4/flush_queue.c                  |  115 +++---
 fs/reiser4/fsdata.c                       |   29 +
 fs/reiser4/fsdata.h                       |    7 
 fs/reiser4/init_super.c                   |    4 
 fs/reiser4/inode.c                        |   27 -
 fs/reiser4/inode.h                        |   73 ++-
 fs/reiser4/jnode.c                        |  158 ++++----
 fs/reiser4/jnode.h                        |   89 ++--
 fs/reiser4/ktxnmgrd.h                     |    1 
 fs/reiser4/lock.c                         |   77 ++--
 fs/reiser4/lock.h                         |  114 ++++--
 fs/reiser4/oid.c                          |   24 -
 fs/reiser4/page_cache.c                   |   53 --
 fs/reiser4/plugin/file/cryptcompress.c    |   52 +-
 fs/reiser4/plugin/file/file.c             |   78 ++--
 fs/reiser4/plugin/file/funcs.h            |    5 
 fs/reiser4/plugin/file_ops.c              |   15 
 fs/reiser4/plugin/item/ctail.c            |   26 -
 fs/reiser4/plugin/item/extent_file_ops.c  |   52 +-
 fs/reiser4/plugin/item/extent_flush_ops.c |   39 +-
 fs/reiser4/plugin/item/extent_item_ops.c  |   21 -
 fs/reiser4/plugin/item/internal.c         |   34 -
 fs/reiser4/plugin/node/node40.c           |   45 +-
 fs/reiser4/plugin/space/bitmap.c          |   12 
 fs/reiser4/readahead.c                    |    8 
 fs/reiser4/reiser4.h                      |    2 
 fs/reiser4/seal.c                         |    8 
 fs/reiser4/search.c                       |  110 ++---
 fs/reiser4/super.c                        |    8 
 fs/reiser4/super.h                        |   46 --
 fs/reiser4/super_ops.c                    |    2 
 fs/reiser4/tree.c                         |   79 ++--
 fs/reiser4/tree.h                         |  172 ++++++---
 fs/reiser4/tree_mod.c                     |   24 -
 fs/reiser4/tree_walk.c                    |   65 +--
 fs/reiser4/txnmgr.c                       |  555 +++++++++++++++---------------
 fs/reiser4/txnmgr.h                       |  168 +++++++--
 fs/reiser4/vfs_ops.h                      |    2 
 fs/reiser4/wander.c                       |   57 +--
 fs/reiser4/znode.c                        |  112 +-----
 fs/reiser4/znode.h                        |    7 
 fs/reiser4/spin_macros.h                  |  474 -------------------------
 57 files changed, 1632 insertions(+), 1927 deletions(-)
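
The conversion throughout is mechanical: reiser4's private locking wrappers
from spin_macros.h (LOCK_JNODE/UNLOCK_JNODE, UNDER_SPIN_VOID, UNDER_RW,
reiser4_spin_lock_sb and friends) are replaced by stock kernel spinlock
calls, and the home-grown spin_*_is_locked() predicates in assertions become
assert_spin_locked(). A minimal sketch of the two recurring shapes, taken
from the as_ops.c and block_alloc.c hunks below (spin_lock_jnode() is
assumed to be a thin inline around spin_lock(&node->guard)):

	/* before: lock/unlock pair hidden inside an expression macro */
	UNDER_SPIN_VOID(jnode, node, page_clear_jnode(page, node));

	/* after: the critical section is spelled out explicitly */
	spin_lock_jnode(node);
	page_clear_jnode(page, node);
	spin_unlock_jnode(node);

	/* assertions switch to the generic helper */
	assert("zam-773", spin_atom_is_locked(atom));	/* before */
	assert_spin_locked(&atom->alock);		/* after  */

Spelling the pairs out makes them visible to the usual static checkers and
drops the 474 lines of macro machinery (see spin_macros.h in the diffstat).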

diff -puN fs/reiser4/as_ops.c~reiser4-spinlock-cleanup fs/reiser4/as_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/as_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.456971750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/as_ops.c	2005-10-20 14:01:52.740989500 +0400
@@ -226,7 +226,7 @@ int reiser4_invalidatepage(struct page *
 	assert("", offset == 0);
 
 	node = jprivate(page);
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	if (!JF_ISSET(node, JNODE_DIRTY) && !JF_ISSET(node, JNODE_FLUSH_QUEUED) &&
 	    !JF_ISSET(node, JNODE_WRITEBACK)) {
 		/* there is no need to capture */
@@ -238,7 +238,7 @@ int reiser4_invalidatepage(struct page *
 		jput(node);
 		return 0;
 	}
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 
 
 	ctx = init_context(inode->i_sb);
@@ -247,10 +247,8 @@ int reiser4_invalidatepage(struct page *
 
 	/* capture page being truncated. */
 	ret = try_capture_page_to_invalidate(page);
-	if (ret != 0) {
+	if (ret != 0)
 		warning("nikita-3141", "Cannot capture: %i", ret);
-		print_page("page", page);
-	}
 
 	if (offset == 0) {
 		/* remove jnode from transaction and detach it from page. */
@@ -263,8 +261,9 @@ int reiser4_invalidatepage(struct page *
 
 		/* this detaches page from jnode, so that jdelete will not try
 		 * to lock page which is already locked */
-		UNDER_SPIN_VOID(jnode,
-				node, page_clear_jnode(page, node));
+		spin_lock_jnode(node);
+		page_clear_jnode(page, node);
+		spin_unlock_jnode(node);
 		unhash_unformatted_jnode(node);
 
 		jput(node);
@@ -274,18 +273,12 @@ int reiser4_invalidatepage(struct page *
 	return ret;
 }
 
-#define INC_STAT(page, node, counter)						\
-	reiser4_stat_inc_at(page->mapping->host->i_sb, 				\
-			    level[jnode_get_level(node)].counter);
-
-#define INC_NSTAT(node, counter) INC_STAT(jnode_page(node), node, counter)
-
 /* helper function called from reiser4_releasepage(). It returns true if jnode
  * can be detached from its page and the page released. */
-static int releasable(const jnode * node /* node to check */ )
+int jnode_is_releasable(jnode * node /* node to check */ )
 {
 	assert("nikita-2781", node != NULL);
-	assert("nikita-2783", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	/* if some thread is currently using the jnode page, the latter cannot
 	 * be detached */
@@ -317,7 +310,7 @@ static int releasable(const jnode * node
 	}
 	/* dirty jnode cannot be released. It can however be submitted to disk
 	 * as part of early flushing, but only after getting flush-prepped. */
-	if (jnode_is_dirty(node)) {
+	if (JF_ISSET(node, JNODE_DIRTY)) {
 		return 0;
 	}
 	/* overwrite set is only written by log writer. */
@@ -343,13 +336,6 @@ static int releasable(const jnode * node
 	return 1;
 }
 
-#if REISER4_DEBUG
-int jnode_is_releasable(jnode * node)
-{
-	return UNDER_SPIN(jload, node, releasable(node));
-}
-#endif
-
 /*
  * ->releasepage method for reiser4
  *
@@ -387,9 +373,9 @@ int reiser4_releasepage(struct page *pag
 
 	/* releasable() needs jnode lock, because it looks at the jnode fields
 	 * and we need jload_lock here to avoid races with jload(). */
-	LOCK_JNODE(node);
-	LOCK_JLOAD(node);
-	if (releasable(node)) {
+	spin_lock_jnode(node);
+	spin_lock(&(node->load));
+	if (jnode_is_releasable(node)) {
 		struct address_space *mapping;
 
 		mapping = page->mapping;
@@ -398,8 +384,8 @@ int reiser4_releasepage(struct page *pag
 		 * jnode_extent_write() here, because pages seen by
 		 * jnode_extent_write() are !releasable(). */
 		page_clear_jnode(page, node);
-		UNLOCK_JLOAD(node);
-		UNLOCK_JNODE(node);
+		spin_unlock(&(node->load));
+		spin_unlock_jnode(node);
 
 		/* we are under memory pressure so release jnode also. */
 		jput(node);
@@ -414,8 +400,8 @@ int reiser4_releasepage(struct page *pag
 
 		return 1;
 	} else {
-		UNLOCK_JLOAD(node);
-		UNLOCK_JNODE(node);
+		spin_unlock(&(node->load));
+		spin_unlock_jnode(node);
 		assert("nikita-3020", schedulable());
 		return 0;
 	}
diff -puN fs/reiser4/block_alloc.c~reiser4-spinlock-cleanup fs/reiser4/block_alloc.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/block_alloc.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.460972000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/block_alloc.c	2005-10-20 14:01:52.744989750 +0400
@@ -202,7 +202,7 @@ sub_from_cluster_reserved(reiser4_super_
 static void add_to_atom_flush_reserved_nolock(txn_atom * atom, __u32 count)
 {
 	assert("zam-772", atom != NULL);
-	assert("zam-773", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	atom->flush_reserved += count;
 }
 
@@ -210,7 +210,7 @@ static void add_to_atom_flush_reserved_n
 static void sub_from_atom_flush_reserved_nolock(txn_atom * atom, __u32 count)
 {
 	assert("zam-774", atom != NULL);
-	assert("zam-775", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	assert("nikita-2790", atom->flush_reserved >= count);
 	atom->flush_reserved -= count;
 }
@@ -275,7 +275,7 @@ reiser4_grab(reiser4_context * ctx, __u6
 
 	sbinfo = get_super_private(ctx->super);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	free_blocks = sbinfo->blocks_free;
 
@@ -300,7 +300,7 @@ reiser4_grab(reiser4_context * ctx, __u6
 	ctx->grab_enabled = 0;
 
       unlock_and_ret:
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 
 	return ret;
 }
@@ -409,7 +409,7 @@ static reiser4_super_info_data *grabbed2
 	sub_from_ctx_grabbed(ctx, 1);
 
 	sbinfo = get_super_private(ctx->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_sb_grabbed(sbinfo, 1);
 	/* return sbinfo locked */
@@ -427,7 +427,7 @@ static void grabbed2fake_allocated_forma
 
 	assert("vs-922", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 static void grabbed2fake_allocated_unformatted(void)
@@ -439,7 +439,7 @@ static void grabbed2fake_allocated_unfor
 
 	assert("vs-9221", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 void grabbed2cluster_reserved(int count)
@@ -451,14 +451,14 @@ void grabbed2cluster_reserved(int count)
 	sub_from_ctx_grabbed(ctx, count);
 
 	sbinfo = get_super_private(ctx->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_sb_grabbed(sbinfo, count);
 	sbinfo->blocks_clustered += count;
 
 	assert("edward-504", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 void cluster_reserved2grabbed(int count)
@@ -469,14 +469,14 @@ void cluster_reserved2grabbed(int count)
 	ctx = get_current_context();
 
 	sbinfo = get_super_private(ctx->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_cluster_reserved(sbinfo, count);
 	sbinfo->blocks_grabbed += count;
 
 	assert("edward-505", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 	add_to_ctx_grabbed(ctx, count);
 }
 
@@ -489,14 +489,14 @@ void cluster_reserved2free(int count)
 
 	ctx = get_current_context();
 	sbinfo = get_super_private(ctx->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_cluster_reserved(sbinfo, count);
 	sbinfo->blocks_free += count;
 
 	assert("edward-502", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 static spinlock_t fake_lock = SPIN_LOCK_UNLOCKED;
@@ -543,14 +543,14 @@ grabbed2used(reiser4_context * ctx, reis
 {
 	sub_from_ctx_grabbed(ctx, count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_sb_grabbed(sbinfo, count);
 	sbinfo->blocks_used += count;
 
 	assert("nikita-2679", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* adjust sb block counters when @count unallocated blocks get mapped to disk */
@@ -558,14 +558,14 @@ static void
 fake_allocated2used(reiser4_super_info_data * sbinfo, __u64 count,
 		    reiser4_ba_flags_t flags)
 {
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_sb_fake_allocated(sbinfo, count, flags);
 	sbinfo->blocks_used += count;
 
 	assert("nikita-2680", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 static void flush_reserved2used(txn_atom * atom, __u64 count)
@@ -573,19 +573,19 @@ static void flush_reserved2used(txn_atom
 	reiser4_super_info_data *sbinfo;
 
 	assert("zam-787", atom != NULL);
-	assert("zam-788", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	sub_from_atom_flush_reserved_nolock(atom, (__u32) count);
 
 	sbinfo = get_current_super_private();
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_sb_flush_reserved(sbinfo, count);
 	sbinfo->blocks_used += count;
 
 	assert("zam-789", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* update the per fs  blocknr hint default value. */
@@ -597,7 +597,7 @@ update_blocknr_hint_default(const struct
 
 	assert("nikita-3342", !blocknr_is_fake(block));
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	if (*block < sbinfo->block_count) {
 		sbinfo->blocknr_hint_default = *block;
 	} else {
@@ -607,7 +607,7 @@ update_blocknr_hint_default(const struct
 		dump_stack();
 		DEBUGON(1);
 	}
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* get current value of the default blocknr hint. */
@@ -615,10 +615,10 @@ void get_blocknr_hint_default(reiser4_bl
 {
 	reiser4_super_info_data *sbinfo = get_current_super_private();
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	*result = sbinfo->blocknr_hint_default;
 	assert("zam-677", *result < sbinfo->block_count);
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* Allocate "real" disk blocks by calling a proper space allocation plugin
@@ -679,7 +679,7 @@ reiser4_alloc_blocks(reiser4_blocknr_hin
 			/* we assume that current atom exists at this moment */
 			txn_atom *atom = get_current_atom_locked();
 			atom->nr_blocks_allocated += *len;
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		}
 
 		switch (hint->block_stage) {
@@ -694,7 +694,7 @@ reiser4_alloc_blocks(reiser4_blocknr_hin
 			{
 				txn_atom *atom = get_current_atom_locked();
 				flush_reserved2used(atom, *len);
-				UNLOCK_ATOM(atom);
+				spin_unlock_atom(atom);
 			}
 			break;
 		default:
@@ -719,7 +719,7 @@ static void
 used2fake_allocated(reiser4_super_info_data * sbinfo, __u64 count,
 		    int formatted)
 {
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	if (formatted)
 		sbinfo->blocks_fake_allocated += count;
@@ -730,7 +730,7 @@ used2fake_allocated(reiser4_super_info_d
 
 	assert("nikita-2681", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 static void
@@ -738,11 +738,11 @@ used2flush_reserved(reiser4_super_info_d
 		    __u64 count, reiser4_ba_flags_t flags UNUSED_ARG)
 {
 	assert("nikita-2791", atom != NULL);
-	assert("nikita-2792", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	add_to_atom_flush_reserved_nolock(atom, (__u32) count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sbinfo->blocks_flush_reserved += count;
 	/*add_to_sb_flush_reserved(sbinfo, count); */
@@ -750,7 +750,7 @@ used2flush_reserved(reiser4_super_info_d
 
 	assert("nikita-2681", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* disk space, virtually used by fake block numbers is counted as "grabbed" again. */
@@ -760,7 +760,7 @@ fake_allocated2grabbed(reiser4_context *
 {
 	add_to_ctx_grabbed(ctx, count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	assert("nikita-2682", check_block_counters(ctx->super));
 
@@ -769,7 +769,7 @@ fake_allocated2grabbed(reiser4_context *
 
 	assert("nikita-2683", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags)
@@ -804,13 +804,13 @@ grabbed2free(reiser4_context * ctx, reis
 {
 	sub_from_ctx_grabbed(ctx, count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sub_from_sb_grabbed(sbinfo, count);
 	sbinfo->blocks_free += count;
 	assert("nikita-2684", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count)
@@ -827,14 +827,14 @@ void grabbed2flush_reserved_nolock(txn_a
 
 	add_to_atom_flush_reserved_nolock(atom, count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sbinfo->blocks_flush_reserved += count;
 	sub_from_sb_grabbed(sbinfo, count);
 
 	assert("vpf-292", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 void grabbed2flush_reserved(__u64 count)
@@ -843,7 +843,7 @@ void grabbed2flush_reserved(__u64 count)
 
 	grabbed2flush_reserved_nolock(atom, count);
 
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 void flush_reserved2grabbed(txn_atom * atom, __u64 count)
@@ -852,7 +852,7 @@ void flush_reserved2grabbed(txn_atom * a
 	reiser4_super_info_data *sbinfo;
 
 	assert("nikita-2788", atom != NULL);
-	assert("nikita-2789", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	ctx = get_current_context();
 	sbinfo = get_super_private(ctx->super);
@@ -861,14 +861,14 @@ void flush_reserved2grabbed(txn_atom * a
 
 	sub_from_atom_flush_reserved_nolock(atom, (__u32) count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sbinfo->blocks_grabbed += count;
 	sub_from_sb_flush_reserved(sbinfo, count);
 
 	assert("vpf-292", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* release all blocks grabbed in context which where not used. */
@@ -887,27 +887,27 @@ used2grabbed(reiser4_context * ctx, reis
 {
 	add_to_ctx_grabbed(ctx, count);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sbinfo->blocks_grabbed += count;
 	sub_from_sb_used(sbinfo, count);
 
 	assert("nikita-2685", check_block_counters(ctx->super));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* this used to be done through used2grabbed and grabbed2free*/
 static void used2free(reiser4_super_info_data * sbinfo, __u64 count)
 {
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 
 	sbinfo->blocks_free += count;
 	sub_from_sb_used(sbinfo, count);
 
 	assert("nikita-2685", check_block_counters(reiser4_get_current_sb()));
 
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 #if REISER4_DEBUG
@@ -960,9 +960,9 @@ reiser4_dealloc_blocks(const reiser4_blo
 		assert("zam-432", *start != 0);
 		assert("zam-558", !blocknr_is_fake(start));
 
-		reiser4_spin_lock_sb(sbinfo);
+		spin_lock_reiser4_super(sbinfo);
 		assert("zam-562", *start < sbinfo->block_count);
-		reiser4_spin_unlock_sb(sbinfo);
+		spin_unlock_reiser4_super(sbinfo);
 	}
 
 	if (flags & BA_DEFER) {
@@ -987,7 +987,7 @@ reiser4_dealloc_blocks(const reiser4_blo
 		assert("zam-477", ret == 0);
 		assert("zam-433", atom != NULL);
 
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 
 	} else {
 		assert("zam-425", get_current_super_private() != NULL);
@@ -999,7 +999,7 @@ reiser4_dealloc_blocks(const reiser4_blo
 			 * back if allocation is discarded. */
 			txn_atom *atom = get_current_atom_locked();
 			atom->nr_blocks_allocated -= *len;
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		}
 
 		switch (target_stage) {
@@ -1023,7 +1023,7 @@ reiser4_dealloc_blocks(const reiser4_blo
 				atom = get_current_atom_locked();
 				used2flush_reserved(sbinfo, atom, *len,
 						    flags & BA_FORMATTED);
-				UNLOCK_ATOM(atom);
+				spin_unlock_atom(atom);
 				break;
 			}
 		default:
@@ -1062,12 +1062,12 @@ apply_dset(txn_atom * atom UNUSED_ARG, c
 		len = *b;
 
 	if (REISER4_DEBUG) {
-		reiser4_spin_lock_sb(sbinfo);
+		spin_lock_reiser4_super(sbinfo);
 
 		assert("zam-554", *a < reiser4_block_count(ctx->super));
 		assert("zam-555", *a + len <= reiser4_block_count(ctx->super));
 
-		reiser4_spin_unlock_sb(sbinfo);
+		spin_unlock_reiser4_super(sbinfo);
 	}
 
 	sa_dealloc_blocks(&sbinfo->space_allocator, *a, len);
@@ -1082,7 +1082,7 @@ void post_commit_hook(void)
 
 	atom = get_current_atom_locked();
 	assert("zam-452", atom->stage == ASTAGE_POST_COMMIT);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	/* do the block deallocation which was deferred
 	   until commit is done */
diff -puN fs/reiser4/blocknrset.c~reiser4-spinlock-cleanup fs/reiser4/blocknrset.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/blocknrset.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.464972250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/blocknrset.c	2005-10-20 14:01:52.744989750 +0400
@@ -156,7 +156,7 @@ static int blocknr_set_add(txn_atom *ato
 	    bse_avail(list_entry(bset->entries.next, blocknr_set_entry, link)) < entries_needed) {
 		/* See if a bse was previously allocated. */
 		if (*new_bsep == NULL) {
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 			*new_bsep = bse_alloc();
 			return (*new_bsep != NULL) ? -E_REPEAT :
 				RETERR(-ENOMEM);
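
carry.c and the files that follow drop the UNDER_RW()/UNDER_RW_VOID()
expression macros the same way. The shape of that conversion, copied from
the lock_carry_node() hunk below:

	/* before: the value is computed inside the macro */
	reference_point = UNDER_RW(tree, znode_get_tree(reference_point), read,
				   find_begetting_brother(node, level)->node);

	/* after: an explicit read-side critical section */
	tree = znode_get_tree(reference_point);
	read_lock_tree(tree);
	reference_point = find_begetting_brother(node, level)->node;
	read_unlock_tree(tree);
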
diff -puN fs/reiser4/carry.c~reiser4-spinlock-cleanup fs/reiser4/carry.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/carry.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.468972500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/carry.c	2005-10-20 14:01:52.748990000 +0400
@@ -762,8 +762,8 @@ static void sync_dkeys(znode * spot /* n
 	assert("nikita-1612", LOCK_CNT_NIL(rw_locked_dk));
 
 	tree = znode_get_tree(spot);
-	RLOCK_TREE(tree);
-	WLOCK_DK(tree);
+	read_lock_tree(tree);
+	write_lock_dk(tree);
 
 	assert("nikita-2192", znode_is_loaded(spot));
 
@@ -798,8 +798,8 @@ static void sync_dkeys(znode * spot /* n
 			break;
 	}
 
-	WUNLOCK_DK(tree);
-	RUNLOCK_TREE(tree);
+	write_unlock_dk(tree);
+	read_unlock_tree(tree);
 }
 
 /* unlock all carry nodes in @level */
@@ -914,6 +914,7 @@ int lock_carry_node(carry_level * level 
 	znode *reference_point;
 	lock_handle lh;
 	lock_handle tmp_lh;
+	reiser4_tree *tree;
 
 	assert("nikita-887", level != NULL);
 	assert("nikita-882", node != NULL);
@@ -944,9 +945,10 @@ int lock_carry_node(carry_level * level 
 		   and thus, their sibling linkage cannot change.
 
 		 */
-		reference_point = UNDER_RW
-		    (tree, znode_get_tree(reference_point), read,
-		     find_begetting_brother(node, level)->node);
+		tree = znode_get_tree(reference_point);
+		read_lock_tree(tree);
+		reference_point = find_begetting_brother(node, level)->node;
+		read_unlock_tree(tree);
 		assert("nikita-1186", reference_point != NULL);
 	}
 	if (node->parent && (result == 0)) {
@@ -1222,11 +1224,11 @@ carry_node *add_new_znode(znode * brothe
 	add_pointer->u.insert.child = fresh;
 	add_pointer->u.insert.brother = brother;
 	/* initially new node spawns empty key range */
-	WLOCK_DK(znode_get_tree(brother));
+	write_lock_dk(znode_get_tree(brother));
 	znode_set_ld_key(new_znode,
 			 znode_set_rd_key(new_znode,
 					  znode_get_rd_key(brother)));
-	WUNLOCK_DK(znode_get_tree(brother));
+	write_unlock_dk(znode_get_tree(brother));
 	return fresh;
 }
 
@@ -1287,8 +1289,7 @@ static int carry_level_invariant(carry_l
 				continue;
 			if (!keyle(leftmost_key_in_node(left, &lkey),
 				   leftmost_key_in_node(right, &rkey))) {
-				print_znode("left", left);
-				print_znode("right", right);
+				warning("", "wrong key order");
 				return 0;
 			}
 		}
@@ -1343,8 +1344,6 @@ static void print_carry(const char *pref
 	    ("%s: %p parent: %s, left: %s, unlock: %s, free: %s, dealloc: %s\n",
 	     prefix, node, tf(node->parent), tf(node->left), tf(node->unlock),
 	     tf(node->free), tf(node->deallocate));
-	print_znode("\tnode", node->node);
-	print_znode("\treal_node", carry_real(node));
 }
 
 /* dump information about carry operation */
diff -puN fs/reiser4/carry_ops.c~reiser4-spinlock-cleanup fs/reiser4/carry_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/carry_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.472972750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/carry_ops.c	2005-10-20 14:01:52.752990250 +0400
@@ -49,13 +49,13 @@ static carry_node *find_left_neighbor(ca
 	node = op->node;
 
 	tree = current_tree;
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 	/* first, check whether left neighbor is already in a @doing queue */
 	if (carry_real(node)->left != NULL) {
 		/* NOTE: there is locking subtlety here. Look into
 		 * find_right_neighbor() for more info */
 		if (find_carry_node(doing, carry_real(node)->left) != NULL) {
-			RUNLOCK_TREE(tree);
+			read_unlock_tree(tree);
 			left = node;
 			do {
 				left = list_entry(left->header.level_linkage.prev,
@@ -66,7 +66,7 @@ static carry_node *find_left_neighbor(ca
 			return left;
 		}
 	}
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 
 	left = add_carry_skip(doing, POOLO_BEFORE, node);
 	if (IS_ERR(left))
@@ -131,7 +131,7 @@ static carry_node *find_right_neighbor(c
 	node = op->node;
 
 	tree = current_tree;
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 	/* first, check whether right neighbor is already in a @doing queue */
 	if (carry_real(node)->right != NULL) {
 		/*
@@ -155,7 +155,7 @@ static carry_node *find_right_neighbor(c
 		 * locked neighbors.
 		 */
 		if (find_carry_node(doing, carry_real(node)->right) != NULL) {
-			RUNLOCK_TREE(tree);
+			read_unlock_tree(tree);
 			/*
 			 * What we are doing here (this is also applicable to
 			 * the find_left_neighbor()).
@@ -194,7 +194,7 @@ static carry_node *find_right_neighbor(c
 			return right;
 		}
 	}
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 
 	flags = GN_CAN_USE_UPPER_LEVELS;
 	if (!op->u.insert.flags & COPI_LOAD_RIGHT)
@@ -463,7 +463,6 @@ static int make_space(carry_op * op /* c
 				warning("nikita-924",
 					"Error accessing left neighbor: %li",
 					PTR_ERR(left));
-				print_znode("node", node);
 			}
 		} else if (left != NULL) {
 
@@ -494,7 +493,6 @@ static int make_space(carry_op * op /* c
 			warning("nikita-1065",
 				"Error accessing right neighbor: %li",
 				PTR_ERR(right));
-			print_znode("node", node);
 		} else if (right != NULL) {
 			/* node containing insertion point, and its right
 			   neighbor node are write locked by now.
@@ -552,8 +550,6 @@ static int make_space(carry_op * op /* c
 		if (result != 0) {
 			warning("nikita-947",
 				"Cannot lock new node: %i", result);
-			print_znode("new", carry_real(fresh));
-			print_znode("node", node);
 			return result;
 		}
 
@@ -699,7 +695,6 @@ static int insert_paste_common(carry_op 
 		if ((intra_node != NS_FOUND) && (intra_node != NS_NOT_FOUND)) {
 			warning("nikita-1715", "Intra node lookup failure: %i",
 				intra_node);
-			print_znode("node", node);
 			return intra_node;
 		}
 	} else if (op->u.insert.type == COPT_CHILD) {
@@ -720,8 +715,6 @@ static int insert_paste_common(carry_op 
 			warning("nikita-993",
 				"Cannot find a place for child pointer: %i",
 				result);
-			print_znode("child", child);
-			print_znode("parent", carry_real(op->node));
 			return result;
 		}
 		/* This only happens when we did multiple insertions at
@@ -784,10 +777,10 @@ static int insert_paste_common(carry_op 
 		 * internal item and its key (by the very definition of a
 		 * search tree) is the leftmost key in the child node.
 		 */
-		op->u.insert.d->key = UNDER_RW(dk, znode_get_tree(child), read,
-					       leftmost_key_in_node(child,
-								    znode_get_ld_key
-								    (child)));
+		write_lock_dk(znode_get_tree(child));
+		op->u.insert.d->key = leftmost_key_in_node(child,
+							   znode_get_ld_key(child));
+		write_unlock_dk(znode_get_tree(child));
 		op->u.insert.d->data->arg = op->u.insert.brother;
 	} else {
 		assert("vs-243", op->u.insert.d->coord != NULL);
@@ -1237,7 +1230,7 @@ static int carry_delete(carry_op * op /*
 	child = op->u.delete.child ?
 	    carry_real(op->u.delete.child) : op->node->node;
 	tree = znode_get_tree(child);
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 
 	/*
 	 * @parent was determined when carry entered parent level
@@ -1251,7 +1244,7 @@ static int carry_delete(carry_op * op /*
 		parent = znode_parent(child);
 		assert("nikita-2581", find_carry_node(doing, parent));
 	}
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 
 	assert("nikita-1213", znode_get_level(parent) > LEAF_LEVEL);
 
@@ -1264,11 +1257,11 @@ static int carry_delete(carry_op * op /*
 	    znode_get_level(parent) <= REISER4_MIN_TREE_HEIGHT &&
 	    node_num_items(parent) == 1) {
 		/* Delimiting key manipulations. */
-		WLOCK_DK(tree);
+		write_lock_dk(tree);
 		znode_set_ld_key(child, znode_set_ld_key(parent, min_key()));
 		znode_set_rd_key(child, znode_set_rd_key(parent, max_key()));
 		ZF_SET(child, JNODE_DKSET);
-		WUNLOCK_DK(tree);
+		write_unlock_dk(tree);
 
 		/* @child escaped imminent death! */
 		ZF_CLR(child, JNODE_HEARD_BANSHEE);
@@ -1279,8 +1272,6 @@ static int carry_delete(carry_op * op /*
 	result = find_child_ptr(parent, child, &coord);
 	if (result != NS_FOUND) {
 		warning("nikita-994", "Cannot find child pointer: %i", result);
-		print_znode("child", child);
-		print_znode("parent", parent);
 		print_coord_content("coord", &coord);
 		return result;
 	}
@@ -1719,9 +1710,11 @@ static int update_delimiting_key(znode *
 
 	if (!ZF_ISSET(right, JNODE_HEARD_BANSHEE))
 		leftmost_key_in_node(right, &ldkey);
-	else
-		UNDER_RW_VOID(dk, znode_get_tree(parent), read,
-			      ldkey = *znode_get_rd_key(right));
+	else {
+		read_lock_dk(znode_get_tree(parent));
+		ldkey = *znode_get_rd_key(right);
+		read_unlock_dk(znode_get_tree(parent));
+	}
 	node_plugin_by_node(parent)->update_item_key(&right_pos, &ldkey, &info);
 	doing->restartable = 0;
 	znode_make_dirty(parent);
@@ -1772,9 +1765,9 @@ static int carry_update(carry_op * op /*
 		left = NULL;
 
 	tree = znode_get_tree(rchild->node);
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 	right = znode_parent(rchild->node);
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 
 	if (right != NULL) {
 		result = update_delimiting_key(right,
@@ -1791,10 +1784,6 @@ static int carry_update(carry_op * op /*
 	if (result != 0) {
 		warning("nikita-999", "Error updating delimiting key: %s (%i)",
 			error_msg ? : "", result);
-		print_znode("left", left);
-		print_znode("right", right);
-		print_znode("lchild", lchild ? lchild->node : NULL);
-		print_znode("rchild", rchild->node);
 	}
 	return result;
 }
diff -puN fs/reiser4/context.c~reiser4-spinlock-cleanup fs/reiser4/context.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/context.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.476973000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/context.c	2005-10-20 14:01:52.752990250 +0400
@@ -244,7 +244,7 @@ void reiser4_exit_context(reiser4_contex
 			if (atom) {
 				atom->flags |= ATOM_FORCE_COMMIT;
 				context->trans->flags &= ~TXNH_DONT_COMMIT;
-				UNLOCK_ATOM(atom);
+				spin_unlock_atom(atom);
 			}
 		}
 		txn_end(context);
diff -puN fs/reiser4/context.h~reiser4-spinlock-cleanup fs/reiser4/context.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/context.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.480973250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/context.h	2005-10-20 14:01:52.752990250 +0400
@@ -8,7 +8,6 @@
 
 #include "forward.h"
 #include "debug.h"
-#include "spin_macros.h"
 #include "dformat.h"
 #include "tap.h"
 #include "lock.h"
diff -puN fs/reiser4/coord.c~reiser4-spinlock-cleanup fs/reiser4/coord.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/coord.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.480973250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/coord.c	2005-10-20 14:01:52.756990500 +0400
@@ -899,8 +899,6 @@ void print_coord(const char *mes, const 
 	printk("%s: item_pos = %d, unit_pos %d, tween=%s, iplug=%d\n",
 	       mes, coord->item_pos, coord->unit_pos,
 	       coord_tween_tostring(coord->between), coord->iplugid);
-	if (node)
-		print_znode("\tnode", coord->node);
 }
 
 int
diff -puN fs/reiser4/debug.c~reiser4-spinlock-cleanup fs/reiser4/debug.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/debug.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.484973500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/debug.c	2005-10-20 14:01:52.756990500 +0400
@@ -159,10 +159,10 @@ void print_lock_counters(const char *pre
 	printk("%s: jnode: %i, tree: %i (r:%i,w:%i), dk: %i (r:%i,w:%i)\n"
 	       "jload: %i, "
 	       "txnh: %i, atom: %i, stack: %i, txnmgr: %i, "
-	       "ktxnmgrd: %i, fq: %i, reiser4_sb: %i\n"
+	       "ktxnmgrd: %i, fq: %i\n"
 	       "inode: %i, "
 	       "cbk_cache: %i (r:%i,w%i), "
-	       "epoch: %i, eflush: %i, "
+	       "eflush: %i, "
 	       "zlock: %i (r:%i, w:%i)\n"
 	       "spin: %i, long: %i inode_sem: (r:%i,w:%i)\n"
 	       "d: %i, x: %i, t: %i\n", prefix,
@@ -174,12 +174,11 @@ void print_lock_counters(const char *pre
 	       info->spin_locked_txnh,
 	       info->spin_locked_atom, info->spin_locked_stack,
 	       info->spin_locked_txnmgr, info->spin_locked_ktxnmgrd,
-	       info->spin_locked_fq, info->spin_locked_super,
-	       info->spin_locked_inode_object,
+	       info->spin_locked_fq,
+	       info->spin_locked_inode,
 	       info->rw_locked_cbk_cache,
 	       info->read_locked_cbk_cache,
 	       info->write_locked_cbk_cache,
-	       info->spin_locked_epoch,
 	       info->spin_locked_super_eflush,
 	       info->rw_locked_zlock,
 	       info->read_locked_zlock,
@@ -213,7 +212,7 @@ int no_counters_are_held(void)
 	    (counters->spin_locked_atom == 0) &&
 	    (counters->spin_locked_stack == 0) &&
 	    (counters->spin_locked_txnmgr == 0) &&
-	    (counters->spin_locked_inode_object == 0) &&
+	    (counters->spin_locked_inode == 0) &&
 	    (counters->spin_locked == 0) &&
 	    (counters->long_term_locked_znode == 0) &&
 	    (counters->inode_sem_r == 0) &&
diff -puN fs/reiser4/debug.h~reiser4-spinlock-cleanup fs/reiser4/debug.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/debug.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.488973750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/debug.h	2005-10-20 14:01:52.756990500 +0400
@@ -113,9 +113,7 @@ typedef struct lock_counters_info {
 	int spin_locked_txnmgr;
 	int spin_locked_ktxnmgrd;
 	int spin_locked_fq;
-	int spin_locked_super;
-	int spin_locked_inode_object;
-	int spin_locked_epoch;
+	int spin_locked_inode;
 	int spin_locked_super_eflush;
 	int spin_locked;
 	int long_term_locked_znode;
@@ -143,6 +141,7 @@ extern lock_counters_info *lock_counters
 /* check that lock-counter is greater than zero. This is for use in
  * assertions */
 #define LOCK_CNT_GTZ(counter) IN_CONTEXT(lock_counters()->counter > 0, 1)
+#define LOCK_CNT_LT(counter,n) IN_CONTEXT(lock_counters()->counter < n, 1)
 
 #else				/* REISER4_DEBUG */
 
@@ -156,9 +155,18 @@ typedef struct lock_counters_info {
 #define LOCK_CNT_DEC(counter) noop
 #define LOCK_CNT_NIL(counter) (1)
 #define LOCK_CNT_GTZ(counter) (1)
+#define LOCK_CNT_LT(counter,n) (1)
 
 #endif				/* REISER4_DEBUG */
 
+#define assert_spin_not_locked(lock) BUG_ON(0)
+#define assert_rw_write_locked(lock) BUG_ON(0)
+#define assert_rw_read_locked(lock) BUG_ON(0)
+#define assert_rw_locked(lock) BUG_ON(0)
+#define assert_rw_not_write_locked(lock) BUG_ON(0)
+#define assert_rw_not_read_locked(lock) BUG_ON(0)
+#define assert_rw_not_locked(lock) BUG_ON(0)
+
 /* flags controlling debugging behavior. Are set through debug_flags=N mount
    option. */
 typedef enum {
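
The assert_rw_*() and assert_spin_not_locked() macros added above are
unconditional no-ops (BUG_ON(0) never fires): the generic kernel supplies
assert_spin_locked() but, as far as I know, no "not locked" or rwlock
flavours, so the stubs keep every call site spelled the same way. How a
converted caller reads, using sub_from_atom_flush_reserved_nolock() from
the block_alloc.c hunk earlier:

	static void sub_from_atom_flush_reserved_nolock(txn_atom * atom, __u32 count)
	{
		assert("zam-774", atom != NULL);
		assert_spin_locked(&(atom->alock));	/* caller holds the atom lock */
		assert("nikita-2790", atom->flush_reserved >= count);
		atom->flush_reserved -= count;
	}
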
diff -puN fs/reiser4/emergency_flush.c~reiser4-spinlock-cleanup fs/reiser4/emergency_flush.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/emergency_flush.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.492974000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/emergency_flush.c	2005-10-20 14:01:52.760990750 +0400
@@ -246,7 +246,7 @@
 
 #if REISER4_USE_EFLUSH
 
-static int flushable(const jnode * node, struct page *page, int);
+static int flushable(jnode * node, struct page *page, int);
 static int needs_allocation(const jnode * node);
 static eflush_node_t *ef_alloc(unsigned int flags);
 static reiser4_ba_flags_t ef_block_flags(const jnode * node);
@@ -294,7 +294,7 @@ int emergency_flush(struct page *page)
 	jref(node);
 
 	result = 0;
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	/*
 	 * page was dirty and under eflush. This is (only?) possible if page
 	 * was re-dirtied through mmap(2) after eflush IO was submitted, but
@@ -302,7 +302,7 @@ int emergency_flush(struct page *page)
 	 */
 	eflush_del(node, 1);
 
-	LOCK_JLOAD(node);
+	spin_lock(&(node->load));
 	if (flushable(node, page, 1)) {
 		if (needs_allocation(node)) {
 			reiser4_block_nr blk;
@@ -328,8 +328,8 @@ int emergency_flush(struct page *page)
 						 GFP_NOFS | __GFP_HIGH);
 			} else {
 				JF_CLR(node, JNODE_EFLUSH);
-				UNLOCK_JLOAD(node);
-				UNLOCK_JNODE(node);
+				spin_unlock(&(node->load));
+				spin_unlock_jnode(node);
 				if (blk != 0ull) {
 					ef_free_block(node, &blk,
 						      hint.block_stage, efnode);
@@ -352,11 +352,12 @@ int emergency_flush(struct page *page)
 
 			atom = node->atom;
 
-			if (!flushable(node, page, 1) || needs_allocation(node)
-			    || !jnode_is_dirty(node)) {
-				UNLOCK_JLOAD(node);
-				UNLOCK_JNODE(node);
-				UNLOCK_ATOM(atom);
+			if (!flushable(node, page, 1) ||
+			    needs_allocation(node) ||
+			    !JF_ISSET(node, JNODE_DIRTY)) {
+				spin_unlock(&(node->load));
+				spin_unlock_jnode(node);
+				spin_unlock_atom(atom);
 				fq_put(fq);
 				return 1;
 			}
@@ -366,9 +367,9 @@ int emergency_flush(struct page *page)
 
 			queue_jnode(fq, node);
 
-			UNLOCK_JLOAD(node);
-			UNLOCK_JNODE(node);
-			UNLOCK_ATOM(atom);
+			spin_unlock(&(node->load));
+			spin_unlock_jnode(node);
+			spin_unlock_atom(atom);
 
 			result = write_fq(fq, NULL, 0);
 			if (result != 0)
@@ -380,8 +381,8 @@ int emergency_flush(struct page *page)
 		}
 
 	} else {
-		UNLOCK_JLOAD(node);
-		UNLOCK_JNODE(node);
+		spin_unlock(&(node->load));
+		spin_unlock_jnode(node);
 		result = 1;
 	}
 
@@ -389,11 +390,11 @@ int emergency_flush(struct page *page)
 	return result;
 }
 
-static int flushable(const jnode * node, struct page *page, int check_eflush)
+static int flushable(jnode * node, struct page *page, int check_eflush)
 {
 	assert("nikita-2725", node != NULL);
-	assert("nikita-2726", spin_jnode_is_locked(node));
-	assert("nikita-3388", spin_jload_is_locked(node));
+	assert_spin_locked(&(node->guard));
+	assert_spin_locked(&(node->load));
 
 	if (jnode_is_loaded(node)) {	/* loaded */
 		return 0;
@@ -519,9 +520,9 @@ static void inc_unfm_ef(void)
 	reiser4_super_info_data *sbinfo;
 
 	sbinfo = get_super_private(get_current_context()->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	sbinfo->eflushed_unformatted++;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 static void dec_unfm_ef(void)
@@ -529,10 +530,10 @@ static void dec_unfm_ef(void)
 	reiser4_super_info_data *sbinfo;
 
 	sbinfo = get_super_private(get_current_context()->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	BUG_ON(sbinfo->eflushed_unformatted == 0);
 	sbinfo->eflushed_unformatted--;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 #define EFLUSH_MAGIC 4335203
@@ -545,8 +546,8 @@ eflush_add(jnode * node, reiser4_block_n
 	assert("nikita-2737", node != NULL);
 	assert("nikita-2738", JF_ISSET(node, JNODE_EFLUSH));
 	assert("nikita-3382", !JF_ISSET(node, JNODE_EPROTECTED));
-	assert("nikita-2765", spin_jnode_is_locked(node));
-	assert("nikita-3381", spin_jload_is_locked(node));
+	assert_spin_locked(&(node->guard));
+	assert_spin_locked(&(node->load));
 
 	tree = jnode_get_tree(node);
 
@@ -555,10 +556,10 @@ eflush_add(jnode * node, reiser4_block_n
 	ef->hadatom = (node->atom != NULL);
 	ef->incatom = 0;
 	jref(node);
-	spin_lock_eflush(tree->super);
+	spin_lock(&(get_super_private(tree->super)->eflush_guard));
 	ef_hash_insert(get_jnode_enhash(node), ef);
 	ON_DEBUG(++get_super_private(tree->super)->eflushed);
-	spin_unlock_eflush(tree->super);
+	spin_unlock(&(get_super_private(tree->super)->eflush_guard));
 
 	if (jnode_is_unformatted(node)) {
 		struct inode *inode;
@@ -578,7 +579,7 @@ eflush_add(jnode * node, reiser4_block_n
 	}
 
 	/* FIXME: do we need it here, if eflush add/del are protected by page lock? */
-	UNLOCK_JLOAD(node);
+	spin_unlock(&(node->load));
 
 	/*
 	 * jnode_get_atom() can possibly release the jnode spin lock. This
@@ -594,30 +595,30 @@ eflush_add(jnode * node, reiser4_block_n
 		if (atom != NULL) {
 			++atom->flushed;
 			ef->incatom = 1;
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		}
 	}
 
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 	return 0;
 }
 
 /* Arrghh... cast to keep hash table code happy. */
 #define C(node) ((jnode *const *)&(node))
 
-reiser4_block_nr *eflush_get(const jnode * node)
+reiser4_block_nr *eflush_get(jnode * node)
 {
 	eflush_node_t *ef;
 	reiser4_tree *tree;
 
 	assert("nikita-2740", node != NULL);
 	assert("nikita-2741", JF_ISSET(node, JNODE_EFLUSH));
-	assert("nikita-2767", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	tree = jnode_get_tree(node);
-	spin_lock_eflush(tree->super);
+	spin_lock(&(get_super_private(tree->super)->eflush_guard));
 	ef = ef_hash_find(get_jnode_enhash(node), C(node));
-	spin_unlock_eflush(tree->super);
+	spin_unlock(&(get_super_private(tree->super)->eflush_guard));
 
 	assert("nikita-2742", ef != NULL);
 	return &ef->blocknr;
@@ -633,25 +634,25 @@ void eflush_free(jnode * node)
 	struct inode *inode = NULL;
 	reiser4_block_nr blk;
 
-	assert("zam-1026", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	table = get_jnode_enhash(node);
 	tree = jnode_get_tree(node);
 
-	spin_lock_eflush(tree->super);
+	spin_lock(&(get_super_private(tree->super)->eflush_guard));
 	ef = ef_hash_find(table, C(node));
 	BUG_ON(ef == NULL);
 	assert("nikita-2745", ef != NULL);
 	blk = ef->blocknr;
 	ef_hash_remove(table, ef);
 	ON_DEBUG(--get_super_private(tree->super)->eflushed);
-	spin_unlock_eflush(tree->super);
+	spin_unlock(&(get_super_private(tree->super)->eflush_guard));
 
 	if (ef->incatom) {
 		atom = jnode_get_atom(node);
 		assert("nikita-3311", atom != NULL);
 		--atom->flushed;
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 
 	assert("vs-1215", JF_ISSET(node, JNODE_EFLUSH));
@@ -675,7 +676,7 @@ void eflush_free(jnode * node)
 		       jnode_tree_by_reiser4_inode(info)->rnode != NULL);
 		dec_unfm_ef();
 	}
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 
 #if REISER4_DEBUG
 	if (blocknr_is_fake(jnode_get_block(node)))
@@ -692,7 +693,7 @@ void eflush_free(jnode * node)
 
 	kmem_cache_free(eflush_slab, ef);
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 }
 
 void eflush_del(jnode * node, int page_locked)
@@ -700,7 +701,7 @@ void eflush_del(jnode * node, int page_l
 	struct page *page;
 
 	assert("nikita-2743", node != NULL);
-	assert("nikita-2770", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	if (!JF_ISSET(node, JNODE_EFLUSH))
 		return;
@@ -710,9 +711,9 @@ void eflush_del(jnode * node, int page_l
 		assert("nikita-2806", page != NULL);
 		assert("nikita-2807", PageLocked(page));
 	} else {
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		page = jnode_get_page_locked(node, GFP_NOFS);
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		if (page == NULL) {
 			warning("zam-1025",
 				"eflush_del failed to get page back\n");
@@ -724,11 +725,11 @@ void eflush_del(jnode * node, int page_l
 	}
 
 	if (PageWriteback(page)) {
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		page_cache_get(page);
 		reiser4_wait_page_writeback(page);
 		page_cache_release(page);
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		if (unlikely(!JF_ISSET(node, JNODE_EFLUSH)))
 			/* race: some other thread unflushed jnode. */
 			goto out;
@@ -796,13 +797,13 @@ static int ef_free_block(jnode * node,
 		if (ef->reserve) {
 			/* further, transfer block from grabbed into flush
 			 * reserved space. */
-			LOCK_JNODE(node);
+			spin_lock_jnode(node);
 			atom = jnode_get_atom(node);
 			assert("nikita-2785", atom != NULL);
 			grabbed2flush_reserved_nolock(atom, 1);
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 			JF_SET(node, JNODE_FLUSH_RESERVED);
-			UNLOCK_JNODE(node);
+			spin_unlock_jnode(node);
 		} else {
 			reiser4_context *ctx = get_current_context();
 			grabbed2free(ctx, get_super_private(ctx->super),
@@ -822,8 +823,8 @@ ef_prepare(jnode * node, reiser4_block_n
 	assert("nikita-2760", node != NULL);
 	assert("nikita-2761", blk != NULL);
 	assert("nikita-2762", efnode != NULL);
-	assert("nikita-2763", spin_jnode_is_locked(node));
-	assert("nikita-3387", spin_jload_is_locked(node));
+	assert_spin_locked(&(node->guard));
+	assert_spin_locked(&(node->load));
 
 	hint->blk = EFLUSH_START_BLOCK;
 	hint->max_dist = 0;
@@ -846,10 +847,10 @@ ef_prepare(jnode * node, reiser4_block_n
 					usedreserve = 1;
 					flush_reserved2grabbed(atom, 1);
 					JF_CLR(node, JNODE_FLUSH_RESERVED);
-					UNLOCK_ATOM(atom);
+					spin_unlock_atom(atom);
 					break;
 				} else
-					UNLOCK_ATOM(atom);
+					spin_unlock_atom(atom);
 			}
 			/*
 			 * fall through.
@@ -873,8 +874,8 @@ ef_prepare(jnode * node, reiser4_block_n
 	 * XXX protect @node from being concurrently eflushed. Otherwise, there
 	 * is a danger of underflowing block space
 	 */
-	UNLOCK_JLOAD(node);
-	UNLOCK_JNODE(node);
+	spin_unlock(&(node->load));
+	spin_unlock_jnode(node);
 
 	*efnode = ef_alloc(GFP_NOFS | __GFP_HIGH);
 	if (*efnode == NULL) {
@@ -890,8 +891,8 @@ ef_prepare(jnode * node, reiser4_block_n
 	if (result)
 		kmem_cache_free(eflush_slab, *efnode);
       out:
-	LOCK_JNODE(node);
-	LOCK_JLOAD(node);
+	spin_lock_jnode(node);
+	spin_lock(&(node->load));
 	return result;
 }
 
diff -puN fs/reiser4/emergency_flush.h~reiser4-spinlock-cleanup fs/reiser4/emergency_flush.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/emergency_flush.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.496974250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/emergency_flush.h	2005-10-20 14:01:52.760990750 +0400
@@ -34,7 +34,7 @@ extern void done_eflush(void);
 extern int eflush_init_at(struct super_block *super);
 extern void eflush_done_at(struct super_block *super);
 
-extern reiser4_block_nr *eflush_get(const jnode * node);
+extern reiser4_block_nr *eflush_get(jnode * node);
 extern void eflush_del(jnode * node, int page_locked);
 extern void eflush_free(jnode *);
 
diff -puN fs/reiser4/entd.c~reiser4-spinlock-cleanup fs/reiser4/entd.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/entd.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.500974500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/entd.c	2005-10-20 14:01:52.760990750 +0400
@@ -340,7 +340,8 @@ int write_page_by_ent(struct page *page,
 		/* entd is not running. */
 		return 0;
 
-	phantom = jprivate(page) == NULL || !jnode_check_dirty(jprivate(page));
+	phantom = jprivate(page) == NULL || !JF_ISSET(jprivate(page), JNODE_DIRTY);
+
 #if 1
 	BUG_ON(page->mapping == NULL);
 	/* re-dirty page */
diff -puN fs/reiser4/eottl.c~reiser4-spinlock-cleanup fs/reiser4/eottl.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/eottl.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.504974750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/eottl.c	2005-10-20 14:01:52.760990750 +0400
@@ -142,8 +142,9 @@ is_next_item_internal(coord_t *coord, co
 	 * concurrent thread could get their first and insert item with a key
 	 * smaller than @key
 	 */
-	result = UNDER_RW(dk, current_tree, read,
-			  keycmp(key, znode_get_rd_key(coord->node)));
+	read_lock_dk(current_tree);
+	result = keycmp(key, znode_get_rd_key(coord->node));
+	read_unlock_dk(current_tree);
 	assert("vs-6", result != EQUAL_TO);
 	if (result == GREATER_THAN)
 		return 2;
@@ -170,8 +171,9 @@ is_next_item_internal(coord_t *coord, co
 	 * check whether concurrent thread managed to insert item with a key
 	 * smaller than @key
 	 */
-	result = UNDER_RW(dk, current_tree, read,
-			  keycmp(key, znode_get_ld_key(rn.node)));
+	read_lock_dk(current_tree);
+	result = keycmp(key, znode_get_ld_key(rn.node));
+	read_unlock_dk(current_tree);
 	assert("vs-6", result != EQUAL_TO);
 	if (result == GREATER_THAN) {
 		done_lh(&rn);
@@ -224,19 +226,18 @@ static reiser4_key *rd_key(const coord_t
 	assert("nikita-2281", coord_is_between_items(coord));
 	coord_dup(&dup, coord);
 
-	RLOCK_DK(current_tree);
-
 	if (coord_set_to_right(&dup) == 0)
 		/* next item is in this node. Return its key. */
 		unit_key_by_coord(&dup, key);
-	else
+	else {
 		/*
 		 * next item either does not exist or is in right
 		 * neighbor. Return znode's right delimiting key.
 		 */
+		read_lock_dk(current_tree);
 		*key = *znode_get_rd_key(coord->node);
-
-	RUNLOCK_DK(current_tree);
+		read_unlock_dk(current_tree);
+	}
 	return key;
 }
 
@@ -250,7 +251,6 @@ static reiser4_key *rd_key(const coord_t
  * Inserts empty leaf node between two extent items. It is necessary when we
  * have to insert an item on leaf level between two extents (items on the twig
  * level).
- *
  */
 static int
 add_empty_leaf(coord_t *insert_coord, lock_handle *lh,
@@ -272,12 +272,12 @@ add_empty_leaf(coord_t *insert_coord, lo
 		return PTR_ERR(node);
 
 	/* setup delimiting keys for node being inserted */
-	WLOCK_DK(tree);
+	write_lock_dk(tree);
 	znode_set_ld_key(node, key);
 	znode_set_rd_key(node, rdkey);
 	ON_DEBUG(node->creator = current);
 	ON_DEBUG(node->first_key = *key);
-	WUNLOCK_DK(tree);
+	write_unlock_dk(tree);
 
 	ZF_SET(node, JNODE_ORPHAN);
 
@@ -339,13 +339,13 @@ add_empty_leaf(coord_t *insert_coord, lo
 					 * neighbor was not known. Do it
 					 * here
 					 */
-					WLOCK_TREE(tree);
+					write_lock_tree(tree);
 					assert("nikita-3312",
 					       znode_is_right_connected(node));
 					assert("nikita-2984",
 					       node->right == NULL);
 					ZF_CLR(node, JNODE_RIGHT_CONNECTED);
-					WUNLOCK_TREE(tree);
+					write_unlock_tree(tree);
 					result =
 					    connect_znode(insert_coord, node);
 					if (result == 0)
@@ -359,7 +359,6 @@ add_empty_leaf(coord_t *insert_coord, lo
 				} else {
 					warning("nikita-3136",
 						"Cannot lock child");
-					print_znode("child", node);
 				}
 				done_lh(&local_lh);
 				zrelse(node);
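
flush.c also shows the other substitution that recurs through the rest of
the patch: the jnode_is_dirty()/znode_check_dirty() predicates are replaced
by open-coded tests of the dirty bit, e.g. (copied from the
neighbor_in_slum() hunk below):

	/* before */
	if (znode_check_dirty(lock->node))
		return 0;

	/* after: test the flag directly on the underlying jnode */
	if (JF_ISSET(ZJNODE(lock->node), JNODE_DIRTY))
		return 0;
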
diff -puN fs/reiser4/flush.c~reiser4-spinlock-cleanup fs/reiser4/flush.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/flush.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.508975000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/flush.c	2005-10-20 14:01:52.764991000 +0400
@@ -919,13 +919,13 @@ static jnode * find_flush_start_jnode(
 	jnode * node;
 
 	if (start != NULL) {
-		LOCK_JNODE(start);
-		if (jnode_is_dirty(start) && !JF_ISSET(start, JNODE_OVRWR)) {
+		spin_lock_jnode(start);
+		if (JF_ISSET(start, JNODE_DIRTY) && !JF_ISSET(start, JNODE_OVRWR)) {
 			assert("zam-1056", start->atom == atom);
 			node = start;
 			goto enter;
 		}
-		UNLOCK_JNODE(start);
+		spin_unlock_jnode(start);
 	}
 	/*
 	 * In this loop we process all already prepped (RELOC or OVRWR) and dirtied again
@@ -933,9 +933,9 @@ static jnode * find_flush_start_jnode(
 	 * not prepped node found in the atom dirty lists.
 	 */
 	while ((node = find_first_dirty_jnode(atom, flags))) {
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 	enter:
-		assert("zam-881", jnode_is_dirty(node));
+		assert("zam-881", JF_ISSET(node, JNODE_DIRTY));
 		assert("zam-898", !JF_ISSET(node, JNODE_OVRWR));
 
 		if (JF_ISSET(node, JNODE_WRITEBACK)) {
@@ -966,7 +966,7 @@ static jnode * find_flush_start_jnode(
 		} else
 			break;
 
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 	}
 	return node;
 }
@@ -986,7 +986,7 @@ flush_current_atom(int flags, long nr_to
 	int ret;
 
 	assert("zam-889", atom != NULL && *atom != NULL);
-	assert("zam-890", spin_atom_is_locked(*atom));
+	assert_spin_locked(&((*atom)->alock));
 	assert("zam-892", get_current_context()->trans->atom == *atom);
 
 	nr_to_write = LONG_MAX;
@@ -999,7 +999,7 @@ flush_current_atom(int flags, long nr_to
 	if (ret)
 		return ret;
 
-	assert("zam-891", spin_atom_is_locked(*atom));
+	assert_spin_locked(&((*atom)->alock));
 
 	/* parallel flushers limit */
 	if (sinfo->tmgr.atom_max_flushers != 0) {
@@ -1029,12 +1029,12 @@ flush_current_atom(int flags, long nr_to
 			writeout_mode_disable();
 			return 0;
 		}
-		UNLOCK_ATOM(*atom);
+		spin_unlock_atom(*atom);
 	} else {
 		jref(node);
 		BUG_ON((*atom)->super != node->tree->super);
-		UNLOCK_ATOM(*atom);
-		UNLOCK_JNODE(node);
+		spin_unlock_atom(*atom);
+		spin_unlock_jnode(node);
 		BUG_ON(nr_to_write == 0);
 		ret = jnode_flush(node, nr_to_write, nr_submitted, fq, flags);
 		jput(node);
@@ -1048,7 +1048,7 @@ flush_current_atom(int flags, long nr_to
 	(*atom)->nr_flushers--;
 	fq_put_nolock(fq);
 	atom_send_event(*atom);
-	UNLOCK_ATOM(*atom);
+	spin_unlock_atom(*atom);
 
 	writeout_mode_disable();
 
@@ -1151,7 +1151,7 @@ reverse_relocate_check_dirty_parent(jnod
 {
 	int ret;
 
-	if (!znode_check_dirty(parent_coord->node)) {
+	if (!JF_ISSET(ZJNODE(parent_coord->node), JNODE_DIRTY)) {
 
 		ret = reverse_relocate_test(node, parent_coord, pos);
 		if (ret < 0) {
@@ -2141,7 +2141,7 @@ static int handle_pos_end_of_twig(flush_
 		goto out;
 
 	/* right twig could be not dirty */
-	if (znode_check_dirty(right_lock.node)) {
+	if (JF_ISSET(ZJNODE(right_lock.node), JNODE_DIRTY)) {
 		/* If right twig node is dirty we always attempt to squeeze it
 		 * content to the left... */
 	      became_dirty:
@@ -2196,7 +2196,7 @@ static int handle_pos_end_of_twig(flush_
 								&at_right, pos);
 			if (ret)
 				goto out;
-			if (znode_check_dirty(right_lock.node))
+			if (JF_ISSET(ZJNODE(right_lock.node), JNODE_DIRTY))
 				goto became_dirty;
 		}
 	}
@@ -2384,7 +2384,7 @@ static void update_ldkey(znode * node)
 {
 	reiser4_key ldkey;
 
-	assert("vs-1630", rw_dk_is_write_locked(znode_get_tree(node)));
+	assert_rw_write_locked(&(znode_get_tree(node)->dk_lock));
 	if (node_is_empty(node))
 		return;
 
@@ -2396,9 +2396,9 @@ static void update_ldkey(znode * node)
    and @right correspondingly and sets right delimiting key of @left to first key of @right */
 static void update_znode_dkeys(znode * left, znode * right)
 {
-	assert("nikita-1470", rw_dk_is_write_locked(znode_get_tree(right)));
-	assert("vs-1629", znode_is_write_locked(left)
-	       && znode_is_write_locked(right));
+	assert_rw_write_locked(&(znode_get_tree(right)->dk_lock));
+	assert("vs-1629", (znode_is_write_locked(left) &&
+			   znode_is_write_locked(right)));
 
 	/* we need to update left delimiting of left if it was empty before shift */
 	update_ldkey(left);
@@ -2442,7 +2442,8 @@ static int squeeze_right_non_twig(znode 
 
 	assert("nikita-2246", znode_get_level(left) == znode_get_level(right));
 
-	if (!znode_is_dirty(left) || !znode_is_dirty(right))
+	if (!JF_ISSET(ZJNODE(left), JNODE_DIRTY) ||
+	    !JF_ISSET(ZJNODE(right), JNODE_DIRTY))
 		return SQUEEZE_TARGET_FULL;
 
 	pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo));
@@ -2465,8 +2466,10 @@ static int squeeze_right_non_twig(znode 
 		   node's operation. But it can not be done there. Nobody
 		   remembers why, though */
 		tree = znode_get_tree(left);
-		UNDER_RW_VOID(dk, tree, write, update_znode_dkeys(left, right));
-
+		write_lock_dk(tree);
+		update_znode_dkeys(left, right);
+		write_unlock_dk(tree);
+
 		/* Carry is called to update delimiting key and, maybe, to remove empty
 		   node. */
 		grabbed = get_current_context()->grabbed_blocks;
@@ -2486,6 +2489,18 @@ static int squeeze_right_non_twig(znode 
 	return ret;
 }
 
+#if REISER4_DEBUG
+static int sibling_link_is_ok(const znode *left, const znode *right)
+{
+	int result;
+
+	read_lock_tree(znode_get_tree(left));
+	result = (left->right == right && left == right->left);
+	read_unlock_tree(znode_get_tree(left));
+	return result;
+}
+#endif
+
 /* Shift first unit of first item if it is an internal one.  Return
    SQUEEZE_TARGET_FULL if it fails to shift an item, otherwise return
    SUBTREE_MOVED. */
@@ -2501,15 +2516,12 @@ static int shift_one_internal_unit(znode
 	assert("nikita-2247", znode_get_level(left) == znode_get_level(right));
 	assert("nikita-2435", znode_is_write_locked(left));
 	assert("nikita-2436", znode_is_write_locked(right));
-	assert("nikita-2434",
-	       UNDER_RW(tree, znode_get_tree(left), read,
-			left->right == right));
-
-	pool =
-	    init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo) + sizeof(*coord) +
-			    sizeof(*info)
+	assert("nikita-2434", sibling_link_is_ok(left, right));
+
+	pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo) +
+			       sizeof(*coord) + sizeof(*info)
 #if REISER4_DEBUG
-			    + sizeof(*coord) + 2 * sizeof(reiser4_key)
+			       + sizeof(*coord) + 2 * sizeof(reiser4_key)
 #endif
 	    );
 	if (IS_ERR(pool))
@@ -2565,7 +2577,9 @@ static int shift_one_internal_unit(znode
 		znode_make_dirty(left);
 		znode_make_dirty(right);
 		tree = znode_get_tree(left);
-		UNDER_RW_VOID(dk, tree, write, update_znode_dkeys(left, right));
+		write_lock_dk(tree);
+		update_znode_dkeys(left, right);
+		write_unlock_dk(tree);
 
 		/* reserve space for delimiting keys after shifting */
 		grabbed = get_current_context()->grabbed_blocks;
@@ -2808,7 +2822,9 @@ allocate_znode_update(znode * node, cons
 
 		uber = uber_lock.node;
 
-		UNDER_RW_VOID(tree, tree, write, tree->root_block = blk);
+		write_lock_tree(tree);
+		tree->root_block = blk;
+		write_unlock_tree(tree);
 
 		znode_make_dirty(uber);
 	}
@@ -2877,13 +2893,13 @@ jnode_lock_parent_coord(jnode * node,
 		 * because coord_by_key() will just fail to find appropriate
 		 * extent.
 		 */
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) {
 			jnode_build_key(node, &key);
 			ret = 0;
 		} else
 			ret = RETERR(-ENOENT);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 
 		if (ret != 0)
 			return ret;
@@ -2901,10 +2917,8 @@ jnode_lock_parent_coord(jnode * node,
 			assert("edward-1038",
 			       ergo(jnode_is_cluster_page(node),
 				    JF_ISSET(node, JNODE_HEARD_BANSHEE)));
-			if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) {
+			if (!JF_ISSET(node, JNODE_HEARD_BANSHEE))
 				warning("nikita-3177", "Parent not found");
-				print_jnode("node", node);
-			}
 			return ret;
 		case CBK_COORD_FOUND:
 			if (coord->between != AT_UNIT) {
@@ -2914,7 +2928,6 @@ jnode_lock_parent_coord(jnode * node,
 					warning("nikita-3178",
 						"Found but not happy: %i",
 						coord->between);
-					print_jnode("node", node);
 				}
 				return RETERR(-ENOENT);
 			}
@@ -3004,7 +3017,7 @@ static int neighbor_in_slum(znode * node
 	if (!check_dirty)
 		return 0;
 	/* Check dirty bit of locked znode, no races here */
-	if (znode_check_dirty(lock->node))
+	if (JF_ISSET(ZJNODE(lock->node), JNODE_DIRTY))
 		return 0;
 
 	done_lh(lock);
@@ -3015,13 +3028,17 @@ static int neighbor_in_slum(znode * node
    write-locked (for squeezing) so no tree lock is needed. */
 static int znode_same_parents(znode * a, znode * b)
 {
+	int result;
+
 	assert("jmacd-7011", znode_is_write_locked(a));
 	assert("jmacd-7012", znode_is_write_locked(b));
 
 	/* We lock the whole tree for this check.... I really don't like whole tree
 	 * locks... -Hans */
-	return UNDER_RW(tree, znode_get_tree(a), read,
-			(znode_parent(a) == znode_parent(b)));
+	read_lock_tree(znode_get_tree(a));
+	result = (znode_parent(a) == znode_parent(b));
+	read_unlock_tree(znode_get_tree(a));
+	return result;
 }
 
 /* FLUSH SCAN */
@@ -3333,7 +3350,7 @@ static int scan_formatted(flush_scan * s
 		}
 
 		/* Lock the tree, check-for and reference the next sibling. */
-		RLOCK_TREE(znode_get_tree(node));
+		read_lock_tree(znode_get_tree(node));
 
 		/* It may be that a node is inserted or removed between a node and its
 		   left sibling while the tree lock is released, but the flush-scan count
@@ -3344,7 +3361,7 @@ static int scan_formatted(flush_scan * s
 			zref(neighbor);
 		}
 
-		RUNLOCK_TREE(znode_get_tree(node));
+		read_unlock_tree(znode_get_tree(node));
 
 		/* If neighbor is NULL at the leaf level, need to check for an unformatted
 		   sibling using the parent--break in any case. */
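
The flush.c hunks above all follow the patch's central transformation: the
expression-style locking macros from spin_macros.h (UNDER_SPIN, UNDER_RW,
UNDER_RW_VOID and the generated LOCK_*/RLOCK_*/WLOCK_* wrappers) are
open-coded as explicit lock/operation/unlock sequences over stock kernel
primitives.  A minimal before/after sketch using names from the hunks above
(write_lock_dk/write_unlock_dk are assumed to be the dk-rwlock helpers
defined elsewhere in reiser4):

	/* before: the operation is buried in a macro argument */
	UNDER_RW_VOID(dk, tree, write, update_znode_dkeys(left, right));

	/* after: the lock scope is explicit and greppable */
	write_lock_dk(tree);
	update_znode_dkeys(left, right);
	write_unlock_dk(tree);

The explicit form also lets debug builds check the raw lock word with
assert_spin_locked()/assert_rw_write_locked() instead of the per-lock
predicates that SPIN_LOCK_FUNCTIONS used to generate.
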
diff -puN fs/reiser4/flush_queue.c~reiser4-spinlock-cleanup fs/reiser4/flush_queue.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/flush_queue.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.512975250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/flush_queue.c	2005-10-20 14:01:52.768991250 +0400
@@ -22,12 +22,6 @@
    kept on the flush queue until memory pressure or atom commit asks
    flush queues to write some or all from their jnodes. */
 
-#if REISER4_DEBUG
-#   define spin_ordering_pred_fq(fq)  (1)
-#endif
-
-SPIN_LOCK_FUNCTIONS(fq, flush_queue_t, guard);
-
 /*
    LOCKING:
 
@@ -56,13 +50,13 @@ SPIN_LOCK_FUNCTIONS(fq, flush_queue_t, g
 #define mark_fq_ready(fq)      do { (fq)->state &= ~FQ_IN_USE;   } while (0)
 
 /* get lock on atom from locked flush queue object */
-static txn_atom *atom_get_locked_by_fq(flush_queue_t * fq)
+static txn_atom *atom_locked_by_fq_nolock(flush_queue_t * fq)
 {
 	/* This code is similar to jnode_get_atom(), look at it for the
 	 * explanation. */
 	txn_atom *atom;
 
-	assert("zam-729", spin_fq_is_locked(fq));
+	assert_spin_locked(&(fq->guard));
 
 	while (1) {
 		atom = fq->atom;
@@ -73,18 +67,18 @@ static txn_atom *atom_get_locked_by_fq(f
 			break;
 
 		atomic_inc(&atom->refcount);
-		spin_unlock_fq(fq);
-		LOCK_ATOM(atom);
-		spin_lock_fq(fq);
+		spin_unlock(&(fq->guard));
+		spin_lock_atom(atom);
+		spin_lock(&(fq->guard));
 
 		if (fq->atom == atom) {
 			atomic_dec(&atom->refcount);
 			break;
 		}
 
-		spin_unlock_fq(fq);
+		spin_unlock(&(fq->guard));
 		atom_dec_and_unlock(atom);
-		spin_lock_fq(fq);
+		spin_lock(&(fq->guard));
 	}
 
 	return atom;
@@ -92,7 +86,12 @@ static txn_atom *atom_get_locked_by_fq(f
 
 txn_atom *atom_locked_by_fq(flush_queue_t * fq)
 {
-	return UNDER_SPIN(fq, fq, atom_get_locked_by_fq(fq));
+	txn_atom *atom;
+
+	spin_lock(&(fq->guard));
+	atom = atom_locked_by_fq_nolock(fq);
+	spin_unlock(&(fq->guard));
+	return atom;
 }
 
 static void init_fq(flush_queue_t * fq)
@@ -104,7 +103,7 @@ static void init_fq(flush_queue_t * fq)
 	INIT_LIST_HEAD(ATOM_FQ_LIST(fq));
 
 	sema_init(&fq->io_sem, 0);
-	spin_fq_init(fq);
+	spin_lock_init(&fq->guard);
 }
 
 /* slab for flush queues */
@@ -164,7 +163,7 @@ static void count_dequeued_node(flush_qu
 /* attach flush queue object to the atom */
 static void attach_fq(txn_atom *atom, flush_queue_t *fq)
 {
-	assert("zam-718", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	list_add(&fq->alink, &atom->flush_queues);
 	fq->atom = atom;
 	ON_DEBUG(atom->nr_flush_queues++);
@@ -172,14 +171,14 @@ static void attach_fq(txn_atom *atom, fl
 
 static void detach_fq(flush_queue_t * fq)
 {
-	assert("zam-731", spin_atom_is_locked(fq->atom));
+	assert_spin_locked(&(fq->atom->alock));
 
-	spin_lock_fq(fq);
+	spin_lock(&(fq->guard));
 	list_del_init(&fq->alink);
 	assert("vs-1456", fq->atom->nr_flush_queues > 0);
 	ON_DEBUG(fq->atom->nr_flush_queues--);
 	fq->atom = NULL;
-	spin_unlock_fq(fq);
+	spin_unlock(&(fq->guard));
 }
 
 /* destroy flush queue object */
@@ -202,14 +201,14 @@ void mark_jnode_queued(flush_queue_t * f
    spin-locked. */
 void queue_jnode(flush_queue_t * fq, jnode * node)
 {
-	assert("zam-711", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 	assert("zam-713", node->atom != NULL);
-	assert("zam-712", spin_atom_is_locked(node->atom));
-	assert("zam-714", jnode_is_dirty(node));
+	assert_spin_locked(&(node->atom->alock));
 	assert("zam-716", fq->atom != NULL);
 	assert("zam-717", fq->atom == node->atom);
 	assert("zam-907", fq_in_use(fq));
 
+	assert("zam-714", JF_ISSET(node, JNODE_DIRTY));
 	assert("zam-826", JF_ISSET(node, JNODE_RELOC));
 	assert("vs-1481", !JF_ISSET(node, JNODE_FLUSH_QUEUED));
 	assert("vs-1481", NODE_LIST(node) != FQ_LIST);
@@ -226,14 +225,14 @@ void queue_jnode(flush_queue_t * fq, jno
 static int wait_io(flush_queue_t * fq, int *nr_io_errors)
 {
 	assert("zam-738", fq->atom != NULL);
-	assert("zam-739", spin_atom_is_locked(fq->atom));
+	assert_spin_locked(&(fq->atom->alock));
 	assert("zam-736", fq_in_use(fq));
 	assert("zam-911", list_empty_careful(ATOM_FQ_LIST(fq)));
 
 	if (atomic_read(&fq->nr_submitted) != 0) {
 		struct super_block *super;
 
-		UNLOCK_ATOM(fq->atom);
+		spin_unlock_atom(fq->atom);
 
 		assert("nikita-3013", schedulable());
 
@@ -262,7 +261,7 @@ static int finish_fq(flush_queue_t * fq,
 	txn_atom *atom = fq->atom;
 
 	assert("zam-801", atom != NULL);
-	assert("zam-744", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	assert("zam-762", fq_in_use(fq));
 
 	ret = wait_io(fq, nr_io_errors);
@@ -283,7 +282,7 @@ static int finish_all_fq(txn_atom * atom
 {
 	flush_queue_t *fq;
 
-	assert("zam-730", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	if (list_empty_careful(&atom->flush_queues))
 		return 0;
@@ -305,7 +304,7 @@ static int finish_all_fq(txn_atom * atom
 				return ret;
 			}
 
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 
 			return -E_REPEAT;
 		}
@@ -336,9 +335,9 @@ int current_atom_finish_all_fq(void)
 	   -EBUSY are two return codes when atom remains locked after
 	   finish_all_fq */
 	if (!ret)
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 
-	assert("nikita-2696", spin_atom_is_not_locked(atom));
+	assert_spin_not_locked(&(atom->alock));
 
 	if (ret)
 		return ret;
@@ -356,9 +355,9 @@ scan_fq_and_update_atom_ref(struct list_
 	jnode *cur;
 
 	list_for_each_entry(cur, list, capture_link) {
-		LOCK_JNODE(cur);
+		spin_lock_jnode(cur);
 		cur->atom = atom;
-		UNLOCK_JNODE(cur);
+		spin_unlock_jnode(cur);
 	}
 }
 
@@ -367,14 +366,14 @@ void fuse_fq(txn_atom *to, txn_atom *fro
 {
 	flush_queue_t *fq;
 
-	assert("zam-720", spin_atom_is_locked(to));
-	assert("zam-721", spin_atom_is_locked(from));
+	assert_spin_locked(&(to->alock));
+	assert_spin_locked(&(from->alock));
 
 	list_for_each_entry(fq, &from->flush_queues, alink) {
 		scan_fq_and_update_atom_ref(ATOM_FQ_LIST(fq), to);
-		spin_lock_fq(fq);
+		spin_lock(&(fq->guard));
 		fq->atom = to;
-		spin_unlock_fq(fq);
+		spin_unlock(&(fq->guard));
 	}
 
 	list_splice_init(&from->flush_queues, to->flush_queues.prev);
@@ -467,7 +466,7 @@ static void release_prepped_list(flush_q
 	txn_atom *atom;
 
 	assert("zam-904", fq_in_use(fq));
-	atom = UNDER_SPIN(fq, fq, atom_get_locked_by_fq(fq));
+	atom = atom_locked_by_fq(fq);
 
 	while (!list_empty(ATOM_FQ_LIST(fq))) {
 		jnode *cur;
@@ -476,7 +475,7 @@ static void release_prepped_list(flush_q
 		list_del_init(&cur->capture_link);
 
 		count_dequeued_node(fq);
-		LOCK_JNODE(cur);
+		spin_lock_jnode(cur);
 		assert("nikita-3154", !JF_ISSET(cur, JNODE_OVRWR));
 		assert("nikita-3154", JF_ISSET(cur, JNODE_RELOC));
 		assert("nikita-3154", JF_ISSET(cur, JNODE_FLUSH_QUEUED));
@@ -493,13 +492,13 @@ static void release_prepped_list(flush_q
 					     CLEAN_LIST, 1));
 		}
 
-		UNLOCK_JNODE(cur);
+		spin_unlock_jnode(cur);
 	}
 
 	if (--atom->nr_running_queues == 0)
 		atom_send_event(atom);
 
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 /* Submit write requests for nodes on the already filled flush queue @fq.
@@ -513,7 +512,7 @@ int write_fq(flush_queue_t * fq, long *n
 	txn_atom *atom;
 
 	while (1) {
-		atom = UNDER_SPIN(fq, fq, atom_get_locked_by_fq(fq));
+		atom = atom_locked_by_fq(fq);
 		assert("zam-924", atom);
 		/* do not write fq in parallel. */
 		if (atom->nr_running_queues == 0
@@ -523,7 +522,7 @@ int write_fq(flush_queue_t * fq, long *n
 	}
 
 	atom->nr_running_queues++;
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	ret = write_jnode_list(ATOM_FQ_LIST(fq), fq, nr_submitted, flags);
 	release_prepped_list(fq);
@@ -542,17 +541,17 @@ static int fq_by_atom_gfp(txn_atom *atom
 {
 	flush_queue_t *fq;
 
-	assert("zam-745", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	fq = list_entry(atom->flush_queues.next, flush_queue_t, alink);
 	while (&atom->flush_queues != &fq->alink) {
-		spin_lock_fq(fq);
+		spin_lock(&(fq->guard));
 
 		if (fq_ready(fq)) {
 			mark_fq_in_use(fq);
 			assert("vs-1246", fq->owner == NULL);
 			ON_DEBUG(fq->owner = current);
-			spin_unlock_fq(fq);
+			spin_unlock(&(fq->guard));
 
 			if (*new_fq)
 				done_fq(*new_fq);
@@ -562,7 +561,7 @@ static int fq_by_atom_gfp(txn_atom *atom
 			return 0;
 		}
 
-		spin_unlock_fq(fq);
+		spin_unlock(&(fq->guard));
 
 		fq = list_entry(fq->alink.next, flush_queue_t, alink);
 	}
@@ -577,7 +576,7 @@ static int fq_by_atom_gfp(txn_atom *atom
 		return 0;
 	}
 
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	*new_fq = create_fq(gfp);
 
@@ -624,16 +623,16 @@ void fq_put(flush_queue_t * fq)
 {
 	txn_atom *atom;
 
-	spin_lock_fq(fq);
-	atom = atom_get_locked_by_fq(fq);
+	spin_lock(&(fq->guard));
+	atom = atom_locked_by_fq_nolock(fq);
 
 	assert("zam-746", atom != NULL);
 
 	fq_put_nolock(fq);
 	atom_send_event(atom);
 
-	spin_unlock_fq(fq);
-	UNLOCK_ATOM(atom);
+	spin_unlock(&(fq->guard));
+	spin_unlock_atom(atom);
 }
 
 /* A part of atom object initialization related to the embedded flush queue
@@ -652,14 +651,14 @@ int fq_by_jnode_gfp(jnode * node, flush_
 	txn_atom *atom;
 	int ret;
 
-	assert("zam-835", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	*fq = NULL;
 
 	while (1) {
 		/* begin with taking lock on atom */
 		atom = jnode_get_atom(node);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 
 		if (atom == NULL) {
 			/* jnode does not point to the atom anymore, it is
@@ -685,7 +684,7 @@ int fq_by_jnode_gfp(jnode * node, flush_
 		}
 
 		/* It is correct to lock atom first, then lock a jnode */
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 
 		if (node->atom == atom)
 			break;	/* Yes! it is our jnode. We got all of them:
@@ -694,15 +693,15 @@ int fq_by_jnode_gfp(jnode * node, flush_
 
 		/* release all locks and allocated objects and restart from
 		 * locked jnode. */
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 
 		fq_put(*fq);
 		fq = NULL;
 
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 
 	      lock_again:
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 	}
 
 	return 0;
@@ -719,11 +718,11 @@ void check_fq(const txn_atom *atom)
 
 	count = 0;
 	list_for_each_entry(fq, &atom->flush_queues, alink) {
-		spin_lock_fq(fq);
+		spin_lock(&(fq->guard));
 		/* calculate number of jnodes on fq' list of prepped jnodes */
 		list_for_each(pos, ATOM_FQ_LIST(fq))
 			count++;
-		spin_unlock_fq(fq);
+		spin_unlock(&(fq->guard));
 	}
 	if (count != atom->fq)
 		warning("", "fq counter %d, real %d\n", atom->fq, count);
diff -puN fs/reiser4/fsdata.c~reiser4-spinlock-cleanup fs/reiser4/fsdata.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/fsdata.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.516975500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/fsdata.c	2005-10-20 14:01:52.772991500 +0400
@@ -16,7 +16,7 @@ static LIST_HEAD(cursor_cache);
 static unsigned long d_cursor_unused = 0;
 
 /* spinlock protecting manipulations with dir_cursor's hash table and lists */
-static spinlock_t d_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t d_lock = SPIN_LOCK_UNLOCKED;
 
 static void kill_cursor(dir_cursor *);
 
@@ -291,6 +291,8 @@ static __u32 cid_counter = 0;
 #define CID_SHIFT (20)
 #define CID_MASK  (0xfffffull)
 
+static void free_file_fsdata_nolock(struct file *);
+
 /**
  * insert_cursor - allocate file_fsdata, insert cursor to tree and hash table
  * @cursor:
@@ -337,7 +339,7 @@ static int insert_cursor(dir_cursor *cur
 				warning("", "file has fsdata already");
 #endif
 			clean_fsdata(file);
-			reiser4_free_file_fsdata(file);
+			free_file_fsdata_nolock(file);
 			file->private_data = fsdata;
 			fsdata->cursor = cursor;
 			spin_unlock_inode(inode);
@@ -554,7 +556,7 @@ int try_to_attach_fsdata(struct file *fi
 			spin_lock_inode(inode);
 			assert("nikita-3556", cursor->fsdata->back == NULL);
 			clean_fsdata(file);
-			reiser4_free_file_fsdata(file);
+			free_file_fsdata_nolock(file);
 			file->private_data = cursor->fsdata;
 			spin_unlock_inode(inode);
 		}
@@ -747,30 +749,39 @@ reiser4_file_fsdata *reiser4_get_file_fs
 }
 
 /**
- * reiser4_free_file_fsdata - detach from struct file and free reiser4_file_fsdata
+ * free_file_fsdata_nolock - detach and free reiser4_file_fsdata
  * @file:
  *
  * Detaches reiser4_file_fsdata from @file, removes reiser4_file_fsdata from
  * readdir list, frees if it is not linked to d_cursor object.
  */
-void reiser4_free_file_fsdata(struct file *file)
+static void free_file_fsdata_nolock(struct file *file)
 {
 	reiser4_file_fsdata *fsdata;
 
-	spin_lock_inode(file->f_dentry->d_inode);
+	assert("", spin_inode_is_locked(file->f_dentry->d_inode));
 	fsdata = file->private_data;
 	if (fsdata != NULL) {
 		list_del_init(&fsdata->dir.linkage);
 		if (fsdata->cursor == NULL)
 			free_fsdata(fsdata);
 	}
-	file->private_data = NULL;
+	file->private_data = NULL;
+}
 
+/**
+ * reiser4_free_file_fsdata - detach from struct file and free reiser4_file_fsdata
+ * @file:
+ *
+ * Takes the inode spinlock and calls free_file_fsdata_nolock() to do the work.
+ */
+void reiser4_free_file_fsdata(struct file *file)
+{
+	spin_lock_inode(file->f_dentry->d_inode);
+	free_file_fsdata_nolock(file);
 	spin_unlock_inode(file->f_dentry->d_inode);
 }
 
-
-
 /*
  * Local variables:
  * c-indentation-style: "K&R"
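
fsdata.c applies the same split to the inode guard: reiser4_free_file_fsdata()
becomes a thin wrapper that takes spin_lock_inode(), and the real work moves
into the static free_file_fsdata_nolock(), which only asserts that the lock
is held.  The motivation is visible in insert_cursor() and
try_to_attach_fsdata() above: both already run under spin_lock_inode() when
they have to drop the old fsdata, so calling the locking wrapper from there
would self-deadlock.  The resulting caller shape, condensed from
insert_cursor():

	spin_lock_inode(inode);
	clean_fsdata(file);
	free_file_fsdata_nolock(file);	/* asserts the inode guard is held */
	file->private_data = fsdata;
	fsdata->cursor = cursor;
	spin_unlock_inode(inode);
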
diff -puN fs/reiser4/fsdata.h~reiser4-spinlock-cleanup fs/reiser4/fsdata.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/fsdata.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.520975750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/fsdata.h	2005-10-20 14:01:52.772991500 +0400
@@ -193,10 +193,6 @@ extern void detach_fsdata(struct file *)
 void dispose_cursors(struct inode *inode);
 void load_cursors(struct inode *inode);
 void kill_cursors(struct inode *inode);
-
-
-
-
 void adjust_dir_file(struct inode *dir, const struct dentry *de, int offset, int adj);
 
 /*
@@ -208,6 +204,9 @@ struct d_cursor_info {
 	struct radix_tree_root tree;
 };
 
+/* spinlock protecting readdir cursors */
+extern spinlock_t d_lock;
+
 /* __REISER4_FSDATA_H__ */
 #endif
 
diff -puN fs/reiser4/init_super.c~reiser4-spinlock-cleanup fs/reiser4/init_super.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/init_super.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.524976000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/init_super.c	2005-10-20 14:01:52.772991500 +0400
@@ -31,9 +31,9 @@ int init_fs_info(struct super_block *sup
 
 	sema_init(&sbinfo->delete_sema, 1);
 	sema_init(&sbinfo->flush_sema, 1);
-	spin_super_init(sbinfo);
+	spin_lock_init(&(sbinfo->guard));
 #if REISER4_USE_EFLUSH
-	spin_super_eflush_init(sbinfo);
+	spin_lock_init(&(sbinfo->eflush_guard));
 #endif
 	/*  initialize per-super-block d_cursor resources */
 	init_super_d_info(super);
diff -puN fs/reiser4/inode.c~reiser4-spinlock-cleanup fs/reiser4/inode.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/inode.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.528976250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/inode.c	2005-10-20 14:01:52.776991750 +0400
@@ -603,8 +603,7 @@ void inode_set_extension(struct inode *i
 
 	assert("nikita-2716", inode != NULL);
 	assert("nikita-2717", ext < LAST_SD_EXTENSION);
-	assert("nikita-3491",
-	       spin_inode_object_is_locked(reiser4_inode_data(inode)));
+	assert("nikita-3491", spin_inode_is_locked(inode));
 
 	state = reiser4_inode_data(inode);
 	state->extmask |= 1 << ext;
@@ -674,12 +673,10 @@ znode *inode_get_vroot(struct inode *ino
 {
 	reiser4_block_nr blk;
 	znode *result;
-	reiser4_inode *info;
 
-	info = reiser4_inode_data(inode);
-	LOCK_INODE(info);
-	blk = info->vroot;
-	UNLOCK_INODE(info);
+	spin_lock_inode(inode);
+	blk = reiser4_inode_data(inode)->vroot;
+	spin_unlock_inode(inode);
 	if (!disk_addr_eq(&UBER_TREE_ADDR, &blk))
 		result = zlook(tree_by_inode(inode), &blk);
 	else
@@ -687,24 +684,18 @@ znode *inode_get_vroot(struct inode *ino
 	return result;
 }
 
-void inode_set_vroot(struct inode *inode, znode * vroot)
+void inode_set_vroot(struct inode *inode, znode *vroot)
 {
-	reiser4_inode *info;
-
-	info = reiser4_inode_data(inode);
-	LOCK_INODE(info);
-	info->vroot = *znode_get_block(vroot);
-	UNLOCK_INODE(info);
+	spin_lock_inode(inode);
+	reiser4_inode_data(inode)->vroot = *znode_get_block(vroot);
+	spin_unlock_inode(inode);
 }
 
 #if REISER4_DEBUG
 
 void inode_invariant(const struct inode *inode)
 {
-	reiser4_inode *object;
-
-	object = reiser4_inode_data(inode);
-	assert("nikita-3077", spin_inode_object_is_locked(object));
+	assert("nikita-3077", spin_inode_is_locked(inode));
 }
 
 int inode_has_no_jnodes(reiser4_inode * r4_inode)
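
inode_get_vroot() above is a compact example of the snapshot-under-lock
pattern that replaces the old UNDER_* macros: only the reiser4_inode field
access sits inside the guard, while zlook(), which may block, runs after the
unlock.  Condensed:

	spin_lock_inode(inode);
	blk = reiser4_inode_data(inode)->vroot;	/* copy the field out */
	spin_unlock_inode(inode);
	/* blk is now a private copy; blocking work is safe */
	result = zlook(tree_by_inode(inode), &blk);

The copy may be stale by the time zlook() runs, but vroot is only a search
hint, so a stale value is acceptable.
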
diff -puN fs/reiser4/inode.h~reiser4-spinlock-cleanup fs/reiser4/inode.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/inode.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.532976500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/inode.h	2005-10-20 14:01:52.776991750 +0400
@@ -7,7 +7,6 @@
 
 #include "forward.h"
 #include "debug.h"
-#include "spin_macros.h"
 #include "key.h"
 #include "seal.h"
 #include "plugin/plugin.h"
@@ -99,7 +98,7 @@ typedef __u32 oid_hi_t;
 
 struct reiser4_inode {
 	/* spin lock protecting fields of this structure. */
-	reiser4_spin_data guard;
+	spinlock_t guard;
 	/* object plugins */
 	plugin_set *pset;
 	/* plugins set for inheritance */
@@ -252,18 +251,6 @@ static inline struct inode *unix_file_in
 			     p.file_plugin_data.unix_file_info)->vfs_inode;
 }
 
-/* ordering predicate for inode spin lock: only jnode lock can be held */
-#define spin_ordering_pred_inode_object(inode)			\
-	( lock_counters() -> rw_locked_dk == 0 ) &&		\
-	( lock_counters() -> rw_locked_tree == 0 ) &&		\
-	( lock_counters() -> spin_locked_txnh == 0 ) &&		\
-	( lock_counters() -> rw_locked_zlock == 0 ) &&	\
-	( lock_counters() -> spin_locked_jnode == 0 ) &&	\
-	( lock_counters() -> spin_locked_atom == 0 ) &&		\
-	( lock_counters() -> spin_locked_ktxnmgrd == 0 ) &&	\
-	( lock_counters() -> spin_locked_txnmgr == 0 )
-
-SPIN_LOCK_FUNCTIONS(inode_object, reiser4_inode, guard);
 
 extern ino_t oid_to_ino(oid_t oid) __attribute__ ((const));
 extern ino_t oid_to_uino(oid_t oid) __attribute__ ((const));
@@ -277,17 +264,55 @@ extern int inode_has_no_jnodes(reiser4_i
 #define inode_invariant(inode) noop
 #endif
 
-#define spin_lock_inode(inode)			\
-({						\
-	LOCK_INODE(reiser4_inode_data(inode));	\
-	inode_invariant(inode);			\
-})
+static inline int spin_inode_is_locked(const struct inode *inode)
+{
+	assert_spin_locked(&reiser4_inode_data(inode)->guard);
+	return 1;
+}
+
+/**
+ * spin_lock_inode - lock reiser4_inode's embedded spinlock
+ * @inode: inode to lock
+ *
+ * In debug mode it checks that lower priority locks are not held and
+ * increments reiser4_context's lock counters on which lock ordering checking
+ * is based.
+ */
+static inline void spin_lock_inode(struct inode *inode)
+{
+	assert("", LOCK_CNT_NIL(spin_locked));
+	/* check lock ordering */
+	assert_spin_not_locked(&d_lock);
+
+	spin_lock(&reiser4_inode_data(inode)->guard);
+
+	LOCK_CNT_INC(spin_locked_inode);
+	LOCK_CNT_INC(spin_locked);
+
+	inode_invariant(inode);
+}
+
+/**
+ * spin_unlock_inode - unlock reiser4_inode's embedded spinlock
+ * @inode: inode to unlock
+ *
+ * In debug mode it checks that spinlock is held and decrements
+ * reiser4_context's lock counters on which lock ordering checking is based.
+ */
+static inline void spin_unlock_inode(struct inode *inode)
+{
+	assert_spin_locked(&reiser4_inode_data(inode)->guard);
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_inode));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	inode_invariant(inode);
+
+	LOCK_CNT_DEC(spin_locked_inode);
+	LOCK_CNT_DEC(spin_locked);
+
+	spin_unlock(&reiser4_inode_data(inode)->guard);
+}
 
-#define spin_unlock_inode(inode)			\
-({							\
-	inode_invariant(inode);				\
-	UNLOCK_INODE(reiser4_inode_data(inode));	\
-})
 
 extern znode *inode_get_vroot(struct inode *inode);
 extern void inode_set_vroot(struct inode *inode, znode * vroot);
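
The spin_lock_inode()/spin_unlock_inode() inlines above, like every helper
added by this patch, book-keep their acquisitions in the per-context
LOCK_CNT_* counters that assertions such as LOCK_CNT_NIL(spin_locked) test.
The macros themselves live in debug.h (also touched by this patch, per the
diffstat); the sketch below shows their presumed shape, not a quote of the
real definitions:

	#if REISER4_DEBUG
	#define LOCK_CNT_INC(counter)	(++(lock_counters()->counter))
	#define LOCK_CNT_DEC(counter)	(--(lock_counters()->counter))
	#define LOCK_CNT_NIL(counter)	(lock_counters()->counter == 0)
	#define LOCK_CNT_GTZ(counter)	(lock_counters()->counter > 0)
	#else
	/* in non-debug builds the counters compile away */
	#define LOCK_CNT_INC(counter)	noop
	#define LOCK_CNT_DEC(counter)	noop
	#define LOCK_CNT_NIL(counter)	(1)
	#define LOCK_CNT_GTZ(counter)	(1)
	#endif

Because lock_counters() is part of the current thread's reiser4_context,
these checks catch lock-ordering violations within one thread only; they say
nothing about what other CPUs hold, which is why the comments in the lock
helpers speak of what the current thread may already own.
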
diff -puN fs/reiser4/jnode.c~reiser4-spinlock-cleanup fs/reiser4/jnode.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/jnode.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.536976750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/jnode.c	2005-10-20 14:01:52.780992000 +0400
@@ -239,8 +239,8 @@ void jnode_init(jnode * node, reiser4_tr
 	jnode_set_type(node, type);
 	atomic_set(&node->d_count, 0);
 	atomic_set(&node->x_count, 0);
-	spin_jnode_init(node);
-	spin_jload_init(node);
+	spin_lock_init(&node->guard);
+	spin_lock_init(&node->load);
 	node->atom = NULL;
 	node->tree = tree;
 	INIT_LIST_HEAD(&node->capture_link);
@@ -413,11 +413,11 @@ jnode *jfind(struct address_space * mapp
 	assert("vs-1694", mapping->host != NULL);
 	tree = tree_by_inode(mapping->host);
 
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 	node = jfind_nolock(mapping, index);
 	if (node != NULL)
 		jref(node);
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 	return node;
 }
 
@@ -427,7 +427,7 @@ static void inode_attach_jnode(jnode * n
 	reiser4_inode *info;
 	struct radix_tree_root *rtree;
 
-	assert("nikita-34391", rw_tree_is_write_locked(jnode_get_tree(node)));
+	assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock));
 	assert("zam-1043", node->key.j.mapping != NULL);
 	inode = node->key.j.mapping->host;
 	info = reiser4_inode_data(inode);
@@ -451,7 +451,7 @@ static void inode_detach_jnode(jnode * n
 	reiser4_inode *info;
 	struct radix_tree_root *rtree;
 
-	assert("nikita-34392", rw_tree_is_write_locked(jnode_get_tree(node)));
+	assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock));
 	assert("zam-1044", node->key.j.mapping != NULL);
 	inode = node->key.j.mapping->host;
 	info = reiser4_inode_data(inode);
@@ -487,7 +487,7 @@ hash_unformatted_jnode(jnode * node, str
 	assert("vs-1442", node->key.j.mapping == 0);
 	assert("vs-1443", node->key.j.objectid == 0);
 	assert("vs-1444", node->key.j.index == (unsigned long)-1);
-	assert("nikita-3439", rw_tree_is_write_locked(jnode_get_tree(node)));
+	assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock));
 
 	node->key.j.mapping = mapping;
 	node->key.j.objectid = get_inode_oid(mapping->host);
@@ -528,11 +528,10 @@ static void unhash_unformatted_node_nolo
 void unhash_unformatted_jnode(jnode * node)
 {
 	assert("vs-1445", jnode_is_unformatted(node));
-	WLOCK_TREE(node->tree);
 
+	write_lock_tree(node->tree);
 	unhash_unformatted_node_nolock(node);
-
-	WUNLOCK_TREE(node->tree);
+	write_unlock_tree(node->tree);
 }
 
 /*
@@ -556,7 +555,7 @@ jnode *find_get_jnode(reiser4_tree * tre
 	if (preload != 0)
 		return ERR_PTR(preload);
 
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	shadow = jfind_nolock(mapping, index);
 	if (likely(shadow == NULL)) {
 		/* add new jnode to hash table and inode's radix tree of jnodes */
@@ -569,7 +568,7 @@ jnode *find_get_jnode(reiser4_tree * tre
 		assert("vs-1498", shadow->key.j.mapping == mapping);
 		result = shadow;
 	}
-	WUNLOCK_TREE(tree);
+	write_unlock_tree(tree);
 
 	assert("nikita-2955",
 	       ergo(result != NULL, jnode_invariant(result, 0, 0)));
@@ -610,7 +609,9 @@ static jnode *do_jget(reiser4_tree * tre
 	/* check hash-table first */
 	result = jfind(pg->mapping, pg->index);
 	if (unlikely(result != NULL)) {
-		UNDER_SPIN_VOID(jnode, result, jnode_attach_page(result, pg));
+		spin_lock_jnode(result);
+		jnode_attach_page(result, pg);
+		spin_unlock_jnode(result);
 		result->key.j.mapping = pg->mapping;
 		return result;
 	}
@@ -619,7 +620,9 @@ static jnode *do_jget(reiser4_tree * tre
 	if (unlikely(IS_ERR(result)))
 		return result;
 	/* attach jnode to page */
-	UNDER_SPIN_VOID(jnode, result, jnode_attach_page(result, pg));
+	spin_lock_jnode(result);
+	jnode_attach_page(result, pg);
+	spin_unlock_jnode(result);
 	return result;
 }
 
@@ -669,7 +672,7 @@ void jnode_attach_page(jnode * node, str
 	assert("vs-1741", node->pg == NULL);
 
 	assert("nikita-2396", PageLocked(pg));
-	assert("nikita-2397", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	page_cache_get(pg);
 	pg->private = (unsigned long)node;
@@ -683,7 +686,7 @@ void page_clear_jnode(struct page *page,
 	assert("nikita-2424", page != NULL);
 	assert("nikita-2425", PageLocked(page));
 	assert("nikita-2426", node != NULL);
-	assert("nikita-2427", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 	assert("nikita-2428", PagePrivate(page));
 
 	assert("nikita-3551", !PageWriteback(page));
@@ -708,8 +711,9 @@ page_detach_jnode(struct page *page, str
 		jnode *node;
 
 		node = jprivate(page);
-		assert("nikita-2399", spin_jnode_is_not_locked(node));
-		UNDER_SPIN_VOID(jnode, node, page_clear_jnode(page, node));
+		spin_lock_jnode(node);
+		page_clear_jnode(page, node);
+		spin_unlock_jnode(node);
 	}
 	unlock_page(page);
 }
@@ -726,11 +730,11 @@ static struct page *jnode_lock_page(jnod
 	struct page *page;
 
 	assert("nikita-2052", node != NULL);
-	assert("nikita-2401", spin_jnode_is_not_locked(node));
+	assert("nikita-2401", LOCK_CNT_NIL(spin_locked_jnode));
 
 	while (1) {
 
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		page = jnode_page(node);
 		if (page == NULL) {
 			break;
@@ -747,7 +751,7 @@ static struct page *jnode_lock_page(jnod
 
 		/* Page is locked by someone else. */
 		page_cache_get(page);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		wait_on_page_locked(page);
 		/* it is possible that page was detached from jnode and
 		   returned to the free pool, or re-assigned while we were
@@ -771,14 +775,14 @@ static inline int jparse(jnode * node)
 
 	assert("nikita-2466", node != NULL);
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	if (likely(!jnode_is_parsed(node))) {
 		result = jnode_ops(node)->parse(node);
 		if (likely(result == 0))
 			JF_SET(node, JNODE_PARSED);
 	} else
 		result = 0;
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 	return result;
 }
 
@@ -788,30 +792,30 @@ struct page *jnode_get_page_locked(jnode
 {
 	struct page *page;
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	page = jnode_page(node);
 
 	if (page == NULL) {
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		page = find_or_create_page(jnode_get_mapping(node),
 					   jnode_get_index(node), gfp_flags);
 		if (page == NULL)
 			return ERR_PTR(RETERR(-ENOMEM));
 	} else {
 		if (!TestSetPageLocked(page)) {
-			UNLOCK_JNODE(node);
+			spin_unlock_jnode(node);
 			return page;
 		}
 		page_cache_get(page);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		lock_page(page);
 		assert("nikita-3134", page->mapping == jnode_get_mapping(node));
 	}
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	if (!jnode_page(node))
 		jnode_attach_page(node, page);
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 
 	page_cache_release(page);
 	assert("zam-894", jnode_page(node) == page);
@@ -882,10 +886,10 @@ int jload_gfp(jnode * node /* node to lo
 	 * should be atomic, otherwise there is a race against
 	 * reiser4_releasepage().
 	 */
-	LOCK_JLOAD(node);
+	spin_lock(&(node->load));
 	add_d_ref(node);
 	parsed = jnode_is_parsed(node);
-	UNLOCK_JLOAD(node);
+	spin_unlock(&(node->load));
 
 	if (unlikely(!parsed)) {
 		page = jnode_get_page_locked(node, gfp_flags);
@@ -921,8 +925,11 @@ int jload_gfp(jnode * node /* node to lo
 			node->data = kmap(page);
 	}
 
-	if (unlikely(JF_ISSET(node, JNODE_EFLUSH)))
-		UNDER_SPIN_VOID(jnode, node, eflush_del(node, 0));
+	if (unlikely(JF_ISSET(node, JNODE_EFLUSH))) {
+		spin_lock_jnode(node);
+		eflush_del(node, 0);
+		spin_unlock_jnode(node);
+	}
 
 	if (!is_writeout_mode())
 		/* We do not mark pages active if jload is called as a part of
@@ -977,7 +984,9 @@ int jinit_new(jnode * node, int gfp_flag
 
 	if (!jnode_is_parsed(node)) {
 		jnode_plugin *jplug = jnode_ops(node);
-		result = UNDER_SPIN(jnode, node, jplug->init(node));
+		spin_lock_jnode(node);
+		result = jplug->init(node);
+		spin_unlock_jnode(node);
 		if (result) {
 			kunmap(page);
 			goto failed;
@@ -1010,7 +1019,7 @@ void jrelse(jnode * node /* jnode to rel
 	struct page *page;
 
 	assert("nikita-487", node != NULL);
-	assert("nikita-1906", spin_jnode_is_not_locked(node));
+	assert_spin_not_locked(&(node->guard));
 
 	page = jnode_page(node);
 	if (likely(page != NULL)) {
@@ -1035,15 +1044,15 @@ static void jnode_finish_io(jnode * node
 
 	assert("nikita-2922", node != NULL);
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	page = jnode_page(node);
 	if (page != NULL) {
 		page_cache_get(page);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		wait_on_page_writeback(page);
 		page_cache_release(page);
 	} else
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 }
 
 /*
@@ -1216,7 +1225,7 @@ static unsigned long index_is_address(co
 }
 
 /* resolve race with jput */
-jnode *jnode_rip_sync(reiser4_tree * t, jnode * node)
+jnode *jnode_rip_sync(reiser4_tree *tree, jnode *node)
 {
 	/*
 	 * This is used as part of RCU-based jnode handling.
@@ -1239,12 +1248,12 @@ jnode *jnode_rip_sync(reiser4_tree * t, 
 	 * jnode.
 	 */
 	if (unlikely(JF_ISSET(node, JNODE_RIP))) {
-		RLOCK_TREE(t);
+		read_lock_tree(tree);
 		if (JF_ISSET(node, JNODE_RIP)) {
 			dec_x_ref(node);
 			node = NULL;
 		}
-		RUNLOCK_TREE(t);
+		read_unlock_tree(tree);
 	}
 	return node;
 }
@@ -1293,7 +1302,7 @@ static void delete_znode(jnode * node, r
 {
 	znode *z;
 
-	assert("nikita-2128", rw_tree_is_write_locked(tree));
+	assert_rw_write_locked(&(tree->tree_lock));
 	assert("vs-898", JF_ISSET(node, JNODE_HEARD_BANSHEE));
 
 	z = JZNODE(node);
@@ -1310,7 +1319,7 @@ static int remove_znode(jnode * node, re
 {
 	znode *z;
 
-	assert("nikita-2128", rw_tree_is_locked(tree));
+	assert_rw_write_locked(&(tree->tree_lock));
 	z = JZNODE(node);
 
 	if (z->c_count == 0) {
@@ -1577,15 +1586,15 @@ static int jnode_try_drop(jnode * node)
 	tree = jnode_get_tree(node);
 	jtype = jnode_get_type(node);
 
-	LOCK_JNODE(node);
-	WLOCK_TREE(tree);
+	spin_lock_jnode(node);
+	write_lock_tree(tree);
 	/*
 	 * if jnode has a page---leave it alone. Memory pressure will
 	 * eventually kill page and jnode.
 	 */
 	if (jnode_page(node) != NULL) {
-		UNLOCK_JNODE(node);
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
+		spin_unlock_jnode(node);
 		JF_CLR(node, JNODE_RIP);
 		return RETERR(-EBUSY);
 	}
@@ -1597,16 +1606,16 @@ static int jnode_try_drop(jnode * node)
 		assert("nikita-3223", !JF_ISSET(node, JNODE_EFLUSH));
 		assert("jmacd-511/b", atomic_read(&node->d_count) == 0);
 
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		/* no page and no references---despatch him. */
 		jnode_remove(node, jtype, tree);
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 		jnode_free(node, jtype);
 	} else {
 		/* busy check failed: reference was acquired by concurrent
 		 * thread. */
-		WUNLOCK_TREE(tree);
-		UNLOCK_JNODE(node);
+		write_unlock_tree(tree);
+		spin_unlock_jnode(node);
 		JF_CLR(node, JNODE_RIP);
 	}
 	return result;
@@ -1629,11 +1638,11 @@ static int jdelete(jnode * node /* jnode
 	jtype = jnode_get_type(node);
 
 	page = jnode_lock_page(node);
-	assert("nikita-2402", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	tree = jnode_get_tree(node);
 
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	/* re-check ->x_count under tree lock. */
 	result = jnode_is_busy(node, jtype);
 	if (likely(!result)) {
@@ -1647,10 +1656,10 @@ static int jdelete(jnode * node /* jnode
 			 */
 			page_clear_jnode(page, node);
 		}
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		/* goodbye */
 		jnode_delete(node, jtype, tree);
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 		jnode_free(node, jtype);
 		/* @node is no longer valid pointer */
 		if (page != NULL)
@@ -1659,8 +1668,8 @@ static int jdelete(jnode * node /* jnode
 		/* busy check failed: reference was acquired by concurrent
 		 * thread. */
 		JF_CLR(node, JNODE_RIP);
-		WUNLOCK_TREE(tree);
-		UNLOCK_JNODE(node);
+		write_unlock_tree(tree);
+		spin_unlock_jnode(node);
 		if (page != NULL)
 			unlock_page(page);
 	}
@@ -1683,16 +1692,16 @@ static int jdrop_in_tree(jnode * node, r
 	int result;
 
 	assert("zam-602", node != NULL);
-	assert("nikita-2362", rw_tree_is_not_locked(tree));
+	assert_rw_not_read_locked(&(tree->tree_lock));
+	assert_rw_not_write_locked(&(tree->tree_lock));
 	assert("nikita-2403", !JF_ISSET(node, JNODE_HEARD_BANSHEE));
-	// assert( "nikita-2532", JF_ISSET( node, JNODE_RIP ) );
 
 	jtype = jnode_get_type(node);
 
 	page = jnode_lock_page(node);
-	assert("nikita-2405", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 
 	/* re-check ->x_count under tree lock. */
 	result = jnode_is_busy(node, jtype);
@@ -1705,9 +1714,9 @@ static int jdrop_in_tree(jnode * node, r
 			assert("nikita-2181", PageLocked(page));
 			page_clear_jnode(page, node);
 		}
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		jnode_remove(node, jtype, tree);
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 		jnode_free(node, jtype);
 		if (page != NULL) {
 			drop_page(page);
@@ -1716,8 +1725,8 @@ static int jdrop_in_tree(jnode * node, r
 		/* busy check failed: reference was acquired by concurrent
 		 * thread. */
 		JF_CLR(node, JNODE_RIP);
-		WUNLOCK_TREE(tree);
-		UNLOCK_JNODE(node);
+		write_unlock_tree(tree);
+		spin_unlock_jnode(node);
 		if (page != NULL)
 			unlock_page(page);
 	}
@@ -1796,7 +1805,7 @@ int jnode_invariant_f(const jnode * node
 	    _check(node->jnodes.next != NULL) &&
 	    /* [jnode-dirty] invariant */
 	    /* dirty inode is part of atom */
-	    _ergo(jnode_is_dirty(node), node->atom != NULL) &&
+	    _ergo(JF_ISSET(node, JNODE_DIRTY), node->atom != NULL) &&
 	    /* [jnode-oid] invariant */
 	    /* for unformatted node ->objectid and ->mapping fields are
 	     * consistent */
@@ -1828,18 +1837,18 @@ static int jnode_invariant(const jnode *
 	assert("umka-064321", tree != NULL);
 
 	if (!jlocked && !tlocked)
-		LOCK_JNODE((jnode *) node);
+		spin_lock_jnode((jnode *) node);
 	if (!tlocked)
-		RLOCK_TREE(jnode_get_tree(node));
+		read_lock_tree(jnode_get_tree(node));
 	result = jnode_invariant_f(node, &failed_msg);
 	if (!result) {
 		info_jnode("corrupted node", node);
 		warning("jmacd-555", "Condition %s failed", failed_msg);
 	}
 	if (!tlocked)
-		RUNLOCK_TREE(jnode_get_tree(node));
+		read_unlock_tree(jnode_get_tree(node));
 	if (!jlocked && !tlocked)
-		UNLOCK_JNODE((jnode *) node);
+		spin_unlock_jnode((jnode *) node);
 	return result;
 }
 
@@ -1915,15 +1924,6 @@ void info_jnode(const char *prefix /* pr
 	}
 }
 
-/* debugging aid: output human readable information about @node */
-void print_jnode(const char *prefix /* prefix to print */ ,
-		 const jnode * node /* node to print */ )
-{
-	if (jnode_is_znode(node))
-		print_znode(prefix, JZNODE(node));
-	else
-		info_jnode(prefix, node);
-}
 
 #endif				/* REISER4_DEBUG */
 
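jnode_try_drop(), jdelete() and jdrop_in_tree() above nest two short-term
locks, the jnode guard first and then the tree rwlock, and the converted
failure paths now release them strictly in reverse order (tree, then jnode)
where the old code sometimes unlocked the jnode first.  The common shape,
condensed from the busy-check paths:

	spin_lock_jnode(node);
	write_lock_tree(tree);
	/* ... re-check ->x_count and state flags under both locks ... */
	write_unlock_tree(tree);
	spin_unlock_jnode(node);

For uncontended spinlocks the release order is not a correctness issue, but
keeping it LIFO makes the pairing easy to audit.  The success paths differ
deliberately: they drop the jnode guard before jnode_remove()/jnode_delete()
because the busy check performed under both locks has already established
that no other thread holds a reference.
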
diff -puN fs/reiser4/jnode.h~reiser4-spinlock-cleanup fs/reiser4/jnode.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/jnode.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.540977000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/jnode.h	2005-10-20 14:01:52.780992000 +0400
@@ -12,7 +12,6 @@
 #include "key.h"
 #include "debug.h"
 #include "dformat.h"
-#include "spin_macros.h"
 #include "emergency_flush.h"
 
 #include "plugin/plugin.h"
@@ -107,7 +106,7 @@ struct jnode {
 	/*   0 */ unsigned long state;
 
 	/* lock, protecting jnode's fields. */
-	/*   4 */ reiser4_spin_data load;
+	/*   4 */ spinlock_t load;
 
 	/* counter of references to jnode itself. Increased on jref().
 	   Decreased on jput().
@@ -148,7 +147,7 @@ struct jnode {
 
 	/* FOURTH CACHE LINE: atom related fields */
 
-	/*   48 */ reiser4_spin_data guard;
+	/*   48 */ spinlock_t guard;
 
 	/* atom the block is in, if any */
 	/*   52 */ txn_atom *atom;
@@ -324,28 +323,32 @@ static inline int JF_TEST_AND_SET(jnode 
 	return test_and_set_bit(f, &j->state);
 }
 
-/* ordering constraint for znode spin lock: znode lock is weaker than
-   tree lock and dk lock */
-#define spin_ordering_pred_jnode( node )					\
-	( ( lock_counters() -> rw_locked_tree == 0 ) &&			\
-	  ( lock_counters() -> spin_locked_txnh == 0 ) &&                       \
-	  ( lock_counters() -> rw_locked_zlock == 0 ) &&                      \
-	  ( lock_counters() -> rw_locked_dk == 0 )   &&                       \
-	  /*                                                                    \
-	     in addition you cannot hold more than one jnode spin lock at a     \
-	     time.                                                              \
-	  */                                                                   \
-	  ( lock_counters() -> spin_locked_jnode < 2 ) )
-
-/* Define spin_lock_jnode, spin_unlock_jnode, and spin_jnode_is_locked.
-   Take and release short-term spinlocks.  Don't hold these across
-   io.
-*/
-SPIN_LOCK_FUNCTIONS(jnode, jnode, guard);
+static inline void spin_lock_jnode(jnode *node)
+{
+	/* check that lower priority locks are not held; at most one other jnode spinlock may be held */
+	assert("", (LOCK_CNT_NIL(rw_locked_tree) &&
+		    LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(rw_locked_zlock) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_LT(spin_locked_jnode, 2)));
+
+	spin_lock(&(node->guard));
+
+	LOCK_CNT_INC(spin_locked_jnode);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void spin_unlock_jnode(jnode *node)
+{
+	assert_spin_locked(&(node->guard));
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_jnode));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
 
-#define spin_ordering_pred_jload(node) (1)
+	LOCK_CNT_DEC(spin_locked_jnode);
+	LOCK_CNT_DEC(spin_locked);
 
-SPIN_LOCK_FUNCTIONS(jload, jnode, load);
+	spin_unlock(&(node->guard));
+}
 
 static inline int jnode_is_in_deleteset(const jnode * node)
 {
@@ -398,10 +401,10 @@ static inline const reiser4_block_nr *jn
 /* block number for IO. Usually this is the same as jnode_get_block(), unless
  * jnode was emergency flushed---then block number chosen by eflush is
  * used. */
-static inline const reiser4_block_nr *jnode_get_io_block(const jnode * node)
+static inline const reiser4_block_nr *jnode_get_io_block(jnode * node)
 {
 	assert("nikita-2768", node != NULL);
-	assert("nikita-2769", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	if (unlikely(JF_ISSET(node, JNODE_EFLUSH)))
 		return eflush_get(node);
@@ -447,13 +450,11 @@ extern int jnodes_tree_done(reiser4_tree
 extern int znode_is_any_locked(const znode * node);
 extern void jnode_list_remove(jnode * node);
 extern void info_jnode(const char *prefix, const jnode * node);
-extern void print_jnode(const char *prefix, const jnode * node);
 
 #else
 
 #define jnode_list_remove(node) noop
 #define info_jnode(p, n) noop
-#define print_jnode(p, n) noop
 
 #endif
 
@@ -582,29 +583,12 @@ static inline int jnode_is_znode(const j
 	return jnode_get_type(node) == JNODE_FORMATTED_BLOCK;
 }
 
-/* return true if "node" is dirty */
-static inline int jnode_is_dirty(const jnode * node)
-{
-	assert("nikita-782", node != NULL);
-	assert("jmacd-1800", spin_jnode_is_locked(node)
-	       || (jnode_is_znode(node) && znode_is_any_locked(JZNODE(node))));
-	return JF_ISSET(node, JNODE_DIRTY);
-}
-
-/* return true if "node" is dirty, node is unlocked */
-static inline int jnode_check_dirty(jnode * node)
-{
-	assert("jmacd-7798", node != NULL);
-	assert("jmacd-7799", spin_jnode_is_not_locked(node));
-	return UNDER_SPIN(jnode, node, jnode_is_dirty(node));
-}
-
-static inline int jnode_is_flushprepped(const jnode * node)
+static inline int jnode_is_flushprepped(jnode * node)
 {
 	assert("jmacd-78212", node != NULL);
-	assert("jmacd-71276", spin_jnode_is_locked(node));
-	return !jnode_is_dirty(node) || JF_ISSET(node, JNODE_RELOC)
-	    || JF_ISSET(node, JNODE_OVRWR);
+	assert_spin_locked(&(node->guard));
+	return !JF_ISSET(node, JNODE_DIRTY) || JF_ISSET(node, JNODE_RELOC) ||
+		JF_ISSET(node, JNODE_OVRWR);
 }
 
 /* Return true if @node has already been processed by the squeeze and allocate
@@ -613,9 +597,13 @@ static inline int jnode_is_flushprepped(
    returns true you may use the block number as a hint. */
 static inline int jnode_check_flushprepped(jnode * node)
 {
+	int result;
+
 	/* It must be clean or relocated or wandered.  New allocations are set to relocate. */
-	assert("jmacd-71275", spin_jnode_is_not_locked(node));
-	return UNDER_SPIN(jnode, node, jnode_is_flushprepped(node));
+	spin_lock_jnode(node);
+	result = jnode_is_flushprepped(node);
+	spin_unlock_jnode(node);
+	return result;
 }
 
 /* returns true if node is unformatted */
@@ -691,7 +679,6 @@ static inline void jput(jnode * node)
 {
 	assert("jmacd-509", node != NULL);
 	assert("jmacd-510", atomic_read(&node->x_count) > 0);
-	assert("nikita-3065", spin_jnode_is_not_locked(node));
 	assert("zam-926", schedulable());
 	LOCK_CNT_DEC(x_refs);
 
diff -puN fs/reiser4/ktxnmgrd.h~reiser4-spinlock-cleanup fs/reiser4/ktxnmgrd.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/ktxnmgrd.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.544977250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/ktxnmgrd.h	2005-10-20 14:01:52.780992000 +0400
@@ -7,7 +7,6 @@
 #define __KTXNMGRD_H__
 
 #include "txnmgr.h"
-#include "spin_macros.h"
 
 #include <linux/fs.h>
 #include <linux/wait.h>
diff -puN fs/reiser4/lock.c~reiser4-spinlock-cleanup fs/reiser4/lock.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/lock.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.548977500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/lock.c	2005-10-20 14:05:07.689173000 +0400
@@ -231,7 +231,7 @@ static void wake_up_all_lopri_owners(zno
 {
 	lock_handle *handle;
 
-	assert("nikita-1824", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 	list_for_each_entry(handle, &node->lock.owners, owners_link) {
 		spin_lock_stack(handle->owner);
 
@@ -257,7 +257,7 @@ static inline void
 link_object(lock_handle * handle, lock_stack * owner, znode * node)
 {
 	assert("jmacd-810", handle->owner == NULL);
-	assert("nikita-1830", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 
 	handle->owner = owner;
 	handle->node = node;
@@ -279,7 +279,7 @@ static inline void unlink_object(lock_ha
 {
 	assert("zam-354", handle->owner != NULL);
 	assert("nikita-1608", handle->node != NULL);
-	assert("nikita-1633", rw_zlock_is_locked(&handle->node->lock));
+	assert_rw_locked(&(handle->node->lock.guard));
 	assert("nikita-1829", handle->owner == get_current_lock_stack());
 	assert("reiser4-5", handle->owner->nr_locks > 0);
 
@@ -305,7 +305,7 @@ static void lock_object(lock_stack * own
 
 	request = &owner->request;
 	node = request->node;
-	assert("nikita-1834", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 	if (request->mode == ZNODE_READ_LOCK) {
 		node->lock.nr_readers++;
 	} else {
@@ -335,7 +335,7 @@ static int recursive(lock_stack * owner)
 	/* Owners list is not empty for a locked node */
 	assert("zam-314", !list_empty_careful(&node->lock.owners));
 	assert("nikita-1841", owner == get_current_lock_stack());
-	assert("nikita-1848", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 
 
 	lh = list_entry(node->lock.owners.next, lock_handle, owners_link);
@@ -418,7 +418,7 @@ int znode_is_write_locked(const znode * 
 */
 static inline int check_deadlock_condition(znode * node)
 {
-	assert("nikita-1833", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 	return node->lock.nr_hipri_requests > 0
 	    && node->lock.nr_hipri_owners == 0;
 }
@@ -436,7 +436,7 @@ static int can_lock_object(lock_stack * 
 {
 	znode *node = owner->request.node;
 
-	assert("nikita-1843", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 
 	/* See if the node is disconnected. */
 	if (unlikely(ZF_ISSET(node, JNODE_IS_DYING)))
@@ -473,7 +473,7 @@ static void set_high_priority(lock_stack
 		while (&owner->locks != &item->locks_link) {
 			znode *node = item->node;
 
-			WLOCK_ZLOCK(&node->lock);
+			write_lock_zlock(&node->lock);
 
 			node->lock.nr_hipri_owners++;
 
@@ -481,7 +481,7 @@ static void set_high_priority(lock_stack
 			   previous statement (nr_hipri_owners ++) guarantees
 			   that signaled will be never set again. */
 			item->signaled = 0;
-			WUNLOCK_ZLOCK(&node->lock);
+			write_unlock_zlock(&node->lock);
 
 			item = list_entry(item->locks_link.next, lock_handle, locks_link);
 		}
@@ -503,7 +503,7 @@ static void set_low_priority(lock_stack 
 		lock_handle *handle = list_entry(owner->locks.next, lock_handle, locks_link);
 		while (&owner->locks != &handle->locks_link) {
 			znode *node = handle->node;
-			WLOCK_ZLOCK(&node->lock);
+			write_lock_zlock(&node->lock);
 			/* this thread just was hipri owner of @node, so
 			   nr_hipri_owners has to be greater than zero. */
 			assert("nikita-1835", node->lock.nr_hipri_owners > 0);
@@ -519,7 +519,7 @@ static void set_low_priority(lock_stack 
 				handle->signaled = 1;
 				atomic_inc(&owner->nr_signaled);
 			}
-			WUNLOCK_ZLOCK(&node->lock);
+			write_unlock_zlock(&node->lock);
 			handle = list_entry(handle->locks_link.next, lock_handle, locks_link);
 		}
 		owner->curpri = 0;
@@ -543,7 +543,7 @@ static void dispatch_lock_requests(znode
 {
 	lock_stack *requestor, *tmp;
 
-	assert("zam-1056", rw_zlock_is_locked(&node->lock));
+	assert_rw_write_locked(&(node->lock.guard));
 
 	list_for_each_entry_safe(requestor, tmp, &node->lock.requestors, requestors_link) {
 		int can_lock;
@@ -604,7 +604,7 @@ void longterm_unlock_znode(lock_handle *
 	/* true if node is to die and write lock is released */
 	youdie = ZF_ISSET(node, JNODE_HEARD_BANSHEE) && (readers < 0);
 
-	WLOCK_ZLOCK(&node->lock);
+	write_lock_zlock(&node->lock);
 
 	assert("zam-101", znode_is_locked(node));
 
@@ -647,7 +647,7 @@ void longterm_unlock_znode(lock_handle *
 		dispatch_lock_requests(node);
 	if (check_deadlock_condition(node))
 		wake_up_all_lopri_owners(node);
-	WUNLOCK_ZLOCK(&node->lock);
+	write_unlock_zlock(&node->lock);
 
 	assert("nikita-3182", rw_zlock_is_not_locked(&node->lock));
 	/* minus one reference from handle->node */
@@ -664,7 +664,7 @@ lock_tail(lock_stack * owner, int ok, zn
 {
 	znode *node = owner->request.node;
 
-	assert("jmacd-807", rw_zlock_is_locked(&node->lock));
+	assert_rw_write_locked(&(node->lock.guard));
 
 	/* If we broke with (ok == 0) it means we can_lock, now do it. */
 	if (ok == 0) {
@@ -685,7 +685,7 @@ lock_tail(lock_stack * owner, int ok, zn
 	} else if (ok == -EINVAL)
 		/* wake the invalidate_lock() thread up. */
 		dispatch_lock_requests(node);
-	WUNLOCK_ZLOCK(&node->lock);
+	write_unlock_zlock(&node->lock);
 	ON_DEBUG(check_lock_data());
 	ON_DEBUG(check_lock_node_data(node));
 	return ok;
@@ -709,23 +709,24 @@ static int longterm_lock_tryfast(lock_st
 	assert("nikita-3341", request_is_deadlock_safe(node,
 						       ZNODE_READ_LOCK,
 						       ZNODE_LOCK_LOPRI));
-
-	result = UNDER_RW(zlock, lock, read, can_lock_object(owner));
-
+	read_lock_zlock(lock);
+	result = can_lock_object(owner);
+	read_unlock_zlock(lock);
+
 	if (likely(result != -EINVAL)) {
 		spin_lock_znode(node);
 		result =
 		    try_capture(ZJNODE(node), ZNODE_READ_LOCK, 0,
 				1 /* can copy on capture */ );
 		spin_unlock_znode(node);
-		WLOCK_ZLOCK(lock);
+		write_lock_zlock(lock);
 		if (unlikely(result != 0)) {
 			owner->request.mode = 0;
 		} else {
 			result = can_lock_object(owner);
 			if (unlikely(result == -E_REPEAT)) {
 				/* fall back to longterm_lock_znode() */
-				WUNLOCK_ZLOCK(lock);
+				write_unlock_zlock(lock);
 				return 1;
 			}
 		}
@@ -806,7 +807,7 @@ int longterm_lock_znode(
 	has_atom = (txnh->atom != NULL);
 
 	/* Synchronize on node's zlock guard lock. */
-	WLOCK_ZLOCK(lock);
+	write_lock_zlock(lock);
 
 	if (znode_is_locked(node) &&
 	    mode == ZNODE_WRITE_LOCK && recursive(owner))
@@ -898,13 +899,13 @@ int longterm_lock_znode(
 			 * JNODE_IS_DYING and this will be noted by
 			 * can_lock_object() below.
 			 */
-			WUNLOCK_ZLOCK(lock);
+			write_unlock_zlock(lock);
 			spin_lock_znode(node);
 			ret =
 			    try_capture(ZJNODE(node), mode, cap_flags,
 					1 /* can copy on capture */ );
 			spin_unlock_znode(node);
-			WLOCK_ZLOCK(lock);
+			write_lock_zlock(lock);
 			if (unlikely(ret != 0)) {
 				/* In the failure case, the txnmgr releases
 				   the znode's lock (or in some cases, it was
@@ -937,7 +938,7 @@ int longterm_lock_znode(
 			break;
 		}
 
-		assert("nikita-1837", rw_zlock_is_locked(&node->lock));
+		assert_rw_locked(&(node->lock.guard));
 		if (hipri) {
 			/* If we are going in high priority direction then
 			   increase high priority requests counter for the
@@ -955,7 +956,7 @@ int longterm_lock_znode(
 
 		/* Ok, here we have prepared a lock request, so unlock
 		   a znode ... */
-		WUNLOCK_ZLOCK(lock);
+		write_unlock_zlock(lock);
 		/* ... and sleep */
 		go_to_sleep(owner);
 		/* Fast check whether the lock was passed
@@ -970,10 +971,10 @@ int longterm_lock_znode(
 			zref(node);
 			return 0;
 		}
-		WLOCK_ZLOCK(lock);
+		write_lock_zlock(lock);
 		/* non-racy check the same after getting the spin-lock. */
 		if (unlikely(owner->request.mode == ZNODE_NO_LOCK)) {
-			WUNLOCK_ZLOCK(lock);
+			write_unlock_zlock(lock);
 			goto lock_is_done;
 		}
 		remove_lock_request(owner);
@@ -1000,7 +1001,7 @@ void invalidate_lock(lock_handle * handl
 	assert("nikita-1793", !ZF_ISSET(node, JNODE_RIGHT_CONNECTED));
 	assert("nikita-1394", ZF_ISSET(node, JNODE_HEARD_BANSHEE));
 	assert("nikita-3097", znode_is_wlocked_once(node));
-	assert("nikita-3338", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 
 	if (handle->signaled)
 		atomic_dec(&owner->nr_signaled);
@@ -1021,14 +1022,14 @@ void invalidate_lock(lock_handle * handl
 
 		prepare_to_sleep(owner);
 
-		WUNLOCK_ZLOCK(&node->lock);
+		write_unlock_zlock(&node->lock);
 		go_to_sleep(owner);
-		WLOCK_ZLOCK(&node->lock);
+		write_lock_zlock(&node->lock);
 
 		list_del_init(&owner->requestors_link);
 	}
 
-	WUNLOCK_ZLOCK(&node->lock);
+	write_unlock_zlock(&node->lock);
 }
 
 /* Initializes lock_stack. */
@@ -1038,7 +1039,7 @@ void init_lock_stack(lock_stack * owner	
 {
 	INIT_LIST_HEAD(&owner->locks);
 	INIT_LIST_HEAD(&owner->requestors_link);
-	spin_stack_init(owner);
+	spin_lock_init(&owner->sguard);
 	owner->curpri = 1;
 	sema_init(&owner->sema, 0);
 }
@@ -1049,7 +1050,7 @@ void reiser4_init_lock(zlock * lock	/* p
 					 * structure. */ )
 {
 	memset(lock, 0, sizeof(zlock));
-	rw_zlock_init(lock);
+	rwlock_init(&lock->guard);
 	INIT_LIST_HEAD(&lock->requestors);
 	INIT_LIST_HEAD(&lock->owners);
 }
@@ -1086,7 +1087,7 @@ move_lh_internal(lock_handle * new, lock
 	assert("nikita-1827", owner == get_current_lock_stack());
 	assert("nikita-1831", new->owner == NULL);
 
-	WLOCK_ZLOCK(&node->lock);
+	write_lock_zlock(&node->lock);
 
 	signaled = old->signaled;
 	if (unlink_old) {
@@ -1110,7 +1111,7 @@ move_lh_internal(lock_handle * new, lock
 	link_object(new, owner, node);
 	new->signaled = signaled;
 
-	WUNLOCK_ZLOCK(&node->lock);
+	write_unlock_zlock(&node->lock);
 }
 
 void move_lh(lock_handle * new, lock_handle * old)
@@ -1255,10 +1256,10 @@ void check_lock_data(void)
 /* check consistency of locking data structures for @node */
 void check_lock_node_data(znode * node)
 {
-	RLOCK_ZLOCK(&node->lock);
+	read_lock_zlock(&node->lock);
 	list_check(&node->lock.owners);
 	list_check(&node->lock.requestors);
-	RUNLOCK_ZLOCK(&node->lock);
+	read_unlock_zlock(&node->lock);
 }
 
 /* check that given lock request is dead lock safe. This check is, of course,
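
lock.c is the rwlock counterpart of the spinlock conversions: the zlock
guard becomes a bare rwlock_t (see the lock.h hunks below) and the generated
RLOCK_ZLOCK/WLOCK_ZLOCK wrappers give way to hand-written
read_lock_zlock()/write_lock_zlock() inlines that maintain the debug
counters.  longterm_lock_tryfast() above shows the intended split between
the two sides: the speculative can_lock_object() probe needs only shared
access, while anything that touches the owners/requestors lists takes the
write side.  Condensed:

	read_lock_zlock(lock);		/* shared: just peek at the state */
	result = can_lock_object(owner);
	read_unlock_zlock(lock);

	write_lock_zlock(lock);		/* exclusive: mutate request lists */
	/* ... link_object(), dispatch_lock_requests(), etc ... */
	write_unlock_zlock(lock);
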
diff -puN fs/reiser4/lock.h~reiser4-spinlock-cleanup fs/reiser4/lock.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/lock.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.552977750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/lock.h	2005-10-20 14:01:52.788992500 +0400
@@ -8,7 +8,6 @@
 #include "forward.h"
 #include "debug.h"
 #include "dformat.h"
-#include "spin_macros.h"
 #include "key.h"
 #include "coord.h"
 #include "plugin/node/node.h"
@@ -23,7 +22,7 @@
 
 /* Per-znode lock object */
 struct zlock {
-	reiser4_rw_data guard;
+	rwlock_t guard;
 	/* The number of readers if positive; the number of recursively taken
 	   write locks if negative. Protected by zlock spin lock. */
 	int nr_readers;
@@ -40,20 +39,73 @@ struct zlock {
 	struct list_head requestors;
 };
 
-#define rw_ordering_pred_zlock(lock)			\
-	  (lock_counters()->spin_locked_stack == 0)
+static inline void read_lock_zlock(zlock *lock)
+{
+	/* check that zlock is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_zlock) &&
+		    LOCK_CNT_NIL(read_locked_zlock) &&
+		    LOCK_CNT_NIL(write_locked_zlock)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	read_lock(&(lock->guard));
+
+	LOCK_CNT_INC(read_locked_zlock);
+	LOCK_CNT_INC(rw_locked_zlock);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void read_unlock_zlock(zlock *lock)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(read_locked_zlock));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_zlock));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(read_locked_zlock);
+	LOCK_CNT_DEC(rw_locked_zlock);
+	LOCK_CNT_DEC(spin_locked);
+
+	read_unlock(&(lock->guard));
+}
+
+static inline void write_lock_zlock(zlock *lock)
+{
+	/* check that the current thread does not hold any zlock */
+	assert("", (LOCK_CNT_NIL(rw_locked_zlock) &&
+		    LOCK_CNT_NIL(read_locked_zlock) &&
+		    LOCK_CNT_NIL(write_locked_zlock)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	write_lock(&(lock->guard));
+
+	LOCK_CNT_INC(write_locked_zlock);
+	LOCK_CNT_INC(rw_locked_zlock);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void write_unlock_zlock(zlock *lock)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(write_locked_zlock));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_zlock));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(write_locked_zlock);
+	LOCK_CNT_DEC(rw_locked_zlock);
+	LOCK_CNT_DEC(spin_locked);
+
+	write_unlock(&(lock->guard));
+}
 
-/* Define spin_lock_zlock, spin_unlock_zlock, etc. */
-RW_LOCK_FUNCTIONS(zlock, zlock, guard);
 
 #define lock_is_locked(lock)          ((lock)->nr_readers != 0)
 #define lock_is_rlocked(lock)         ((lock)->nr_readers > 0)
 #define lock_is_wlocked(lock)         ((lock)->nr_readers < 0)
 #define lock_is_wlocked_once(lock)    ((lock)->nr_readers == -1)
 #define lock_can_be_rlocked(lock)     ((lock)->nr_readers >= 0)
-#define lock_mode_compatible(lock, mode) \
-             (((mode) == ZNODE_WRITE_LOCK && !lock_is_locked(lock)) \
-           || ((mode) == ZNODE_READ_LOCK && lock_can_be_rlocked(lock)))
+#define lock_mode_compatible(lock, mode)				\
+             (((mode) == ZNODE_WRITE_LOCK && !lock_is_locked(lock)) ||	\
+              ((mode) == ZNODE_READ_LOCK && lock_can_be_rlocked(lock)))
 
 /* Since we have R/W znode locks we need additional bidirectional `link'
    objects to implement n<->m relationship between lock owners and lock
@@ -90,7 +142,7 @@ typedef struct lock_request {
 /* A lock stack structure for accumulating locks owned by a process */
 struct lock_stack {
 	/* A guard lock protecting a lock stack */
-	reiser4_spin_data sguard;
+	spinlock_t sguard;
 	/* number of znodes which were requested by high priority processes */
 	atomic_t nr_signaled;
 	/* Current priority of a process
@@ -177,26 +229,32 @@ extern int lock_stack_isclean(lock_stack
 extern int znode_is_write_locked(const znode *);
 extern void invalidate_lock(lock_handle *);
 
-#if REISER4_DEBUG
-#define spin_ordering_pred_stack_addendum (1)
-#else
-#define spin_ordering_pred_stack_addendum		\
-	 ((lock_counters()->rw_locked_dk == 0) &&	\
-	  (lock_counters()->rw_locked_tree == 0))
-#endif
 /* lock ordering is: first take zlock spin lock, then lock stack spin lock */
-#define spin_ordering_pred_stack(stack)				\
-	((lock_counters()->spin_locked_stack == 0) &&		\
-	 (lock_counters()->spin_locked_txnmgr == 0) &&		\
-	 (lock_counters()->spin_locked_super == 0) &&		\
-	 (lock_counters()->spin_locked_inode_object == 0) &&	\
-	 (lock_counters()->rw_locked_cbk_cache == 0) &&	\
-	 (lock_counters()->spin_locked_epoch == 0) &&		\
-	 (lock_counters()->spin_locked_super_eflush == 0) &&	\
-	 spin_ordering_pred_stack_addendum)
+#define spin_ordering_pred_stack(stack)			\
+	(LOCK_CNT_NIL(spin_locked_stack) &&		\
+	 LOCK_CNT_NIL(spin_locked_txnmgr) &&		\
+	 LOCK_CNT_NIL(spin_locked_inode) &&		\
+	 LOCK_CNT_NIL(rw_locked_cbk_cache) &&		\
+	 LOCK_CNT_NIL(spin_locked_super_eflush))
+
+static inline void spin_lock_stack(lock_stack *stack)
+{
+	assert("", spin_ordering_pred_stack(stack));
+	spin_lock(&(stack->sguard));
+	LOCK_CNT_INC(spin_locked_stack);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void spin_unlock_stack(lock_stack *stack)
+{
+	assert_spin_locked(&(stack->sguard));
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_stack));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+	LOCK_CNT_DEC(spin_locked_stack);
+	LOCK_CNT_DEC(spin_locked);
+	spin_unlock(&(stack->sguard));
+}
 
-/* Same for lock_stack */
-SPIN_LOCK_FUNCTIONS(stack, lock_stack, sguard);
 
 static inline void reiser4_wake_up(lock_stack * owner)
 {
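
The assertions inside the new wrappers lean on per-thread LOCK_CNT_* counters to enforce the documented ordering (zlock spin lock before lock stack spin lock). A minimal sketch of that bookkeeping, assuming C11 thread-local integers as a stand-in for reiser4's per-context lock_counters():

#include <assert.h>

static _Thread_local int spin_locked_stack;	/* models lock_counters() */
static _Thread_local int rw_locked_zlock;

#define LOCK_CNT_INC(c) ((c)++)
#define LOCK_CNT_DEC(c) ((c)--)
#define LOCK_CNT_NIL(c) ((c) == 0)

static void lock_zlock_then_stack(void)
{
	/* ordering rule: zlock may not be taken after a stack lock */
	assert(LOCK_CNT_NIL(spin_locked_stack));
	LOCK_CNT_INC(rw_locked_zlock);		/* read_lock_zlock() */

	LOCK_CNT_INC(spin_locked_stack);	/* spin_lock_stack() */

	LOCK_CNT_DEC(spin_locked_stack);	/* spin_unlock_stack() */
	LOCK_CNT_DEC(rw_locked_zlock);		/* read_unlock_zlock() */
}

int main(void)
{
	lock_zlock_then_stack();
	return 0;
}
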
diff -puN fs/reiser4/oid.c~reiser4-spinlock-cleanup fs/reiser4/oid.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/oid.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.556978000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/oid.c	2005-10-20 14:01:52.788992500 +0400
@@ -36,13 +36,13 @@ oid_t oid_allocate(struct super_block * 
 
 	sbinfo = get_super_private(super);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	if (sbinfo->next_to_use != ABSOLUTE_MAX_OID) {
 		oid = sbinfo->next_to_use++;
 		sbinfo->oids_in_use++;
 	} else
 		oid = ABSOLUTE_MAX_OID;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 	return oid;
 }
 
@@ -55,9 +55,9 @@ int oid_release(struct super_block *supe
 
 	sbinfo = get_super_private(super);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	sbinfo->oids_in_use--;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 	return 0;
 }
 
@@ -73,9 +73,9 @@ oid_t oid_next(const struct super_block 
 
 	sbinfo = get_super_private(super);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	oid = sbinfo->next_to_use;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 	return oid;
 }
 
@@ -91,9 +91,9 @@ long oids_used(const struct super_block 
 
 	sbinfo = get_super_private(super);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	used = sbinfo->oids_in_use;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 	if (used < (__u64) ((long)~0) >> 1)
 		return (long)used;
 	else
@@ -111,9 +111,9 @@ long oids_free(const struct super_block 
 
 	sbinfo = get_super_private(super);
 
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	oids = ABSOLUTE_MAX_OID - OIDS_RESERVED - sbinfo->next_to_use;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 	if (oids < (__u64) ((long)~0) >> 1)
 		return (long)oids;
 	else
@@ -132,7 +132,7 @@ void oid_count_allocated(void)
 
 	atom = get_current_atom_locked();
 	atom->nr_objects_created++;
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 /*
@@ -146,7 +146,7 @@ void oid_count_released(void)
 
 	atom = get_current_atom_locked();
 	atom->nr_objects_deleted++;
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 /*
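
All the oid helpers above reduce to the same take-guard/touch-counter/drop-guard shape under the renamed spin_lock_reiser4_super(). A hedged sketch of that shape, with a pthread mutex standing in for the superblock spinlock and an illustrative value for ABSOLUTE_MAX_OID:

#include <pthread.h>

#define ABSOLUTE_MAX_OID (~0ull)	/* illustrative value only */

struct reiser4_super_info {
	pthread_mutex_t guard;		/* the superblock guard */
	unsigned long long next_to_use;
	unsigned long long oids_in_use;
};

static unsigned long long oid_allocate(struct reiser4_super_info *sbinfo)
{
	unsigned long long oid;

	pthread_mutex_lock(&sbinfo->guard);	/* spin_lock_reiser4_super() */
	if (sbinfo->next_to_use != ABSOLUTE_MAX_OID) {
		oid = sbinfo->next_to_use++;
		sbinfo->oids_in_use++;
	} else
		oid = ABSOLUTE_MAX_OID;
	pthread_mutex_unlock(&sbinfo->guard);	/* spin_unlock_reiser4_super() */
	return oid;
}

int main(void)
{
	struct reiser4_super_info sbinfo = {
		.guard = PTHREAD_MUTEX_INITIALIZER,
		.next_to_use = 42,
	};
	return oid_allocate(&sbinfo) == 42 ? 0 : 1;
}
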
diff -puN fs/reiser4/page_cache.c~reiser4-spinlock-cleanup fs/reiser4/page_cache.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/page_cache.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.560978250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/page_cache.c	2005-10-20 14:01:52.792992750 +0400
@@ -432,7 +432,9 @@ static struct bio *page_bio(struct page 
 		blksz = super->s_blocksize;
 		assert("nikita-2028", blksz == (int)PAGE_CACHE_SIZE);
 
-		blocknr = *UNDER_SPIN(jnode, node, jnode_get_io_block(node));
+		spin_lock_jnode(node);
+		blocknr = *jnode_get_io_block(node);
+		spin_unlock_jnode(node);
 
 		assert("nikita-2275", blocknr != (reiser4_block_nr) 0);
 		assert("nikita-2276", !blocknr_is_fake(&blocknr));
@@ -527,22 +529,22 @@ int reiser4_writepage(struct page *page 
 
 		assert("nikita-2419", node != NULL);
 
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		/*
 		 * page was dirty, but jnode is not. This is (only?)
 		 * possible if page was modified through mmap(). We
 		 * want to handle such jnodes specially.
 		 */
-		phantom = !jnode_is_dirty(node);
+		phantom = !JF_ISSET(node, JNODE_DIRTY);
 		atom = jnode_get_atom(node);
 		if (atom != NULL) {
 			if (!(atom->flags & ATOM_FORCE_COMMIT)) {
 				atom->flags |= ATOM_FORCE_COMMIT;
 				ktxnmgrd_kick(&get_super_private(s)->tmgr);
 			}
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		}
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 
 		result = emergency_flush(page);
 		if (result == 0)
@@ -646,13 +648,13 @@ static void invalidate_unformatted(jnode
 {
 	struct page *page;
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	page = node->pg;
 	if (page) {
 		loff_t from, to;
 
 		page_cache_get(page);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		/* FIXME: use truncate_complete_page instead */
 		from = (loff_t) page->index << PAGE_CACHE_SHIFT;
 		to = from + PAGE_CACHE_SIZE - 1;
@@ -693,7 +695,7 @@ truncate_jnodes_range(struct inode *inod
 
 		assert("nikita-3466", index <= end);
 
-		RLOCK_TREE(tree);
+		read_lock_tree(tree);
 		taken =
 		    radix_tree_gang_lookup(jnode_tree_by_reiser4_inode(info),
 					   (void **)gang, index,
@@ -705,7 +707,7 @@ truncate_jnodes_range(struct inode *inod
 			else
 				gang[i] = NULL;
 		}
-		RUNLOCK_TREE(tree);
+		read_unlock_tree(tree);
 
 		for (i = 0; i < taken; ++i) {
 			node = gang[i];
@@ -740,39 +742,6 @@ reiser4_invalidate_pages(struct address_
 	truncate_jnodes_range(mapping->host, from, count);
 }
 
-#if REISER4_DEBUG
-
-#define page_flag_name( page, flag )			\
-	( test_bit( ( flag ), &( page ) -> flags ) ? ((#flag "|")+3) : "" )
-
-void print_page(const char *prefix, struct page *page)
-{
-	if (page == NULL) {
-		printk("null page\n");
-		return;
-	}
-	printk("%s: page index: %lu mapping: %p count: %i private: %lx\n",
-	       prefix, page->index, page->mapping, page_count(page),
-	       page->private);
-	printk("\tflags: %s%s%s%s %s%s%s %s%s%s %s%s\n",
-	       page_flag_name(page, PG_locked), page_flag_name(page, PG_error),
-	       page_flag_name(page, PG_referenced), page_flag_name(page,
-								   PG_uptodate),
-	       page_flag_name(page, PG_dirty), page_flag_name(page, PG_lru),
-	       page_flag_name(page, PG_slab), page_flag_name(page, PG_checked),
-	       page_flag_name(page, PG_reserved), page_flag_name(page,
-								 PG_private),
-	       page_flag_name(page, PG_writeback), page_flag_name(page,
-								  PG_nosave));
-	if (jprivate(page) != NULL) {
-		print_jnode("\tpage jnode", jprivate(page));
-		printk("\n");
-	}
-}
-
-#endif
-
-
 /*
  * Local variables:
  * c-indentation-style: "K&R"
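
The recurring transformation in this file is mechanical: the old UNDER_SPIN(jnode, node, expr) macro hid a lock/evaluate/unlock triple behind a GCC statement expression, and the patch open-codes it so the critical section is visible at the call site. A userspace model of both forms; the pthread mutex stands in for the jnode spinlock, and the macro body is a reconstruction, not the original:

#include <pthread.h>

struct jnode {
	pthread_mutex_t guard;		/* models the jnode spinlock */
	unsigned long long blocknr;
};

/* before: the macro hides the critical section (GCC statement expression) */
#define UNDER_SPIN(node, expr) ({			\
	pthread_mutex_lock(&(node)->guard);		\
	__typeof__(expr) __v = (expr);			\
	pthread_mutex_unlock(&(node)->guard);		\
	__v; })

/* after: the same section, open-coded as in the patch */
static unsigned long long jnode_get_io_block(struct jnode *node)
{
	unsigned long long blocknr;

	pthread_mutex_lock(&node->guard);	/* spin_lock_jnode(node) */
	blocknr = node->blocknr;
	pthread_mutex_unlock(&node->guard);	/* spin_unlock_jnode(node) */
	return blocknr;
}

int main(void)
{
	struct jnode j = { .guard = PTHREAD_MUTEX_INITIALIZER, .blocknr = 7 };

	return UNDER_SPIN(&j, j.blocknr) == jnode_get_io_block(&j) ? 0 : 1;
}
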
diff -puN fs/reiser4/plugin/file/cryptcompress.c~reiser4-spinlock-cleanup fs/reiser4/plugin/file/cryptcompress.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/file/cryptcompress.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.588980000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/file/cryptcompress.c	2005-10-20 14:01:52.792992750 +0400
@@ -572,14 +572,6 @@ free_reserved4cluster(struct inode *inod
 	clust->reserved = 0;
 }
 
-#if REISER4_DEBUG
-static int eq_to_ldk(znode * node, const reiser4_key * key)
-{
-	return UNDER_RW(dk, current_tree, read,
-			keyeq(key, znode_get_ld_key(node)));
-}
-#endif
-
 /* The core search procedure.
    If returned value is not cbk_errored, current znode is locked */
 static int find_cluster_item(hint_t * hint, const reiser4_key * key,	/* key of the item we are
@@ -617,7 +609,7 @@ static int find_cluster_item(hint_t * hi
 		}
 		if (result)
 			return result;
-		assert("edward-1218", eq_to_ldk(coord->node, key));
+		assert("edward-1218", equal_to_ldk(coord->node, key));
 	} else {
 		coord->item_pos++;
 		coord->unit_pos = 0;
@@ -1145,13 +1137,13 @@ make_cluster_jnode_dirty_locked(reiser4_
 
 	assert("edward-221", node != NULL);
 	assert("edward-971", clust->reserved == 1);
-	assert("edward-1028", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 	assert("edward-972", node->page_count < cluster_nrpages(inode));
 	assert("edward-1263",
 	       clust->reserved_prepped == estimate_insert_cluster(inode, 0));
 	assert("edward-1264", clust->reserved_unprepped == 0);
 
-	if (jnode_is_dirty(node)) {
+	if (JF_ISSET(node, JNODE_DIRTY)) {
 		/* there are >= 1 pages already referenced by this jnode */
 		assert("edward-973",
 		       count_to_nrpages(off_to_count
@@ -1218,7 +1210,7 @@ static int try_capture_cluster(reiser4_c
 
 	assert("edward-1035", node != NULL);
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	if (clust->win)
 		inode_set_new_size(clust, inode);
 
@@ -1228,7 +1220,7 @@ static int try_capture_cluster(reiser4_c
 	make_cluster_jnode_dirty_locked(clust, node, &old_size, inode);
       exit:
 	assert("edward-1034", !result);
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 	jput(node);
 	return result;
 }
@@ -1279,9 +1271,9 @@ grab_cluster_pages_jnode(struct inode *i
 		return result;
 	}
 	assert("edward-920", jprivate(clust->pages[0]));
-	LOCK_JNODE(node);
+
+	/* NOTE-Edward: spin_lock_jnode is removed here: set_bit is atomic */
 	JF_SET(node, JNODE_CLUSTER_PAGE);
-	UNLOCK_JNODE(node);
 	return 0;
 }
 
@@ -1459,22 +1451,24 @@ static int update_sd_cryptcompress(struc
 	return result;
 }
 
+
+/* NOTE-Edward: this is too similar to reiser4/txnmgr.c:uncapture_jnode() */
 static void uncapture_cluster_jnode(jnode * node)
 {
 	txn_atom *atom;
 
-	assert("edward-1023", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	/*jnode_make_clean(node); */
 	atom = jnode_get_atom(node);
 	if (atom == NULL) {
-		assert("jmacd-7111", !jnode_is_dirty(node));
-		UNLOCK_JNODE(node);
+		assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY));
+		spin_unlock_jnode(node);
 		return;
 	}
 
 	uncapture_block(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 	jput(node);
 }
 
@@ -1508,17 +1502,17 @@ flush_cluster_pages(reiser4_cluster_t * 
 	assert("edward-241", schedulable());
 	assert("edward-718", crc_inode_ok(inode));
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 
-	if (!jnode_is_dirty(node)) {
+	if (!JF_ISSET(node, JNODE_DIRTY)) {
 
 		assert("edward-981", node->page_count == 0);
+
+		/* race with another flush */
+		spin_unlock_jnode(node);
 		warning("edward-982", "flush_cluster_pages: jnode is not dirty "
 			"clust %lu, inode %llu\n",
 			clust->index, (unsigned long long)get_inode_oid(inode));
-
-		/* race with another flush */
-		UNLOCK_JNODE(node);
 		return RETERR(-E_REPEAT);
 	}
 	tc->len = fsize_to_count(clust, inode);
@@ -2106,13 +2100,13 @@ static int jnodes_truncate_ok(struct ino
 	reiser4_inode *info = reiser4_inode_data(inode);
 	reiser4_tree *tree = tree_by_inode(inode);
 
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 
 	result =
 	    radix_tree_gang_lookup(jnode_tree_by_reiser4_inode(info),
 				   (void **)&node, clust_to_pg(start, inode),
 				   1);
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 	if (result)
 		warning("edward-1332", "Untruncated jnode %p\n", node);
 	return !result;
@@ -2166,8 +2160,8 @@ void truncate_page_cluster(struct inode 
 	found = find_get_pages(inode->i_mapping, clust_to_pg(index, inode),
 			       nr_pages, pages);
 
-	LOCK_JNODE(node);
-	if (jnode_is_dirty(node)) {
+	spin_lock_jnode(node);
+	if (JF_ISSET(node, JNODE_DIRTY)) {
 		/* jnode is dirty => space for disk cluster
 		   conversion grabbed */
 		cluster_reserved2grabbed(estimate_insert_cluster(inode, 0));
@@ -2187,7 +2181,7 @@ void truncate_page_cluster(struct inode 
 			page_cache_release(pages[i]);
 		}
 	} else
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 	/* now drop pages and jnode */
 	/* FIXME-EDWARD: Use truncate_complete_page in the loop above instead */
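
The NOTE-Edward hunks above drop LOCK_JNODE/UNLOCK_JNODE pairs around single flag operations: JF_SET() and JF_ISSET() map to set_bit()/test_bit(), which are atomic on their own, so a lone flag read or write needs no spinlock. A minimal model of that reasoning, using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>

#define JNODE_DIRTY (1u << 0)		/* bit position is illustrative */

static atomic_uint jnode_state;

static void jf_set_dirty(void)
{
	atomic_fetch_or(&jnode_state, JNODE_DIRTY);	/* JF_SET() */
}

static int jf_isset_dirty(void)
{
	/* test_bit() equivalent: a single atomic load, no lock around it */
	return (atomic_load(&jnode_state) & JNODE_DIRTY) != 0;
}

int main(void)
{
	jf_set_dirty();
	return jf_isset_dirty() ? 0 : 1;
}
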
 
diff -puN fs/reiser4/plugin/file/file.c~reiser4-spinlock-cleanup fs/reiser4/plugin/file/file.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/file/file.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.592980250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/file/file.c	2005-10-20 14:01:52.796993000 +0400
@@ -62,30 +62,46 @@ static void set_file_state_unknown(struc
 	unix_file_inode_data(inode)->container = UF_CONTAINER_UNKNOWN;
 }
 
-static int less_than_ldk(znode * node, const reiser4_key * key)
+static int less_than_ldk(znode *node, const reiser4_key *key)
 {
-	return UNDER_RW(dk, current_tree, read,
-			keylt(key, znode_get_ld_key(node)));
+	int result;
+
+	read_lock_dk(znode_get_tree(node));
+	result = keylt(key, znode_get_ld_key(node));
+	read_unlock_dk(znode_get_tree(node));
+	return result;
 }
 
-int equal_to_rdk(znode * node, const reiser4_key * key)
+int equal_to_rdk(znode *node, const reiser4_key *key)
 {
-	return UNDER_RW(dk, current_tree, read,
-			keyeq(key, znode_get_rd_key(node)));
+	int result;
+
+	read_lock_dk(znode_get_tree(node));
+	result = keyeq(key, znode_get_rd_key(node));
+	read_unlock_dk(znode_get_tree(node));
+	return result;
 }
 
 #if REISER4_DEBUG
 
 static int less_than_rdk(znode * node, const reiser4_key * key)
 {
-	return UNDER_RW(dk, current_tree, read,
-			keylt(key, znode_get_rd_key(node)));
+	int result;
+
+	read_lock_dk(znode_get_tree(node));
+	result = keylt(key, znode_get_rd_key(node));
+	read_unlock_dk(znode_get_tree(node));
+	return result;
 }
 
-static int equal_to_ldk(znode * node, const reiser4_key * key)
+int equal_to_ldk(znode * node, const reiser4_key * key)
 {
-	return UNDER_RW(dk, current_tree, read,
-			keyeq(key, znode_get_ld_key(node)));
+	int result;
+
+	read_lock_dk(znode_get_tree(node));
+	result = keyeq(key, znode_get_ld_key(node));
+	read_unlock_dk(znode_get_tree(node));
+	return result;
 }
 
 /* get key of item next to one @coord is set to */
@@ -94,8 +110,9 @@ static reiser4_key *get_next_item_key(co
 {
 	if (coord->item_pos == node_num_items(coord->node) - 1) {
 		/* get key of next item if it is in right neighbor */
-		UNDER_RW_VOID(dk, znode_get_tree(coord->node), read,
-			      *next_key = *znode_get_rd_key(coord->node));
+		read_lock_dk(znode_get_tree(coord->node));
+		*next_key = *znode_get_rd_key(coord->node);
+		read_unlock_dk(znode_get_tree(coord->node));
 	} else {
 		/* get key of next item if it is in the same node */
 		coord_t next;
@@ -210,10 +227,7 @@ write_mode_t how_to_write(uf_coord_t * u
 		 * space, for example) and leaves empty leaf
 		 * lingering. Nothing prevents us from reusing it.
 		 */
-		assert("vs-1000", UNDER_RW(dk, current_tree, read,
-					   keylt(key,
-						 znode_get_rd_key(coord->
-								  node))));
+		assert("vs-1000", less_than_rdk(coord->node, key));
 		assert("vs-1002", coord->between == EMPTY_NODE);
 		result = FIRST_ITEM;
 		uf_coord->valid = 1;
@@ -1323,14 +1337,20 @@ static int sync_page(struct page *page)
 
 		lock_page(page);
 		node = jprivate(page);
-		if (node != NULL)
-			atom = UNDER_SPIN(jnode, node, jnode_get_atom(node));
-		else
+		if (node != NULL) {
+			spin_lock_jnode(node);
+			atom = jnode_get_atom(node);
+			spin_unlock_jnode(node);
+		} else
 			atom = NULL;
 		unlock_page(page);
 		result = sync_atom(atom);
 	} while (result == -E_REPEAT);
-/* 	ZAM-FIXME-HANS: document the logic of this loop, is it just to handle the case where more pages get added to the atom while we are syncing it? */
+	/*
+	 * ZAM-FIXME-HANS: document the logic of this loop, is it just to
+	 * handle the case where more pages get added to the atom while we are
+	 * syncing it?
+	 */
 	assert("nikita-3485", ergo(result == 0,
 				   get_current_context()->trans->atom == NULL));
 	return result;
@@ -1626,9 +1646,9 @@ int sync_unix_file(struct file *file, st
 				node = jref(ZJNODE(coord.node));
 				done_lh(&lh);
 				txn_restart_current();
-				LOCK_JNODE(node);
+				spin_lock_jnode(node);
 				atom = jnode_get_atom(node);
-				UNLOCK_JNODE(node);
+				spin_unlock_jnode(node);
 				result = sync_atom(atom);
 				jput(node);
 			} else
@@ -2614,9 +2634,15 @@ ssize_t write_unix_file(struct file *fil
 	return count ? count : result;
 }
 
-/* this is implementation of vfs's release method of struct
-   file_operations for unix file plugin
-   convert all extent items into tail items if necessary */
+/**
+ * release_unix_file - release method of struct file_operations
+ * @inode: inode of the released file
+ * @file: file to release
+ *
+ * Implementation of the release method of struct file_operations for the
+ * unix file plugin. If the last reference to the inode is released, converts
+ * all extent items into tail items if necessary. Frees reiser4-specific data.
+ */
 int release_unix_file(struct inode *inode, struct file *file)
 {
 	reiser4_context *ctx;
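
The key-comparison helpers above (less_than_ldk, equal_to_rdk and friends) all follow one template after the conversion: take the dk lock for read, evaluate the comparison into a local, drop the lock, return the local. A hedged userspace sketch of that template; keylt() and the lock here are stand-ins for the reiser4 originals:

#include <pthread.h>

static pthread_rwlock_t dk_lock = PTHREAD_RWLOCK_INITIALIZER;

struct reiser4_key { unsigned long long v; };	/* abridged */

static int keylt(const struct reiser4_key *a, const struct reiser4_key *b)
{
	return a->v < b->v;
}

static int less_than_ldk(const struct reiser4_key *ld_key,
			 const struct reiser4_key *key)
{
	int result;

	pthread_rwlock_rdlock(&dk_lock);	/* read_lock_dk(tree) */
	result = keylt(key, ld_key);
	pthread_rwlock_unlock(&dk_lock);	/* read_unlock_dk(tree) */
	return result;
}

int main(void)
{
	struct reiser4_key ld = { 10 }, key = { 3 };

	return less_than_ldk(&ld, &key) ? 0 : 1;
}
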
diff -puN fs/reiser4/plugin/file/funcs.h~reiser4-spinlock-cleanup fs/reiser4/plugin/file/funcs.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/file/funcs.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.596980500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/file/funcs.h	2005-10-20 14:01:52.796993000 +0400
@@ -1,6 +1,6 @@
 /* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by reiser4/README */
 
-/* this prototyles functions used by both file.c and tail_conversion.c */
+/* prototypes of functions used by both file.c and tail_conversion.c */
 void get_exclusive_access(unix_file_info_t *);
 void drop_exclusive_access(unix_file_info_t *);
 void get_nonexclusive_access(unix_file_info_t *, int);
@@ -17,6 +17,9 @@ int find_file_item_nohint(coord_t *, loc
 int goto_right_neighbor(coord_t *, lock_handle *);
 int find_or_create_extent(struct page *);
 write_mode_t how_to_write(uf_coord_t *, const reiser4_key *);
+#if REISER4_DEBUG
+int equal_to_ldk(znode *, const reiser4_key *);
+#endif
 
 extern inline int cbk_errored(int cbk_result)
 {
diff -puN fs/reiser4/plugin/file_ops.c~reiser4-spinlock-cleanup fs/reiser4/plugin/file_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/file_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.600980750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/file_ops.c	2005-10-20 14:01:52.800993250 +0400
@@ -20,12 +20,23 @@ loff_t llseek_common_dir(struct file *, 
 */
 int readdir_common(struct file *, void *dirent, filldir_t);
 
-/* this is implementation of vfs's release method of struct file_operations for
-   typical directory
+/**
+ * release_dir_common - release method of struct file_operations
+ * @inode: inode of the released file
+ * @file: file to release
+ *
+ * Implementation of the release method of struct file_operations for a
+ * typical directory. All it does is free reiser4-specific file data.
 */
 int release_dir_common(struct inode *inode, struct file *file)
 {
+	reiser4_context *ctx;
+
+	ctx = init_context(inode->i_sb);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
 	reiser4_free_file_fsdata(file);
+	reiser4_exit_context(ctx);
 	return 0;
 }
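
release_dir_common() also gains the init_context()/reiser4_exit_context() bracket that every VFS entry point into reiser4 is expected to hold. A sketch of the bracket under simplified assumptions (malloc stands in for context allocation, and -ENOMEM for the patch's PTR_ERR path):

#include <errno.h>
#include <stdlib.h>

struct reiser4_context { int unused; };	/* abridged */

static struct reiser4_context *init_context(void)
{
	return malloc(sizeof(struct reiser4_context));	/* may fail */
}

static void reiser4_exit_context(struct reiser4_context *ctx)
{
	free(ctx);
}

static int release_dir_common(void)
{
	struct reiser4_context *ctx = init_context();

	if (ctx == NULL)
		return -ENOMEM;		/* the patch returns PTR_ERR(ctx) */
	/* ... reiser4_free_file_fsdata(file) runs here in the patch ... */
	reiser4_exit_context(ctx);
	return 0;
}

int main(void)
{
	return release_dir_common();
}
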
 
diff -puN fs/reiser4/plugin/item/ctail.c~reiser4-spinlock-cleanup fs/reiser4/plugin/item/ctail.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/item/ctail.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.608981250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/item/ctail.c	2005-10-20 14:01:52.800993250 +0400
@@ -402,12 +402,8 @@ static int ctail_convertible(const coord
 				       NULL) << cluster_shift_by_coord(coord));
 	if (!child)
 		return 0;
-	LOCK_JNODE(child);
-	if (jnode_is_dirty(child))
-		result = 1;
-	else
-		result = 0;
-	UNLOCK_JNODE(child);
+	/* NOTE-Edward: jnode spin lock is removed here: test_bit is atomic */
+	result = JF_ISSET(child, JNODE_DIRTY);
 	jput(child);
 	return result;
 }
@@ -1110,22 +1106,20 @@ int scan_ctail(flush_scan * scan)
 
 	if (!scanning_left(scan))
 		return result;
-	if (!znode_is_dirty(scan->parent_lock.node))
+	if (!ZF_ISSET(scan->parent_lock.node, JNODE_DIRTY))
 		znode_make_dirty(scan->parent_lock.node);
 
 	if (!znode_convertible(scan->parent_lock.node)) {
-		LOCK_JNODE(scan->node);
-		if (jnode_is_dirty(scan->node)) {
+		/* NOTE-Edward: jnode spinlock is removed. test_bit is atomic */
+		if (JF_ISSET(scan->node, JNODE_DIRTY)) {
 			warning("edward-873",
 				"child is dirty but parent not squeezable");
 			znode_set_convertible(scan->parent_lock.node);
 		} else {
 			warning("edward-681",
 				"cluster page is already processed");
-			UNLOCK_JNODE(scan->node);
 			return -EAGAIN;
 		}
-		UNLOCK_JNODE(scan->node);
 	}
 	return result;
 }
@@ -1146,10 +1140,10 @@ static int should_attach_convert_idata(f
 
 	if (!pos->child)
 		return 0;
-	LOCK_JNODE(pos->child);
-	result = jnode_is_dirty(pos->child) &&
-	    pos->child->atom == ZJNODE(pos->coord.node)->atom;
-	UNLOCK_JNODE(pos->child);
+	spin_lock_jnode(pos->child);
+	result = (JF_ISSET(pos->child, JNODE_DIRTY) &&
+		  pos->child->atom == ZJNODE(pos->coord.node)->atom);
+	spin_unlock_jnode(pos->child);
 	if (!result && pos->child) {
 		/* existing child isn't to attach, clear up this one */
 		jput(pos->child);
@@ -1436,7 +1430,7 @@ static int next_item_dc_stat(flush_pos_t
 
 			item_convert_data(pos)->d_next = DC_CHAINED_ITEM;
 
-			if (!znode_is_dirty(lh.node)) {
+			if (!ZF_ISSET(lh.node, JNODE_DIRTY)) {
 				/*
 				   warning("edward-1024",
 				   "next slum item mergeable, "
diff -puN fs/reiser4/plugin/item/extent_file_ops.c~reiser4-spinlock-cleanup fs/reiser4/plugin/item/extent_file_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/item/extent_file_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.612981500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/item/extent_file_ops.c	2005-10-20 14:01:52.804993500 +0400
@@ -856,11 +856,11 @@ static int extent_write_flow(struct inod
 						   process because page
 						   attached to jnode is
 						   locked */
-						LOCK_JNODE(j);
+						spin_lock_jnode(j);
 						assign_jnode_blocknr(j, h->blocknr,
 								     h->created);
 						blocknr_set = 1;
-						UNLOCK_JNODE(j);
+						spin_unlock_jnode(j);
 					}
 					result =
 					    page_io(page, j, READ, GFP_KERNEL);
@@ -875,12 +875,12 @@ static int extent_write_flow(struct inod
 				}
 
 				/* assign blocknr to jnode if it is not assigned yet */
-				LOCK_JNODE(j);
+				spin_lock_jnode(j);
 				eflush_del(j, 1);
 				if (blocknr_set == 0)
 					assign_jnode_blocknr(j, h->blocknr,
 							     h->created);
-				UNLOCK_JNODE(j);
+				spin_unlock_jnode(j);
 			} else {
 				/* new page added to the file. No need to carry
 				   about data it might contain. Zero content of
@@ -890,21 +890,22 @@ static int extent_write_flow(struct inod
 
 				/* assign blocknr to jnode if it is not
 				   assigned yet */
-				LOCK_JNODE(j);
+				spin_lock_jnode(j);
 				assign_jnode_blocknr(j, h->blocknr, h->created);
-				UNLOCK_JNODE(j);
+				spin_unlock_jnode(j);
 			}
 		} else {
-			LOCK_JNODE(j);
+			spin_lock_jnode(j);
 			eflush_del(j, 1);
 			assign_jnode_blocknr(j, h->blocknr, h->created);
-			UNLOCK_JNODE(j);
+			spin_unlock_jnode(j);
 		}
-
-		assert("vs-1503",
-		       UNDER_SPIN(jnode, j,
-				  (!JF_ISSET(j, JNODE_EFLUSH)
-				   && jnode_page(j) == page)));
+#if REISER4_DEBUG
+		spin_lock_jnode(j);
+		assert("vs-1503", (!JF_ISSET(j, JNODE_EFLUSH) &&
+				   jnode_page(j) == page));
+		spin_unlock_jnode(j);
+#endif
 		assert("nikita-3033", schedulable());
 
 		/* copy user data into page */
@@ -930,16 +931,16 @@ static int extent_write_flow(struct inod
 		   gets into clean list in try_capture and then in
 		   jnode_mark_dirty gets moved to dirty list. So, it would be
 		   more optimal to put jnode directly to dirty list */
-		LOCK_JNODE(j);
+		spin_lock_jnode(j);
 		result = try_capture(j, ZNODE_WRITE_LOCK, 0, 1 /* can_coc */ );
 		if (result) {
-			UNLOCK_JNODE(j);
+			spin_unlock_jnode(j);
 			page_cache_release(page);
 			goto exit2;
 		}
 		jnode_make_dirty_locked(j);
 		JF_CLR(j, JNODE_KEEPME);
-		UNLOCK_JNODE(j);
+		spin_unlock_jnode(j);
 
 		page_cache_release(page);
 		jput(j);
@@ -1104,7 +1105,7 @@ do_readpage_extent(reiser4_extent * ext,
 			zero_page(page);
 			return 0;
 		}
-		LOCK_JNODE(j);
+		spin_lock_jnode(j);
 		if (!jnode_page(j)) {
 			jnode_attach_page(j, page);
 		} else {
@@ -1112,7 +1113,7 @@ do_readpage_extent(reiser4_extent * ext,
 			assert("vs-1504", jnode_page(j) == page);
 		}
 
-		UNLOCK_JNODE(j);
+		spin_unlock_jnode(j);
 		break;
 
 	case ALLOCATED_EXTENT:
@@ -1134,7 +1135,9 @@ do_readpage_extent(reiser4_extent * ext,
 		assert("nikita-2688", j);
 		assert("vs-1426", jnode_page(j) == NULL);
 
-		UNDER_SPIN_VOID(jnode, j, jnode_attach_page(j, page));
+		spin_lock_jnode(j);
+		jnode_attach_page(j, page);
+		spin_unlock_jnode(j);
 
 		/* page is locked, it is safe to check JNODE_EFLUSH */
 		assert("vs-1668", JF_ISSET(j, JNODE_EFLUSH));
@@ -1610,12 +1613,11 @@ capture_extent(reiser4_key *key, uf_coor
 		done_lh(uf_coord->lh);
 		return PTR_ERR(j);
 	}
-	UNDER_SPIN_VOID(jnode, j, eflush_del(j, 1));
+	spin_lock_jnode(j);
+	eflush_del(j, 1);
 
 	unlock_page(page);
 
-	LOCK_JNODE(j);
-
 	BUG_ON(JF_ISSET(j, JNODE_EFLUSH));
 	if (h->created) {
 		/* extent corresponding to this jnode was just created */
@@ -1633,18 +1635,18 @@ capture_extent(reiser4_key *key, uf_coor
 		assert("vs-1507",
 		       ergo(h->blocknr, *jnode_get_block(j) == h->blocknr));
 	}
-	UNLOCK_JNODE(j);
+	spin_unlock_jnode(j);
 
 	done_lh(h->uf_coord->lh);
 
-	LOCK_JNODE(j);
+	spin_lock_jnode(j);
 	result = try_capture(j, ZNODE_WRITE_LOCK, 0, 1 /* can_coc */ );
 	if (result != 0)
 		reiser4_panic("nikita-3324", "Cannot capture jnode: %i",
 			      result);
 	jnode_make_dirty_locked(j);
 	JF_CLR(j, JNODE_KEEPME);
-	UNLOCK_JNODE(j);
+	spin_unlock_jnode(j);
 	jput(j);
 
 	if (h->created)
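
Where an assertion needs the jnode guard, the patch wraps the lock/assert/unlock triple in #if REISER4_DEBUG (the vs-1503 hunk above), so non-debug builds take no extra lock. The same idiom in userspace, with NDEBUG playing the role of !REISER4_DEBUG:

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct jnode {
	pthread_mutex_t guard;
	void *pg;
};

static void check_jnode_page(struct jnode *node, void *page)
{
#ifndef NDEBUG	/* stands in for #if REISER4_DEBUG */
	pthread_mutex_lock(&node->guard);
	assert(node->pg == page);
	pthread_mutex_unlock(&node->guard);
#endif
	(void)node;
	(void)page;
}

int main(void)
{
	struct jnode j = { .guard = PTHREAD_MUTEX_INITIALIZER, .pg = NULL };

	check_jnode_page(&j, NULL);
	return 0;
}
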
diff -puN fs/reiser4/plugin/item/extent_flush_ops.c~reiser4-spinlock-cleanup fs/reiser4/plugin/item/extent_flush_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/item/extent_flush_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.616981750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/item/extent_flush_ops.c	2005-10-20 14:01:52.804993500 +0400
@@ -138,7 +138,7 @@ int scan_extent(flush_scan * scan)
 	int ret = 0, allocated, incr;
 	reiser4_tree *tree;
 
-	if (!jnode_check_dirty(scan->node)) {
+	if (!JF_ISSET(scan->node, JNODE_DIRTY)) {
 		scan->stop = 1;
 		return 0;	/* Race with truncate, this node is already
 				 * truncated. */
@@ -439,9 +439,11 @@ unprotect_extent_nodes(flush_pos_t *flus
 	do {
 		count--;
 		junprotect(node);
-		ON_DEBUG(LOCK_JNODE(node);
-			 count_jnode(atom, node, PROTECT_LIST, DIRTY_LIST, 0);
-			 UNLOCK_JNODE(node););
+		ON_DEBUG(
+			spin_lock_jnode(node);
+			count_jnode(atom, node, PROTECT_LIST, DIRTY_LIST, 0);
+			spin_unlock_jnode(node);
+			);
 		if (count == 0) {
 			break;
 		}
@@ -454,7 +456,7 @@ unprotect_extent_nodes(flush_pos_t *flus
 	protected_list_split(protected_nodes, &unprotected_nodes, node);
 	list_splice_init(&unprotected_nodes, ATOM_DIRTY_LIST(atom, LEAF_LEVEL)->prev);
 
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 extern int getjevent(void);
@@ -464,8 +466,8 @@ static void protect_reloc_node(struct li
 {
 	assert("zam-836", !JF_ISSET(node, JNODE_EPROTECTED));
 	assert("vs-1216", jnode_is_unformatted(node));
-	assert("vs-1477", spin_atom_is_locked(node->atom));
-	assert("nikita-3390", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->atom->alock));
+	assert_spin_locked(&(node->guard));
 
 	JF_SET(node, JNODE_EPROTECTED);
 	list_del_init(&node->capture_link);
@@ -517,31 +519,31 @@ protect_extent_nodes(flush_pos_t *flush_
 			break;
 		}
 
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		assert("vs-1476", atomic_read(&node->x_count) > 1);
 		assert("nikita-3393", !JF_ISSET(node, JNODE_EPROTECTED));
 
 		if (JF_ISSET(node, JNODE_EFLUSH)) {
 			if (eflushed == JNODES_TO_UNFLUSH) {
-				UNLOCK_JNODE(node);
+				spin_unlock_jnode(node);
 				atomic_dec(&node->x_count);
 				break;
 			}
 			buf[eflushed] = node;
 			eflushed++;
 			protect_reloc_node(protected_nodes, node);
-			UNLOCK_JNODE(node);
+			spin_unlock_jnode(node);
 		} else {
 			assert("nikita-3384", node->atom == atom);
 			protect_reloc_node(protected_nodes, node);
 			assert("nikita-3383", !JF_ISSET(node, JNODE_EFLUSH));
-			UNLOCK_JNODE(node);
+			spin_unlock_jnode(node);
 			atomic_dec(&node->x_count);
 		}
 
 		(*protected)++;
 	}
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	/* start io for eflushed nodes */
 	for (j = 0; j < eflushed; ++j)
@@ -554,7 +556,6 @@ protect_extent_nodes(flush_pos_t *flush_
 			if (result != 0) {
 				warning("nikita-3179",
 					"unflush failed: %i", result);
-				print_jnode("node", buf[j]);
 			}
 		}
 		jput(buf[j]);
@@ -706,7 +707,7 @@ assign_real_blocknrs(flush_pos_t *flush_
 
 	i = 0;
 	list_for_each_entry(node, protected_nodes, capture_link) {
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		assert("vs-1132",
 		       ergo(state == UNALLOCATED_EXTENT,
 			    blocknr_is_fake(jnode_get_block(node))));
@@ -720,7 +721,7 @@ assign_real_blocknrs(flush_pos_t *flush_
 				     FQ_LIST, 0));
 		junprotect(node);
 		assert("", NODE_LIST(node) == FQ_LIST);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		first++;
 		i++;
 	}
@@ -730,7 +731,7 @@ assign_real_blocknrs(flush_pos_t *flush_
 	assert("vs-1687", count == i);
 	if (state == UNALLOCATED_EXTENT)
 		dec_unalloc_unfm_ptrs(count);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 /**
@@ -744,7 +745,7 @@ assign_real_blocknrs(flush_pos_t *flush_
  */
 static void make_node_ovrwr(struct list_head *jnodes, jnode *node)
 {
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 
 	assert("zam-917", !JF_ISSET(node, JNODE_RELOC));
 	assert("zam-918", !JF_ISSET(node, JNODE_OVRWR));
@@ -754,7 +755,7 @@ static void make_node_ovrwr(struct list_
 	list_add_tail(&node->capture_link, jnodes);
 	ON_DEBUG(count_jnode(node->atom, node, DIRTY_LIST, OVRWR_LIST, 0));
 
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 }
 
 /**
@@ -799,7 +800,7 @@ static void mark_jnodes_overwrite(flush_
 	}
 
 	list_splice_init(&jnodes, ATOM_OVRWR_LIST(atom)->prev);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 /* this is called by handle_pos_on_twig to proceed extent unit flush_pos->coord is set to. It is to prepare for flushing
diff -puN fs/reiser4/plugin/item/extent_item_ops.c~reiser4-spinlock-cleanup fs/reiser4/plugin/item/extent_item_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/item/extent_item_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.620982000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/item/extent_item_ops.c	2005-10-20 14:01:52.808993750 +0400
@@ -263,8 +263,8 @@ int create_hook_extent(const coord_t * c
 
 	assert("nikita-3246", znode_get_level(child_coord->node) == LEAF_LEVEL);
 
-	WLOCK_TREE(tree);
-	WLOCK_DK(tree);
+	write_lock_tree(tree);
+	write_lock_dk(tree);
 	/* find a node on the left level for which right delimiting key has to
 	   be updated */
 	if (coord_wrt(child_coord) == COORD_ON_THE_LEFT) {
@@ -291,8 +291,8 @@ int create_hook_extent(const coord_t * c
 			node->right = NULL;
 		}
 	}
-	WUNLOCK_DK(tree);
-	WUNLOCK_TREE(tree);
+	write_unlock_dk(tree);
+	write_unlock_tree(tree);
 	return 0;
 }
 
@@ -373,8 +373,8 @@ kill_hook_extent(const coord_t * coord, 
 			 */
 			/* if neighbors of item being removed are znodes -
 			 * link them */
-			WLOCK_TREE(tree);
-			WLOCK_DK(tree);
+			write_lock_tree(tree);
+			write_lock_dk(tree);
 			link_left_and_right(left, right);
 			if (left) {
 				/* update right delimiting key of left
@@ -390,8 +390,8 @@ kill_hook_extent(const coord_t * coord, 
 					item_key_by_coord(next, key);
 				znode_set_rd_key(left, key);
 			}
-			WUNLOCK_DK(tree);
-			WUNLOCK_TREE(tree);
+			write_unlock_dk(tree);
+			write_unlock_tree(tree);
 
 			from_off =
 			    get_key_offset(min_item_key) >> PAGE_CACHE_SHIFT;
@@ -426,8 +426,9 @@ kill_hook_extent(const coord_t * coord, 
 			*key = *pto_key;
 			set_key_offset(key, get_key_offset(pto_key) + 1);
 
-			UNDER_RW_VOID(dk, current_tree, write,
-				      znode_set_rd_key(kdata->left->node, key));
+			write_lock_dk(current_tree);
+			znode_set_rd_key(kdata->left->node, key);
+			write_unlock_dk(current_tree);
 		}
 
 		from_off = get_key_offset(pfrom_key) >> PAGE_CACHE_SHIFT;
diff -puN fs/reiser4/plugin/item/internal.c~reiser4-spinlock-cleanup fs/reiser4/plugin/item/internal.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/item/internal.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.632982750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/item/internal.c	2005-10-20 14:01:52.808993750 +0400
@@ -186,22 +186,22 @@ int check__internal(const coord_t * coor
 		assert("nikita-3256", znode_invariant(child));
 		if (coord_prev_item(&cpy) == 0 && item_is_internal(&cpy)) {
 			left_child = znode_at(&cpy, cpy.node);
-			RLOCK_TREE(znode_get_tree(child));
-			if (left_child != NULL)
+			if (left_child != NULL) {
+				read_lock_tree(znode_get_tree(child));
 				check_link(left_child, child);
-			RUNLOCK_TREE(znode_get_tree(child));
-			if (left_child != NULL)
+				read_unlock_tree(znode_get_tree(child));
 				zput(left_child);
+			}
 		}
 		coord_dup(&cpy, coord);
 		if (coord_next_item(&cpy) == 0 && item_is_internal(&cpy)) {
 			right_child = znode_at(&cpy, cpy.node);
-			RLOCK_TREE(znode_get_tree(child));
-			if (right_child != NULL)
+			if (right_child != NULL) {
+				read_lock_tree(znode_get_tree(child));
 				check_link(child, right_child);
-			RUNLOCK_TREE(znode_get_tree(child));
-			if (right_child != NULL)
+				read_unlock_tree(znode_get_tree(child));
 				zput(right_child);
+			}
 		}
 		zput(child);
 	}
@@ -256,8 +256,8 @@ int create_hook_internal(const coord_t *
 
 		left = arg;
 		tree = znode_get_tree(item->node);
-		WLOCK_TREE(tree);
-		WLOCK_DK(tree);
+		write_lock_tree(tree);
+		write_lock_dk(tree);
 		assert("nikita-1400", (child->in_parent.node == NULL)
 		       || (znode_above_root(child->in_parent.node)));
 		++item->node->c_count;
@@ -271,8 +271,8 @@ int create_hook_internal(const coord_t *
 					     znode_get_rd_key(child))) {
 			znode_set_rd_key(child, znode_get_rd_key(left));
 		}
-		WUNLOCK_DK(tree);
-		WUNLOCK_TREE(tree);
+		write_unlock_dk(tree);
+		write_unlock_tree(tree);
 		zput(child);
 		return result;
 	} else {
@@ -320,17 +320,15 @@ int kill_hook_internal(const coord_t * i
 		assert("nikita-2546", ZF_ISSET(child, JNODE_HEARD_BANSHEE));
 
 		tree = znode_get_tree(item->node);
-		WLOCK_TREE(tree);
+		write_lock_tree(tree);
 		init_parent_coord(&child->in_parent, NULL);
 		--item->node->c_count;
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 		zput(child);
 		return 0;
 	} else {
 		warning("nikita-1223",
 			"Cowardly refuse to remove link to non-empty node");
-		print_znode("parent", item->node);
-		print_znode("child", child);
 		zput(child);
 		return RETERR(-EIO);
 	}
@@ -363,7 +361,7 @@ int shift_hook_internal(const coord_t * 
 	if (child == NULL)
 		return 0;
 	if (!IS_ERR(child)) {
-		WLOCK_TREE(tree);
+		write_lock_tree(tree);
 		++new_node->c_count;
 		assert("nikita-1395", znode_parent(child) == old_node);
 		assert("nikita-1396", old_node->c_count > 0);
@@ -372,7 +370,7 @@ int shift_hook_internal(const coord_t * 
 		assert("nikita-1782",
 		       check_tree_pointer(item, child) == NS_FOUND);
 		--old_node->c_count;
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 		zput(child);
 		return 0;
 	} else
diff -puN fs/reiser4/plugin/node/node40.c~reiser4-spinlock-cleanup fs/reiser4/plugin/node/node40.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/node/node40.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.648983750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/node/node40.c	2005-10-20 14:01:52.812994000 +0400
@@ -391,7 +391,6 @@ node_search_result lookup_node40(znode *
 				left);
 			print_key("key", key);
 			print_key("min", &bstop->key);
-			print_znode("node", node);
 			print_coord_content("coord", coord);
 			return RETERR(-EIO);
 		} else {
@@ -406,7 +405,6 @@ node_search_result lookup_node40(znode *
 		warning("nikita-588", "Unknown plugin %i",
 			le16_to_cpu(get_unaligned(&bstop->plugin_id)));
 		print_key("key", key);
-		print_znode("node", node);
 		print_coord_content("coord", coord);
 		return RETERR(-EIO);
 	}
@@ -475,6 +473,7 @@ int check_node40(const znode * node /* n
 	unsigned old_offset;
 	tree_level level;
 	coord_t coord;
+	int result;
 
 	assert("nikita-580", node != NULL);
 	assert("nikita-581", error != NULL);
@@ -591,25 +590,26 @@ int check_node40(const znode * node /* n
 
 			iplug->s.file.append_key(&coord, &mkey);
 			set_key_offset(&mkey, get_key_offset(&mkey) - 1);
-			if (UNDER_RW
-			    (dk, current_tree, read,
-			     keygt(&mkey, znode_get_rd_key((znode *) node)))) {
+			read_lock_dk(current_tree);
+			result = keygt(&mkey, znode_get_rd_key((znode *) node));
+			read_unlock_dk(current_tree);
+			if (result) {
 				*error = "key of rightmost item is too large";
 				return -1;
 			}
 		}
 	}
 	if (flags & REISER4_NODE_DKEYS) {
-		RLOCK_TREE(current_tree);
-		RLOCK_DK(current_tree);
+		read_lock_tree(current_tree);
+		read_lock_dk(current_tree);
 
 		flags |= REISER4_NODE_TREE_STABLE;
 
 		if (keygt(&prev, znode_get_rd_key((znode *) node))) {
 			if (flags & REISER4_NODE_TREE_STABLE) {
 				*error = "Last key is greater than rdkey";
-				RUNLOCK_DK(current_tree);
-				RUNLOCK_TREE(current_tree);
+				read_unlock_dk(current_tree);
+				read_unlock_tree(current_tree);
 				return -1;
 			}
 		}
@@ -617,8 +617,8 @@ int check_node40(const znode * node /* n
 		    (znode_get_ld_key((znode *) node),
 		     znode_get_rd_key((znode *) node))) {
 			*error = "ldkey is greater than rdkey";
-			RUNLOCK_DK(current_tree);
-			RUNLOCK_TREE(current_tree);
+			read_unlock_dk(current_tree);
+			read_unlock_tree(current_tree);
 			return -1;
 		}
 		if (ZF_ISSET(node, JNODE_LEFT_CONNECTED) &&
@@ -631,8 +631,8 @@ int check_node40(const znode * node /* n
 			    keygt(znode_get_rd_key(node->left),
 				  znode_get_ld_key((znode *) node)))) {
 			*error = "left rdkey or ldkey is wrong";
-			RUNLOCK_DK(current_tree);
-			RUNLOCK_TREE(current_tree);
+			read_unlock_dk(current_tree);
+			read_unlock_tree(current_tree);
 			return -1;
 		}
 		if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) &&
@@ -645,13 +645,13 @@ int check_node40(const znode * node /* n
 			    keygt(znode_get_rd_key((znode *) node),
 				  znode_get_ld_key(node->right)))) {
 			*error = "rdkey or right ldkey is wrong";
-			RUNLOCK_DK(current_tree);
-			RUNLOCK_TREE(current_tree);
+			read_unlock_dk(current_tree);
+			read_unlock_tree(current_tree);
 			return -1;
 		}
 
-		RUNLOCK_DK(current_tree);
-		RUNLOCK_TREE(current_tree);
+		read_unlock_dk(current_tree);
+		read_unlock_tree(current_tree);
 	}
 
 	return 0;
@@ -2084,6 +2084,7 @@ prepare_for_update(znode * left, znode *
 int prepare_removal_node40(znode * empty, carry_plugin_info * info)
 {
 	carry_op *op;
+	reiser4_tree *tree;
 
 	if (!should_notify_parent(empty))
 		return 0;
@@ -2098,14 +2099,14 @@ int prepare_removal_node40(znode * empty
 	op->u.delete.flags = 0;
 
 	/* fare thee well */
-
-	RLOCK_TREE(current_tree);
-	WLOCK_DK(current_tree);
+	tree = znode_get_tree(empty);
+	read_lock_tree(tree);
+	write_lock_dk(tree);
 	znode_set_ld_key(empty, znode_get_rd_key(empty));
 	if (znode_is_left_connected(empty) && empty->left)
 		znode_set_rd_key(empty->left, znode_get_rd_key(empty));
-	WUNLOCK_DK(current_tree);
-	RUNLOCK_TREE(current_tree);
+	write_unlock_dk(tree);
+	read_unlock_tree(tree);
 
 	ZF_SET(empty, JNODE_HEARD_BANSHEE);
 	return 0;
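
prepare_removal_node40() now spells out the nesting that the old RLOCK_TREE/WLOCK_DK macros implied: tree lock taken for read, then dk lock for write, released in reverse order. A minimal pthread model of that discipline:

#include <pthread.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t dk_lock = PTHREAD_RWLOCK_INITIALIZER;

static void update_delimiting_keys(void)
{
	pthread_rwlock_rdlock(&tree_lock);	/* read_lock_tree(tree) */
	pthread_rwlock_wrlock(&dk_lock);	/* write_lock_dk(tree) */

	/* ... znode_set_ld_key()/znode_set_rd_key() run here ... */

	pthread_rwlock_unlock(&dk_lock);	/* write_unlock_dk(tree) */
	pthread_rwlock_unlock(&tree_lock);	/* read_unlock_tree(tree) */
}

int main(void)
{
	update_delimiting_keys();
	return 0;
}
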
diff -puN fs/reiser4/plugin/space/bitmap.c~reiser4-spinlock-cleanup fs/reiser4/plugin/space/bitmap.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/plugin/space/bitmap.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.664984750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/plugin/space/bitmap.c	2005-10-20 14:01:52.812994000 +0400
@@ -1283,8 +1283,8 @@ static void cond_add_to_overwrite_set(tx
 	assert("zam-547", atom->stage == ASTAGE_PRE_COMMIT);
 	assert("zam-548", node != NULL);
 
-	LOCK_ATOM(atom);
-	LOCK_JNODE(node);
+	spin_lock_atom(atom);
+	spin_lock_jnode(node);
 
 	if (node->atom == NULL) {
 		JF_SET(node, JNODE_OVRWR);
@@ -1293,8 +1293,8 @@ static void cond_add_to_overwrite_set(tx
 		assert("zam-549", node->atom == atom);
 	}
 
-	UNLOCK_JNODE(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_jnode(node);
+	spin_unlock_atom(atom);
 }
 
 /* an actor which applies delete set to COMMIT bitmap pages and link modified
@@ -1559,9 +1559,9 @@ int pre_commit_hook_bitmap(void)
 
 		sbinfo = get_super_private(super);
 
-		reiser4_spin_lock_sb(sbinfo);
+		spin_lock_reiser4_super(sbinfo);
 		sbinfo->blocks_free_committed += blocks_freed;
-		reiser4_spin_unlock_sb(sbinfo);
+		spin_unlock_reiser4_super(sbinfo);
 	}
 
 	return 0;
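
cond_add_to_overwrite_set() shows the other fixed ordering in this patch: atom spinlock first, jnode spinlock second, unlocked in reverse. Sketched with mutexes standing in for the kernel spinlocks:

#include <pthread.h>

static pthread_mutex_t atom_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t jnode_lock = PTHREAD_MUTEX_INITIALIZER;

static void cond_add_to_overwrite_set(void)
{
	pthread_mutex_lock(&atom_lock);		/* spin_lock_atom(atom) */
	pthread_mutex_lock(&jnode_lock);	/* spin_lock_jnode(node) */

	/* ... JF_SET(node, JNODE_OVRWR) and list insertion run here ... */

	pthread_mutex_unlock(&jnode_lock);	/* spin_unlock_jnode(node) */
	pthread_mutex_unlock(&atom_lock);	/* spin_unlock_atom(atom) */
}

int main(void)
{
	cond_add_to_overwrite_set();
	return 0;
}
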
diff -puN fs/reiser4/readahead.c~reiser4-spinlock-cleanup fs/reiser4/readahead.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/readahead.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.668985000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/readahead.c	2005-10-20 14:01:52.816994250 +0400
@@ -26,8 +26,12 @@ static inline int ra_adjacent_only(int f
    if right neighbor's first key is less or equal to readahead's stop key */
 static int should_readahead_neighbor(znode * node, ra_info_t * info)
 {
-	return (UNDER_RW(dk, ZJNODE(node)->tree, read,
-			 keyle(znode_get_rd_key(node), &info->key_to_stop)));
+	int result;
+
+	read_lock_dk(znode_get_tree(node));
+	result = keyle(znode_get_rd_key(node), &info->key_to_stop);
+	read_unlock_dk(znode_get_tree(node));
+	return result;
 }
 
 #define LOW_MEM_PERCENTAGE (5)
diff -puN fs/reiser4/reiser4.h~reiser4-spinlock-cleanup fs/reiser4/reiser4.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/reiser4.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.672985250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/reiser4.h	2005-10-20 14:01:52.816994250 +0400
@@ -97,7 +97,7 @@ extern const int REISER4_MAGIC_OFFSET;	/
 #define REISER4_USE_ENTD (1)
 
 /* Using of emergency flush is an option. */
-#define REISER4_USE_EFLUSH (0)
+#define REISER4_USE_EFLUSH (1)
 
 /* key allocation is Plan-A */
 #define REISER4_PLANA_KEY_ALLOCATION (1)
diff -puN fs/reiser4/seal.c~reiser4-spinlock-cleanup fs/reiser4/seal.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/seal.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.676985500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/seal.c	2005-10-20 14:01:52.816994250 +0400
@@ -194,11 +194,15 @@ static znode *seal_node(const seal_t * s
 static int seal_matches(const seal_t * seal /* seal to check */ ,
 			znode * node /* node to check */ )
 {
+	int result;
+
 	assert("nikita-1991", seal != NULL);
 	assert("nikita-1993", node != NULL);
 
-	return UNDER_SPIN(jnode, ZJNODE(node),
-			  (seal->version == node->version));
+	spin_lock_znode(node);
+	result = (seal->version == node->version);
+	spin_unlock_znode(node);
+	return result;
 }
 
 /* Make Linus happy.
diff -puN fs/reiser4/search.c~reiser4-spinlock-cleanup fs/reiser4/search.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/search.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.680985750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/search.c	2005-10-20 14:01:52.820994500 +0400
@@ -64,7 +64,7 @@ int cbk_cache_init(cbk_cache *cache /* c
 		cbk_cache_init_slot(cache->slot + i);
 		list_add_tail(&((cache->slot + i)->lru), &cache->lru);
 	}
-	rw_cbk_cache_init(cache);
+	rwlock_init(&cache->guard);
 	return 0;
 }
 
@@ -107,7 +107,7 @@ static int cbk_cache_invariant(const cbk
 	assert("nikita-2469", cache != NULL);
 	unused = 0;
 	result = 1;
-	read_lock_cbk_cache((cbk_cache *) cache);
+	read_lock(&((cbk_cache *)cache)->guard);
 	for_all_slots(cache, slot) {
 		/* in LRU first go all `used' slots followed by `unused' */
 		if (unused && (slot->node != NULL))
@@ -130,7 +130,7 @@ static int cbk_cache_invariant(const cbk
 		if (!result)
 			break;
 	}
-	read_unlock_cbk_cache((cbk_cache *) cache);
+	read_unlock(&((cbk_cache *)cache)->guard);
 	return result;
 }
 
@@ -150,7 +150,7 @@ void cbk_cache_invalidate(const znode * 
 	cache = &tree->cbk_cache;
 	assert("nikita-2470", cbk_cache_invariant(cache));
 
-	write_lock_cbk_cache(cache);
+	write_lock(&(cache->guard));
 	for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) {
 		if (slot->node == node) {
 			list_del(&slot->lru);
@@ -159,7 +159,7 @@ void cbk_cache_invalidate(const znode * 
 			break;
 		}
 	}
-	write_unlock_cbk_cache(cache);
+	write_unlock(&(cache->guard));
 	assert("nikita-2471", cbk_cache_invariant(cache));
 }
 
@@ -179,7 +179,7 @@ static void cbk_cache_add(const znode *n
 	if (cache->nr_slots == 0)
 		return;
 
-	write_lock_cbk_cache(cache);
+	write_lock(&(cache->guard));
 	/* find slot to update/add */
 	for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) {
 		/* oops, this node is already in a cache */
@@ -193,7 +193,7 @@ static void cbk_cache_add(const znode *n
 	}
 	list_del(&slot->lru);
 	list_add(&slot->lru, &cache->lru);
-	write_unlock_cbk_cache(cache);
+	write_unlock(&(cache->guard));
 	assert("nikita-2473", cbk_cache_invariant(cache));
 }
 
@@ -605,12 +605,10 @@ static int prepare_object_lookup(cbk_han
 
 		isunique = h->flags & CBK_UNIQUE;
 		/* check that key is inside vroot */
-		inside =
-		    UNDER_RW(dk, h->tree, read,
-			     znode_contains_key_strict(vroot,
-						       h->key,
-						       isunique)) &&
-		    !ZF_ISSET(vroot, JNODE_HEARD_BANSHEE);
+		read_lock_dk(h->tree);
+		inside = (znode_contains_key_strict(vroot, h->key, isunique) &&
+			  !ZF_ISSET(vroot, JNODE_HEARD_BANSHEE));
+		read_unlock_dk(h->tree);
 		if (inside) {
 			h->result = zload(vroot);
 			if (h->result == 0) {
@@ -736,8 +734,6 @@ static lookup_result traverse_tree(cbk_h
 		print_address("block", &h->block);
 		print_key("key", h->key);
 		print_coord_content("coord", h->coord);
-		print_znode("active", h->active_lh->node);
-		print_znode("parent", h->parent_lh->node);
 	}
 	/* `unlikely' error case */
 	if (unlikely(IS_CBKERR(h->result))) {
@@ -774,7 +770,7 @@ static void find_child_delimiting_keys(z
 	coord_t neighbor;
 
 	assert("nikita-1484", parent != NULL);
-	assert("nikita-1485", rw_dk_is_locked(znode_get_tree(parent)));
+	assert_rw_locked(&(znode_get_tree(parent)->dk_lock));
 
 	coord_dup(&neighbor, parent_coord);
 
@@ -819,7 +815,7 @@ set_child_delimiting_keys(znode * parent
 	 * JNODE_DKSET is never cleared once set. */
 	if (!ZF_ISSET(child, JNODE_DKSET)) {
 		tree = znode_get_tree(parent);
-		WLOCK_DK(tree);
+		write_lock_dk(tree);
 		if (likely(!ZF_ISSET(child, JNODE_DKSET))) {
 			find_child_delimiting_keys(parent, coord,
 						   &child->ld_key,
@@ -830,7 +826,7 @@ set_child_delimiting_keys(znode * parent
 				 atomic_inc_return(&delim_key_version););
 			ZF_SET(child, JNODE_DKSET);
 		}
-		WUNLOCK_DK(tree);
+		write_unlock_dk(tree);
 		return 1;
 	}
 	return 0;
@@ -895,10 +891,10 @@ static level_lookup_result cbk_level_loo
 			setdk = set_child_delimiting_keys(parent,
 							  h->coord, active);
 		else {
-			UNDER_RW_VOID(dk, h->tree, read,
-				      find_child_delimiting_keys(parent,
-								 h->coord,
-								 &ldkey, &key));
+			read_lock_dk(h->tree);
+			find_child_delimiting_keys(parent, h->coord, &ldkey,
+						   &key);
+			read_unlock_dk(h->tree);
 			ldkeyset = 1;
 		}
 		zrelse(parent);
@@ -911,13 +907,13 @@ static level_lookup_result cbk_level_loo
 	h->coord->between = AT_UNIT;
 
 	if (znode_just_created(active) && (h->coord->node != NULL)) {
-		WLOCK_TREE(h->tree);
+		write_lock_tree(h->tree);
 		/* if we are going to load znode right now, setup
 		   ->in_parent: coord where pointer to this node is stored in
 		   parent.
 		 */
 		coord_to_parent_coord(h->coord, &active->in_parent);
-		WUNLOCK_TREE(h->tree);
+		write_unlock_tree(h->tree);
 	}
 
 	/* check connectedness without holding tree lock---false negatives
@@ -1003,8 +999,8 @@ void check_dkeys(znode * node)
 	znode *left;
 	znode *right;
 
-	RLOCK_TREE(current_tree);
-	RLOCK_DK(current_tree);
+	read_lock_tree(current_tree);
+	read_lock_dk(current_tree);
 
 	assert("vs-1710", znode_is_any_locked(node));
 	assert("vs-1197",
@@ -1029,8 +1025,8 @@ void check_dkeys(znode * node)
 		       (keyeq(znode_get_rd_key(node), znode_get_ld_key(right))
 			|| ZF_ISSET(right, JNODE_HEARD_BANSHEE)));
 
-	RUNLOCK_DK(current_tree);
-	RUNLOCK_TREE(current_tree);
+	read_unlock_dk(current_tree);
+	read_unlock_tree(current_tree);
 }
 #endif
 
@@ -1042,10 +1038,10 @@ static int key_is_ld(znode * node, const
 	assert("nikita-1716", node != NULL);
 	assert("nikita-1758", key != NULL);
 
-	RLOCK_DK(znode_get_tree(node));
+	read_lock_dk(znode_get_tree(node));
 	assert("nikita-1759", znode_contains_key(node, key));
 	ld = keyeq(znode_get_ld_key(node), key);
-	RUNLOCK_DK(znode_get_tree(node));
+	read_unlock_dk(znode_get_tree(node));
 	return ld;
 }
 
@@ -1179,7 +1175,7 @@ static int cbk_cache_scan_slots(cbk_hand
 	 */
 
 	rcu_read_lock();
-	read_lock_cbk_cache(cache);
+	read_lock(&((cbk_cache *)cache)->guard);
 
 	slot = list_entry(cache->lru.next, cbk_cache_slot, lru);
 	slot = list_entry(slot->lru.prev, cbk_cache_slot, lru);
@@ -1207,11 +1203,11 @@ static int cbk_cache_scan_slots(cbk_hand
 		    znode_contains_key_strict(node, key, isunique)) {
 			zref(node);
 			result = 0;
-			spin_lock_prefetch(&tree->tree_lock.lock);
+			spin_lock_prefetch(&tree->tree_lock);
 			break;
 		}
 	}
-	read_unlock_cbk_cache(cache);
+	read_unlock(&((cbk_cache *)cache)->guard);
 
 	assert("nikita-2475", cbk_cache_invariant(cache));
 
@@ -1236,11 +1232,10 @@ static int cbk_cache_scan_slots(cbk_hand
 		return result;
 
 	/* recheck keys */
-	result =
-	    UNDER_RW(dk, tree, read,
-		     znode_contains_key_strict(node, key, isunique)) &&
-	    !ZF_ISSET(node, JNODE_HEARD_BANSHEE);
-
+	read_lock_dk(tree);
+	result = (znode_contains_key_strict(node, key, isunique) &&
+		!ZF_ISSET(node, JNODE_HEARD_BANSHEE));
+	read_unlock_dk(tree);
 	if (result) {
 		/* do lookup inside node */
 		llr = cbk_node_lookup(h);
@@ -1258,14 +1253,14 @@ static int cbk_cache_scan_slots(cbk_hand
 			/* good. Either item found or definitely not found. */
 			result = 0;
 
-			write_lock_cbk_cache(cache);
+			write_lock(&(cache->guard));
 			if (slot->node == h->active_lh->node /*node */ ) {
 				/* if this node is still in cbk cache---move
 				   its slot to the head of the LRU list. */
 				list_del(&slot->lru);
 				list_add(&slot->lru, &cache->lru);
 			}
-			write_unlock_cbk_cache(cache);
+			write_unlock(&(cache->guard));
 		}
 	} else {
 		/* race. While this thread was waiting for the lock, node was
@@ -1337,8 +1332,8 @@ static void stale_dk(reiser4_tree * tree
 {
 	znode *right;
 
-	RLOCK_TREE(tree);
-	WLOCK_DK(tree);
+	read_lock_tree(tree);
+	write_lock_dk(tree);
 	right = node->right;
 
 	if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) &&
@@ -1346,8 +1341,8 @@ static void stale_dk(reiser4_tree * tree
 	    !keyeq(znode_get_rd_key(node), znode_get_ld_key(right)))
 		znode_set_rd_key(node, znode_get_ld_key(right));
 
-	WUNLOCK_DK(tree);
-	RUNLOCK_TREE(tree);
+	write_unlock_dk(tree);
+	read_unlock_tree(tree);
 }
 
 /* check for possibly outdated delimiting keys, and update them if
@@ -1357,8 +1352,8 @@ static void update_stale_dk(reiser4_tree
 	znode *right;
 	reiser4_key rd;
 
-	RLOCK_TREE(tree);
-	RLOCK_DK(tree);
+	read_lock_tree(tree);
+	read_lock_dk(tree);
 	rd = *znode_get_rd_key(node);
 	right = node->right;
 	if (unlikely(ZF_ISSET(node, JNODE_RIGHT_CONNECTED) &&
@@ -1367,13 +1362,13 @@ static void update_stale_dk(reiser4_tree
 		/* does this ever happen? */
 		warning("nikita-38210", "stale dk");
 		assert("nikita-38211", ZF_ISSET(node, JNODE_DKSET));
-		RUNLOCK_DK(tree);
-		RUNLOCK_TREE(tree);
+		read_unlock_dk(tree);
+		read_unlock_tree(tree);
 		stale_dk(tree, node);
 		return;
 	}
-	RUNLOCK_DK(tree);
-	RUNLOCK_TREE(tree);
+	read_unlock_dk(tree);
+	read_unlock_tree(tree);
 }
 
 /*
@@ -1452,10 +1447,10 @@ static level_lookup_result search_to_lef
 	default:		/* some other error */
 				result = LOOKUP_DONE;
 			} else if (h->result == NS_FOUND) {
-				RLOCK_DK(znode_get_tree(neighbor));
+				read_lock_dk(znode_get_tree(neighbor));
 				h->rd_key = *znode_get_ld_key(node);
 				leftmost_key_in_node(neighbor, &h->ld_key);
-				RUNLOCK_DK(znode_get_tree(neighbor));
+				read_unlock_dk(znode_get_tree(neighbor));
 				h->flags |= CBK_DKSET;
 
 				h->block = *znode_get_block(neighbor);
@@ -1465,8 +1460,10 @@ static level_lookup_result search_to_lef
 				   Parent hint was set up by
 				   reiser4_get_left_neighbor()
 				 */
-				UNDER_RW_VOID(tree, znode_get_tree(neighbor),
-					      write, h->coord->node = NULL);
+				/* FIXME: why do we have to spinlock here? */
+				write_lock_tree(znode_get_tree(neighbor));
+				h->coord->node = NULL;
+				write_unlock_tree(znode_get_tree(neighbor));
 				result = LOOKUP_CONT;
 			} else {
 				result = LOOKUP_DONE;
@@ -1511,7 +1508,6 @@ void print_coord_content(const char *pre
 	    && coord_is_existing_item(p))
 		printk("%s: data: %p, length: %i\n", prefix,
 		       item_body_by_coord(p), item_length_by_coord(p));
-	print_znode(prefix, p->node);
 	if (znode_is_loaded(p->node)) {
 		item_key_by_coord(p, &key);
 		print_key(prefix, &key);
@@ -1574,13 +1570,13 @@ static int setup_delimiting_keys(cbk_han
 	 * JNODE_DKSET is never cleared once set. */
 	if (!ZF_ISSET(active, JNODE_DKSET)) {
 		tree = znode_get_tree(active);
-		WLOCK_DK(tree);
+		write_lock_dk(tree);
 		if (!ZF_ISSET(active, JNODE_DKSET)) {
 			znode_set_ld_key(active, &h->ld_key);
 			znode_set_rd_key(active, &h->rd_key);
 			ZF_SET(active, JNODE_DKSET);
 		}
-		WUNLOCK_DK(tree);
+		write_unlock_dk(tree);
 		return 1;
 	}
 	return 0;
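
setup_delimiting_keys() and set_child_delimiting_keys() keep their double-checked pattern through the conversion: JNODE_DKSET is tested before taking the write lock and re-tested under it, which is safe only because the flag is set once and never cleared. A userspace model, with a C11 atomic for the flag:

#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t dk_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int dkset;	/* JNODE_DKSET: set once, never cleared */

static int setup_delimiting_keys(void)
{
	if (!atomic_load(&dkset)) {		/* cheap unlocked test */
		pthread_rwlock_wrlock(&dk_lock);
		if (!atomic_load(&dkset)) {	/* recheck under the lock */
			/* ... znode_set_ld_key()/znode_set_rd_key() ... */
			atomic_store(&dkset, 1);
		}
		pthread_rwlock_unlock(&dk_lock);
		return 1;
	}
	return 0;
}

int main(void)
{
	return setup_delimiting_keys() == 1 ? 0 : 1;
}
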
diff -puN -L fs/reiser4/spin_macros.h fs/reiser4/spin_macros.h~reiser4-spinlock-cleanup /dev/null
--- linux-2.6.14-rc4-mm1/fs/reiser4/spin_macros.h
+++ /dev/null	2003-09-23 21:59:22.000000000 +0400
@@ -1,474 +0,0 @@
-/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
-
-/* Wrapper functions/macros for spin locks. */
-
-/*
- * This file implements wrapper functions and macros to work with spin locks
- * and read write locks embedded into kernel objects. Wrapper functions
- * provide following functionality:
- *
- *    (1) encapsulation of locks: in stead of writing spin_lock(&obj->lock),
- *    where obj is object of type foo, one writes spin_lock_foo(obj).
- *
- *    (2) optional keeping (in per-thread reiser4_context->locks) information
- *    about number of locks of particular type currently held by thread. This
- *    is done if REISER4_DEBUG is on.
- *
- *    (3) optional checking of lock ordering. For object type foo, it is
- *    possible to provide "lock ordering predicate" (possibly using
- *    information stored in reiser4_context->locks) checking that locks are
- *    acquired in the proper order. This is done if REISER4_DEBUG is on.
- *
- *    (4) optional collection of spin lock contention statistics. In this mode
- *    two sysfs objects (located in /sys/profregion) are associated with each
- *    spin lock type. One object (foo_t) shows how much time was spent trying
- *    to acquire spin locks of foo type. Another (foo_h) shows how much time
- *    spin locks of the type foo were held locked. See spinprof.h for more
- *    details on this.
- *
- */
-
-#ifndef __SPIN_MACROS_H__
-#define __SPIN_MACROS_H__
-
-#include <linux/spinlock.h>
-#include <linux/profile.h>
-
-#include "debug.h"
-
-/* Checks that read write lock @s is locked (or not) by the -current-
- * thread. not yet implemented */
-#define check_is_write_locked(s)     ((void)(s), 1)
-#define check_is_read_locked(s)      ((void)(s), 1)
-#define check_is_not_read_locked(s)  ((void)(s), 1)
-#define check_is_not_write_locked(s) ((void)(s), 1)
-
-/* Checks that spin lock @s is locked (or not) by the -current- thread. */
-#define check_spin_is_not_locked(s) ((void)(s), 1)
-#define spin_is_not_locked(s)       ((void)(s), 1)
-#if defined(CONFIG_SMP)
-#    define check_spin_is_locked(s)     spin_is_locked(s)
-#else
-#    define check_spin_is_locked(s)     ((void)(s), 1)
-#endif
-
-/*
- * Data structure embedded into kernel objects together with spin lock.
- */
-typedef struct reiser4_spin_data {
-	/* spin lock proper */
-	spinlock_t lock;
-} reiser4_spin_data;
-
-/*
- * Data structure embedded into kernel objects together with read write lock.
- */
-typedef struct reiser4_rw_data {
-	/* read write lock proper */
-	rwlock_t lock;
-} reiser4_rw_data;
-
-#if REISER4_DEBUG
-#define __ODCA(l, e) ON_DEBUG_CONTEXT(assert(l, e))
-#else
-#define __ODCA(l, e) noop
-#endif
-
-/* Define several inline functions for each type of spinlock. This is long
- * monster macro definition. */
-#define SPIN_LOCK_FUNCTIONS(NAME,TYPE,FIELD)					\
-										\
-/* Initialize spin lock embedded in @x			*/			\
-static inline void spin_ ## NAME ## _init(TYPE *x)				\
-{										\
-	__ODCA("nikita-2987", x != NULL);					\
-	spin_lock_init(& x->FIELD.lock);					\
-}										\
-										\
-/* Increment per-thread lock counter for this lock type and total counter */	\
-/* of acquired spin locks. This is helper function used by spin lock      */	\
-/* acquiring functions below                                              */	\
-static inline void spin_ ## NAME ## _inc(void)					\
-{										\
-	LOCK_CNT_INC(spin_locked_ ## NAME);					\
-	LOCK_CNT_INC(spin_locked);						\
-}										\
-										\
-/* Decrement per-thread lock counter and total counter of acquired spin   */	\
-/* locks. This is helper function used by spin lock releasing functions   */	\
-/* below.                                                                 */	\
-static inline void spin_ ## NAME ## _dec(void)					\
-{										\
-	LOCK_CNT_DEC(spin_locked_ ## NAME);					\
-	LOCK_CNT_DEC(spin_locked);						\
-}										\
-										\
-/* Return true if spin lock embedded in @x is acquired by -current-       */	\
-/* thread                                                                 */	\
-static inline int  spin_ ## NAME ## _is_locked (const TYPE *x)			\
-{										\
-	return check_spin_is_locked (& x->FIELD.lock) &&			\
-	       LOCK_CNT_GTZ(spin_locked_ ## NAME);				\
-}										\
-										\
-/* Return true if spin lock embedded in @x is not acquired by -current-   */	\
-/* thread                                                                 */	\
-static inline int  spin_ ## NAME ## _is_not_locked (TYPE *x)			\
-{										\
-	return check_spin_is_not_locked (& x->FIELD.lock);			\
-}										\
-										\
-/* Acquire spin lock embedded in @x without checking lock ordering.       */	\
-/* This is useful when, for example, locking just created object.         */	\
-static inline void spin_lock_ ## NAME ## _no_ord (TYPE *x) 			\
-{										\
-	__ODCA("nikita-2703", spin_ ## NAME ## _is_not_locked(x));		\
-	spin_lock(&x->FIELD.lock);						\
-	spin_ ## NAME ## _inc();						\
-}										\
-										\
-/* Account for spin lock acquired by some other means. For example        */	\
-/* through atomic_dec_and_lock() or similar.                              */	\
-static inline void spin_lock_ ## NAME ## _acc (TYPE *x)				\
-{										\
-	spin_ ## NAME ## _inc();						\
-}										\
-										\
-/* Lock @x with explicit indication of spin lock profiling "sites".       */	\
-/* Locksite is used by spin lock profiling code (spinprof.[ch]) to        */	\
-/* identify fragment of code that locks @x.                               */	\
-/*                                                                        */	\
-/* If clock interrupt finds that current thread is spinning waiting for   */	\
-/* the lock on @x, counters in @t will be incremented.                    */	\
-/*                                                                        */	\
-/* If clock interrupt finds that current thread holds the lock on @x,     */	\
-/* counters in @h will be incremented.                                    */	\
-/*                                                                        */	\
-static inline void spin_lock_ ## NAME ## _at (TYPE *x) 				\
-{										\
-	__ODCA("nikita-1383", spin_ordering_pred_ ## NAME(x));			\
-	spin_lock_ ## NAME ## _no_ord(x);					\
-}										\
-										\
-/* Lock @x.                                                               */	\
-static inline void spin_lock_ ## NAME (TYPE *x)					\
-{										\
-	__ODCA("nikita-1383", spin_ordering_pred_ ## NAME(x));			\
-	spin_lock_ ## NAME ## _no_ord(x);					\
-}										\
-										\
-/* Try to obtain lock @x. On success, returns 1 with @x locked.           */	\
-/* If @x is already locked, return 0 immediately.                         */	\
-static inline int  spin_trylock_ ## NAME (TYPE *x)				\
-{										\
-	if (spin_trylock (& x->FIELD.lock)) {					\
-		spin_ ## NAME ## _inc();					\
-		return 1;							\
-	}									\
-	return 0;								\
-}										\
-										\
-/* Unlock @x.                                                             */	\
-static inline void spin_unlock_ ## NAME (TYPE *x)				\
-{										\
-	__ODCA("nikita-1375", LOCK_CNT_GTZ(spin_locked_ ## NAME));		\
-	__ODCA("nikita-1376", LOCK_CNT_GTZ(spin_locked));			\
-	__ODCA("nikita-2703", spin_ ## NAME ## _is_locked(x));			\
-										\
-	spin_ ## NAME ## _dec();						\
-	spin_unlock (& x->FIELD.lock);						\
-}										\
-										\
-typedef struct { int foo; } NAME ## _spin_dummy
-
-/*
- * Helper macro to perform a simple operation that requires taking of spin
- * lock.
- *
- * 1. Acquire spin lock on object @obj of type @obj_type.
- *
- * 2. Execute @exp under spin lock, and store result.
- *
- * 3. Release spin lock.
- *
- * 4. Return result of @exp.
- *
- * Example:
- *
- * right_delimiting_key = UNDER_SPIN(dk, current_tree, *znode_get_rd_key(node));
- *
- */
-#define UNDER_SPIN(obj_type, obj, exp)						\
-({										\
-	typeof (obj) __obj;							\
-	typeof (exp) __result;							\
-										\
-	__obj = (obj);								\
-	__ODCA("nikita-2492", __obj != NULL);					\
-	spin_lock_ ## obj_type ## _at (__obj);					\
-	__result = exp;								\
-	spin_unlock_ ## obj_type (__obj);					\
-	__result;								\
-})
-
-/*
- * The same as UNDER_SPIN, but without storing and returning @exp's result.
- */
-#define UNDER_SPIN_VOID(obj_type, obj, exp)					\
-({										\
-	typeof (obj) __obj;							\
-										\
-	__obj = (obj);								\
-	__ODCA("nikita-2492", __obj != NULL);					\
-	spin_lock_ ## obj_type ## _at (__obj);					\
-	exp;									\
-	spin_unlock_ ## obj_type (__obj);					\
-})
-
-/* Define several inline functions for each type of read write lock. This is
- * insanely long macro definition. */
-#define RW_LOCK_FUNCTIONS(NAME,TYPE,FIELD)					\
-										\
-										\
-/* Initialize read write lock embedded into @x.                           */	\
-static inline void rw_ ## NAME ## _init(TYPE *x)				\
-{										\
-	__ODCA("nikita-2988", x != NULL);					\
-	rwlock_init(& x->FIELD.lock);						\
-}										\
-										\
-/* True, if @x is read locked by the -current- thread.                    */	\
-static inline int  rw_ ## NAME ## _is_read_locked (const TYPE *x)		\
-{										\
-	return check_is_read_locked (& x->FIELD.lock);				\
-}										\
-										\
-/* True, if @x is write locked by the -current- thread.                   */	\
-static inline int  rw_ ## NAME ## _is_write_locked (const TYPE *x)		\
-{										\
-	return check_is_write_locked (& x->FIELD.lock);				\
-}										\
-										\
-/* True, if @x is not read locked by the -current- thread.                */	\
-static inline int  rw_ ## NAME ## _is_not_read_locked (TYPE *x)			\
-{										\
-	return check_is_not_read_locked (& x->FIELD.lock);			\
-}										\
-										\
-/* True, if @x is not write locked by the -current- thread.               */	\
-static inline int  rw_ ## NAME ## _is_not_write_locked (TYPE *x)		\
-{										\
-	return check_is_not_write_locked (& x->FIELD.lock);			\
-}										\
-										\
-/* True, if @x is either read or write locked by the -current- thread.    */	\
-static inline int  rw_ ## NAME ## _is_locked (const TYPE *x)			\
-{										\
-	return check_is_read_locked (& x->FIELD.lock) ||			\
-	       check_is_write_locked (& x->FIELD.lock);				\
-}										\
-										\
-/* True, if @x is neither read nor write locked by the -current- thread.  */	\
-static inline int  rw_ ## NAME ## _is_not_locked (const TYPE *x)		\
-{										\
-	return check_is_not_read_locked (& x->FIELD.lock) &&			\
-	       check_is_not_write_locked (& x->FIELD.lock);			\
-}										\
-										\
-/* This is helper function used by lock acquiring functions below         */	\
-static inline void read_ ## NAME ## _inc(void)					\
-{										\
-	LOCK_CNT_INC(read_locked_ ## NAME);					\
-	LOCK_CNT_INC(rw_locked_ ## NAME);					\
-	LOCK_CNT_INC(spin_locked);						\
-}										\
-										\
-/* This is helper function used by lock acquiring functions below         */	\
-static inline void read_ ## NAME ## _dec(void)					\
-{										\
-	LOCK_CNT_DEC(read_locked_ ## NAME);					\
-	LOCK_CNT_DEC(rw_locked_ ## NAME);					\
-	LOCK_CNT_DEC(spin_locked);						\
-}										\
-										\
-/* This is helper function used by lock acquiring functions below         */	\
-static inline void write_ ## NAME ## _inc(void)					\
-{										\
-	LOCK_CNT_INC(write_locked_ ## NAME);					\
-	LOCK_CNT_INC(rw_locked_ ## NAME);					\
-	LOCK_CNT_INC(spin_locked);						\
-}										\
-										\
-/* This is helper function used by lock acquiring functions below         */	\
-static inline void write_ ## NAME ## _dec(void)					\
-{										\
-	LOCK_CNT_DEC(write_locked_ ## NAME);					\
-	LOCK_CNT_DEC(rw_locked_ ## NAME);					\
-	LOCK_CNT_DEC(spin_locked);						\
-}										\
-										\
-/* Acquire read lock on @x without checking lock ordering predicates.     */	\
-/* This is useful when, for example, locking just created object.         */	\
-static inline void read_lock_ ## NAME ## _no_ord (TYPE *x)			\
-{										\
-	__ODCA("nikita-2976", rw_ ## NAME ## _is_not_read_locked(x));		\
-	read_lock(&x->FIELD.lock);						\
-	read_ ## NAME ## _inc();						\
-}										\
-										\
-/* Acquire write lock on @x without checking lock ordering predicates.    */	\
-/* This is useful when, for example, locking just created object.         */	\
-static inline void write_lock_ ## NAME ## _no_ord (TYPE *x)			\
-{										\
-	__ODCA("nikita-2977", rw_ ## NAME ## _is_not_write_locked(x));		\
-	write_lock(&x->FIELD.lock);						\
-	write_ ## NAME ## _inc();						\
-}										\
-										\
-/* Read lock @x with explicit indication of spin lock profiling "sites".  */	\
-/* See spin_lock_foo_at() above for more information.                     */	\
-static inline void read_lock_ ## NAME ## _at (TYPE *x) 				\
-{										\
-	__ODCA("nikita-2975", rw_ordering_pred_ ## NAME(x));			\
-	read_lock_ ## NAME ## _no_ord(x);					\
-}										\
-										\
-/* Write lock @x with explicit indication of spin lock profiling "sites". */	\
-/* See spin_lock_foo_at() above for more information.                     */	\
-static inline void write_lock_ ## NAME ## _at (TYPE *x)				\
-{										\
-	__ODCA("nikita-2978", rw_ordering_pred_ ## NAME(x));			\
-	write_lock_ ## NAME ## _no_ord(x);					\
-}										\
-										\
-/* Read lock @x.                                                          */	\
-static inline void read_lock_ ## NAME (TYPE *x)					\
-{										\
-	__ODCA("nikita-2975", rw_ordering_pred_ ## NAME(x));			\
-	read_lock_ ## NAME ## _no_ord(x);					\
-}										\
-										\
-/* Write lock @x.                                                         */	\
-static inline void write_lock_ ## NAME (TYPE *x)				\
-{										\
-	__ODCA("nikita-2978", rw_ordering_pred_ ## NAME(x));			\
-	write_lock_ ## NAME ## _no_ord(x);					\
-}										\
-										\
-/* Release read lock on @x.                                               */	\
-static inline void read_unlock_ ## NAME (TYPE *x)				\
-{										\
-	__ODCA("nikita-2979", LOCK_CNT_GTZ(read_locked_ ## NAME));		\
-	__ODCA("nikita-2980", LOCK_CNT_GTZ(rw_locked_ ## NAME));		\
-	__ODCA("nikita-2980", LOCK_CNT_GTZ(spin_locked));			\
-	read_ ## NAME ## _dec();						\
-	__ODCA("nikita-2703", rw_ ## NAME ## _is_read_locked(x));		\
-	read_unlock (& x->FIELD.lock);						\
-}										\
-										\
-/* Release write lock on @x.                                              */	\
-static inline void write_unlock_ ## NAME (TYPE *x)				\
-{										\
-	__ODCA("nikita-2979", LOCK_CNT_GTZ(write_locked_ ## NAME));		\
-	__ODCA("nikita-2980", LOCK_CNT_GTZ(rw_locked_ ## NAME));		\
-	__ODCA("nikita-2980", LOCK_CNT_GTZ(spin_locked));			\
-	write_ ## NAME ## _dec();						\
-	__ODCA("nikita-2703", rw_ ## NAME ## _is_write_locked(x));		\
-	write_unlock (& x->FIELD.lock);						\
-}										\
-										\
-/* Try to obtain write lock on @x. On success, returns 1 with @x locked.  */	\
-/* If @x is already locked, return 0 immediately.                         */	\
-static inline int  write_trylock_ ## NAME (TYPE *x)				\
-{										\
-	if (write_trylock (& x->FIELD.lock)) {					\
-		write_ ## NAME ## _inc();					\
-		return 1;							\
-	}									\
-	return 0;								\
-}										\
-										\
-										\
-typedef struct { int foo; } NAME ## _rw_dummy
-
-/*
- * Helper macro to perform a simple operation that requires taking of read
- * write lock.
- *
- * 1. Acquire read or write (depending on @rw parameter) lock on object @obj
- * of type @obj_type.
- *
- * 2. Execute @exp under lock, and store result.
- *
- * 3. Release lock.
- *
- * 4. Return result of @exp.
- *
- * Example:
- *
- * tree_height = UNDER_RW(tree, current_tree, read, current_tree->height);
- */
-#define UNDER_RW(obj_type, obj, rw, exp)				\
-({									\
-	typeof (obj) __obj;						\
-	typeof (exp) __result;						\
-									\
-	__obj = (obj);							\
-	__ODCA("nikita-2981", __obj != NULL);				\
-	rw ## _lock_ ## obj_type ## _at (__obj);			\
-	__result = exp;							\
-	rw ## _unlock_ ## obj_type (__obj);				\
-	__result;							\
-})
-
-/*
- * The same as UNDER_RW, but without storing and returning @exp's result.
- */
-#define UNDER_RW_VOID(obj_type, obj, rw, exp)				\
-({									\
-	typeof (obj) __obj;						\
-									\
-	__obj = (obj);							\
-	__ODCA("nikita-2982", __obj != NULL);				\
-	rw ## _lock_ ## obj_type ## _at (__obj);			\
-	exp;								\
-	rw ## _unlock_ ## obj_type (__obj);				\
-})
-
-#define LOCK_JNODE(node) spin_lock_jnode(node)
-#define LOCK_JLOAD(node) spin_lock_jload(node)
-#define LOCK_ATOM(atom) spin_lock_atom(atom)
-#define LOCK_TXNH(txnh) spin_lock_txnh(txnh)
-#define LOCK_INODE(inode) spin_lock_inode_object(inode)
-#define RLOCK_TREE(tree) read_lock_tree(tree)
-#define WLOCK_TREE(tree) write_lock_tree(tree)
-#define RLOCK_DK(tree) read_lock_dk(tree)
-#define WLOCK_DK(tree) write_lock_dk(tree)
-#define RLOCK_ZLOCK(lock) read_lock_zlock(lock)
-#define WLOCK_ZLOCK(lock) write_lock_zlock(lock)
-
-#define UNLOCK_JNODE(node) spin_unlock_jnode(node)
-#define UNLOCK_JLOAD(node) spin_unlock_jload(node)
-#define UNLOCK_ATOM(atom) spin_unlock_atom(atom)
-#define UNLOCK_TXNH(txnh) spin_unlock_txnh(txnh)
-#define UNLOCK_INODE(inode) spin_unlock_inode_object(inode)
-#define RUNLOCK_TREE(tree) read_unlock_tree(tree)
-#define WUNLOCK_TREE(tree) write_unlock_tree(tree)
-#define RUNLOCK_DK(tree) read_unlock_dk(tree)
-#define WUNLOCK_DK(tree) write_unlock_dk(tree)
-#define RUNLOCK_ZLOCK(lock) read_unlock_zlock(lock)
-#define WUNLOCK_ZLOCK(lock) write_unlock_zlock(lock)
-
-/* __SPIN_MACROS_H__ */
-#endif
-
-/* Make Linus happy.
-   Local variables:
-   c-indentation-style: "K&R"
-   mode-name: "LC"
-   c-basic-offset: 8
-   tab-width: 8
-   fill-column: 120
-   scroll-step: 1
-   End:
-*/
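
For reference, every use of the expression helpers deleted above converts to
an explicit acquire/evaluate/release sequence in the hunks that follow. A
minimal sketch of the pattern (variable names are illustrative, not taken
from the patch):

	/* before: evaluate an expression under the dk read lock */
	key = UNDER_RW(dk, tree, read, *znode_get_rd_key(node));

	/* after: the open-coded equivalent */
	read_lock_dk(tree);
	key = *znode_get_rd_key(node);
	read_unlock_dk(tree);
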
diff -puN fs/reiser4/super.c~reiser4-spinlock-cleanup fs/reiser4/super.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/super.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.688986250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/super.c	2005-10-20 14:01:52.824994750 +0400
@@ -318,9 +318,9 @@ void inc_unalloc_unfm_ptr(void)
 	reiser4_super_info_data *sbinfo;
 
 	sbinfo = get_super_private(get_current_context()->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	sbinfo->unalloc_extent_pointers++;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 /* this is called when unallocated extent is converted to allocated */
@@ -329,10 +329,10 @@ void dec_unalloc_unfm_ptrs(int nr)
 	reiser4_super_info_data *sbinfo;
 
 	sbinfo = get_super_private(get_current_context()->super);
-	reiser4_spin_lock_sb(sbinfo);
+	spin_lock_reiser4_super(sbinfo);
 	BUG_ON(sbinfo->unalloc_extent_pointers < nr);
 	sbinfo->unalloc_extent_pointers -= nr;
-	reiser4_spin_unlock_sb(sbinfo);
+	spin_unlock_reiser4_super(sbinfo);
 }
 
 
diff -puN fs/reiser4/super.h~reiser4-spinlock-cleanup fs/reiser4/super.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/super.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.692986500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/super.h	2005-10-20 14:01:52.824994750 +0400
@@ -119,7 +119,7 @@ struct reiser4_super_info_data {
 	 * guard spinlock which protects reiser4 super block fields (currently
 	 * blocks_free, blocks_free_committed)
 	 */
-	reiser4_spin_data guard;
+	spinlock_t guard;
 
 	/* next oid that will be returned by oid_allocate() */
 	oid_t next_to_use;
@@ -220,7 +220,7 @@ struct reiser4_super_info_data {
 
 #if REISER4_USE_EFLUSH
 	/* see emergency_flush.c for details */
-	reiser4_spin_data eflush_guard;
+	spinlock_t eflush_guard;
 	/* number of emergency flushed nodes */
 	int eflushed;
 	/* hash table used by emergency flush. Protected by ->eflush_guard */
@@ -375,49 +375,17 @@ extern void build_object_ops(struct supe
 
 #define REISER4_SUPER_MAGIC 0x52345362	/* (*(__u32 *)"R4Sb"); */
 
-#define spin_ordering_pred_super(private) (1)
-SPIN_LOCK_FUNCTIONS(super, reiser4_super_info_data, guard);
-
-/*
- * lock reiser4-specific part of super block
- */
-static inline void reiser4_spin_lock_sb(reiser4_super_info_data * sbinfo)
+static inline void spin_lock_reiser4_super(reiser4_super_info_data *sbinfo)
 {
-	spin_lock_super(sbinfo);
+	spin_lock(&(sbinfo->guard));
 }
 
-/*
- * unlock reiser4-specific part of super block
- */
-static inline void reiser4_spin_unlock_sb(reiser4_super_info_data * sbinfo)
+static inline void spin_unlock_reiser4_super(reiser4_super_info_data *sbinfo)
 {
-	spin_unlock_super(sbinfo);
+	assert_spin_locked(&(sbinfo->guard));
+	spin_unlock(&(sbinfo->guard));
 }
 
-#if REISER4_USE_EFLUSH
-
-#define spin_ordering_pred_super_eflush(private) (1)
-SPIN_LOCK_FUNCTIONS(super_eflush, reiser4_super_info_data, eflush_guard);
-
-/*
- * lock emergency flush data-structures for super block @s
- */
-static inline void spin_lock_eflush(const struct super_block *s)
-{
-	reiser4_super_info_data *sbinfo = get_super_private(s);
-	spin_lock_super_eflush(sbinfo);
-}
-
-/*
- * unlock emergency flush data-structures for super block @s
- */
-static inline void spin_unlock_eflush(const struct super_block *s)
-{
-	reiser4_super_info_data *sbinfo = get_super_private(s);
-	spin_unlock_super_eflush(sbinfo);
-}
-#endif
-
 extern __u64 flush_reserved(const struct super_block *);
 extern int reiser4_is_set(const struct super_block *super, reiser4_fs_flag f);
 extern long statfs_type(const struct super_block *super);
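
The new wrappers lean on the stock kernel primitives and assert_spin_locked()
rather than the generated is-locked predicates. Typical use, as in the
super.c hunk above (a sketch, not an addition to the patch):

	spin_lock_reiser4_super(sbinfo);
	sbinfo->unalloc_extent_pointers++;
	spin_unlock_reiser4_super(sbinfo);	/* asserts ->guard is held */
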
diff -puN fs/reiser4/super_ops.c~reiser4-spinlock-cleanup fs/reiser4/super_ops.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/super_ops.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.696986750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/super_ops.c	2005-10-20 14:01:52.824994750 +0400
@@ -107,7 +107,7 @@ static struct inode *reiser4_alloc_inode
 		seal_init(&info->sd_seal, NULL, NULL);
 		coord_init_invalid(&info->sd_coord, NULL);
 		info->flags = 0;
-		spin_inode_object_init(info);
+		spin_lock_init(&info->guard);
 		/* this deals with info's loading semaphore */
 		loading_alloc(info);
 		info->vroot = UBER_TREE_ADDR;
diff -puN fs/reiser4/tree.c~reiser4-spinlock-cleanup fs/reiser4/tree.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/tree.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.700987000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/tree.c	2005-10-20 14:01:52.828995000 +0400
@@ -628,15 +628,15 @@ znode *child_znode(const coord_t * paren
 
 	assert("nikita-1374", parent_coord != NULL);
 	assert("nikita-1482", parent != NULL);
-	assert("nikita-1384", ergo(setup_dkeys_p,
-				   rw_dk_is_not_locked(znode_get_tree
-						       (parent))));
+#if REISER4_DEBUG
+	if (setup_dkeys_p)
+		assert_rw_not_locked(&(znode_get_tree(parent)->dk_lock));
+#endif
 	assert("nikita-2947", znode_is_any_locked(parent));
 
 	if (znode_get_level(parent) <= LEAF_LEVEL) {
 		/* trying to get child of leaf node */
 		warning("nikita-1217", "Child of maize?");
-		print_znode("node", parent);
 		return ERR_PTR(RETERR(-EIO));
 	}
 	if (item_is_internal(parent_coord)) {
@@ -659,7 +659,6 @@ znode *child_znode(const coord_t * paren
 			set_child_delimiting_keys(parent, parent_coord, child);
 	} else {
 		warning("nikita-1483", "Internal item expected");
-		print_znode("node", parent);
 		child = ERR_PTR(RETERR(-EIO));
 	}
 	return child;
@@ -702,7 +701,7 @@ static void uncapture_znode(znode * node
 			assert("zam-939", atom != NULL);
 			spin_unlock_znode(node);
 			flush_reserved2grabbed(atom, (__u64) 1);
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		} else
 			spin_unlock_znode(node);
 	} else {
@@ -750,7 +749,7 @@ static void uncapture_znode(znode * node
 		}
 
 		uncapture_block(ZJNODE(node));
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 		zput(node);
 	}
 }
@@ -770,7 +769,7 @@ void forget_znode(lock_handle * handle)
 
 	assert("vs-164", znode_is_write_locked(node));
 	assert("nikita-1280", ZF_ISSET(node, JNODE_HEARD_BANSHEE));
-	assert("nikita-3337", rw_zlock_is_locked(&node->lock));
+	assert_rw_locked(&(node->lock.guard));
 
 	/* We assume that this node was detached from its parent before
 	 * unlocking; that leaves no way to reach this node from the parent through a
@@ -780,10 +779,10 @@ void forget_znode(lock_handle * handle)
 	 * right neighbors.  In the next several lines we remove the node from
 	 * the sibling list. */
 
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	sibling_list_remove(node);
 	znode_remove(node, tree);
-	WUNLOCK_TREE(tree);
+	write_unlock_tree(tree);
 
 	/* Here we set JNODE_DYING and cancel all pending lock requests.  It
 	 * forces all lock requestor threads to repeat iterations of getting
@@ -895,23 +894,25 @@ int find_child_ptr(znode * parent /* par
 	 * not aliased to ->in_parent of some znode. Otherwise,
 	 * parent_coord_to_coord() below would modify data protected by tree
 	 * lock. */
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 	/* fast path. Try to use cached value. Lock tree to keep
 	   node->pos_in_parent and pos->*_blocknr consistent. */
 	if (child->in_parent.item_pos + 1 != 0) {
 		parent_coord_to_coord(&child->in_parent, result);
 		if (check_tree_pointer(result, child) == NS_FOUND) {
-			RUNLOCK_TREE(tree);
+			read_unlock_tree(tree);
 			return NS_FOUND;
 		}
 
 		child->in_parent.item_pos = (unsigned short)~0;
 	}
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 
 	/* if the above failed, find some key from @child. We are looking for the
 	   least key in a child. */
-	UNDER_RW_VOID(dk, tree, read, ld = *znode_get_ld_key(child));
+	read_lock_dk(tree);
+	ld = *znode_get_ld_key(child);
+	read_unlock_dk(tree);
 	/*
 	 * now, lookup parent with key just found. Note, that left delimiting
 	 * key doesn't identify node uniquely, because (in extremely rare
@@ -923,9 +924,9 @@ int find_child_ptr(znode * parent /* par
 	lookup_res = nplug->lookup(parent, &ld, FIND_EXACT, result);
 	/* update cached pos_in_node */
 	if (lookup_res == NS_FOUND) {
-		WLOCK_TREE(tree);
+		write_lock_tree(tree);
 		coord_to_parent_coord(result, &child->in_parent);
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 		lookup_res = check_tree_pointer(result, child);
 	}
 	if (lookup_res == NS_NOT_FOUND)
@@ -954,9 +955,9 @@ static int find_child_by_addr(znode * pa
 
 	for_all_units(result, parent) {
 		if (check_tree_pointer(result, child) == NS_FOUND) {
-			UNDER_RW_VOID(tree, znode_get_tree(parent), write,
-				      coord_to_parent_coord(result,
-							    &child->in_parent));
+			write_lock_tree(znode_get_tree(parent));
+			coord_to_parent_coord(result, &child->in_parent);
+			write_unlock_tree(znode_get_tree(parent));
 			ret = NS_FOUND;
 			break;
 		}
@@ -1201,9 +1202,9 @@ prepare_twig_kill(carry_kill_data * kdat
 			case -E_NO_NEIGHBOR:
 				/* there is no formatted node to the right of
 				   from->node */
-				UNDER_RW_VOID(dk, tree, read,
-					      key =
-					      *znode_get_rd_key(from->node));
+				read_lock_dk(tree);
+				key = *znode_get_rd_key(from->node);
+				read_unlock_dk(tree);
 				right_coord.node = NULL;
 				result = 0;
 				break;
@@ -1472,10 +1473,10 @@ int delete_node(znode * node, reiser4_ke
 	   be zero). */
 
 	tree = znode_get_tree(node);
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	init_parent_coord(&node->in_parent, NULL);
 	--parent_lock.node->c_count;
-	WUNLOCK_TREE(tree);
+	write_unlock_tree(tree);
 
 	assert("zam-989", item_is_internal(&cut_from));
 
@@ -1495,8 +1496,8 @@ int delete_node(znode * node, reiser4_ke
 		reiser4_tree *tree = current_tree;
 		__u64 start_offset = 0, end_offset = 0;
 
-		RLOCK_TREE(tree);
-		WLOCK_DK(tree);
+		read_lock_tree(tree);
+		write_lock_dk(tree);
 		if (object) {
 			/* We use @smallest_removed and the left delimiting of
 			 * the current node for @object->i_blocks, i_bytes
@@ -1513,8 +1514,8 @@ int delete_node(znode * node, reiser4_ke
 
 		*smallest_removed = *znode_get_ld_key(node);
 
-		WUNLOCK_DK(tree);
-		RUNLOCK_TREE(tree);
+		write_unlock_dk(tree);
+		read_unlock_tree(tree);
 
 		if (object) {
 			/* we used to perform actions which are to be performed on items on their removal from tree in
@@ -1534,6 +1535,16 @@ int delete_node(znode * node, reiser4_ke
 	return ret;
 }
 
+static int can_delete(const reiser4_key *key, znode *node)
+{
+	int result;
+
+	read_lock_dk(current_tree);
+	result = keyle(key, znode_get_ld_key(node));
+	read_unlock_dk(current_tree);
+	return result;
+}
+
 /**
  * This subroutine is not optimal, but the implementation seems to
  * be easier.
@@ -1580,11 +1591,9 @@ cut_tree_worker_common(tap_t * tap, cons
 			break;
 		/* Check can we delete the node as a whole. */
 		if (*progress && znode_get_level(node) == LEAF_LEVEL &&
-		    UNDER_RW(dk, current_tree, read,
-			     keyle(from_key, znode_get_ld_key(node)))) {
-			result =
-			    delete_node(node, smallest_removed, object,
-					truncate);
+		    can_delete(from_key, node)) {
+			result = delete_node(node, smallest_removed, object,
+					     truncate);
 		} else {
 			result = tap_load(tap);
 			if (result)
@@ -1817,8 +1826,8 @@ cut_tree(reiser4_tree * tree, const reis
 void init_tree_0(reiser4_tree * tree)
 {
 	assert("zam-683", tree != NULL);
-	rw_tree_init(tree);
-	spin_epoch_init(tree);
+	rwlock_init(&tree->tree_lock);
+	spin_lock_init(&tree->epoch_lock);
 }
 
 /* finishing reiser4 initialization */
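
Where a locked expression feeds a larger condition, the patch factors it into
a small helper instead of open-coding it at every call site, as can_delete()
above does for keyle(). The same idiom, sketched for another common read (the
helper name is hypothetical, not part of the patch):

	/* sketch: copy a znode's left delimiting key under the dk lock */
	static inline reiser4_key ld_key_locked(reiser4_tree *tree, znode *node)
	{
		reiser4_key key;

		read_lock_dk(tree);
		key = *znode_get_ld_key(node);	/* stable while dk is held */
		read_unlock_dk(tree);
		return key;
	}
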
diff -puN fs/reiser4/tree.h~reiser4-spinlock-cleanup fs/reiser4/tree.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/tree.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.704987250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/tree.h	2005-10-20 14:01:52.828995000 +0400
@@ -8,7 +8,6 @@
 
 #include "forward.h"
 #include "debug.h"
-#include "spin_macros.h"
 #include "dformat.h"
 #include "plugin/node/node.h"
 #include "plugin/plugin.h"
@@ -59,7 +58,7 @@ typedef struct cbk_cache_slot {
 */
 typedef struct cbk_cache {
 	/* serializator */
-	reiser4_rw_data guard;
+	rwlock_t guard;
 	int nr_slots;
 	/* head of LRU list of cache slots */
 	struct list_head lru;
@@ -67,10 +66,6 @@ typedef struct cbk_cache {
 	cbk_cache_slot *slot;
 } cbk_cache;
 
-#define rw_ordering_pred_cbk_cache(cache) (1)
-
-/* defined read-write locking functions for cbk_cache */
-RW_LOCK_FUNCTIONS(cbk_cache, cbk_cache, guard);
 
 /* level_lookup_result - possible outcome of looking up key at some level.
    This is used by coord_by_key when traversing tree downward. */
@@ -139,13 +134,13 @@ struct reiser4_tree {
 	   4) SMP machines.  Current 4-way machine tests do not show that the tree
 	   lock is contended or that it is a bottleneck (2003.07.25). */
 
-	reiser4_rw_data tree_lock;
+	rwlock_t tree_lock;
 
 	/* lock protecting delimiting keys */
-	reiser4_rw_data dk_lock;
+	rwlock_t dk_lock;
 
 	/* spin lock protecting znode_epoch */
-	reiser4_spin_data epoch_lock;
+	spinlock_t epoch_lock;
 	/* version stamp used to mark znode updates. See seal.[ch] for more
 	 * information. */
 	__u64 znode_epoch;
@@ -165,9 +160,6 @@ struct reiser4_tree {
 	} carry;
 };
 
-#define spin_ordering_pred_epoch(tree) (1)
-SPIN_LOCK_FUNCTIONS(epoch, reiser4_tree, epoch_lock);
-
 extern void init_tree_0(reiser4_tree *);
 
 extern int init_tree(reiser4_tree * tree,
@@ -441,37 +433,126 @@ int lookup_couple(reiser4_tree * tree,
 		  tree_level lock_level, tree_level stop_level, __u32 flags,
 		  int *result1, int *result2);
 
-/* ordering constraint for tree spin lock: tree lock is "strongest" */
-#define rw_ordering_pred_tree(tree)			\
-	(lock_counters()->spin_locked_txnh == 0) &&	\
-	(lock_counters()->rw_locked_tree == 0) &&	\
-	(lock_counters()->rw_locked_dk == 0)
-
-/* Define spin_lock_tree, spin_unlock_tree, and spin_tree_is_locked:
-   spin lock protecting znode hash, and parent and sibling pointers. */
-RW_LOCK_FUNCTIONS(tree, reiser4_tree, tree_lock);
-
-/* ordering constraint for delimiting key spin lock: dk lock is weaker than
-   tree lock */
-#define rw_ordering_pred_dk( tree ) 1
-#if 0
-(lock_counters()->rw_locked_tree == 0) &&
-    (lock_counters()->spin_locked_jnode == 0) &&
-    (lock_counters()->rw_locked_zlock == 0) &&
-    (lock_counters()->spin_locked_txnh == 0) &&
-    (lock_counters()->spin_locked_atom == 0) &&
-    (lock_counters()->spin_locked_inode_object == 0) &&
-    (lock_counters()->spin_locked_txnmgr == 0)
-#endif
-/* Define spin_lock_dk(), spin_unlock_dk(), etc: locking for delimiting
-   keys. */
-    RW_LOCK_FUNCTIONS(dk, reiser4_tree, dk_lock);
 
-#if REISER4_DEBUG
-#define check_tree() print_tree_rec( "", current_tree, REISER4_TREE_CHECK )
-#else
-#define check_tree() noop
-#endif
+static inline void read_lock_tree(reiser4_tree *tree)
+{
+	/* check that tree is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_tree) &&
+		    LOCK_CNT_NIL(read_locked_tree) &&
+		    LOCK_CNT_NIL(write_locked_tree)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(spin_locked_stack)));
+
+	read_lock(&(tree->tree_lock));
+
+	LOCK_CNT_INC(read_locked_tree);
+	LOCK_CNT_INC(rw_locked_tree);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void read_unlock_tree(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(read_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(read_locked_tree);
+	LOCK_CNT_DEC(rw_locked_tree);
+	LOCK_CNT_DEC(spin_locked);
+
+	read_unlock(&(tree->tree_lock));
+}
+
+static inline void write_lock_tree(reiser4_tree *tree)
+{
+	/* check that tree is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_tree) &&
+		    LOCK_CNT_NIL(read_locked_tree) &&
+		    LOCK_CNT_NIL(write_locked_tree)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(spin_locked_stack)));
+
+	write_lock(&(tree->tree_lock));
+
+	LOCK_CNT_INC(write_locked_tree);
+	LOCK_CNT_INC(rw_locked_tree);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void write_unlock_tree(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(write_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_tree));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(write_locked_tree);
+	LOCK_CNT_DEC(rw_locked_tree);
+	LOCK_CNT_DEC(spin_locked);
+
+	write_unlock(&(tree->tree_lock));
+}
+
+static inline void read_lock_dk(reiser4_tree *tree)
+{
+	/* check that dk is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(read_locked_dk) &&
+		    LOCK_CNT_NIL(write_locked_dk)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	read_lock(&((tree)->dk_lock));
+
+	LOCK_CNT_INC(read_locked_dk);
+	LOCK_CNT_INC(rw_locked_dk);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void read_unlock_dk(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(read_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(read_locked_dk);
+	LOCK_CNT_DEC(rw_locked_dk);
+	LOCK_CNT_DEC(spin_locked);
+
+	read_unlock(&(tree->dk_lock));
+}
+
+static inline void write_lock_dk(reiser4_tree *tree)
+{
+	/* check that dk is not locked */
+	assert("", (LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(read_locked_dk) &&
+		    LOCK_CNT_NIL(write_locked_dk)));
+	/* check that spinlocks of lower priorities are not held */
+	assert("", LOCK_CNT_NIL(spin_locked_stack));
+
+	write_lock(&((tree)->dk_lock));
+
+	LOCK_CNT_INC(write_locked_dk);
+	LOCK_CNT_INC(rw_locked_dk);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline void write_unlock_dk(reiser4_tree *tree)
+{
+	assert("nikita-1375", LOCK_CNT_GTZ(write_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_dk));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(write_locked_dk);
+	LOCK_CNT_DEC(rw_locked_dk);
+	LOCK_CNT_DEC(spin_locked);
+
+	write_unlock(&(tree->dk_lock));
+}
 
 /* estimate api. Implementation is in estimate.c */
 reiser4_block_nr estimate_one_insert_item(reiser4_tree *);
@@ -482,13 +563,6 @@ reiser4_block_nr calc_estimate_one_inser
 reiser4_block_nr estimate_disk_cluster(struct inode *);
 reiser4_block_nr estimate_insert_cluster(struct inode *, int);
 
-/* take read or write tree lock, depending on @takeread argument */
-#define XLOCK_TREE(tree, takeread)				\
-	(takeread ? RLOCK_TREE(tree) : WLOCK_TREE(tree))
-
-/* release read or write tree lock, depending on @takeread argument */
-#define XUNLOCK_TREE(tree, takeread)				\
-	(takeread ? RUNLOCK_TREE(tree) : WUNLOCK_TREE(tree))
 
 /* __REISER4_TREE_H__ */
 #endif
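
The LOCK_CNT assertions in these inline helpers encode the lock ordering
directly: the tree lock is strongest, dk is weaker, and the lock stack is
weakest, so an out-of-order acquisition now fails loudly at lock time rather
than deadlocking later. A hypothetical misuse, for illustration:

	/* wrong: dk is held, so the tree lock assertion fires */
	read_lock_dk(tree);
	read_lock_tree(tree);	/* assert("", LOCK_CNT_NIL(rw_locked_dk)) fails */

	/* right: tree lock first, then dk */
	read_lock_tree(tree);
	read_lock_dk(tree);
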
diff -puN fs/reiser4/tree_mod.c~reiser4-spinlock-cleanup fs/reiser4/tree_mod.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/tree_mod.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.708987500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/tree_mod.c	2005-10-20 14:01:52.832995250 +0400
@@ -152,7 +152,7 @@ znode *add_tree_root(znode * old_root /*
 				znode_make_dirty(fake);
 
 				/* new root is a child of "fake" node */
-				WLOCK_TREE(tree);
+				write_lock_tree(tree);
 
 				++tree->height;
 
@@ -168,17 +168,17 @@ znode *add_tree_root(znode * old_root /*
 				 * balancing are connected after balancing is
 				 * done---useful invariant to check. */
 				sibling_list_insert_nolock(new_root, NULL);
-				WUNLOCK_TREE(tree);
+				write_unlock_tree(tree);
 
 				/* insert into new root pointer to the
 				   @old_root. */
 				assert("nikita-1110",
 				       WITH_DATA(new_root,
 						 node_is_empty(new_root)));
-				WLOCK_DK(tree);
+				write_lock_dk(tree);
 				znode_set_ld_key(new_root, min_key());
 				znode_set_rd_key(new_root, max_key());
-				WUNLOCK_DK(tree);
+				write_unlock_dk(tree);
 				if (REISER4_DEBUG) {
 					ZF_CLR(old_root, JNODE_LEFT_CONNECTED);
 					ZF_CLR(old_root, JNODE_RIGHT_CONNECTED);
@@ -234,7 +234,7 @@ static int add_child_ptr(znode * parent,
 	coord_t coord;
 	reiser4_item_data data;
 	int result;
-	reiser4_key *key;
+	reiser4_key key;
 
 	assert("nikita-1111", parent != NULL);
 	assert("nikita-1112", child != NULL);
@@ -250,10 +250,12 @@ static int add_child_ptr(znode * parent,
 	build_child_ptr_data(child, &data);
 	data.arg = NULL;
 
-	key =
-	    UNDER_RW(dk, znode_get_tree(parent), read, znode_get_ld_key(child));
-	result =
-	    node_plugin_by_node(parent)->create_item(&coord, key, &data, NULL);
+	read_lock_dk(znode_get_tree(parent));
+	key = *znode_get_ld_key(child);
+	read_unlock_dk(znode_get_tree(parent));
+
+	result = node_plugin_by_node(parent)->create_item(&coord, &key, &data,
+							  NULL);
 	znode_make_dirty(parent);
 	zrelse(parent);
 	return result;
@@ -293,7 +295,7 @@ static int kill_root(reiser4_tree * tree
 
 		/* don't take long term lock a @new_root. Take spinlock. */
 
-		WLOCK_TREE(tree);
+		write_lock_tree(tree);
 
 		tree->root_block = *new_root_blk;
 		--tree->height;
@@ -309,7 +311,7 @@ static int kill_root(reiser4_tree * tree
 		++uber->c_count;
 
 		/* sibling_list_insert_nolock(new_root, NULL); */
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 
 		/* reinitialise old root. */
 		result = node_plugin_by_node(old_root)->init(old_root);
diff -puN fs/reiser4/tree_walk.c~reiser4-spinlock-cleanup fs/reiser4/tree_walk.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/tree_walk.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.712987750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/tree_walk.c	2005-10-20 14:01:52.832995250 +0400
@@ -66,7 +66,7 @@ static int lock_neighbor(
 
 	assert("umka-236", node != NULL);
 	assert("umka-237", tree != NULL);
-	assert("umka-301", rw_tree_is_locked(tree));
+	assert_rw_locked(&(tree->tree_lock));
 
 	if (flags & GN_TRY_LOCK)
 		req |= ZNODE_LOCK_NONBLOCK;
@@ -94,14 +94,14 @@ static int lock_neighbor(
 		/* protect it from deletion. */
 		zref(neighbor);
 
-		XUNLOCK_TREE(tree, rlocked);
+		rlocked ? read_unlock_tree(tree) : write_unlock_tree(tree);
 
 		ret = longterm_lock_znode(result, neighbor, mode, req);
 
 		/* The lock handle obtains its own reference, release the one from above. */
 		zput(neighbor);
 
-		XLOCK_TREE(tree, rlocked);
+		rlocked ? read_lock_tree(tree) : write_lock_tree(tree);
 
 		/* restart if node we got reference to is being
 		   invalidated. we should not get reference to this node
@@ -118,22 +118,26 @@ static int lock_neighbor(
 
 		/* znode was locked by mistake; unlock it and restart locking
 		   process from beginning. */
-		XUNLOCK_TREE(tree, rlocked);
+		rlocked ? read_unlock_tree(tree) : write_unlock_tree(tree);
 		longterm_unlock_znode(result);
-		XLOCK_TREE(tree, rlocked);
+		rlocked ? read_lock_tree(tree) : write_lock_tree(tree);
 	}
 }
 
 /* get parent node with longterm lock, accepts GN* flags. */
-int reiser4_get_parent_flags(lock_handle * result /* resulting lock handle */ ,
+int reiser4_get_parent_flags(lock_handle * lh /* resulting lock handle */ ,
 			     znode * node /* child node */ ,
 			     znode_lock_mode mode
 			     /* type of lock: read or write */ ,
 			     int flags /* GN_* flags */ )
 {
-	return UNDER_RW(tree, znode_get_tree(node), read,
-			lock_neighbor(result, node, PARENT_PTR_OFFSET, mode,
-				      ZNODE_LOCK_HIPRI, flags, 1));
+	int result;
+
+	read_lock_tree(znode_get_tree(node));
+	result = lock_neighbor(lh, node, PARENT_PTR_OFFSET, mode,
+			       ZNODE_LOCK_HIPRI, flags, 1);
+	read_unlock_tree(znode_get_tree(node));
+	return result;
 }
 
 /* wrapper function to lock right or left neighbor depending on GN_GO_LEFT
@@ -184,7 +188,7 @@ int check_sibling_list(znode * node)
 		return 1;
 
 	assert("nikita-3270", node != NULL);
-	assert("nikita-3269", rw_tree_is_write_locked(znode_get_tree(node)));
+	assert_rw_write_locked(&(znode_get_tree(node)->tree_lock));
 
 	for (scan = node; znode_is_left_connected(scan); scan = next) {
 		next = scan->left;
@@ -331,7 +335,7 @@ static int far_next_coord(coord_t * coor
 
 	node = handle->node;
 	tree = znode_get_tree(node);
-	WUNLOCK_TREE(tree);
+	write_unlock_tree(tree);
 
 	coord_init_zero(coord);
 
@@ -358,7 +362,7 @@ static int far_next_coord(coord_t * coor
 	      error_locked:
 		longterm_unlock_znode(handle);
 	}
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	return ret;
 }
 
@@ -385,12 +389,12 @@ renew_sibling_link(coord_t * coord, lock
 	assert("umka-247", child != NULL);
 	assert("umka-303", tree != NULL);
 
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	ret = far_next_coord(coord, handle, flags);
 
 	if (ret) {
 		if (ret != -ENOENT) {
-			WUNLOCK_TREE(tree);
+			write_unlock_tree(tree);
 			return ret;
 		}
 	} else {
@@ -407,11 +411,11 @@ renew_sibling_link(coord_t * coord, lock
 		iplug = item_plugin_by_coord(coord);
 		if (!item_is_internal(coord)) {
 			link_znodes(child, NULL, to_left);
-			WUNLOCK_TREE(tree);
+			write_unlock_tree(tree);
 			/* we know there can't be formatted neighbor */
 			return RETERR(-E_NO_NEIGHBOR);
 		}
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 
 		iplug->s.internal.down_link(coord, NULL, &da);
 
@@ -431,7 +435,7 @@ renew_sibling_link(coord_t * coord, lock
 			/* update delimiting keys */
 			set_child_delimiting_keys(coord->node, coord, neighbor);
 
-		WLOCK_TREE(tree);
+		write_lock_tree(tree);
 	}
 
 	if (likely(neighbor == NULL ||
@@ -445,7 +449,7 @@ renew_sibling_link(coord_t * coord, lock
 		ret = RETERR(-EIO);
 	}
 
-	WUNLOCK_TREE(tree);
+	write_unlock_tree(tree);
 
 	/* if GN_NO_ALLOC isn't set we keep reference to neighbor znode */
 	if (neighbor != NULL && (flags & GN_NO_ALLOC))
@@ -526,21 +530,21 @@ int connect_znode(coord_t * parent_coord
 		return ret;
 
 	/* protect `connected' state check by tree_lock */
-	RLOCK_TREE(tree);
+	read_lock_tree(tree);
 
 	if (!znode_is_right_connected(child)) {
-		RUNLOCK_TREE(tree);
+		read_unlock_tree(tree);
 		/* connect right (default is right) */
 		ret = connect_one_side(parent_coord, child, GN_NO_ALLOC);
 		if (ret)
 			goto zrelse_and_ret;
 
-		RLOCK_TREE(tree);
+		read_lock_tree(tree);
 	}
 
 	ret = znode_is_left_connected(child);
 
-	RUNLOCK_TREE(tree);
+	read_unlock_tree(tree);
 
 	if (!ret) {
 		ret =
@@ -593,8 +597,9 @@ renew_neighbor(coord_t * coord, znode * 
 	   and reference to neighbor znode incremented */
 	neighbor = (flags & GN_GO_LEFT) ? node->left : node->right;
 
-	ret = UNDER_RW(tree, tree, read, znode_is_connected(neighbor));
-
+	read_lock_tree(tree);
+	ret = znode_is_connected(neighbor);
+	read_unlock_tree(tree);
 	if (ret) {
 		ret = 0;
 		goto out;
@@ -676,9 +681,9 @@ reiser4_get_neighbor(lock_handle * neigh
       again:
 	/* first, we try to use simple lock_neighbor() which requires sibling
 	   link existence */
-	ret = UNDER_RW(tree, tree, read,
-		       lock_side_neighbor(neighbor, node, lock_mode, flags, 1));
-
+	read_lock_tree(tree);
+	ret = lock_side_neighbor(neighbor, node, lock_mode, flags, 1);
+	read_unlock_tree(tree);
 	if (!ret) {
 		/* load znode content if it was specified */
 		if (flags & GN_LOAD_NEIGHBOR) {
@@ -797,10 +802,10 @@ void sibling_list_remove(znode * node)
 
 	tree = znode_get_tree(node);
 	assert("umka-255", node != NULL);
-	assert("zam-878", rw_tree_is_write_locked(tree));
+	assert_rw_write_locked(&(tree->tree_lock));
 	assert("nikita-3275", check_sibling_list(node));
 
-	WLOCK_DK(tree);
+	write_lock_dk(tree);
 	if (znode_is_right_connected(node) && node->right != NULL &&
 	    znode_is_left_connected(node) && node->left != NULL) {
 		assert("zam-32245",
@@ -808,7 +813,7 @@ void sibling_list_remove(znode * node)
 			     znode_get_ld_key(node->right)));
 		znode_set_rd_key(node->left, znode_get_ld_key(node->right));
 	}
-	WUNLOCK_DK(tree);
+	write_unlock_dk(tree);
 
 	if (znode_is_right_connected(node) && node->right != NULL) {
 		assert("zam-322", znode_is_left_connected(node->right));
diff -puN fs/reiser4/txnmgr.c~reiser4-spinlock-cleanup fs/reiser4/txnmgr.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/txnmgr.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.716988000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/txnmgr.c	2005-10-20 14:01:52.840995750 +0400
@@ -344,10 +344,8 @@ void init_txnmgr(txn_mgr *mgr)
 
 	mgr->atom_count = 0;
 	mgr->id_count = 1;
-
 	INIT_LIST_HEAD(&mgr->atoms_list);
-	spin_txnmgr_init(mgr);
-
+	spin_lock_init(&mgr->tmgr_lock);
 	sema_init(&mgr->commit_semaphore, 1);
 }
 
@@ -373,9 +371,7 @@ static void txnh_init(txn_handle * txnh,
 	txnh->mode = mode;
 	txnh->atom = NULL;
 	txnh->flags = 0;
-
-	spin_txnh_init(txnh);
-
+	spin_lock_init(&txnh->hlock);
 	INIT_LIST_HEAD(&txnh->txnh_link);
 }
 
@@ -384,7 +380,8 @@ static void txnh_init(txn_handle * txnh,
 static int txnh_isclean(txn_handle * txnh)
 {
 	assert("umka-172", txnh != NULL);
-	return txnh->atom == NULL && spin_txnh_is_not_locked(txnh);
+	return txnh->atom == NULL &&
+		LOCK_CNT_NIL(spin_locked_txnh);
 }
 #endif
 
@@ -407,7 +404,7 @@ static void atom_init(txn_atom * atom)
 	INIT_LIST_HEAD(ATOM_OVRWR_LIST(atom));
 	INIT_LIST_HEAD(ATOM_WB_LIST(atom));
 	INIT_LIST_HEAD(&atom->inodes);
-	spin_atom_init(atom);
+	spin_lock_init(&atom->alock);
 	/* list of transaction handles */
 	INIT_LIST_HEAD(&atom->txnh_list);
 	/* link to transaction manager's list of atoms */
@@ -524,10 +521,10 @@ static txn_atom *txnh_get_atom(txn_handl
 	txn_atom *atom;
 
 	assert("umka-180", txnh != NULL);
-	assert("jmacd-5108", spin_txnh_is_not_locked(txnh));
+	assert_spin_not_locked(&(txnh->hlock));
 
 	while (1) {
-		LOCK_TXNH(txnh);
+		spin_lock_txnh(txnh);
 		atom = txnh->atom;
 
 		if (atom == NULL)
@@ -538,16 +535,16 @@ static txn_atom *txnh_get_atom(txn_handl
 
 		atomic_inc(&atom->refcount);
 
-		UNLOCK_TXNH(txnh);
-		LOCK_ATOM(atom);
-		LOCK_TXNH(txnh);
+		spin_unlock_txnh(txnh);
+		spin_lock_atom(atom);
+		spin_lock_txnh(txnh);
 
 		if (txnh->atom == atom) {
 			atomic_dec(&atom->refcount);
 			break;
 		}
 
-		UNLOCK_TXNH(txnh);
+		spin_unlock_txnh(txnh);
 		atom_dec_and_unlock(atom);
 	}
 
@@ -569,7 +566,7 @@ txn_atom *get_current_atom_locked_nochec
 
 	atom = txnh_get_atom(txnh);
 
-	UNLOCK_TXNH(txnh);
+	spin_unlock_txnh(txnh);
 	return atom;
 }
 
@@ -584,7 +581,7 @@ txn_atom *jnode_get_atom(jnode * node)
 	assert("umka-181", node != NULL);
 
 	while (1) {
-		assert("jmacd-5108", spin_jnode_is_locked(node));
+		assert_spin_locked(&(node->guard));
 
 		atom = node->atom;
 		/* node is not in any atom */
@@ -598,11 +595,11 @@ txn_atom *jnode_get_atom(jnode * node)
 		/* At least one jnode belongs to this atom; that guarantees
 		 * atom->refcount > 0, so we can safely increment the refcount. */
 		atomic_inc(&atom->refcount);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 
 		/* re-acquire spin locks in the right order */
-		LOCK_ATOM(atom);
-		LOCK_JNODE(node);
+		spin_lock_atom(atom);
+		spin_lock_jnode(node);
 
 		/* check if node still points to the same atom. */
 		if (node->atom == atom) {
@@ -612,7 +609,7 @@ txn_atom *jnode_get_atom(jnode * node)
 
 		/* releasing of atom lock and reference requires not holding
 		 * locks on jnodes.  */
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 
 		/* We are not sure that this atom has extra references apart from
 		 * our own, so we should call the proper function, which may free the atom if
@@ -621,7 +618,7 @@ txn_atom *jnode_get_atom(jnode * node)
 
 		/* lock jnode again for getting valid node->atom pointer
 		 * value. */
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 	}
 
 	return atom;
@@ -650,14 +647,14 @@ same_slum_check(jnode * node, jnode * ch
 	   check->atom) because the atom could be locked and in the middle of
 	   being fused at that moment; jnodes of an atom in that state can
 	   point to different objects, but the atom is the same. */
-	LOCK_JNODE(check);
+	spin_lock_jnode(check);
 
 	atom = jnode_get_atom(check);
 
 	if (atom == NULL) {
 		compat = 0;
 	} else {
-		compat = (node->atom == atom && jnode_is_dirty(check));
+		compat = (node->atom == atom && JF_ISSET(check, JNODE_DIRTY));
 
 		if (compat && jnode_is_znode(check)) {
 			compat &= znode_is_connected(JZNODE(check));
@@ -667,10 +664,10 @@ same_slum_check(jnode * node, jnode * ch
 			compat &= (alloc_value == jnode_is_flushprepped(check));
 		}
 
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 
-	UNLOCK_JNODE(check);
+	spin_unlock_jnode(check);
 
 	return compat;
 }
@@ -681,7 +678,7 @@ void atom_dec_and_unlock(txn_atom * atom
 	txn_mgr *mgr = &get_super_private(reiser4_get_current_sb())->tmgr;
 
 	assert("umka-186", atom != NULL);
-	assert("jmacd-1071", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	assert("zam-1039", atomic_read(&atom->refcount) > 0);
 
 	if (atomic_dec_and_test(&atom->refcount)) {
@@ -690,21 +687,21 @@ void atom_dec_and_unlock(txn_atom * atom
 			/* This atom should exist after we re-acquire its
 			 * spinlock, so we increment its reference counter. */
 			atomic_inc(&atom->refcount);
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 			spin_lock_txnmgr(mgr);
-			LOCK_ATOM(atom);
+			spin_lock_atom(atom);
 
 			if (!atomic_dec_and_test(&atom->refcount)) {
-				UNLOCK_ATOM(atom);
+				spin_unlock_atom(atom);
 				spin_unlock_txnmgr(mgr);
 				return;
 			}
 		}
-		assert("nikita-2656", spin_txnmgr_is_locked(mgr));
+		assert_spin_locked(&(mgr->tmgr_lock));
 		atom_free(atom);
 		spin_unlock_txnmgr(mgr);
 	} else
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 }
 
 /* Create new atom and connect it to given transaction handle.  This adds the
@@ -733,14 +730,14 @@ static int atom_begin_and_assign_to_txnh
 	   locks. */
 	mgr = &get_super_private(reiser4_get_current_sb())->tmgr;
 	spin_lock_txnmgr(mgr);
-	LOCK_TXNH(txnh);
+	spin_lock_txnh(txnh);
 
 	/* Check whether new atom still needed */
 	if (txnh->atom != NULL) {
 		/* NOTE-NIKITA probably it is rather better to free
 		 * atom_alloc here than thread it up to try_capture(). */
 
-		UNLOCK_TXNH(txnh);
+		spin_unlock_txnh(txnh);
 		spin_unlock_txnmgr(mgr);
 
 		return -E_REPEAT;
@@ -753,9 +750,11 @@ static int atom_begin_and_assign_to_txnh
 
 	assert("jmacd-17", atom_isclean(atom));
 
-	/* Take the atom and txnmgr lock. No checks for lock ordering, because
-	   @atom is new and inaccessible for others. */
-	spin_lock_atom_no_ord(atom);
+	/*
+	 * do not use spin_lock_atom() because we break lock ordering here,
+	 * which is ok as long as @atom is new and inaccessible to others.
+	 */
+	spin_lock(&(atom->alock));
 
 	/* add atom to the end of transaction manager's list of atoms */
 	list_add_tail(&atom->atom_link, &mgr->atoms_list);
@@ -771,8 +770,8 @@ static int atom_begin_and_assign_to_txnh
 	atom->super = reiser4_get_current_sb();
 	capture_assign_txnh_nolock(atom, txnh);
 
-	UNLOCK_ATOM(atom);
-	UNLOCK_TXNH(txnh);
+	spin_unlock(&(atom->alock));
+	spin_unlock_txnh(txnh);
 
 	return -E_REPEAT;
 }
@@ -806,10 +805,10 @@ static void atom_free(txn_atom * atom)
 	txn_mgr *mgr = &get_super_private(reiser4_get_current_sb())->tmgr;
 
 	assert("umka-188", atom != NULL);
-	assert("jmacd-18", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	/* Remove from the txn_mgr's atom list */
-	assert("nikita-2657", spin_txnmgr_is_locked(mgr));
+	assert_spin_locked(&(mgr->tmgr_lock));
 	mgr->atom_count -= 1;
 	list_del_init(&atom->atom_link);
 
@@ -823,7 +822,7 @@ static void atom_free(txn_atom * atom)
 
 	assert("jmacd-16", atom_isclean(atom));
 
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	kmem_cache_free(_atom_slab, atom);
 }
@@ -836,7 +835,7 @@ static int atom_is_dotard(const txn_atom
 
 static int atom_can_be_committed(txn_atom * atom)
 {
-	assert("zam-884", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	assert("zam-885", atom->txnh_count > atom->nr_waiters);
 	return atom->txnh_count == atom->nr_waiters + 1;
 }
@@ -862,7 +861,7 @@ int current_atom_should_commit(void)
 	atom = get_current_atom_locked_nocheck();
 	if (atom) {
 		result = atom_should_commit(atom);
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 	return result;
 }
@@ -906,7 +905,7 @@ jnode *find_first_dirty_jnode(txn_atom *
 	jnode *first_dirty;
 	tree_level level;
 
-	assert("zam-753", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	/* The flush starts from LEAF_LEVEL (=1). */
 	for (level = 1; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) {
@@ -940,7 +939,7 @@ static void dispatch_wb_list(txn_atom * 
 	jnode *cur;
 	int total, moved;
 
-	assert("zam-905", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	total = 0;
 	moved = 0;
@@ -998,7 +997,7 @@ static void dispatch_wb_list(txn_atom * 
 	while (ATOM_WB_LIST(atom) != &cur->capture_link) {
 		jnode *next = list_entry(cur->capture_link.next, jnode, capture_link);
 
-		LOCK_JNODE(cur);
+		spin_lock_jnode(cur);
 		if (!JF_ISSET(cur, JNODE_WRITEBACK)) {
 			if (JF_ISSET(cur, JNODE_DIRTY)) {
 				queue_jnode(fq, cur);
@@ -1009,7 +1008,7 @@ static void dispatch_wb_list(txn_atom * 
 					      ATOM_CLEAN_LIST(atom));
 			}
 		}
-		UNLOCK_JNODE(cur);
+		spin_unlock_jnode(cur);
 
 		cur = next;
 	}
@@ -1029,7 +1028,7 @@ static int submit_wb_list(void)
 		return PTR_ERR(fq);
 
 	dispatch_wb_list(fq->atom, fq);
-	UNLOCK_ATOM(fq->atom);
+	spin_unlock_atom(fq->atom);
 
 	ret = write_fq(fq, NULL, 1);
 	fq_put(fq);
@@ -1087,7 +1086,7 @@ static int commit_current_atom(long *nr_
 	int flushiters;
 
 	assert("zam-888", atom != NULL && *atom != NULL);
-	assert("zam-886", spin_atom_is_locked(*atom));
+	assert_spin_locked(&((*atom)->alock));
 	assert("zam-887", get_current_context()->trans->atom == *atom);
 	assert("jmacd-151", atom_isopen(*atom));
 
@@ -1121,10 +1120,10 @@ static int commit_current_atom(long *nr_
 	if (ret)
 		return ret;
 
-	assert("zam-882", spin_atom_is_locked(*atom));
+	assert_spin_locked(&((*atom)->alock));
 
 	if (!atom_can_be_committed(*atom)) {
-		UNLOCK_ATOM(*atom);
+		spin_unlock_atom(*atom);
 		return RETERR(-E_REPEAT);
 	}
 
@@ -1136,7 +1135,7 @@ static int commit_current_atom(long *nr_
 	   at this point, commit should be successful. */
 	atom_set_stage(*atom, ASTAGE_PRE_COMMIT);
 	ON_DEBUG(((*atom)->committer = current));
-	UNLOCK_ATOM(*atom);
+	spin_unlock_atom(*atom);
 
 	ret = current_atom_complete_writes();
 	if (ret)
@@ -1164,8 +1163,8 @@ static int commit_current_atom(long *nr_
 	invalidate_list(ATOM_WB_LIST(*atom));
 	assert("zam-927", list_empty(&(*atom)->inodes));
 
-	LOCK_ATOM(*atom);
-	done:
+	spin_lock_atom(*atom);
+ done:
 	atom_set_stage(*atom, ASTAGE_DONE);
 	ON_DEBUG((*atom)->committer = NULL);
 
@@ -1180,7 +1179,7 @@ static int commit_current_atom(long *nr_
 	assert("jmacd-1070", atomic_read(&(*atom)->refcount) > 0);
 	assert("jmacd-1062", (*atom)->capture_count == 0);
 	BUG_ON((*atom)->capture_count != 0);
-	assert("jmacd-1071", spin_atom_is_locked(*atom));
+	assert_spin_locked(&((*atom)->alock));
 
 	return ret;
 }
@@ -1194,21 +1193,21 @@ static int force_commit_atom_nolock(txn_
 	txn_atom *atom;
 
 	assert("zam-837", txnh != NULL);
-	assert("zam-835", spin_txnh_is_locked(txnh));
+	assert_spin_locked(&(txnh->hlock));
 	assert("nikita-2966", lock_stack_isclean(get_current_lock_stack()));
 
 	atom = txnh->atom;
 
 	assert("zam-834", atom != NULL);
-	assert("zam-836", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	/* Set flags for atom and txnh: forcing atom commit and waiting for
 	 * commit completion */
 	txnh->flags |= TXNH_WAIT_COMMIT;
 	atom->flags |= ATOM_FORCE_COMMIT;
 
-	UNLOCK_TXNH(txnh);
-	UNLOCK_ATOM(atom);
+	spin_unlock_txnh(txnh);
+	spin_unlock_atom(atom);
 
 	txn_restart_current();
 	return 0;
@@ -1240,7 +1239,7 @@ int txnmgr_force_commit_all(struct super
 	spin_lock_txnmgr(mgr);
 
 	list_for_each_entry(atom, &mgr->atoms_list, atom_link) {
-		LOCK_ATOM(atom);
+		spin_lock_atom(atom);
 
 		/* Commit any atom which can be committed.  If @commit_new_atoms
 		 * is not set we commit only atoms which were created before
@@ -1251,7 +1250,7 @@ int txnmgr_force_commit_all(struct super
 				spin_unlock_txnmgr(mgr);
 
 				if (atom->stage < ASTAGE_PRE_COMMIT) {
-					LOCK_TXNH(txnh);
+					spin_lock_txnh(txnh);
 					/* Add force-context txnh */
 					capture_assign_txnh_nolock(atom, txnh);
 					ret = force_commit_atom_nolock(txnh);
@@ -1265,17 +1264,17 @@ int txnmgr_force_commit_all(struct super
 			}
 		}
 
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 
 #if REISER4_DEBUG
 	if (commit_all_atoms) {
 		reiser4_super_info_data *sbinfo = get_super_private(super);
-		reiser4_spin_lock_sb(sbinfo);
+		spin_lock_reiser4_super(sbinfo);
 		assert("zam-813",
 		       sbinfo->blocks_fake_allocated_unformatted == 0);
 		assert("zam-812", sbinfo->blocks_fake_allocated == 0);
-		reiser4_spin_unlock_sb(sbinfo);
+		spin_unlock_reiser4_super(sbinfo);
 	}
 #endif
 
@@ -1324,10 +1323,10 @@ int commit_some_atoms(txn_mgr * mgr)
 		 */
 		if (atom_is_committable(atom)) {
 			/* now, take spin lock and re-check */
-			LOCK_ATOM(atom);
+			spin_lock_atom(atom);
 			if (atom_is_committable(atom))
 				break;
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		}
 	}
 
@@ -1340,7 +1339,7 @@ int commit_some_atoms(txn_mgr * mgr)
 		return 0;
 	}
 
-	LOCK_TXNH(txnh);
+	spin_lock_txnh(txnh);
 
 	BUG_ON(atom == NULL);
 	/* Set the atom to force committing */
@@ -1349,8 +1348,8 @@ int commit_some_atoms(txn_mgr * mgr)
 	/* Add force-context txnh */
 	capture_assign_txnh_nolock(atom, txnh);
 
-	UNLOCK_TXNH(txnh);
-	UNLOCK_ATOM(atom);
+	spin_unlock_txnh(txnh);
+	spin_unlock_atom(atom);
 
 	/* we are about to release daemon spin lock, notify daemon it
 	   has to rescan atoms */
@@ -1372,9 +1371,9 @@ static int txn_try_to_fuse_small_atom(tx
 	repeat = 0;
 
 	if (!spin_trylock_txnmgr(tmgr)) {
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 		spin_lock_txnmgr(tmgr);
-		LOCK_ATOM(atom);
+		spin_lock_atom(atom);
 		repeat = 1;
 		if (atom->stage != atom_stage)
 			goto out;
@@ -1394,14 +1393,14 @@ static int txn_try_to_fuse_small_atom(tx
 				/* all locks are lost we can only repeat here */
 				return -E_REPEAT;
 			}
-			UNLOCK_ATOM(atom_2);
+			spin_unlock_atom(atom_2);
 		}
 	}
 	atom->flags |= ATOM_CANCEL_FUSION;
       out:
 	spin_unlock_txnmgr(tmgr);
 	if (repeat) {
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 		return -E_REPEAT;
 	}
 	return 0;
@@ -1441,7 +1440,7 @@ flush_some_atom(jnode * start, long *nr_
 		/* traverse the list of all atoms */
 		list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
 			/* lock atom before checking its state */
-			LOCK_ATOM(atom);
+			spin_lock_atom(atom);
 
 			/*
 			 * we need an atom which is not being committed and
@@ -1450,14 +1449,14 @@ flush_some_atom(jnode * start, long *nr_
 			 */
 			if (atom->stage < ASTAGE_PRE_COMMIT &&
 			    atom->nr_flushers == 0) {
-				LOCK_TXNH(txnh);
+				spin_lock_txnh(txnh);
 				capture_assign_txnh_nolock(atom, txnh);
-				UNLOCK_TXNH(txnh);
+				spin_unlock_txnh(txnh);
 
 				goto found;
 			}
 
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 		}
 
 		/*
@@ -1466,13 +1465,13 @@ flush_some_atom(jnode * start, long *nr_
 		 */
 		if (!current_is_pdflush() && !wbc->nonblocking) {
 			list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
-				LOCK_ATOM(atom);
+				spin_lock_atom(atom);
 				/* Repeat the check from the above. */
 				if (atom->stage < ASTAGE_PRE_COMMIT
 				    && atom->nr_flushers == 0) {
-					LOCK_TXNH(txnh);
+					spin_lock_txnh(txnh);
 					capture_assign_txnh_nolock(atom, txnh);
-					UNLOCK_TXNH(txnh);
+					spin_unlock_txnh(txnh);
 
 					goto found;
 				}
@@ -1486,7 +1485,7 @@ flush_some_atom(jnode * start, long *nr_
 					atom_wait_event(atom);
 					goto repeat;
 				}
-				UNLOCK_ATOM(atom);
+				spin_unlock_atom(atom);
 			}
 		}
 		spin_unlock_txnmgr(tmgr);
@@ -1521,7 +1520,7 @@ flush_some_atom(jnode * start, long *nr_
 				txnh->flags |= TXNH_WAIT_COMMIT;
 			atom->flags |= ATOM_FORCE_COMMIT;
 		}
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	} else if (ret == -E_REPEAT) {
 		if (*nr_submitted == 0) {
 			/* let others who hampers flushing (hold longterm locks,
@@ -1556,12 +1555,12 @@ void invalidate_list(capture_list_head *
 		spin_unlock(&scan_lock);
 
 		atom = node->atom;
-		LOCK_ATOM(atom);
+		spin_lock_atom(atom);
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		if (JF_ISSET(node, JNODE_CC) && node->pg)
 			page_cache_release(node->pg);
 		uncapture_block(node);
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 		JF_CLR(node, JNODE_SCANNED);
 		jput(node);
 
@@ -1579,7 +1578,7 @@ void invalidate_list(struct list_head *h
 		jnode *node;
 
 		node = list_entry(head->next, jnode, capture_link);
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		uncapture_block(node);
 		jput(node);
 	}
@@ -1601,7 +1600,7 @@ void atom_wait_event(txn_atom * atom)
 {
 	txn_wait_links _wlinks;
 
-	assert("zam-744", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	assert("nikita-3156",
 	       lock_stack_isclean(get_current_lock_stack()) ||
 	       atom->nr_running_queues > 0);
@@ -1609,12 +1608,12 @@ void atom_wait_event(txn_atom * atom)
 	init_wlinks(&_wlinks);
 	list_add_tail(&_wlinks._fwaitfor_link, &atom->fwaitfor_list);
 	atomic_inc(&atom->refcount);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	prepare_to_sleep(_wlinks._lock_stack);
 	go_to_sleep(_wlinks._lock_stack);
 
-	LOCK_ATOM(atom);
+	spin_lock_atom(atom);
 	list_del(&_wlinks._fwaitfor_link);
 	atom_dec_and_unlock(atom);
 }
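
The function above is the canonical shape for sleeping on a condition guarded by a spinlock: publish a wait link, pin the atom with an extra reference so it cannot vanish, drop the lock, sleep, then retake the lock and unlink. reiser4 sleeps on its per-thread lock_stack rather than a waitqueue, but the locking discipline is the same. A minimal generic sketch with stock kernel primitives follows; struct foo and its fields are illustrative assumptions, not reiser4 API.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct foo {
	spinlock_t lock;	/* guards ->done */
	wait_queue_head_t wq;
	atomic_t refcount;
	int done;
};

/* Sketch: wait until foo->done is set; wakers hold foo->lock,
 * set ->done and call wake_up(&foo->wq). */
static void foo_wait_done(struct foo *foo)
{
	spin_lock(&foo->lock);
	while (!foo->done) {
		DEFINE_WAIT(w);

		prepare_to_wait(&foo->wq, &w, TASK_UNINTERRUPTIBLE);
		atomic_inc(&foo->refcount);	/* pin across unlock */
		spin_unlock(&foo->lock);

		schedule();
		finish_wait(&foo->wq, &w);

		spin_lock(&foo->lock);
		/* real code must handle the count dropping to zero,
		 * cf. atom_dec_and_unlock() above */
		atomic_dec(&foo->refcount);
	}
	spin_unlock(&foo->lock);
}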
@@ -1622,7 +1621,7 @@ void atom_wait_event(txn_atom * atom)
 void atom_set_stage(txn_atom * atom, txn_stage stage)
 {
 	assert("nikita-3535", atom != NULL);
-	assert("nikita-3538", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	assert("nikita-3536", ASTAGE_FREE <= stage && stage <= ASTAGE_INVALID);
 	/* Excelsior! */
 	assert("nikita-3537", stage >= atom->stage);
@@ -1635,7 +1634,7 @@ void atom_set_stage(txn_atom * atom, txn
 /* wake all threads which wait for an event */
 void atom_send_event(txn_atom * atom)
 {
-	assert("zam-745", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 	wakeup_atom_waitfor_list(atom);
 }
 
@@ -1675,7 +1674,7 @@ static int try_commit_txnh(commit_data *
 	/* Get the atom and txnh locked. */
 	cd->atom = txnh_get_atom(cd->txnh);
 	assert("jmacd-309", cd->atom != NULL);
-	UNLOCK_TXNH(cd->txnh);
+	spin_unlock_txnh(cd->txnh);
 
 	if (cd->wait) {
 		cd->atom->nr_waiters--;
@@ -1751,7 +1750,7 @@ static int try_commit_txnh(commit_data *
 						    LONG_MAX, &cd->nr_written,
 						    &cd->atom, NULL);
 			if (result == 0) {
-				UNLOCK_ATOM(cd->atom);
+				spin_unlock_atom(cd->atom);
 				cd->preflush = 0;
 				result = RETERR(-E_REPEAT);
 			} else	/* Atoms wasn't flushed
@@ -1772,7 +1771,11 @@ static int try_commit_txnh(commit_data *
 	} else
 		result = 0;
 
-	assert("jmacd-1027", ergo(result == 0, spin_atom_is_locked(cd->atom)));
+#if REISER4_DEBUG
+	if (result == 0)
+		assert_spin_locked(&(cd->atom->alock));
+#endif
+
 	/* perfectly valid assertion, except that when atom/txnh is not locked
 	 * fusion can take place, and cd->atom points nowhere. */
 	/*
@@ -1798,15 +1801,14 @@ static int commit_txnh(txn_handle * txnh
 	while (try_commit_txnh(&cd) != 0)
 		preempt_point();
 
-	assert("nikita-3171", spin_txnh_is_not_locked(txnh));
-	LOCK_TXNH(txnh);
+	spin_lock_txnh(txnh);
 
 	cd.atom->txnh_count -= 1;
 	txnh->atom = NULL;
 	/* remove transaction handle from atom's list of transaction handles */
 	list_del_init(&txnh->txnh_link);
 
-	UNLOCK_TXNH(txnh);
+	spin_unlock_txnh(txnh);
 	atom_dec_and_unlock(cd.atom);
 	/* if we don't want to do a commit (TXNH_DONT_COMMIT is set, probably
 	 * because it takes time) by current thread, we do that work
@@ -1873,18 +1875,18 @@ try_capture_block(txn_handle * txnh, jno
 	assert("umka-195", node != NULL);
 
 	/* The jnode is already locked!  Being called from try_capture(). */
-	assert("jmacd-567", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	block_atom = node->atom;
 
 	/* Get txnh spinlock, this allows us to compare txn_atom pointers but it doesn't
 	   let us touch the atoms themselves. */
-	LOCK_TXNH(txnh);
+	spin_lock_txnh(txnh);
 
 	txnh_atom = txnh->atom;
 
 	if (txnh_atom != NULL && block_atom == txnh_atom) {
-		UNLOCK_TXNH(txnh);
+		spin_unlock_txnh(txnh);
 		return 0;
 	}
 	/* NIKITA-HANS: nothing */
@@ -1928,17 +1930,14 @@ try_capture_block(txn_handle * txnh, jno
 			if (ret) {
 				JF_SET(node, JNODE_MISSED_IN_CAPTURE);
 
-				assert("zam-687",
-				       spin_txnh_is_not_locked(txnh));
-				assert("zam-688",
-				       spin_jnode_is_not_locked(node));
-
+				assert_spin_not_locked(&(txnh->hlock));
+				assert_spin_not_locked(&(node->guard));
 				return ret;
 			} else
 				JF_CLR(node, JNODE_MISSED_IN_CAPTURE);
 
-			assert("zam-701", spin_txnh_is_locked(txnh));
-			assert("zam-702", spin_jnode_is_locked(node));
+			assert_spin_locked(&(txnh->hlock));
+			assert_spin_locked(&(node->guard));
 		}
 	}
 
@@ -1952,10 +1951,8 @@ try_capture_block(txn_handle * txnh, jno
 			ret = capture_assign_txnh(node, txnh, mode, can_coc);
 			if (ret != 0) {
 				/* E_REPEAT or otherwise */
-				assert("jmacd-6129",
-				       spin_txnh_is_not_locked(txnh));
-				assert("jmacd-6130",
-				       spin_jnode_is_not_locked(node));
+				assert_spin_not_locked(&(txnh->hlock));
+				assert_spin_not_locked(&(node->guard));
 				return ret;
 			}
 
@@ -1963,8 +1960,8 @@ try_capture_block(txn_handle * txnh, jno
 			   granted because the block is committing.  Locks still held. */
 		} else {
 			if (mode & TXN_CAPTURE_DONT_FUSE) {
-				UNLOCK_TXNH(txnh);
-				UNLOCK_JNODE(node);
+				spin_unlock_txnh(txnh);
+				spin_unlock_jnode(node);
 				/* we are in a "no-fusion" mode and @node is
 				 * already part of transaction. */
 				return RETERR(-E_NO_NEIGHBOR);
@@ -1973,10 +1970,8 @@ try_capture_block(txn_handle * txnh, jno
 			   returns -E_REPEAT on successful fusion, 0 on the fall-through case. */
 			ret = capture_init_fusion(node, txnh, mode, can_coc);
 			if (ret != 0) {
-				assert("jmacd-6131",
-				       spin_txnh_is_not_locked(txnh));
-				assert("jmacd-6132",
-				       spin_jnode_is_not_locked(node));
+				assert_spin_not_locked(&(txnh->hlock));
+				assert_spin_not_locked(&(node->guard));
 				return ret;
 			}
 
@@ -1993,10 +1988,8 @@ try_capture_block(txn_handle * txnh, jno
 			ret = capture_assign_block(txnh, node);
 			if (ret != 0) {
 				/* E_REPEAT or otherwise */
-				assert("jmacd-6133",
-				       spin_txnh_is_not_locked(txnh));
-				assert("jmacd-6134",
-				       spin_jnode_is_not_locked(node));
+				assert_spin_not_locked(&(txnh->hlock));
+				assert_spin_not_locked(&(node->guard));
 				return ret;
 			}
 
@@ -2006,8 +1999,8 @@ try_capture_block(txn_handle * txnh, jno
 
 			/* In this case, neither txnh nor page are assigned to
 			 * an atom. */
-			UNLOCK_JNODE(node);
-			UNLOCK_TXNH(txnh);
+			spin_unlock_jnode(node);
+			spin_unlock_txnh(txnh);
 			return atom_begin_and_assign_to_txnh(atom_alloc, txnh);
 		}
 
@@ -2018,11 +2011,11 @@ try_capture_block(txn_handle * txnh, jno
 	}
 
 	/* Successful case: both jnode and txnh are still locked. */
-	assert("jmacd-740", spin_txnh_is_locked(txnh));
-	assert("jmacd-741", spin_jnode_is_locked(node));
+	assert_spin_locked(&(txnh->hlock));
+	assert_spin_locked(&(node->guard));
 
 	/* Release txnh lock, return with the jnode still locked. */
-	UNLOCK_TXNH(txnh);
+	spin_unlock_txnh(txnh);
 
 	return 0;
 }
@@ -2032,7 +2025,7 @@ build_capture_mode(jnode * node, znode_l
 {
 	txn_capture cap_mode;
 
-	assert("nikita-3187", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	/* FIXME_JMACD No way to set TXN_CAPTURE_READ_MODIFY yet. */
 
@@ -2091,7 +2084,7 @@ try_capture(jnode * node, znode_lock_mod
 #endif
 	int ret;
 
-	assert("jmacd-604", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
       repeat:
 	cap_mode = build_capture_mode(node, lock_mode, flags);
@@ -2111,15 +2104,18 @@ try_capture(jnode * node, znode_lock_mod
 	   If ret == 0 then jnode is still locked.
 	   If ret != 0 then jnode is unlocked.
 	 */
-	assert("nikita-2674", ergo(ret == 0, spin_jnode_is_locked(node)));
-	assert("nikita-2675", ergo(ret != 0, spin_jnode_is_not_locked(node)));
-
-	assert("nikita-2974", spin_txnh_is_not_locked(txnh));
+#if REISER4_DEBUG
+	if (ret == 0)
+		assert_spin_locked(&(node->guard));
+	else
+		assert_spin_not_locked(&(node->guard));
+#endif
+	assert_spin_not_locked(&(txnh->hlock));
 
 	if (ret == -E_REPEAT) {
 		/* E_REPEAT implies all locks were released, therefore we need
 		   to take the jnode's lock again. */
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 
 		/* Although this may appear to be a busy loop, it is not.
 		   There are several conditions that cause E_REPEAT to be
@@ -2152,7 +2148,7 @@ try_capture(jnode * node, znode_lock_mod
 		reiser4_stat_inc(coc.coc_wait);
 		/* disable COC for the next loop iteration */
 		coc_enabled = 0;
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		goto repeat;
 	}
 #endif
@@ -2178,11 +2174,11 @@ try_capture(jnode * node, znode_lock_mod
 		   re-acquiring it, but there are cases were failure occurs
 		   when the lock is not held, and those cases would need to be
 		   modified to re-take the lock. */
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 	}
 
 	/* Jnode is still locked. */
-	assert("jmacd-760", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 	return ret;
 }
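
Throughout this code -E_REPEAT means the callee had to drop the caller's spinlocks (to sleep, to take a coarser lock in the right order, or because atoms fused underneath it), so the caller must relock and retry against fresh state, exactly as the loop above does. A minimal sketch of the idiom; struct foo and op_locked() are illustrative, -E_REPEAT is the reiser4 error code used above.

/* op_locked() works under foo->lock; it returns -E_REPEAT after
 * releasing the lock whenever it had to drop it internally. */
static int op_locked(struct foo *foo);

static int foo_do(struct foo *foo)
{
	int ret;

	spin_lock(&foo->lock);
	while ((ret = op_locked(foo)) == -E_REPEAT) {
		/* the lock was released; state may have changed,
		 * so start over from a fresh snapshot */
		spin_lock(&foo->lock);
	}
	if (ret == 0)
		spin_unlock(&foo->lock);	/* success kept the lock */
	return ret;
}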
 
@@ -2200,12 +2196,11 @@ static int fuse_not_fused_lock_owners(tx
 	int repeat = 0;
 	txn_atom *atomh = txnh->atom;
 
-/*	assert ("zam-689", znode_is_rlocked (node));*/
-	assert("zam-690", spin_znode_is_locked(node));
-	assert("zam-691", spin_txnh_is_locked(txnh));
+	assert_spin_locked(&(ZJNODE(node)->guard));
+	assert_spin_locked(&(txnh->hlock));
 	assert("zam-692", atomh != NULL);
 
-	RLOCK_ZLOCK(&node->lock);
+	read_lock_zlock(&node->lock);
 
 	if (!spin_trylock_atom(atomh)) {
 		repeat = 1;
@@ -2231,35 +2226,35 @@ static int fuse_not_fused_lock_owners(tx
 
 		if (atomf == NULL) {
 			capture_assign_txnh_nolock(atomh, ctx->trans);
-			UNLOCK_TXNH(ctx->trans);
+			spin_unlock_txnh(ctx->trans);
 
 			reiser4_wake_up(lh->owner);
 			continue;
 		}
 
 		if (atomf == atomh) {
-			UNLOCK_TXNH(ctx->trans);
+			spin_unlock_txnh(ctx->trans);
 			continue;
 		}
 
 		if (!spin_trylock_atom(atomf)) {
-			UNLOCK_TXNH(ctx->trans);
+			spin_unlock_txnh(ctx->trans);
 			repeat = 1;
 			continue;
 		}
 
-		UNLOCK_TXNH(ctx->trans);
+		spin_unlock_txnh(ctx->trans);
 
 		if (atomf == atomh || atomf->stage > ASTAGE_CAPTURE_WAIT) {
-			UNLOCK_ATOM(atomf);
+			spin_unlock_atom(atomf);
 			continue;
 		}
 		// repeat = 1;
 
 		reiser4_wake_up(lh->owner);
 
-		UNLOCK_TXNH(txnh);
-		RUNLOCK_ZLOCK(&node->lock);
+		spin_unlock_txnh(txnh);
+		read_unlock_zlock(&node->lock);
 		spin_unlock_znode(node);
 
 		/* @atomf is "small" and @atomh is "large", by
@@ -2270,17 +2265,17 @@ static int fuse_not_fused_lock_owners(tx
 		return RETERR(-E_REPEAT);
 	}
 
-	UNLOCK_ATOM(atomh);
+	spin_unlock_atom(atomh);
 
 	if (repeat) {
 	      fail:
-		UNLOCK_TXNH(txnh);
-		RUNLOCK_ZLOCK(&node->lock);
+		spin_unlock_txnh(txnh);
+		read_unlock_zlock(&node->lock);
 		spin_unlock_znode(node);
 		return RETERR(-E_REPEAT);
 	}
 
-	RUNLOCK_ZLOCK(&node->lock);
+	read_unlock_zlock(&node->lock);
 	return 0;
 }
 
@@ -2298,12 +2293,12 @@ int try_capture_page_to_invalidate(struc
 		return PTR_ERR(node);
 	}
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	unlock_page(pg);
 
 	ret =
 	    try_capture(node, ZNODE_WRITE_LOCK, 0, 0 /* no copy on capture */ );
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 	jput(node);
 	lock_page(pg);
 	return ret;
@@ -2334,15 +2329,15 @@ void uncapture_page(struct page *pg)
 	node = (jnode *) (pg->private);
 	BUG_ON(node == NULL);
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 
 	eflush_del(node, 1 /* page is locked */ );
 	/*assert ("zam-815", !JF_ISSET(node, JNODE_EFLUSH)); */
 
 	atom = jnode_get_atom(node);
 	if (atom == NULL) {
-		assert("jmacd-7111", !jnode_is_dirty(node));
-		UNLOCK_JNODE(node);
+		assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY));
+		spin_unlock_jnode(node);
 		return;
 	}
 
@@ -2356,7 +2351,7 @@ void uncapture_page(struct page *pg)
 	 * wait all write_fq() for this atom to complete. This is not
 	 * significant overhead. */
 	while (JF_ISSET(node, JNODE_FLUSH_QUEUED) && atom->nr_running_queues) {
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		/*
 		 * at this moment we want to wait for "atom event", viz. wait
 		 * until @node can be removed from flush queue. But
@@ -2373,18 +2368,18 @@ void uncapture_page(struct page *pg)
 		 * page may has been detached by ->writepage()->releasepage().
 		 */
 		reiser4_wait_page_writeback(pg);
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		eflush_del(node, 1);
 		page_cache_release(pg);
 		atom = jnode_get_atom(node);
 /* VS-FIXME-HANS: improve the commenting in this function */
 		if (atom == NULL) {
-			UNLOCK_JNODE(node);
+			spin_unlock_jnode(node);
 			return;
 		}
 	}
 	uncapture_block(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 	jput(node);
 }
 
@@ -2394,7 +2389,7 @@ void uncapture_jnode(jnode * node)
 {
 	txn_atom *atom;
 
-	assert("vs-1462", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 	assert("", node->pg == 0);
 
 	if (JF_ISSET(node, JNODE_EFLUSH)) {
@@ -2406,13 +2401,13 @@ void uncapture_jnode(jnode * node)
 	/*jnode_make_clean(node); */
 	atom = jnode_get_atom(node);
 	if (atom == NULL) {
-		assert("jmacd-7111", !jnode_is_dirty(node));
-		UNLOCK_JNODE(node);
+		assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY));
+		spin_unlock_jnode(node);
 		return;
 	}
 
 	uncapture_block(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 	jput(node);
 }
 
@@ -2423,8 +2418,8 @@ static void capture_assign_txnh_nolock(t
 	assert("umka-200", atom != NULL);
 	assert("umka-201", txnh != NULL);
 
-	assert("jmacd-822", spin_txnh_is_locked(txnh));
-	assert("jmacd-823", spin_atom_is_locked(atom));
+	assert_spin_locked(&(txnh->hlock));
+	assert_spin_locked(&(atom->alock));
 	assert("jmacd-824", txnh->atom == NULL);
 	assert("nikita-3540", atom_isopen(atom));
 	BUG_ON(txnh->atom != NULL);
@@ -2441,11 +2436,11 @@ static void capture_assign_block_nolock(
 {
 	assert("umka-202", atom != NULL);
 	assert("umka-203", node != NULL);
-	assert("jmacd-321", spin_jnode_is_locked(node));
-	assert("umka-295", spin_atom_is_locked(atom));
+	assert_spin_locked(&(node->guard));
+	assert_spin_locked(&(atom->alock));
 	assert("jmacd-323", node->atom == NULL);
 	BUG_ON(!list_empty_careful(&node->capture_link));
-	assert("nikita-3470", !jnode_is_dirty(node));
+	assert("nikita-3470", !JF_ISSET(node, JNODE_DIRTY));
 
 	/* Pointer from jnode to atom is not counted in atom->refcount. */
 	node->atom = atom;
@@ -2481,9 +2476,9 @@ int is_cced(const jnode * node)
 /* common code for dirtying both unformatted jnodes and formatted znodes. */
 static void do_jnode_make_dirty(jnode * node, txn_atom * atom)
 {
-	assert("zam-748", spin_jnode_is_locked(node));
-	assert("zam-750", spin_atom_is_locked(atom));
-	assert("jmacd-3981", !jnode_is_dirty(node));
+	assert_spin_locked(&(node->guard));
+	assert_spin_locked(&(atom->alock));
+	assert("jmacd-3981", !JF_ISSET(node, JNODE_DIRTY));
 
 	JF_SET(node, JNODE_DIRTY);
 
@@ -2540,7 +2535,7 @@ static void do_jnode_make_dirty(jnode * 
 void jnode_make_dirty_locked(jnode * node)
 {
 	assert("umka-204", node != NULL);
-	assert("zam-7481", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 
 	if (REISER4_DEBUG && rofs_jnode(node)) {
 		warning("nikita-3365", "Dirtying jnode on rofs");
@@ -2548,16 +2543,16 @@ void jnode_make_dirty_locked(jnode * nod
 	}
 
 	/* Fast check for already dirty node */
-	if (!jnode_is_dirty(node)) {
+	if (!JF_ISSET(node, JNODE_DIRTY)) {
 		txn_atom *atom;
 
 		atom = jnode_get_atom(node);
 		assert("vs-1094", atom);
 		/* Check jnode dirty status again because node spin lock might
 		 * be released inside jnode_get_atom(). */
-		if (likely(!jnode_is_dirty(node)))
+		if (likely(!JF_ISSET(node, JNODE_DIRTY)))
 			do_jnode_make_dirty(node, atom);
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 }
 
@@ -2580,7 +2575,7 @@ void znode_make_dirty(znode * z)
 		return;
 	}
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	jnode_make_dirty_locked(node);
 	page = jnode_page(node);
 	if (page != NULL) {
@@ -2594,7 +2589,7 @@ void znode_make_dirty(znode * z)
 
 		/* jnode lock is not needed for the rest of
 		 * znode_set_dirty(). */
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 		/* reiser4 file write code calls set_page_dirty for
 		 * unformatted nodes, for formatted nodes we do it here. */
 		set_page_dirty_internal(page, 0);
@@ -2603,7 +2598,7 @@ void znode_make_dirty(znode * z)
 		z->version = znode_build_version(jnode_get_tree(node));
 	} else {
 		assert("zam-596", znode_above_root(JZNODE(node)));
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 	}
 
 	assert("nikita-1900", znode_is_write_locked(z));
@@ -2620,7 +2615,7 @@ int sync_atom(txn_atom * atom)
 	result = 0;
 	if (atom != NULL) {
 		if (atom->stage < ASTAGE_PRE_COMMIT) {
-			LOCK_TXNH(txnh);
+			spin_lock_txnh(txnh);
 			capture_assign_txnh_nolock(atom, txnh);
 			result = force_commit_atom_nolock(txnh);
 		} else if (atom->stage < ASTAGE_POST_COMMIT) {
@@ -2629,7 +2624,7 @@ int sync_atom(txn_atom * atom)
 			/* try once more */
 			result = RETERR(-E_REPEAT);
 		} else
-			UNLOCK_ATOM(atom);
+			spin_unlock_atom(atom);
 	}
 	return result;
 }
@@ -2644,11 +2639,11 @@ count_jnode(txn_atom * atom, jnode * nod
 {
 	struct list_head *pos;
 #if REISER4_COPY_ON_CAPTURE
-	assert("", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 #else
 	assert("zam-1018", atom_is_protected(atom));
 #endif
-	assert("", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 	assert("", NODE_LIST(node) == old_list);
 
 	switch (NODE_LIST(node)) {
@@ -2782,7 +2777,7 @@ void jnode_make_wander_nolock(jnode * no
 
 	assert("nikita-2431", node != NULL);
 	assert("nikita-2432", !JF_ISSET(node, JNODE_RELOC));
-	assert("nikita-3153", jnode_is_dirty(node));
+	assert("nikita-3153", JF_ISSET(node, JNODE_DIRTY));
 	assert("zam-897", !JF_ISSET(node, JNODE_FLUSH_QUEUED));
 	assert("nikita-3367", !blocknr_is_fake(jnode_get_block(node)));
 
@@ -2804,21 +2799,21 @@ void jnode_make_wander(jnode * node)
 {
 	txn_atom *atom;
 
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 	atom = jnode_get_atom(node);
 	assert("zam-913", atom != NULL);
 	assert("zam-914", !JF_ISSET(node, JNODE_RELOC));
 
 	jnode_make_wander_nolock(node);
-	UNLOCK_ATOM(atom);
-	UNLOCK_JNODE(node);
+	spin_unlock_atom(atom);
+	spin_unlock_jnode(node);
 }
 
 /* this just sets RELOC bit  */
 static void jnode_make_reloc_nolock(flush_queue_t * fq, jnode * node)
 {
-	assert("vs-1480", spin_jnode_is_locked(node));
-	assert("zam-916", jnode_is_dirty(node));
+	assert_spin_locked(&(node->guard));
+	assert("zam-916", JF_ISSET(node, JNODE_DIRTY));
 	assert("zam-917", !JF_ISSET(node, JNODE_RELOC));
 	assert("zam-918", !JF_ISSET(node, JNODE_OVRWR));
 	assert("zam-920", !JF_ISSET(node, JNODE_FLUSH_QUEUED));
@@ -2834,7 +2829,7 @@ void znode_make_reloc(znode * z, flush_q
 	txn_atom *atom;
 
 	node = ZJNODE(z);
-	LOCK_JNODE(node);
+	spin_lock_jnode(node);
 
 	atom = jnode_get_atom(node);
 	assert("zam-919", atom != NULL);
@@ -2842,8 +2837,8 @@ void znode_make_reloc(znode * z, flush_q
 	jnode_make_reloc_nolock(fq, node);
 	queue_jnode(fq, node);
 
-	UNLOCK_ATOM(atom);
-	UNLOCK_JNODE(node);
+	spin_unlock_atom(atom);
+	spin_unlock_jnode(node);
 
 }
 
@@ -2861,10 +2856,10 @@ static int trylock_wait(txn_atom * atom,
 	if (unlikely(!spin_trylock_atom(atom))) {
 		atomic_inc(&atom->refcount);
 
-		UNLOCK_JNODE(node);
-		UNLOCK_TXNH(txnh);
+		spin_unlock_jnode(node);
+		spin_unlock_txnh(txnh);
 
-		LOCK_ATOM(atom);
+		spin_lock_atom(atom);
 		/* caller should eliminate extra reference by calling
 		 * atom_dec_and_unlock() for this atom. */
 		return 1;
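
trylock_wait() is the standard cure for lock-order inversion: the caller already holds the fine-grained jnode and txnh locks but needs the atom lock, which nests outside both. It first tries spin_trylock_atom(); only on contention does it pin the atom with a reference, drop the fine locks, and take the atom lock in the legal order, returning 1 to tell the caller its old snapshot is stale. The same idiom in generic form; struct coarse and the function name are illustrative assumptions.

struct coarse {
	spinlock_t lock;	/* nests outside `fine` */
	atomic_t refcount;
};

/* Returns 0 if the coarse lock was taken without dropping `fine`,
 * 1 if `fine` was dropped and the caller must revalidate. */
static int lock_coarse_from_fine(struct coarse *c, spinlock_t *fine)
{
	if (spin_trylock(&c->lock))
		return 0;

	atomic_inc(&c->refcount);	/* keep c alive across unlock */
	spin_unlock(fine);
	spin_lock(&c->lock);		/* legal order: coarse first */
	/* caller re-takes `fine`, revalidates its state and drops
	 * the extra reference, cf. atom_dec_and_unlock() */
	return 1;
}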
@@ -2900,8 +2895,8 @@ static int trylock_throttle(txn_atom * a
 	assert("nikita-3225", txnh != NULL);
 	assert("nikita-3226", node != NULL);
 
-	assert("nikita-3227", spin_txnh_is_locked(txnh));
-	assert("nikita-3229", spin_jnode_is_locked(node));
+	assert_spin_locked(&(txnh->hlock));
+	assert_spin_locked(&(node->guard));
 
 	if (unlikely(trylock_wait(atom, txnh, node) != 0)) {
 		atom_dec_and_unlock(atom);
@@ -2937,7 +2932,7 @@ static int capture_assign_block(txn_hand
 		capture_assign_block_nolock(atom, node);
 
 		/* Success holds onto jnode & txnh locks.  Unlock atom. */
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 		return 0;
 	}
 }
@@ -2979,7 +2974,7 @@ capture_assign_txnh(jnode * node, txn_ha
 	 * modify it somehow depending on its ->stage. In the simplest case,
 	 * where ->stage is ASTAGE_CAPTURE_FUSE, txnh should be added to
 	 * atom's list. Problem is that atom spin lock nests outside of jnode
-	 * and transaction handle ones. So, we cannot just LOCK_ATOM here.
+	 * and transaction handle ones. So, we cannot just call spin_lock_atom() here.
 	 *
 	 * Solutions tried here:
 	 *
@@ -2995,15 +2990,15 @@ capture_assign_txnh(jnode * node, txn_ha
 	 *
 	 */
 	if (trylock_wait(atom, txnh, node) != 0) {
-		LOCK_JNODE(node);
-		LOCK_TXNH(txnh);
+		spin_lock_jnode(node);
+		spin_lock_txnh(txnh);
 		/* NOTE-NIKITA is it at all possible that current txnh
 		 * spontaneously changes ->atom from NULL to non-NULL? */
 		if (node->atom == NULL ||
 		    txnh->atom != NULL || atom != node->atom) {
 			/* something changed. Caller have to re-decide */
-			UNLOCK_TXNH(txnh);
-			UNLOCK_JNODE(node);
+			spin_unlock_txnh(txnh);
+			spin_unlock_jnode(node);
 			atom_dec_and_unlock(atom);
 			return RETERR(-E_REPEAT);
 		} else {
@@ -3067,7 +3062,7 @@ capture_assign_txnh(jnode * node, txn_ha
 	}
 
 	/* Unlock the atom */
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 	return 0;
 }
 
@@ -3131,7 +3126,7 @@ static void wakeup_atom_waiting_list(txn
 static int wait_for_fusion(txn_atom * atom, txn_wait_links * wlinks)
 {
 	assert("nikita-3330", atom != NULL);
-	assert("nikita-3331", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 
 	/* atom->txnh_count == 1 is for waking waiters up if we are releasing
 	 * last transaction handle. */
@@ -3167,14 +3162,14 @@ capture_fuse_wait(jnode * node, txn_hand
 	assert("umka-214", atomf != NULL);
 
 	/* We do not need the node lock. */
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 
 	if ((mode & TXN_CAPTURE_NONBLOCKING) != 0) {
-		UNLOCK_TXNH(txnh);
-		UNLOCK_ATOM(atomf);
+		spin_unlock_txnh(txnh);
+		spin_unlock_atom(atomf);
 
 		if (atomh) {
-			UNLOCK_ATOM(atomh);
+			spin_unlock_atom(atomh);
 		}
 
 		return RETERR(-E_BLOCK);
@@ -3187,17 +3182,17 @@ capture_fuse_wait(jnode * node, txn_hand
 	list_add_tail(&wlinks._fwaitfor_link, &atomf->fwaitfor_list);
 	wlinks.waitfor_cb = wait_for_fusion;
 	atomic_inc(&atomf->refcount);
-	UNLOCK_ATOM(atomf);
+	spin_unlock_atom(atomf);
 
 	if (atomh) {
 		/* Add txnh to atomh's waiting list, unlock atomh. */
 		list_add_tail(&wlinks._fwaiting_link, &atomh->fwaiting_list);
 		atomic_inc(&atomh->refcount);
-		UNLOCK_ATOM(atomh);
+		spin_unlock_atom(atomh);
 	}
 
 	/* Go to sleep. */
-	UNLOCK_TXNH(txnh);
+	spin_unlock_txnh(txnh);
 
 	ret = prepare_to_sleep(wlinks._lock_stack);
 	if (ret == 0) {
@@ -3206,19 +3201,21 @@ capture_fuse_wait(jnode * node, txn_hand
 	}
 
 	/* Remove from the waitfor list. */
-	LOCK_ATOM(atomf);
+	spin_lock_atom(atomf);
 
 	list_del(&wlinks._fwaitfor_link);
 	atom_dec_and_unlock(atomf);
 
 	if (atomh) {
 		/* Remove from the waiting list. */
-		LOCK_ATOM(atomh);
+		spin_lock_atom(atomh);
 		list_del(&wlinks._fwaiting_link);
 		atom_dec_and_unlock(atomh);
 	}
-
-	assert("nikita-2186", ergo(ret, spin_jnode_is_not_locked(node)));
+#if REISER4_DEBUG
+	if (ret)
+		assert_spin_not_locked(&(node->guard));
+#endif
 	return ret;
 }
 
@@ -3257,8 +3254,8 @@ capture_init_fusion_locked(jnode * node,
 			/* A read request for a committing block can be satisfied w/o
 			   COPY-ON-CAPTURE.  Success holds onto the jnode & txnh
 			   locks. */
-			UNLOCK_ATOM(atomf);
-			UNLOCK_ATOM(atomh);
+			spin_unlock_atom(atomf);
+			spin_unlock_atom(atomh);
 			return 0;
 		} else {
 			/* Perform COPY-ON-CAPTURE.  Copy and try again.  This function
@@ -3279,8 +3276,8 @@ capture_init_fusion_locked(jnode * node,
 	       || atomf->txnh_count == 0);
 
 	/* Now release the txnh lock: only holding the atoms at this point. */
-	UNLOCK_TXNH(txnh);
-	UNLOCK_JNODE(node);
+	spin_unlock_txnh(txnh);
+	spin_unlock_jnode(node);
 
 	/* Decide which should be kept and which should be merged. */
 	if (atom_pointer_count(atomf) < atom_pointer_count(atomh)) {
@@ -3311,12 +3308,12 @@ capture_init_fusion(jnode * node, txn_ha
 			return capture_init_fusion_locked(node, txnh, mode,
 							  can_coc);
 		else {
-			UNLOCK_ATOM(node->atom);
+			spin_unlock_atom(node->atom);
 		}
 	}
 
-	UNLOCK_JNODE(node);
-	UNLOCK_TXNH(txnh);
+	spin_unlock_jnode(node);
+	spin_unlock_txnh(txnh);
 	return RETERR(-E_REPEAT);
 }
 
@@ -3333,14 +3330,16 @@ capture_fuse_jnode_lists(txn_atom *large
 	assert("umka-219", large_head != NULL);
 	assert("umka-220", small_head != NULL);
 	/* small atom should be locked also. */
-	assert("zam-968", spin_atom_is_locked(large));
+	assert_spin_locked(&(large->alock));
 
 	/* For every jnode on small's capture list... */
 	list_for_each_entry(node, small_head, capture_link) {
 		count += 1;
 
 		/* With the jnode lock held, update atom pointer. */
-		UNDER_SPIN_VOID(jnode, node, node->atom = large);
+		spin_lock_jnode(node);
+		node->atom = large;
+		spin_unlock_jnode(node);
 	}
 
 	/* Splice the lists. */
@@ -3367,7 +3366,9 @@ capture_fuse_txnh_lists(txn_atom *large,
 		count += 1;
 
 		/* With the txnh lock held, update atom pointer. */
-		UNDER_SPIN_VOID(txnh, txnh, txnh->atom = large);
+		spin_lock_txnh(txnh);
+		txnh->atom = large;
+		spin_unlock_txnh(txnh);
 	}
 
 	/* Splice the txn_handle list. */
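
UNDER_SPIN_VOID() was a spin_macros.h helper that took the named lock around a single expression and released it afterwards; the cleanup expands every use into an explicit lock/statement/unlock sequence, which keeps the lock scope visible to readers and to checkers. The expansion is mechanical, as the hunk above shows:

/* Old spin_macros.h form: lock the txnh around one statement. */
UNDER_SPIN_VOID(txnh, txnh, txnh->atom = large);

/* Explicit form introduced by this patch: */
spin_lock_txnh(txnh);
txnh->atom = large;
spin_unlock_txnh(txnh);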
@@ -3391,8 +3392,8 @@ static void capture_fuse_into(txn_atom *
 	assert("umka-224", small != NULL);
 	assert("umka-225", small != NULL);
 
-	assert("umka-299", spin_atom_is_locked(large));
-	assert("umka-300", spin_atom_is_locked(small));
+	assert_spin_locked(&(large->alock));
+	assert_spin_locked(&(small->alock));
 
 	assert("jmacd-201", atom_isopen(small));
 	assert("jmacd-202", atom_isopen(large));
@@ -3427,11 +3428,11 @@ static void capture_fuse_into(txn_atom *
 		list_for_each_entry(node, &prot_list->nodes, capture_link) {
 			zcount += 1;
 
-			LOCK_JNODE(node);
+			spin_lock_jnode(node);
 			assert("nikita-3375", node->atom == small);
 			/* With the jnode lock held, update atom pointer. */
 			node->atom = large;
-			UNLOCK_JNODE(node);
+			spin_unlock_jnode(node);
 		}
 	}
 	/* Splice the lists of lists. */
@@ -3521,7 +3522,7 @@ static void capture_fuse_into(txn_atom *
 	wakeup_atom_waiting_list(small);
 
 	/* Unlock atoms */
-	UNLOCK_ATOM(large);
+	spin_unlock_atom(large);
 	atom_dec_and_unlock(small);
 }
 
@@ -3534,7 +3535,7 @@ void protected_jnodes_init(protected_jno
 	atom = get_current_atom_locked();
 	list_add(&list->inatom, &atom->protected);
 	INIT_LIST_HEAD(&list->nodes);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 void protected_jnodes_done(protected_jnodes *list)
@@ -3545,7 +3546,7 @@ void protected_jnodes_done(protected_jno
 
 	atom = get_current_atom_locked();
 	list_del_init(&list->inatom);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 }
 
 /* TXNMGR STUFF */
@@ -3647,10 +3648,10 @@ static void fake_jload(jnode * node)
 }
 
 /* for now - refuse to copy-on-capture any suspicious nodes (WRITEBACK, DIRTY, FLUSH_QUEUED) */
-static int check_capturable(const jnode * node, const txn_atom * atom)
+static int check_capturable(jnode * node, const txn_atom * atom)
 {
-	assert("vs-1429", spin_jnode_is_locked(node));
-	assert("vs-1487", check_spin_is_locked(&scan_lock));
+	assert_spin_locked(&(node->guard));
+	assert_spin_locked(&scan_lock);
 
 	if (JF_ISSET(node, JNODE_WRITEBACK)) {
 		reiser4_stat_inc(coc.writeback);
@@ -3723,8 +3724,8 @@ static int copy_on_capture_clean(jnode *
 {
 	int result;
 
-	assert("vs-1625", spin_atom_is_locked(atom));
-	assert("vs-1432", spin_jnode_is_locked(node));
+	assert_spin_locked(&(atom->alock));
+	assert_spin_locked(&(node->guard));
 	assert("vs-1627", !JF_ISSET(node, JNODE_WRITEBACK));
 
 	spin_lock(&scan_lock);
@@ -3735,8 +3736,8 @@ static int copy_on_capture_clean(jnode *
 		reiser4_stat_inc(coc.ok_clean);
 	}
 	spin_unlock(&scan_lock);
-	UNLOCK_JNODE(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_jnode(node);
+	spin_unlock_atom(atom);
 
 	return result;
 }
@@ -3744,11 +3745,11 @@ static int copy_on_capture_clean(jnode *
 static void lock_two_nodes(jnode * node1, jnode * node2)
 {
 	if (node1 > node2) {
-		LOCK_JNODE(node2);
-		LOCK_JNODE(node1);
+		spin_lock_jnode(node2);
+		spin_lock_jnode(node1);
 	} else {
-		LOCK_JNODE(node1);
-		LOCK_JNODE(node2);
+		spin_lock_jnode(node1);
+		spin_lock_jnode(node2);
 	}
 }
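
lock_two_nodes() applies the usual deadlock-avoidance rule for taking two locks of the same class: always acquire them in one global order, here ascending jnode address, so two threads locking the same pair can never each hold one lock while waiting for the other. The same rule in generic form (lock_pair() is an illustrative name):

/* Sketch: acquire two same-class spinlocks in address order. */
static void lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);	/* degenerate case: one lock */
		return;
	}
	if (a > b) {
		spinlock_t *tmp = a;	/* lower address goes first */

		a = b;
		b = tmp;
	}
	spin_lock(a);
	spin_lock(b);
}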
 
@@ -3759,12 +3760,13 @@ static int copy_on_capture_nopage(jnode 
 	int result;
 	jnode *copy;
 
-	assert("vs-1432", spin_atom_is_locked(atom));
-	assert("vs-1432", spin_jnode_is_locked(node));
+	assert_spin_locked(&(atom->alock));
+	assert_spin_locked(&(node->guard));
 
 	jref(node);
-	UNLOCK_JNODE(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_jnode(node);
+	spin_unlock_atom(atom);
 	assert("nikita-3475", schedulable());
 	copy = jclone(node);
 	if (IS_ERR(copy)) {
@@ -3772,7 +3774,7 @@ static int copy_on_capture_nopage(jnode 
 		return PTR_ERR(copy);
 	}
 
-	LOCK_ATOM(atom);
+	spin_lock_atom(atom);
 	lock_two_nodes(node, copy);
 	spin_lock(&scan_lock);
 
@@ -3791,9 +3793,9 @@ static int copy_on_capture_nopage(jnode 
 	}
 
 	spin_unlock(&scan_lock);
-	UNLOCK_JNODE(node);
-	UNLOCK_JNODE(copy);
-	UNLOCK_ATOM(atom);
+	spin_unlock_jnode(node);
+	spin_unlock_jnode(copy);
+	spin_unlock_atom(atom);
 	assert("nikita-3476", schedulable());
 	jput(copy);
 	assert("nikita-3477", schedulable());
@@ -3822,7 +3824,7 @@ handle_coc(jnode * node, jnode * copy, s
 	 * free space may not be re-used in insertion.
 	 */
 	radix_tree_preload(GFP_KERNEL);
-	LOCK_ATOM(atom);
+	spin_lock_atom(atom);
 	lock_two_nodes(node, copy);
 	spin_lock(&scan_lock);
 
@@ -3856,9 +3858,9 @@ handle_coc(jnode * node, jnode * copy, s
 
 		assert("vs-1419", page_count(new_page) >= 3);
 		spin_unlock(&scan_lock);
-		UNLOCK_JNODE(node);
-		UNLOCK_JNODE(copy);
-		UNLOCK_ATOM(atom);
+		spin_unlock_jnode(node);
+		spin_unlock_jnode(copy);
+		spin_unlock_atom(atom);
 		radix_tree_preload_end();
 		unlock_page(page);
 
@@ -3879,9 +3881,9 @@ handle_coc(jnode * node, jnode * copy, s
 		ON_TRACE(TRACE_CAPTURE_COPY, "copy on capture done\n");
 	} else {
 		spin_unlock(&scan_lock);
-		UNLOCK_JNODE(node);
-		UNLOCK_JNODE(copy);
-		UNLOCK_ATOM(atom);
+		spin_unlock_jnode(node);
+		spin_unlock_jnode(copy);
+		spin_unlock_atom(atom);
 		radix_tree_preload_end();
 		kunmap(page);
 		unlock_page(page);
@@ -3906,8 +3908,8 @@ static int real_copy_on_capture(jnode * 
 	page = node->pg;
 	page_cache_get(page);
 	jref(node);
-	UNLOCK_JNODE(node);
-	UNLOCK_ATOM(atom);
+	spin_unlock_jnode(node);
+	spin_unlock_atom(atom);
 
 	/* prevent node from eflushing */
 	result = jload(node);
@@ -3951,8 +3953,8 @@ static int create_copy_and_replace(jnode
 	if (JF_ISSET(node, JNODE_CCED)) {
 		/* node is under copy on capture already */
 		reiser4_stat_inc(coc.coc_race);
-		UNLOCK_JNODE(node);
-		UNLOCK_ATOM(atom);
+		spin_unlock_jnode(node);
+		spin_unlock_atom(atom);
 		return RETERR(-E_WAIT);
 	}
 
@@ -3962,8 +3964,8 @@ static int create_copy_and_replace(jnode
 	ON_TRACE(TRACE_CAPTURE_COPY, "copy_on_capture: node %p, atom %p..",
 		 node, atom);
 	if (JF_ISSET(node, JNODE_EFLUSH)) {
-		UNLOCK_JNODE(node);
-		UNLOCK_ATOM(atom);
+		spin_unlock_jnode(node);
+		spin_unlock_atom(atom);
 
 		reiser4_stat_inc(coc.eflush);
 		ON_TRACE(TRACE_CAPTURE_COPY, "eflushed\n");
@@ -3988,8 +3990,8 @@ static int create_copy_and_replace(jnode
 		assert("vs-1640", inode != NULL);
 		assert("vs-1641", page != NULL);
 		assert("vs-1642", page->mapping != NULL);
-		UNLOCK_JNODE(node);
-		UNLOCK_ATOM(atom);
+		spin_unlock_jnode(node);
+		spin_unlock_atom(atom);
 
 		down_write(&reiser4_inode_data(inode)->coc_sem);
 		lock_page(page);
@@ -4015,8 +4017,8 @@ static int create_copy_and_replace(jnode
 		}
 		pte_chain_unlock(page);
 		unlock_page(page);
-		LOCK_ATOM(atom);
-		LOCK_JNODE(node);
+		spin_lock_atom(atom);
+		spin_lock_jnode(node);
 	} else
 		inode = NULL;
 
@@ -4057,9 +4059,9 @@ capture_copy(jnode * node, txn_handle * 
 
 		/* The txnh and its (possibly NULL) atom's locks are not needed
 		   at this point. */
-		UNLOCK_TXNH(txnh);
+		spin_unlock_txnh(txnh);
 		if (atomh != NULL)
-			UNLOCK_ATOM(atomh);
+			spin_unlock_atom(atomh);
 
 		/* create a copy of node, detach node from atom and attach its copy
 		   instead */
@@ -4067,7 +4069,7 @@ capture_copy(jnode * node, txn_handle * 
 		result = create_copy_and_replace(node, atomf);
 		assert("nikita-3474", schedulable());
 		preempt_point();
-		LOCK_ATOM(atomf);
+		spin_lock_atom(atomf);
 		atom_dec_and_unlock(atomf);
 		preempt_point();
 
@@ -4108,9 +4110,9 @@ void uncapture_block(jnode * node)
 	assert("umka-228", atom != NULL);
 
 	assert("jmacd-1021", node->atom == atom);
-	assert("jmacd-1022", spin_jnode_is_locked(node));
+	assert_spin_locked(&(node->guard));
 #if REISER4_COPY_ON_CAPTURE
-	assert("jmacd-1023", spin_atom_is_locked(atom));
+	assert_spin_locked(&(atom->alock));
 #else
 	assert("jmacd-1023", atom_is_protected(atom));
 #endif
@@ -4134,7 +4136,7 @@ void uncapture_block(jnode * node)
 	ON_DEBUG(count_jnode(atom, node, NODE_LIST(node), NOT_CAPTURED, 1));
 	node->atom = NULL;
 
-	UNLOCK_JNODE(node);
+	spin_unlock_jnode(node);
 	LOCK_CNT_DEC(t_refs);
 }
 
@@ -4143,9 +4145,8 @@ void uncapture_block(jnode * node)
    transaction. @atom and @node are spin locked */
 void insert_into_atom_ovrwr_list(txn_atom * atom, jnode * node)
 {
-	assert("zam-538", spin_atom_is_locked(atom)
-	       || atom->stage >= ASTAGE_PRE_COMMIT);
-	assert("zam-539", spin_jnode_is_locked(node));
+	assert("zam-538", atom_is_protected(atom));
+	assert_spin_locked(&(node->guard));
 	assert("zam-899", JF_ISSET(node, JNODE_OVRWR));
 	assert("zam-543", node->atom == NULL);
 	assert("vs-1433", !jnode_is_unformatted(node) && !jnode_is_znode(node));
@@ -4201,10 +4202,10 @@ reiser4_block_nr txnmgr_count_deleted_bl
 
 	spin_lock_txnmgr(tmgr);
 	list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
-		LOCK_ATOM(atom);
+		spin_lock_atom(atom);
 		blocknr_set_iterator(atom, &atom->delete_set,
 				     count_deleted_blocks_actor, &result, 0);
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 	spin_unlock_txnmgr(tmgr);
 
diff -puN fs/reiser4/txnmgr.h~reiser4-spinlock-cleanup fs/reiser4/txnmgr.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/txnmgr.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.720988250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/txnmgr.h	2005-10-20 14:01:52.844996000 +0400
@@ -8,7 +8,6 @@
 #define __REISER4_TXNMGR_H__
 
 #include "forward.h"
-#include "spin_macros.h"
 #include "dformat.h"
 
 #include <linux/fs.h>
@@ -210,7 +209,7 @@ struct blocknr_set {
 struct txn_atom {
 	/* The spinlock protecting the atom, held during fusion and various other state
 	   changes. */
-	reiser4_spin_data alock;
+	spinlock_t alock;
 
 	/* The atom's reference counter, increasing (in case of a duplication
 	   of an existing reference or when we are sure that some other
@@ -343,7 +342,7 @@ typedef struct protected_jnodes {
    the system to a txn_atom. */
 struct txn_handle {
 	/* Spinlock protecting ->atom pointer */
-	reiser4_spin_data hlock;
+	spinlock_t hlock;
 
 	/* Flags for controlling commit_txnh() behavior */
 	/* from txn_handle_flags_t */
@@ -362,7 +361,7 @@ struct txn_handle {
 /* The transaction manager: one is contained in the reiser4_super_info_data */
 struct txn_mgr {
 	/* A spinlock protecting the atom list, id_count, flush_control */
-	reiser4_spin_data tmgr_lock;
+	spinlock_t tmgr_lock;
 
 	/* List of atoms. */
 	struct list_head atoms_list;
@@ -440,7 +439,24 @@ extern int uncapture_inode(struct inode 
 
 extern txn_atom *get_current_atom_locked_nocheck(void);
 
-#define atom_is_protected(atom) (spin_atom_is_locked(atom) || (atom)->stage >= ASTAGE_PRE_COMMIT)
+#if REISER4_DEBUG
+
+/**
+ * atom_is_protected - make sure that nobody but us can do anything with atom
+ * @atom: atom to be checked
+ *
+ * This is used to assert that the atom has either entered a commit stage or
+ * is spin-locked.
+ */
+static inline int atom_is_protected(txn_atom *atom)
+{
+	if (atom->stage >= ASTAGE_PRE_COMMIT)
+		return 1;
+	assert_spin_locked(&(atom->alock));
+	return 1;
+}
+
+#endif
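
Note that atom_is_protected() unconditionally returns 1: the real check is the assert_spin_locked(), which fires when the atom is neither in a commit stage nor locked. Defining it only under REISER4_DEBUG is safe because every caller wraps it in assert(), and assert() compiles to nothing in non-debug builds, as in txnmgr.c above:

assert("zam-1018", atom_is_protected(atom));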
 
 /* Get the current atom and spinlock it if current atom present. May not return NULL */
 static inline txn_atom *get_current_atom_locked(void)
@@ -487,31 +503,123 @@ extern int blocknr_set_iterator(txn_atom
 extern void flush_init_atom(txn_atom * atom);
 extern void flush_fuse_queues(txn_atom * large, txn_atom * small);
 
-/* INLINE FUNCTIONS */
+static inline void spin_lock_atom(txn_atom *atom)
+{
+	/* check that spinlocks of lower priorities are not held */
+	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(spin_locked_jnode) &&
+		    LOCK_CNT_NIL(rw_locked_zlock) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(rw_locked_tree)));
+
+	spin_lock(&(atom->alock));
+
+	LOCK_CNT_INC(spin_locked_atom);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline int spin_trylock_atom(txn_atom *atom)
+{
+	if (spin_trylock(&(atom->alock))) {
+		LOCK_CNT_INC(spin_locked_atom);
+		LOCK_CNT_INC(spin_locked);
+		return 1;
+	}
+	return 0;
+}
+
+static inline void spin_unlock_atom(txn_atom *atom)
+{
+	assert_spin_locked(&(atom->alock));
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_atom));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(spin_locked_atom);
+	LOCK_CNT_DEC(spin_locked);
+
+	spin_unlock(&(atom->alock));
+}
+
+static inline void spin_lock_txnh(txn_handle *txnh)
+{
+	/* check that spinlocks of lower priorities are not held */
+	assert("", (LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(rw_locked_zlock) &&
+		    LOCK_CNT_NIL(rw_locked_tree)));
+
+	spin_lock(&(txnh->hlock));
+
+	LOCK_CNT_INC(spin_locked_txnh);
+	LOCK_CNT_INC(spin_locked);
+}
+
+static inline int spin_trylock_txnh(txn_handle *txnh)
+{
+	if (spin_trylock(&(txnh->hlock))) {
+		LOCK_CNT_INC(spin_locked_txnh);
+		LOCK_CNT_INC(spin_locked);
+		return 1;
+	}
+	return 0;
+}
+
+static inline void spin_unlock_txnh(txn_handle *txnh)
+{
+	assert_spin_locked(&(txnh->hlock));
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_txnh));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(spin_locked_txnh);
+	LOCK_CNT_DEC(spin_locked);
+
+	spin_unlock(&(txnh->hlock));
+}
+
+#define spin_ordering_pred_txnmgr(tmgr)		\
+	( LOCK_CNT_NIL(spin_locked_atom) &&	\
+	  LOCK_CNT_NIL(spin_locked_txnh) &&	\
+	  LOCK_CNT_NIL(spin_locked_jnode) &&	\
+	  LOCK_CNT_NIL(rw_locked_zlock) &&	\
+	  LOCK_CNT_NIL(rw_locked_dk) &&		\
+	  LOCK_CNT_NIL(rw_locked_tree) )
+
+static inline void spin_lock_txnmgr(txn_mgr *mgr)
+{
+	/* check that spinlocks of lower priorities are not held */
+	assert("", spin_ordering_pred_txnmgr(mgr));
+
+	spin_lock(&(mgr->tmgr_lock));
+
+	LOCK_CNT_INC(spin_locked_txnmgr);
+	LOCK_CNT_INC(spin_locked);
+}
 
-#define spin_ordering_pred_atom(atom)				\
-	( ( lock_counters() -> spin_locked_txnh == 0 ) &&	\
-	  ( lock_counters() -> spin_locked_jnode == 0 ) &&	\
-	  ( lock_counters() -> rw_locked_zlock == 0 ) &&	\
-	  ( lock_counters() -> rw_locked_dk == 0 ) &&		\
-	  ( lock_counters() -> rw_locked_tree == 0 ) )
-
-#define spin_ordering_pred_txnh(txnh)				\
-	( ( lock_counters() -> rw_locked_dk == 0 ) &&		\
-	  ( lock_counters() -> rw_locked_zlock == 0 ) &&	\
-	  ( lock_counters() -> rw_locked_tree == 0 ) )
-
-#define spin_ordering_pred_txnmgr(tmgr) 			\
-	( ( lock_counters() -> spin_locked_atom == 0 ) &&	\
-	  ( lock_counters() -> spin_locked_txnh == 0 ) &&	\
-	  ( lock_counters() -> spin_locked_jnode == 0 ) &&	\
-	  ( lock_counters() -> rw_locked_zlock == 0 ) &&	\
-	  ( lock_counters() -> rw_locked_dk == 0 ) &&		\
-	  ( lock_counters() -> rw_locked_tree == 0 ) )
-
-SPIN_LOCK_FUNCTIONS(atom, txn_atom, alock);
-SPIN_LOCK_FUNCTIONS(txnh, txn_handle, hlock);
-SPIN_LOCK_FUNCTIONS(txnmgr, txn_mgr, tmgr_lock);
+static inline int spin_trylock_txnmgr(txn_mgr *mgr)
+{
+	if (spin_trylock(&(mgr->tmgr_lock))) {
+		LOCK_CNT_INC(spin_locked_txnmgr);
+		LOCK_CNT_INC(spin_locked);
+		return 1;
+	}
+	return 0;
+}
+
+static inline void spin_unlock_txnmgr(txn_mgr *mgr)
+{
+	assert_spin_locked(&(mgr->tmgr_lock));
+	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_txnmgr));
+	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
+
+	LOCK_CNT_DEC(spin_locked_txnmgr);
+	LOCK_CNT_DEC(spin_locked);
+
+	spin_unlock(&(mgr->tmgr_lock));
+}
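
Together these wrappers reproduce the lock-ordering discipline of the removed spin_macros.h on top of plain spinlock_t: each lock function first asserts that no finer-grained lock is already held by the current thread, then bumps the per-thread LOCK_CNT counters that the matching unlock decrements, so in debug builds an ordering violation trips immediately at acquisition time. A short sketch, assuming the declarations above:

/* Correct nesting: txnmgr outside atom outside txnh. */
spin_lock_txnmgr(mgr);
spin_lock_atom(atom);
spin_lock_txnh(txnh);
spin_unlock_txnh(txnh);
spin_unlock_atom(atom);
spin_unlock_txnmgr(mgr);

/* Inverted nesting: with REISER4_DEBUG set, the
 * LOCK_CNT_NIL(spin_locked_txnh) assertion in spin_lock_atom()
 * fires here, pinpointing the violation. */
spin_lock_txnh(txnh);
spin_lock_atom(atom);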
 
 typedef enum {
 	FQ_IN_USE = 0x1
@@ -532,7 +640,7 @@ struct flush_queue {
 	   easier.  See field in atom struct for description of list. */
 	struct list_head alink;
 	/* A spinlock to protect changes of fq state and fq->atom pointer */
-	reiser4_spin_data guard;
+	spinlock_t guard;
 	/* flush_queue state: [in_use | ready] */
 	flush_queue_state_t state;
 	/* A list which contains queued nodes, queued nodes are removed from any
diff -puN fs/reiser4/vfs_ops.h~reiser4-spinlock-cleanup fs/reiser4/vfs_ops.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/vfs_ops.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.724988500 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/vfs_ops.h	2005-10-20 14:01:52.844996000 +0400
@@ -35,7 +35,7 @@ extern int reiser4_del_nlink(struct inod
 extern int reiser4_start_up_io(struct page *page);
 extern void reiser4_clear_page_dirty(struct page *);
 extern void reiser4_throttle_write(struct inode *);
-ON_DEBUG(int jnode_is_releasable(jnode *));
+extern int jnode_is_releasable(jnode *);
 
 #define CAPTURE_APAGE_BURST (1024l)
 void writeout(struct super_block *, struct writeback_control *);
diff -puN fs/reiser4/wander.c~reiser4-spinlock-cleanup fs/reiser4/wander.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/wander.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.728988750 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/wander.c	2005-10-20 14:01:52.848996250 +0400
@@ -550,10 +550,10 @@ static void undo_bio(struct bio *bio)
 		pg = bio->bi_io_vec[i].bv_page;
 		ClearPageWriteback(pg);
 		node = jprivate(pg);
-		LOCK_JNODE(node);
+		spin_lock_jnode(node);
 		JF_CLR(node, JNODE_WRITEBACK);
 		JF_SET(node, JNODE_DIRTY);
-		UNLOCK_JNODE(node);
+		spin_unlock_jnode(node);
 	}
 	bio_put(bio);
 }
@@ -648,22 +648,22 @@ static int get_overwrite_set(struct comm
 				if (IS_ERR(sj))
 					return PTR_ERR(sj);
 
-				LOCK_ATOM(ch->atom);
-				LOCK_JNODE(sj);
+				spin_lock_atom(ch->atom);
+				spin_lock_jnode(sj);
 				JF_SET(sj, JNODE_OVRWR);
 				insert_into_atom_ovrwr_list(ch->atom, sj);
-				UNLOCK_JNODE(sj);
-				UNLOCK_ATOM(ch->atom);
+				spin_unlock_jnode(sj);
+				spin_unlock_atom(ch->atom);
 
 				/* jload it as the rest of overwrite set */
 				jload_gfp(sj, GFP_KERNEL, 0);
 
 				ch->overwrite_set_size++;
 			}
-			LOCK_ATOM(ch->atom);
-			LOCK_JNODE(cur);
+			spin_lock_atom(ch->atom);
+			spin_lock_jnode(cur);
 			uncapture_block(cur);
-			UNLOCK_ATOM(ch->atom);
+			spin_unlock_atom(ch->atom);
 			jput(cur);
 
 			spin_lock(&scan_lock);
@@ -793,7 +793,7 @@ write_jnodes_to_disk_extent(capture_list
 
 			lock_and_wait_page_writeback(pg);
 
-			LOCK_JNODE(cur);
+			spin_lock_jnode(cur);
 			assert("nikita-3553", jnode_page(cur) == pg);
 			assert("nikita-3554", jprivate(pg) == cur);
 
@@ -803,7 +803,7 @@ write_jnodes_to_disk_extent(capture_list
 			if (!JF_ISSET(cur, JNODE_WRITEBACK)) {
 				assert("nikita-3165",
 				       !jnode_is_releasable(cur));
-				UNLOCK_JNODE(cur);
+				spin_unlock_jnode(cur);
 				if (!bio_add_page(bio,
 						  pg, super->s_blocksize, 0)) {
 					/*
@@ -815,11 +815,11 @@ write_jnodes_to_disk_extent(capture_list
 					break;
 				}
 
-				LOCK_JNODE(cur);
+				spin_lock_jnode(cur);
 				JF_SET(cur, JNODE_WRITEBACK);
 				JF_CLR(cur, JNODE_DIRTY);
 				ON_DEBUG(cur->written++);
-				UNLOCK_JNODE(cur);
+				spin_unlock_jnode(cur);
 
 				SetPageWriteback(pg);
 				if (for_reclaim)
@@ -845,7 +845,7 @@ write_jnodes_to_disk_extent(capture_list
 				   encountered this CC jnode. Do not submit i/o
 				   for it */
 				assert("zam-912", JF_ISSET(cur, JNODE_CC));
-				UNLOCK_JNODE(cur);
+				spin_unlock_jnode(cur);
 			}
 			unlock_page(pg);
 
@@ -990,7 +990,7 @@ add_region_to_wmap(jnode * cur, int len,
 			return ret;
 		}
 
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 
 		cur = capture_list_next(cur);
 		++block;
@@ -1131,17 +1131,17 @@ static int get_overwrite_set(struct comm
 				if (IS_ERR(sj))
 					return PTR_ERR(sj);
 
-				LOCK_JNODE(sj);
+				spin_lock_jnode(sj);
 				JF_SET(sj, JNODE_OVRWR);
 				insert_into_atom_ovrwr_list(ch->atom, sj);
-				UNLOCK_JNODE(sj);
+				spin_unlock_jnode(sj);
 
 				/* jload it as the rest of overwrite set */
 				jload_gfp(sj, GFP_KERNEL, 0);
 
 				ch->overwrite_set_size++;
 			}
-			LOCK_JNODE(cur);
+			spin_lock_jnode(cur);
 			uncapture_block(cur);
 			jput(cur);
 
@@ -1253,7 +1253,6 @@ write_jnodes_to_disk_extent(struct list_
 		bio->bi_sector = block * (super->s_blocksize >> 9);
 		for (nr_used = 0, i = 0; i < nr_blocks; i++) {
 			struct page *pg;
-			ON_DEBUG(int jnode_is_releasable(jnode *));
 
 			pg = jnode_page(cur);
 			assert("zam-573", pg != NULL);
@@ -1272,7 +1271,7 @@ write_jnodes_to_disk_extent(struct list_
 				break;
 			}
 
-			LOCK_JNODE(cur);
+			spin_lock_jnode(cur);
 			assert("nikita-3166",
 			       pg->mapping == jnode_get_mapping(cur));
 			assert("zam-912", !JF_ISSET(cur, JNODE_WRITEBACK));
@@ -1280,7 +1279,7 @@ write_jnodes_to_disk_extent(struct list_
 			JF_SET(cur, JNODE_WRITEBACK);
 			JF_CLR(cur, JNODE_DIRTY);
 			ON_DEBUG(cur->written++);
-			UNLOCK_JNODE(cur);
+			spin_unlock_jnode(cur);
 
 			set_page_writeback(pg);
 			if (for_reclaim)
@@ -1393,7 +1392,7 @@ add_region_to_wmap(jnode * cur, int len,
 			return ret;
 		}
 
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 
 		cur = list_entry(cur->capture_link.next, jnode, capture_link);
 		++block;
@@ -1537,7 +1536,7 @@ static int alloc_tx(struct commit_handle
 		atom = get_current_atom_locked();
 		blocknr_set_iterator(atom, &atom->wandered_map,
 				     &store_wmap_actor, &params, 0);
-		UNLOCK_ATOM(atom);
+		spin_unlock_atom(atom);
 	}
 
 	{ /* relse all jnodes from tx_list */
@@ -1594,11 +1593,11 @@ int reiser4_write_logs(long *nr_submitte
 	 * early flushed jnodes with CREATED bit are transferred to the
 	 * overwrite list. */
 	invalidate_list(ATOM_CLEAN_LIST(atom));
-	LOCK_ATOM(atom);
+	spin_lock_atom(atom);
 	/* There might be waiters for the relocate nodes which we have
 	 * released, wake them up. */
 	atom_send_event(atom);
-	UNLOCK_ATOM(atom);
+	spin_unlock_atom(atom);
 
 	if (REISER4_DEBUG) {
 		int level;
@@ -1650,7 +1649,7 @@ int reiser4_write_logs(long *nr_submitte
 			goto up_and_ret;
 		}
 
-		UNLOCK_ATOM(fq->atom);
+		spin_unlock_atom(fq->atom);
 
 		do {
 			ret = alloc_wandered_blocks(&ch, fq);
@@ -1678,7 +1677,9 @@ int reiser4_write_logs(long *nr_submitte
 	if ((ret = update_journal_header(&ch)))
 		goto up_and_ret;
 
-	UNDER_SPIN_VOID(atom, atom, atom_set_stage(atom, ASTAGE_POST_COMMIT));
+	spin_lock_atom(atom);
+	atom_set_stage(atom, ASTAGE_POST_COMMIT);
+	spin_unlock_atom(atom);
 
 	post_commit_hook();
 
@@ -1694,7 +1695,7 @@ int reiser4_write_logs(long *nr_submitte
 			goto up_and_ret;
 		}
 
-		UNLOCK_ATOM(fq->atom);
+		spin_unlock_atom(fq->atom);
 
 		ret =
 		    write_jnode_list(ch.overwrite_set, fq, NULL,
diff -puN fs/reiser4/znode.c~reiser4-spinlock-cleanup fs/reiser4/znode.c
--- linux-2.6.14-rc4-mm1/fs/reiser4/znode.c~reiser4-spinlock-cleanup	2005-10-20 14:01:52.732989000 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/znode.c	2005-10-20 14:01:52.848996250 +0400
@@ -235,7 +235,7 @@ int znodes_tree_init(reiser4_tree * tree
 	int result;
 	assert("umka-050", tree != NULL);
 
-	rw_dk_init(tree);
+	rwlock_init(&tree->dk_lock);
 
 	result = z_hash_init(&tree->zhash_table, REISER4_ZNODE_HASH_TABLE_SIZE);
 	if (result != 0)
@@ -341,7 +341,7 @@ void znode_remove(znode * node /* znode 
 {
 	assert("nikita-2108", node != NULL);
 	assert("nikita-470", node->c_count == 0);
-	assert("zam-879", rw_tree_is_write_locked(tree));
+	assert_rw_write_locked(&(tree->tree_lock));
 
 	/* remove reference to this znode from cbk cache */
 	cbk_cache_invalidate(node, tree);
@@ -385,7 +385,7 @@ int znode_rehash(znode * node /* node to
 	oldtable = znode_get_htable(node);
 	newtable = get_htable(tree, new_block_nr);
 
-	WLOCK_TREE(tree);
+	write_lock_tree(tree);
 	/* remove znode from hash-table */
 	z_hash_remove_rcu(oldtable, node);
 
@@ -398,7 +398,7 @@ int znode_rehash(znode * node /* node to
 
 	/* insert it into hash */
 	z_hash_insert_rcu(newtable, node);
-	WUNLOCK_TREE(tree);
+	write_unlock_tree(tree);
 	return 0;
 }
 
@@ -516,7 +516,7 @@ znode *zget(reiser4_tree * tree,
 		ZJNODE(result)->key.z = *blocknr;
 		result->level = level;
 
-		WLOCK_TREE(tree);
+		write_lock_tree(tree);
 
 		shadow = z_hash_find_index(zth, hashi, blocknr);
 		if (unlikely(shadow != NULL && !ZF_ISSET(shadow, JNODE_RIP))) {
@@ -533,7 +533,7 @@ znode *zget(reiser4_tree * tree,
 
 		add_x_ref(ZJNODE(result));
 
-		WUNLOCK_TREE(tree);
+		write_unlock_tree(tree);
 	}
 #if REISER4_DEBUG
 	if (!blocknr_is_fake(blocknr) && *blocknr != 0)
@@ -666,7 +666,7 @@ unsigned znode_free_space(znode * node /
 reiser4_key *znode_get_rd_key(znode * node /* znode to query */ )
 {
 	assert("nikita-958", node != NULL);
-	assert("nikita-1661", rw_dk_is_locked(znode_get_tree(node)));
+	assert_rw_locked(&(znode_get_tree(node)->dk_lock));
 	assert("nikita-3067", LOCK_CNT_GTZ(rw_locked_dk));
 	assert("nikita-30671", node->rd_key_version != 0);
 	return &node->rd_key;
@@ -676,7 +676,7 @@ reiser4_key *znode_get_rd_key(znode * no
 reiser4_key *znode_get_ld_key(znode * node /* znode to query */ )
 {
 	assert("nikita-974", node != NULL);
-	assert("nikita-1662", rw_dk_is_locked(znode_get_tree(node)));
+	assert_rw_locked(&(znode_get_tree(node)->dk_lock));
 	assert("nikita-3068", LOCK_CNT_GTZ(rw_locked_dk));
 	assert("nikita-30681", node->ld_key_version != 0);
 	return &node->ld_key;
@@ -690,7 +690,7 @@ reiser4_key *znode_set_rd_key(znode * no
 {
 	assert("nikita-2937", node != NULL);
 	assert("nikita-2939", key != NULL);
-	assert("nikita-2938", rw_dk_is_write_locked(znode_get_tree(node)));
+	assert_rw_write_locked(&(znode_get_tree(node)->dk_lock));
 	assert("nikita-3069", LOCK_CNT_GTZ(write_locked_dk));
 	assert("nikita-2944",
 	       znode_is_any_locked(node) ||
@@ -709,7 +709,7 @@ reiser4_key *znode_set_ld_key(znode * no
 {
 	assert("nikita-2940", node != NULL);
 	assert("nikita-2941", key != NULL);
-	assert("nikita-2942", rw_dk_is_write_locked(znode_get_tree(node)));
+	assert_rw_write_locked(&(znode_get_tree(node)->dk_lock));
 	assert("nikita-3070", LOCK_CNT_GTZ(write_locked_dk));
 	assert("nikita-2943",
 	       znode_is_any_locked(node) || keyeq(&node->ld_key, min_key()));
@@ -735,11 +735,15 @@ int znode_contains_key(znode * node /* z
 int znode_contains_key_lock(znode * node /* znode to look in */ ,
 			    const reiser4_key * key /* key to look for */ )
 {
+	int result;
+
 	assert("umka-056", node != NULL);
 	assert("umka-057", key != NULL);
 
-	return UNDER_RW(dk, znode_get_tree(node),
-			read, znode_contains_key(node, key));
+	read_lock_dk(znode_get_tree(node));
+	result = znode_contains_key(node, key);
+	read_unlock_dk(znode_get_tree(node));
+	return result;
 }
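
The rwlock helper gets the same treatment: UNDER_RW(dk, tree, read, expr) evaluated expr with the dk lock read-held and yielded its value, so the expansion needs a local variable to carry the result out of the locked region:

/* Old spin_macros.h form: */
return UNDER_RW(dk, znode_get_tree(node), read,
		znode_contains_key(node, key));

/* Explicit form introduced above: */
read_lock_dk(znode_get_tree(node));
result = znode_contains_key(node, key);
read_unlock_dk(znode_get_tree(node));
return result;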
 
 /* get parent pointer, assuming tree is not locked */
@@ -798,7 +802,12 @@ int znode_just_created(const znode * nod
 /* obtain updated ->znode_epoch. See seal.c for description. */
 __u64 znode_build_version(reiser4_tree * tree)
 {
-	return UNDER_SPIN(epoch, tree, ++tree->znode_epoch);
+	__u64 result;
+
+	spin_lock(&tree->epoch_lock);
+	result = ++tree->znode_epoch;
+	spin_unlock(&tree->epoch_lock);
+	return result;
 }
 
 void init_load_count(load_count * dh)
@@ -975,7 +984,7 @@ static int znode_invariant_f(const znode
 }
 
 /* debugging aid: check znode invariant and panic if it doesn't hold */
-int znode_invariant(const znode * node /* znode to check */ )
+int znode_invariant(znode * node /* znode to check */ )
 {
 	char const *failed_msg;
 	int result;
@@ -983,85 +992,18 @@ int znode_invariant(const znode * node /
 	assert("umka-063", node != NULL);
 	assert("umka-064", current_tree != NULL);
 
-	spin_lock_znode((znode *) node);
-	RLOCK_TREE(znode_get_tree(node));
+	spin_lock_znode(node);
+	read_lock_tree(znode_get_tree(node));
 	result = znode_invariant_f(node, &failed_msg);
 	if (!result) {
 		/* print_znode("corrupted node", node); */
 		warning("jmacd-555", "Condition %s failed", failed_msg);
 	}
-	RUNLOCK_TREE(znode_get_tree(node));
-	spin_unlock_znode((znode *) node);
+	read_unlock_tree(znode_get_tree(node));
+	spin_unlock_znode(node);
 	return result;
 }
 
-/* debugging aid: output human readable information about @node */
-static void info_znode(const char *prefix /* prefix to print */ ,
-		       const znode * node /* node to print */ )
-{
-	if (node == NULL) {
-		return;
-	}
-	info_jnode(prefix, ZJNODE(node));
-	if (!jnode_is_znode(ZJNODE(node)))
-		return;
-
-	printk("c_count: %i, readers: %i, items: %i\n",
-	       node->c_count, node->lock.nr_readers, node->nr_items);
-}
-
-/* debugging aid: output more human readable information about @node that
-   info_znode(). */
-void print_znode(const char *prefix /* prefix to print */ ,
-		 const znode * node /* node to print */ )
-{
-	if (node == NULL) {
-		printk("%s: null\n", prefix);
-		return;
-	}
-
-	info_znode(prefix, node);
-	if (!jnode_is_znode(ZJNODE(node)))
-		return;
-	info_znode("\tparent", znode_parent_nolock(node));
-	info_znode("\tleft", node->left);
-	info_znode("\tright", node->right);
-	print_key("\tld", &node->ld_key);
-	print_key("\trd", &node->rd_key);
-	printk("\n");
-}
-
-/* print all znodes in @tree */
-void print_znodes(const char *prefix, reiser4_tree * tree)
-{
-	znode *node;
-	znode *next;
-	z_hash_table *htable;
-	int tree_lock_taken;
-
-	if (tree == NULL)
-		tree = current_tree;
-
-	/* this is debugging function. It can be called by reiser4_panic()
-	   with tree spin-lock already held. Trylock is not exactly what we
-	   want here, but it is passable.
-	 */
-	tree_lock_taken = write_trylock_tree(tree);
-
-	htable = &tree->zhash_table;
-	for_all_in_htable(htable, z, node, next) {
-		info_znode(prefix, node);
-	}
-
-	htable = &tree->zfake_table;
-	for_all_in_htable(htable, z, node, next) {
-		info_znode(prefix, node);
-	}
-
-	if (tree_lock_taken)
-		WUNLOCK_TREE(tree);
-}
-
 /* return non-0 iff data are loaded into znode */
 int znode_is_loaded(const znode * node /* znode to query */ )
 {
diff -puN fs/reiser4/znode.h~reiser4-spinlock-cleanup fs/reiser4/znode.h
--- linux-2.6.14-rc4-mm1/fs/reiser4/znode.h~reiser4-spinlock-cleanup	2005-10-20 14:01:52.736989250 +0400
+++ linux-2.6.14-rc4-mm1-vs/fs/reiser4/znode.h	2005-10-20 14:01:52.848996250 +0400
@@ -9,7 +9,6 @@
 #include "forward.h"
 #include "debug.h"
 #include "dformat.h"
-#include "spin_macros.h"
 #include "key.h"
 #include "coord.h"
 #include "plugin/node/node.h"
@@ -261,15 +260,15 @@ extern void print_lock_stack(const char 
 #define znode_make_clean(x)         jnode_make_clean   ( ZJNODE(x) )
 #define znode_set_block(x, b)       jnode_set_block ( ZJNODE(x), (b) )
 
-#define spin_lock_znode(x)          LOCK_JNODE ( ZJNODE(x) )
-#define spin_unlock_znode(x)        UNLOCK_JNODE ( ZJNODE(x) )
+#define spin_lock_znode(x)          spin_lock_jnode ( ZJNODE(x) )
+#define spin_unlock_znode(x)        spin_unlock_jnode ( ZJNODE(x) )
 #define spin_trylock_znode(x)       spin_trylock_jnode ( ZJNODE(x) )
 #define spin_znode_is_locked(x)     spin_jnode_is_locked ( ZJNODE(x) )
 #define spin_znode_is_not_locked(x) spin_jnode_is_not_locked ( ZJNODE(x) )
 
 #if REISER4_DEBUG
 extern int znode_x_count_is_protected(const znode * node);
-extern int znode_invariant(const znode * node);
+extern int znode_invariant(znode * node);
 #endif
 
 /* acquire reference to @node */
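The header side mirrors the .c changes: the znode lock wrappers forward to
the renamed jnode helpers, the spin_macros.h include goes away, and
znode_invariant() loses its const qualifier. The const change is forced by
honesty about locking; taking a spinlock writes to the lock word, so the
old prototype only worked through casts, as the znode_invariant() hunk
above shows:

	/* before: cast away const to take the lock */
	spin_lock_znode((znode *) node);

	/* after: the prototype admits the node is modified */
	spin_lock_znode(node);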

_
