diff -urN v2.4.19p5/fs/reiserfs/file.c linux/fs/reiserfs/file.c
--- v2.4.19p5/fs/reiserfs/file.c	Mon Apr  1 13:24:42 2002
+++ linux/fs/reiserfs/file.c	Fri Apr  5 16:09:57 2002
@@ -47,6 +47,7 @@
 #ifdef REISERFS_PREALLOCATE
     reiserfs_discard_prealloc (&th, inode);
 #endif
+
     journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;
 
     if (atomic_read(&inode->i_count) <= 1 &&
@@ -60,6 +61,14 @@
 	reiserfs_truncate_file(inode, 0) ;
 	pop_journal_writer(windex) ;
     }
+
+    if (reiserfs_iicache(inode->i_sb)) {
+      if (inode->u.reiserfs_i.iic) {
+	kfree(inode->u.reiserfs_i.iic);
+	inode->u.reiserfs_i.iic = NULL; /* prevent use-after-free / double free */
+      }
+    }
+
     up (&inode->i_sem); 
     unlock_kernel() ;
     return 0;
diff -urN v2.4.19p5/fs/reiserfs/inode.c linux/fs/reiserfs/inode.c
--- v2.4.19p5/fs/reiserfs/inode.c	Mon Apr  1 13:28:06 2002
+++ linux/fs/reiserfs/inode.c	Fri Apr  5 16:09:57 2002
@@ -17,6 +17,8 @@
 #define GET_BLOCK_READ_DIRECT 4  /* read the tail if indirect item not found */
 #define GET_BLOCK_NO_ISEM     8 /* i_sem is not held, don't preallocate */
 
+#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+
 static int reiserfs_get_block (struct inode * inode, long block,
 			       struct buffer_head * bh_result, int create);
 //
@@ -53,6 +55,7 @@
 	/* no object items are in the tree */
 	;
     }
+
     clear_inode (inode); /* note this must go after the journal_end to prevent deadlock */
     inode->i_blocks = 0;
     unlock_kernel() ;
@@ -240,83 +243,286 @@
   reiserfs_update_inode_transaction(inode) ;
 }
 
-// it is called by get_block when create == 0. Returns block number
-// for 'block'-th logical block of file. When it hits direct item it
-// returns 0 (being called from bmap) or read direct item into piece
-// of page (bh_result)
+/* 
+** Get block number from the indirect item by position. 
+*/
+static inline long iitem_get_blocknr (struct path *path, int pos)
+{
+  struct buffer_head * bh = get_last_bh (path);
+  struct item_head   * ih = get_ih (path);
+  __u32 * ind_item;
+
+  if (is_indirect_le_ih (ih)) {
+    ind_item = (__u32 *)B_I_PITEM (bh, ih);
+    return le32_to_cpu(ind_item [path->pos_in_item + pos]);
+  }
 
-// Please improve the english/clarity in the comment above, as it is
-// hard to understand.
+  return 0;
+}
 
-static int _get_block_create_0 (struct inode * inode, long block,
-				 struct buffer_head * bh_result,
-				 int args)
+/* 
+** Get the indirect item size. 
+*/
+static inline int iitem_size (struct path *path)
 {
-    INITIALIZE_PATH (path);
-    struct cpu_key key;
-    struct buffer_head * bh;
-    struct item_head * ih, tmp_ih;
-    int fs_gen ;
-    int blocknr;
-    char * p = NULL;
-    int chars;
-    int ret ;
-    int done = 0 ;
-    unsigned long offset ;
+  struct item_head * ih = get_ih (path);
+  return (I_UNFM_NUM(ih) - (path->pos_in_item + 1));
+}
 
-    // prepare the key to look for the 'block'-th block of file
-    make_cpu_key (&key, inode,
-		  (loff_t)block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 3);
+/* 
+** Return "1" if last position of the indirect item reached,
+**        "0" - otherwise. 
+*/
+static inline int last_pos_of_iitem (struct path *path, int pos)
+{
+  struct item_head * ih = get_ih (path);
+  return ((path->pos_in_item + 1 + pos) >= (I_UNFM_NUM(ih)) ? 1 : 0);
+}
 
-research:
-    if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND) {
-	pathrelse (&path);
-        if (p)
-            kunmap(bh_result->b_page) ;
-	// We do not return -ENOENT if there is a hole but page is uptodate, because it means
-	// That there is some MMAPED data associated with it that is yet to be written to disk.
-	if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
-	    return -ENOENT ;
-	}
-        return 0 ;
+/* 
+** Get the number of contiguous blocks in the indirect item
+** from given pos to the end of the item.  
+*/
+static inline int iitem_amount_contiguous (struct path *path, int pos)
+{
+  long curr = 0;
+  long next = 0;
+  int  item_size = iitem_size(path);
+  int  amount = 1;
+
+  if (pos >= item_size) { 
+    return 0;
+  }
+  curr = iitem_get_blocknr(path, pos++);
+
+  if (curr==0) {
+    while (pos <= item_size) {
+      next = iitem_get_blocknr(path, pos++);
+      if (next != 0) break;
+      amount++;
     }
-    
-    //
-    bh = get_last_bh (&path);
-    ih = get_ih (&path);
-    if (is_indirect_le_ih (ih)) {
-	__u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+    return amount;
+  }
+
+  while (pos <= item_size) {
+    next = iitem_get_blocknr(path, pos++);
+    if ((next - curr) != 1) break;
+    curr = next;
+    amount++;
+  }
+
+  return amount;
+}
+
+/*
+** Return "1" if fs changed and item moved. 
+*/
+static inline int need_research (int fs_gen, struct super_block * sb,  
+				 struct item_head * ih, struct path * path ) 
+{
+  return (fs_changed(fs_gen, sb) && item_moved(ih, path));
+}
+
+/* Fill indirect item cache.
+** Put N block numbers from current indirect item.
+*/
+static inline void iicache_fill (struct inode * inode, long block, 
+				 struct path * path, struct cpu_key * key)
+{
+  long blocknr=0, blk=block;
+  int  pos=0; 
+  int  amount=0,i=0;
+  long file_size = inode->i_size >> inode->i_blkbits;
+  int asize = (file_size > 1012)?(file_size/1012+8):(file_size/8 + 1);
+  int iic_size = (sizeof(struct iicache)) * asize;
+  struct super_block * sb = inode->i_sb;
+  struct item_head * ih = get_ih (path);
+
+  if (inode->u.reiserfs_i.iic==NULL) {
+    inode->u.reiserfs_i.iic = (struct iicache *)kmalloc(iic_size, GFP_NOFS);
+    if (inode->u.reiserfs_i.iic==NULL) {
+      return;
+    }
+    iicache_set_asize(inode, asize);
+  }
+  iicache_clear(inode);
+  
+  for (i=0; i<iicache_get_asize(inode); i++) {
+
+    amount  = iitem_amount_contiguous (path, pos);     
+    blocknr = iitem_get_blocknr (path, pos);
+
+    if ((amount>0) && (amount<=1012)) {       
+      iicache_set (inode,  amount, IICACHE_SIZE,   i);
+      iicache_set (inode,     blk, IICACHE_BLOCK,  i);    
+      iicache_set (inode, blocknr, IICACHE_BLOCKNR,i);
+    } else {
+      iicache_set (inode,  0, IICACHE_SIZE,   i);
+      iicache_set (inode,  0, IICACHE_BLOCK,  i);    
+      iicache_set (inode,  0, IICACHE_BLOCKNR,i);    
+    }
+
+    pos += amount;
+    blk += amount;
+
+    if (!last_pos_of_iitem(path, pos)) continue; /* more blocks left in this item */
+ 
+    if((blk * sb->s_blocksize) < inode->i_size) {
+      if ((i+1) < iicache_get_asize(inode)) {
+ 	set_cpu_key_k_offset (key, cpu_key_k_offset(key) + pos * sb->s_blocksize);	
+
+	if (search_for_position_by_key (sb, key, path) != POSITION_FOUND) {	  	   
+	  break;
+	}	        
+
+        ih = get_ih (path);        	
+	if (!is_indirect_le_ih(ih) || 
+	    (le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size) {
+	  break ;
+	}
+	pos=0; amount=0;
 	
-	/* FIXME: here we could cache indirect item or part of it in
-	   the inode to avoid search_by_key in case of subsequent
-	   access to file */
-	blocknr = get_block_num(ind_item, path.pos_in_item) ;
-	ret = 0 ;
-	if (blocknr) {
-	    bh_result->b_dev = inode->i_dev;
-	    bh_result->b_blocknr = blocknr;
-	    bh_result->b_state |= (1UL << BH_Mapped);
-	} else
-	    // We do not return -ENOENT if there is a hole but page is uptodate, because it means
-	    // That there is some MMAPED data associated with it that is yet to be written to disk.
-	    if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
-		ret = -ENOENT ;
-	    }
+      } 
+    }
+  }
+
+  if (i < iicache_get_asize(inode)) {
+    iicache_clear_from_pos(inode, i);
+  }
+
+}
+
+/*
+** Truncate indirect item cache. 
+*/
+static inline void iicache_truncate (struct  inode * inode)
+{
+  long new_file_end = inode->i_size >> inode->i_blkbits;  
+  long last_cached, truncate_size, ii_size=0, n=0;
+  int i=0;
+
+  if (inode->u.reiserfs_i.iic==NULL) return;
+
+  if (iicache_size(inode,0)) {
+    if (new_file_end <= iicache_first_cached(inode,0)) {
+      iicache_clear(inode);
+      return;
+    }
+    if ((n=block_is_iicached(inode, new_file_end))) {
+      last_cached = iicache_last_cached(inode, n-1); 
+
+      if (iicache_size(inode,n) && (new_file_end <= last_cached)) {
+	truncate_size = last_cached - new_file_end + 1;
+	ii_size = iicache_get (inode, IICACHE_SIZE, n-1);
+	ii_size -= truncate_size;
+	iicache_set (inode, ii_size, IICACHE_SIZE, n-1);
+	i=n;
+	while(i<iicache_get_asize(inode)) {
+	  iicache_set (inode, 0, IICACHE_SIZE, i++);
+	}
+      }
 
-	pathrelse (&path);
-        if (p)
-            kunmap(bh_result->b_page) ;
-	return ret ;
     }
+  }
+}		   
+
+
+/*
+** Helper function for _get_block_create_0
+*/
+static inline int iitem_map_indirect_block (struct path * path, struct inode * inode, 
+					    long block, struct buffer_head * bh_result, 
+					    int args, struct cpu_key * key)
+{
+  struct buffer_head * bh = get_last_bh (path);
+  struct item_head   * ih = get_ih (path);
+  __u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+  int blocknr= get_block_num(ind_item, path->pos_in_item) ; 
+ 
+  // We do not return -ENOENT if there is a hole but page is uptodate, because it means
+  // that there is some mmapped data associated with it that is yet to be written to disk.
+  if (!blocknr && (args & GET_BLOCK_NO_HOLE)&& !Page_Uptodate(bh_result->b_page)) {
+    return -ENOENT ;
+  }
+
+  // map the found block
+  set_block_dev_mapped (bh_result, blocknr, inode);
+
+  return 0;
+}
+
+
+
+/*
+** Helper function for _get_block_create_0
+*/
+static inline void path_relse_page_unmap (struct path * path, char * p, 
+					  struct page * page) {
+  pathrelse(path);
+  if (p) 
+    kunmap(page);
+}
+
+/*
+** Handle Indirect Item case and simple direct case.
+** "gbc0" stands for "get_block_create_0"  
+*/
+static inline int gbc0_indirect_case (char * p, struct path * path, 
+				      struct inode *inode, long block, 
+				      struct buffer_head * bh_result, 
+				      int args, struct cpu_key * key) 
+{
+  struct super_block * sb = inode->i_sb;
+  struct page * page = bh_result->b_page;
+  struct item_head * ih = get_ih (path);
+  int ret=0;
+
+  // requested data are in indirect item(s)
+  if (is_indirect_le_ih (ih)) {
+
+    ret = iitem_map_indirect_block (path, inode, block, bh_result, args, key);
+    if (ret<0) {
+      path_relse_page_unmap (path, p, page);
+      return ret;
+    }
+
+    if (p) 
+      kunmap(page);
+
+    /*
+    ** Here we fill indirect item cache or part of it 
+    ** in the inode to avoid search_by_key in case of 
+    ** subsequent access to file.   
+    */
+    // if "iicache" mount option is used 
+    if (reiserfs_iicache(sb)) {	
+      iicache_fill (inode, block, path, key);
+    }	
+    pathrelse(path);
+    //path_relse_page_unmap (path, p, page);
+    return 0 ; 
+  }
+
+  return 1;
+}
+
+/*
+** Direct Item case start.
+** "gbc0" stands for "get_block_create_0"  
+*/
+static inline int gbc0_direct_case_start (char * p, struct path * path, 
+					  struct inode *inode, 
+					  struct buffer_head * bh_result, 
+					  int args) 
+{
+  struct page * page = bh_result->b_page;
 
     // requested data are in direct item(s)
     if (!(args & GET_BLOCK_READ_DIRECT)) {
-	// we are called by bmap. FIXME: we can not map block of file
-	// when it is stored in direct item(s)
-	pathrelse (&path);	
-        if (p)
-            kunmap(bh_result->b_page) ;
-	return -ENOENT;
+      // we are called by bmap. FIXME: we can not map block of file
+      // when it is stored in direct item(s)
+      path_relse_page_unmap (path, p, page);
+      return -ENOENT;
     }
 
     /* if we've got a direct item, and the buffer was uptodate,
@@ -324,90 +530,202 @@
     ** end, where we map the buffer and return
     */
     if (buffer_uptodate(bh_result)) {
-        goto finished ;
-    } else 
-	/*
-	** grab_tail_page can trigger calls to reiserfs_get_block on up to date
-	** pages without any buffers.  If the page is up to date, we don't want
-	** read old data off disk.  Set the up to date bit on the buffer instead
-	** and jump to the end
-	*/
-	    if (Page_Uptodate(bh_result->b_page)) {
-		mark_buffer_uptodate(bh_result, 1);
-		goto finished ;
+      set_block_dev_mapped (bh_result, 0, inode);
+      path_relse_page_unmap (path, p, page);
+      return 0;
+    } else {
+      /*
+      ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
+      ** pages without any buffers.  If the page is up to date, we don't want
+      ** read old data off disk.  Set the up to date bit on the buffer instead
+      ** and jump to the end
+      */
+      if (Page_Uptodate(bh_result->b_page)) {
+	mark_buffer_uptodate(bh_result, 1);
+	set_block_dev_mapped (bh_result, 0, inode);
+	path_relse_page_unmap (path, p, page);
+	return 0;
+      }
     }
+    return 1;
+}
 
-    // read file tail into part of page
-    offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
-    fs_gen = get_generation(inode->i_sb) ;
-    copy_item_head (&tmp_ih, ih);
-
-    /* we only want to kmap if we are reading the tail into the page.
-    ** this is not the common case, so we don't kmap until we are
-    ** sure we need to.  But, this means the item might move if
-    ** kmap schedules
+/*
+** Handle Direct Item case.
+** "gbc0" stands for "get_block_create_0"  
+*/
+static inline void gbc0_direct_case (char * p, struct path * path, 
+				     struct inode *inode, 
+				     struct cpu_key * key)			
+{
+  struct buffer_head * bh;
+  struct super_block * sb = inode->i_sb;
+  struct item_head * ih = get_ih (path);
+  int chars=0, done=0;
+
+  do {
+    if (!is_direct_le_ih (ih)) {
+      BUG ();
+    }
+    /* make sure we don't read more bytes than actually exist in
+    ** the file.  This can happen in odd cases where i_size isn't
+    ** correct, and when direct item padding results in a few 
+    ** extra bytes at the end of the direct item
     */
-    if (!p) {
-    p = (char *)kmap(bh_result->b_page) ;
-    if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
-        goto research;
-    }
+    if ((le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size)
+      break ;
+
+    if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
+      chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path->pos_in_item;
+      done = 1 ;
+    } else {
+      chars = ih_item_len(ih) - path->pos_in_item;
     }
-    p += offset ;
-    memset (p, 0, inode->i_sb->s_blocksize);
-    do {
-	if (!is_direct_le_ih (ih)) {
-	    BUG ();
-        }
-	/* make sure we don't read more bytes than actually exist in
-	** the file.  This can happen in odd cases where i_size isn't
-	** correct, and when direct item padding results in a few 
-	** extra bytes at the end of the direct item
-	*/
-        if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
-	    break ;
-	if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
-	    chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path.pos_in_item;
-	    done = 1 ;
-	} else {
-	    chars = ih_item_len(ih) - path.pos_in_item;
-	}
-	memcpy (p, B_I_PITEM (bh, ih) + path.pos_in_item, chars);
 
-	if (done) 
-	    break ;
+    bh = get_last_bh (path);
+    memcpy (p, B_I_PITEM (bh, ih) + path->pos_in_item, chars);
 
-	p += chars;
+    if (done) 
+      break ;
 
-	if (PATH_LAST_POSITION (&path) != (B_NR_ITEMS (bh) - 1))
-	    // we done, if read direct item is not the last item of
-	    // node FIXME: we could try to check right delimiting key
-	    // to see whether direct item continues in the right
-	    // neighbor or rely on i_size
-	    break;
+    p += chars;
 
-	// update key to look for the next piece
-	set_cpu_key_k_offset (&key, cpu_key_k_offset (&key) + chars);
-	if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND)
-	    // we read something from tail, even if now we got IO_ERROR
-	    break;
-	bh = get_last_bh (&path);
-	ih = get_ih (&path);
-    } while (1);
+    if (PATH_LAST_POSITION (path) != (B_NR_ITEMS (bh) - 1))
+      // we done, if read direct item is not the last item of
+      // node FIXME: we could try to check right delimiting key
+      // to see whether direct item continues in the right
+      // neighbor or rely on i_size
+      break;
 
-    flush_dcache_page(bh_result->b_page) ;
-    kunmap(bh_result->b_page) ;
+    // update key to look for the next piece
+    set_cpu_key_k_offset (key, cpu_key_k_offset(key) + chars);
 
-finished:
-    pathrelse (&path);
-    bh_result->b_blocknr = 0 ;
-    bh_result->b_dev = inode->i_dev;
+    if (search_for_position_by_key (sb, key, path) != POSITION_FOUND)
+      // we read something from tail, even if now we got IO_ERROR
+      break;
+
+    bh = get_last_bh (path);
+    ih = get_ih (path);
+
+  } while (1);
+
+}
+
+
+/*
+** Helper function for _get_block_create_0
+** Check iicache. 
+** If needed block is in iicache we map it and return "1". 
+*/
+static int check_iicache (struct inode * inode, long block,
+			  struct buffer_head * bh_result)
+{
+  struct super_block * sb = inode->i_sb;
+  int n=0, block_nr=0;
+
+  /* 
+  ** Here we use the cache of indirect item.
+  ** Getting the unfm_block number from the cache   
+  ** we are trying to avoid some of the search_by_key() calls.  
+  */
+
+  // if "iicache" mount option is used 
+  if (reiserfs_iicache(sb)) {
+    if (inode->u.reiserfs_i.iic==NULL) {
+      return 0;
+    }
+    // Check the iicache and get the iicache array index + 1
+    // at which the block number corresponding to the given
+    // logical block can be found.
+    n = block_is_iicached(inode, block);
+
+    // If the iicache is not empty for this file and
+    // the requested logical block of the file is cached,
+    // then we return the corresponding block number.
+    if (n>0) {
+      block_nr = iicache_get_blocknr_by_block(inode, block, n-1);
+      if (block_nr > 0) { /* 0 means hole or not cached: fall back to tree search */
+	set_block_dev_mapped (bh_result, block_nr, inode);
+	return 1; 
+      }   
+    } 
+
+  }
+  return 0;
+}
+
+//
+// It is called by reiserfs_get_block when create == 0. 
+// Returns disk block number by logical block number of file.
+//
+// When it hits direct item it returns 0 (being called from bmap) 
+// or read direct item into piece of page (bh_result)
+//
+static int _get_block_create_0 (struct inode * inode, long block,
+				struct buffer_head * bh_result,
+				int args)
+{
+    INITIALIZE_PATH (path);
+    struct cpu_key key;
+    struct item_head   * ih, tmp_ih;
+    struct super_block * sb = inode->i_sb;
+    struct page * page = bh_result->b_page;
+    char * p = NULL;   
+    unsigned long offset ;  
+    int fs_gen=0, ret=0, block_iicached=0;
+
+
+    block_iicached = check_iicache (inode, block, bh_result);
+    if (block_iicached) {
+      return 0;
+    }
+
+    // prepare the key to look for the 'block'-th block of file
+    offset = block * sb->s_blocksize + 1;
+    make_cpu_key (&key, inode, (loff_t)offset, TYPE_ANY, 3);
+
+    do {
+      
+      if (search_for_position_by_key (sb, &key, &path) != POSITION_FOUND) {
+	path_relse_page_unmap (&path, p, page);
+	// We do not return -ENOENT if there is a hole but page is uptodate, because it means
+	// that there is some mmapped data associated with it that is yet to be written to disk.
+	return (((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page)) ? (-ENOENT) : 0 ) ;
+      }
+
+      // check and handle indirect case 
+      ret = gbc0_indirect_case (p, &path, inode, block, bh_result, args, &key);
+      if (ret <= 0) 
+	return ret;
+
+      // start the direct case 
+      ret = gbc0_direct_case_start (p, &path, inode, bh_result, args);
+      if (ret <= 0) 
+	return ret;
+
+      // we should read the file tail into part of page.
+      offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
+      fs_gen = get_generation(sb) ;
+      ih = get_ih (&path);
+      copy_item_head (&tmp_ih, ih);
+      if (!p) 
+	p=(char *)kmap(page);
+      
+    } while (need_research(fs_gen, sb, &tmp_ih, &path));
+
+    // ok, we have direct item and kmapped page,
+    // do copy from direct item to page now. 
+    p += offset;
+    memset (p, 0, sb->s_blocksize);
+    gbc0_direct_case (p, &path, inode, &key);
+
+    flush_dcache_page(page) ;
+    path_relse_page_unmap (&path, p, page);
+    set_block_dev_mapped (bh_result, 0, inode);
     mark_buffer_uptodate (bh_result, 1);
-    bh_result->b_state |= (1UL << BH_Mapped);
     return 0;
 }
 
-
 // this is called to create file map. So, _get_block_create_0 will not
 // read direct item
 int reiserfs_bmap (struct inode * inode, long block,
@@ -560,10 +878,13 @@
     struct cpu_key key;
     struct buffer_head * bh, * unbh = 0;
     struct item_head * ih, tmp_ih;
+    struct super_block * sb = inode->i_sb;
     __u32 * item;
     int done;
     int fs_gen;
     int windex ;
+    int block_iicached=0;
+
     struct reiserfs_transaction_handle th ;
     /* space reserved in transaction batch: 
         . 3 balancings in direct->indirect conversion
@@ -590,6 +911,7 @@
 	return -EFBIG;
     }
 
+
     /* if !create, we aren't changing the FS, so we don't need to
     ** log anything, so we don't need to start a transaction
     */
@@ -683,14 +1005,14 @@
 	    inode->i_blocks += (inode->i_sb->s_blocksize / 512) ;
 	    reiserfs_update_sd(&th, inode) ;
 	}
+
 	set_block_dev_mapped(bh_result, unfm_ptr, inode);
 	pathrelse (&path);
 	pop_journal_writer(windex) ;
 	if (transaction_started)
 	    journal_end(&th, inode->i_sb, jbegin_count) ;
-
 	unlock_kernel() ;
-	 
+		 
 	/* the item was found, so new blocks were not added to the file
 	** there is no need to make sure the inode is updated with this 
 	** transaction
@@ -921,6 +1243,10 @@
 
     INIT_LIST_HEAD(&inode->u.reiserfs_i.i_prealloc_list) ;
 
+    if (reiserfs_iicache(inode->i_sb)) {
+      iicache_spin_lock_init (inode);
+    }
+
     if (stat_data_v1 (ih)) {
 	struct stat_data_v1 * sd = (struct stat_data_v1 *)B_I_PITEM (bh, ih);
 	unsigned long blocks;
@@ -1531,6 +1857,8 @@
     /* item head of new item */
     ih.ih_key.k_dir_id = INODE_PKEY (dir)->k_objectid;
     ih.ih_key.k_objectid = cpu_to_le32 (reiserfs_get_unused_objectid (th));
+
+
     if (!ih.ih_key.k_objectid) {
 	iput(inode) ;
 	*err = -ENOMEM;
@@ -1597,6 +1925,10 @@
 
     INIT_LIST_HEAD(&inode->u.reiserfs_i.i_prealloc_list) ;
 
+    if (reiserfs_iicache(inode->i_sb)) {
+      iicache_spin_lock_init (inode);
+    }
+
     if (old_format_only (sb)) {
 	if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
 	    pathrelse (&path_to_key);
@@ -1757,6 +2089,7 @@
 */
 void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     struct reiserfs_transaction_handle th ;
+    struct super_block * sb = p_s_inode->i_sb;
     int windex ;
 
     /* we want the offset for the first byte after the end of the file */
@@ -1792,6 +2125,15 @@
     journal_begin(&th, p_s_inode->i_sb,  JOURNAL_PER_BALANCE_CNT * 2 + 1 ) ;
     reiserfs_update_inode_transaction(p_s_inode) ;
     windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
+
+/*********
+    if (reiserfs_iicache(sb)) {
+      iicache_spin_lock(p_s_inode);
+      iicache_truncate (p_s_inode); 
+      iicache_spin_unlock(p_s_inode);
+    }
+***********/
+
     if (update_timestamps)
            /* we are doing real truncate: if the system crashes before the last
               transaction of truncating gets committed - on reboot the file
diff -urN v2.4.19p5/fs/reiserfs/journal.c linux/fs/reiserfs/journal.c
--- v2.4.19p5/fs/reiserfs/journal.c	Mon Apr  1 13:28:06 2002
+++ linux/fs/reiserfs/journal.c	Fri Apr  5 16:09:57 2002
@@ -1886,7 +1886,7 @@
       break ;
     }
     wake_up(&reiserfs_commit_thread_done) ;
-    interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5 * HZ) ;
+    interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5 * HZ) ;
   }
   unlock_kernel() ;
   wake_up(&reiserfs_commit_thread_done) ;
diff -urN v2.4.19p5/fs/reiserfs/namei.c linux/fs/reiserfs/namei.c
--- v2.4.19p5/fs/reiserfs/namei.c	Mon Apr  1 13:28:06 2002
+++ linux/fs/reiserfs/namei.c	Fri Apr  5 16:09:57 2002
@@ -309,10 +309,11 @@
 
     while (1) {
 	retval = search_by_entry_key (dir->i_sb, &key_to_search, path_to_entry, de);
-	if (retval == IO_ERROR) {
-	    reiserfs_warning ("zam-7001: io error in " __FUNCTION__ "\n");
-	    return IO_ERROR;
-	}
+	if (retval == IO_ERROR) {
+	    /* report the error to the caller; a disk I/O error must not panic the system */
+	    reiserfs_warning ("zam-7001: io error in " __FUNCTION__ "\n");
+	    return IO_ERROR;
+	}
 
 	/* compare names for all entries having given hash value */
 	retval = linear_search_in_dir_item (&key_to_search, de, name, namelen);
diff -urN v2.4.19p5/fs/reiserfs/procfs.c linux/fs/reiserfs/procfs.c
--- v2.4.19p5/fs/reiserfs/procfs.c	Mon Apr  1 13:28:06 2002
+++ linux/fs/reiserfs/procfs.c	Fri Apr  5 16:09:57 2002
@@ -149,7 +149,7 @@
 	r = &sb->u.reiserfs_sb;
 	len += sprintf( &buffer[ len ], 
 			"state: \t%s\n"
-			"mount options: \t%s%s%s%s%s%s%s%s%s%s%s%s\n"
+			"mount options: \t%s%s%s%s%s%s%s%s%s%s%s%s%s\n"
 			"gen. counter: \t%i\n"
 			"s_kmallocs: \t%i\n"
 			"s_disk_reads: \t%i\n"
@@ -192,6 +192,7 @@
 			reiserfs_hashed_relocation( sb ) ? "UNHASHED_RELOCATION " : "",
 			reiserfs_test4( sb ) ? "TEST4 " : "",
 			dont_have_tails( sb ) ? "NO_TAILS " : "TAILS ",
+			reiserfs_iicache( sb ) ? "IICACHE " : "NO_IICACHE ",
 			replay_only( sb ) ? "REPLAY_ONLY " : "",
 			reiserfs_dont_log( sb ) ? "DONT_LOG " : "LOG ",
 			convert_reiserfs( sb ) ? "CONV " : "",
diff -urN v2.4.19p5/fs/reiserfs/stree.c linux/fs/reiserfs/stree.c
--- v2.4.19p5/fs/reiserfs/stree.c	Mon Apr  1 13:28:06 2002
+++ linux/fs/reiserfs/stree.c	Fri Apr  5 16:09:57 2002
@@ -1031,7 +1031,6 @@
 	char                  c_mode;           /* Returned mode of the balance. */
 	int need_research;
 
-
 	n_blk_size = p_s_sb->s_blocksize;
 
 	/* Search for the needed object indirect item until there are no unformatted nodes to be removed. */
diff -urN v2.4.19p5/fs/reiserfs/super.c linux/fs/reiserfs/super.c
--- v2.4.19p5/fs/reiserfs/super.c	Mon Apr  1 13:28:06 2002
+++ linux/fs/reiserfs/super.c	Fri Apr  5 16:09:57 2002
@@ -455,6 +455,8 @@
 	    set_bit (REISERFS_HASHED_RELOCATION, mount_options);
 	} else if (!strcmp (this_char, "test4")) {
 	    set_bit (REISERFS_TEST4, mount_options);
+	} else if (!strcmp (this_char, "iicache")) {
+	    set_bit (REISERFS_IICACHE, mount_options);
 	} else if (!strcmp (this_char, "nolog")) {
 	    reiserfs_warning("reiserfs: nolog mount option not supported yet\n");
 	} else if (!strcmp (this_char, "replayonly")) {
@@ -549,6 +551,7 @@
 
   /* set options in the super-block bitmask */
   SET_OPT( NOTAIL, mount_options, s );
+  SET_OPT( REISERFS_IICACHE, mount_options, s );
   SET_OPT( REISERFS_NO_BORDER, mount_options, s );
   SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
   SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
@@ -557,7 +560,7 @@
 #undef SET_OPT
 
   handle_attrs( s );
-
+ 
   if(blocks) {
       int rc = reiserfs_resize(s, blocks);
       if (rc != 0)
diff -urN v2.4.19p5/include/linux/reiserfs_fs.h linux/include/linux/reiserfs_fs.h
--- v2.4.19p5/include/linux/reiserfs_fs.h	Mon Apr  1 13:28:22 2002
+++ linux/include/linux/reiserfs_fs.h	Fri Apr  5 16:10:14 2002
@@ -197,7 +197,6 @@
      ( (n_tail_size) >=   (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \
 )
 
-
 /*
  * values for s_state field
  */
@@ -1731,6 +1730,218 @@
 				   struct dentry *dentry, struct inode *inode, int * err);
 int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode);
 void reiserfs_update_sd (struct reiserfs_transaction_handle *th, struct inode * inode);
+
+/*
+** The indirect item cache - iicache.
+** 
+** We put the indirect item or part of it to iicache and  
+** can avoid now a lot of search_by_key calls.  
+*/
+
+#define IICACHE_BLOCKNR 1
+#define IICACHE_SIZE    2
+#define IICACHE_BLOCK   3
+
+/*
+** Get current iicache array size.
+*/
+static inline int iicache_get_asize (struct inode * inode)
+{
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+  return (inode->u.reiserfs_i.iic_asize);   
+} 
+
+/*
+** Set current iicache array size.
+*/
+static inline void iicache_set_asize (struct inode * inode, int asize)
+{
+  if (inode->u.reiserfs_i.iic==NULL) return;
+  inode->u.reiserfs_i.iic_asize = asize;   
+} 
+
+/* 
+** Set parameter of given type to iicache  
+*/
+static inline void iicache_set (struct inode * inode, 
+				long param, int type, int i)
+{
+  struct iicache * iic;
+  
+  if (inode->u.reiserfs_i.iic==NULL) return;
+
+  iic = inode->u.reiserfs_i.iic;
+
+  if (i >= iicache_get_asize(inode)) return;
+
+  iic += i;
+
+  switch (type) {
+  case IICACHE_BLOCKNR : iic->i_cache_blocknr = param; 
+                         break;
+  case IICACHE_SIZE    : iic->i_cache_size    = param; 
+                         break;
+  case IICACHE_BLOCK   : iic->i_cache_block   = param; 
+                         break;
+  }
+}
+
+/* 
+** Get parameter of given type from iicache  
+*/
+static inline long iicache_get (struct inode * inode, int type, int i)
+{
+  long val=0;
+  struct iicache * iic;
+
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+
+  iic = inode->u.reiserfs_i.iic;
+  
+  if (i >= iicache_get_asize(inode)) return 0;
+ 
+  iic += i;
+
+  switch (type) {
+  case IICACHE_BLOCKNR : val=iic->i_cache_blocknr;
+                         break; 
+  case IICACHE_SIZE    : val=iic->i_cache_size;
+                         break;  
+  case IICACHE_BLOCK   : val=iic->i_cache_block;
+                         break;  
+  }
+  return val;
+}
+
+/* 
+** Clear the indirect item cache 
+*/
+static inline void iicache_clear(struct inode * inode)
+{
+  int i;
+
+  if (inode->u.reiserfs_i.iic==NULL) return;
+
+  for (i=0; i<iicache_get_asize(inode); i++) {
+    iicache_set (inode, 0, IICACHE_SIZE, i);
+    iicache_set (inode, 0, IICACHE_BLOCK, i);
+    iicache_set (inode, 0, IICACHE_BLOCKNR, i);
+  }
+
+}
+
+/* 
+** Print the indirect item cache 
+*/
+static inline void iicache_print(struct inode * inode)
+{
+  int i;
+
+  if (inode->u.reiserfs_i.iic==NULL) return;
+
+  for (i=0; i<iicache_get_asize(inode); i++) {
+    printk("  (%i),s=%li,", i, iicache_get (inode, IICACHE_SIZE, i));
+    printk("b=%li,",  iicache_get (inode, IICACHE_BLOCK, i));
+    printk("d=%li\n", iicache_get (inode, IICACHE_BLOCKNR, i));
+  }
+}
+
+/* 
+** Clear the indirect item cache from given pos 
+*/
+static inline void iicache_clear_from_pos(struct inode * inode, int pos)
+{
+  int i;
+
+  if (inode->u.reiserfs_i.iic==NULL) return;
+
+  for (i=pos; i<iicache_get_asize(inode); i++) {
+    iicache_set (inode, 0, IICACHE_SIZE, i);
+    iicache_set (inode, 0, IICACHE_BLOCK, i);
+    iicache_set (inode, 0, IICACHE_BLOCKNR, i);
+  }
+}
+
+/* 
+** Get the first blocknr of the set of contiguous blocknrs
+*/
+static inline long iicache_get_blocknr(struct inode * inode, int i) 
+{
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+  return (iicache_get(inode, IICACHE_BLOCKNR, i));
+}
+
+/* 
+** Get the size of indirect item cache  
+*/
+static inline long iicache_size(struct inode * inode, int i)
+{
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+  return (iicache_get(inode, IICACHE_SIZE, i));
+}
+
+/* 
+** Get the first cached logical block of file 
+*/
+static inline long iicache_first_cached(struct inode * inode, int i)
+{
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+  return (iicache_get(inode, IICACHE_BLOCK, i));
+}
+
+/* 
+** Get the last cached logical block of file 
+*/
+static inline long iicache_last_cached(struct inode * inode, int i)
+{
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+  return (iicache_first_cached(inode,i) + iicache_size(inode,i) - 1);
+}
+
+/* 
+** Check the logical block of file: is it in iicache 
+*/
+static inline int block_is_iicached(struct inode * inode, long block)
+{
+  int i;
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+
+  for (i=0; i<iicache_get_asize(inode); i++) {
+    if ( (block >= iicache_first_cached(inode, i)) && 
+	 (block <= iicache_last_cached(inode, i)) )
+      return i+1;
+  } 
+  return 0;
+}
+
+/* 
+** Get the disk block number by the logical block number of file 
+*/
+static inline long iicache_get_blocknr_by_block(struct inode * inode, long block, int i)
+{    
+  long offset=0, block_nr=0;
+  if (inode->u.reiserfs_i.iic==NULL) return 0;
+ 
+  offset   = block - iicache_first_cached(inode,i);
+  block_nr = iicache_get_blocknr(inode,i);
+  
+  return ((block_nr==0) ? 0 : (block_nr + offset)); 
+}
+
+static inline void iicache_spin_lock_init(struct inode * inode) 
+{
+  inode->u.reiserfs_i.i_cache_lock = SPIN_LOCK_UNLOCKED; 
+}
+
+static inline void iicache_spin_lock(struct inode * inode) 
+{
+  spin_lock ( &(inode->u.reiserfs_i.i_cache_lock) );
+}
+
+static inline void iicache_spin_unlock(struct inode * inode) 
+{
+  spin_unlock ( &(inode->u.reiserfs_i.i_cache_lock) );
+}
 
 void sd_attrs_to_i_attrs( __u16 sd_attrs, struct inode *inode );
 void i_attrs_to_sd_attrs( struct inode *inode, __u16 *sd_attrs );
diff -urN v2.4.19p5/include/linux/reiserfs_fs_i.h linux/include/linux/reiserfs_fs_i.h
--- v2.4.19p5/include/linux/reiserfs_fs_i.h	Mon Apr  1 13:28:22 2002
+++ linux/include/linux/reiserfs_fs_i.h	Fri Apr  5 16:10:17 2002
@@ -2,6 +2,13 @@
 #define _REISER_FS_I
 
 #include <linux/list.h>
+ 
+// The cache for indirect item  (iicache).
+struct iicache {
+    long i_cache_blocknr;        /* the first of a set of contiguous block numbers */
+    long i_cache_size   ;        /* the number of contiguous block numbers in the set */
+    long i_cache_block  ;        /* the first cached logical block of the file */
+};
 
 /** bitmasks for i_flags field in reiserfs-specific part of inode */
 typedef enum {
@@ -46,6 +53,11 @@
     ** flushed */
     unsigned long i_trans_id ;
     unsigned long i_trans_index ;
+
+    // The cache for indirect item  (iicache).
+    struct iicache * iic;
+    int        iic_asize;                    /* iicache array size */
+    spinlock_t i_cache_lock;                 /* spinlock protecting iicache changes */
 };
 
 #endif
diff -urN v2.4.19p5/include/linux/reiserfs_fs_sb.h linux/include/linux/reiserfs_fs_sb.h
--- v2.4.19p5/include/linux/reiserfs_fs_sb.h	Mon Apr  1 13:28:22 2002
+++ linux/include/linux/reiserfs_fs_sb.h	Fri Apr  5 16:10:22 2002
@@ -482,6 +482,7 @@
 #define REISERFS_NO_UNHASHED_RELOCATION 12
 #define REISERFS_HASHED_RELOCATION 13
 #define REISERFS_TEST4 14 
+#define REISERFS_IICACHE 17 
 
 #define REISERFS_TEST1 11
 #define REISERFS_TEST2 12
@@ -498,6 +499,8 @@
 #define reiserfs_no_unhashed_relocation(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NO_UNHASHED_RELOCATION))
 #define reiserfs_hashed_relocation(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_HASHED_RELOCATION))
 #define reiserfs_test4(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_TEST4))
+
+#define reiserfs_iicache(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_IICACHE))
 
 #define dont_have_tails(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << NOTAIL))
 #define replay_only(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REPLAYONLY))
