diff -Nru linux/fs/buffer.c linux-2.4.19-pre5-mjc/fs/buffer.c
--- linux/fs/buffer.c	Sat Apr  6 08:28:22 2002
+++ linux-2.4.19-pre5-mjc/fs/buffer.c	Sat Apr  6 09:20:34 2002
@@ -473,13 +473,25 @@
 	return ret;
 }
 
-/* After several hours of tedious analysis, the following hash
- * function won.  Do not mess with it... -DaveM
+/*
+ * The multiplicative buffer cache hash function from Chuck Lever's
+ * paper: http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ * Page 6 of the paper describes the behavior of various buffer cache
+ * hashes, including the shift/add function replaced here.
+ *
+ * That this hash makes no attempt to mix the bits of dev is somewhat
+ * worrying, but I don't have the resources to investigate whether
+ * mixing them would be worthwhile.
+ *
+ * -- wli
+ */
-#define _hashfn(dev,block)	\
-	((((dev)<<(bh_hash_shift - 6)) ^ ((dev)<<(bh_hash_shift - 9))) ^ \
-	 (((block)<<(bh_hash_shift - 6)) ^ ((block) >> 13) ^ \
-	  ((block) << (bh_hash_shift - 12))))
+
+static inline unsigned long _hashfn(unsigned long dev, unsigned long block)
+{
+	return ((dev + block) * 2654435761UL)
+			>> (BITS_PER_LONG - bh_hash_shift);
+}
+
 #define hash(dev,block) hash_table[(_hashfn(HASHDEV(dev),block) & bh_hash_mask)]
 
 static inline void __insert_into_hash_list(struct buffer_head *bh)
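
For reference, a minimal user-space sketch of the multiplicative hash
introduced above, assuming a 32-bit unsigned long as on i386; the bucket
count and sample keys are illustrative, not taken from the patch:

	#include <stdio.h>
	#include <stdint.h>

	#define SHIFT 10			/* 2^SHIFT buckets, standing in for bh_hash_shift */
	#define GOLDEN_RATIO_PRIME 2654435761U	/* a prime near 2^32 / phi, per Lever's paper */

	/* multiply mod 2^32, then keep the top SHIFT bits of the product */
	static uint32_t mult_hash(uint32_t dev, uint32_t block)
	{
		return ((dev + block) * GOLDEN_RATIO_PRIME) >> (32 - SHIFT);
	}

	int main(void)
	{
		uint32_t block;

		/* sequential block numbers on one device scatter widely */
		for (block = 0; block < 8; block++)
			printf("block %u -> bucket %u\n",
			       (unsigned) block,
			       (unsigned) mult_hash(3, block));
		return 0;
	}

Taking the top bits of the product is what makes the golden-ratio
multiplier effective: consecutive keys land roughly 0.618 of the table
apart instead of in adjacent buckets.
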
diff -Nru linux/fs/inode.c linux-2.4.19-pre5-mjc/fs/inode.c
--- linux/fs/inode.c	Sat Apr  6 09:11:20 2002
+++ linux-2.4.19-pre5-mjc/fs/inode.c	Sat Apr  6 09:20:34 2002
@@ -901,14 +901,30 @@
 	return inode;
 }
 
+/*
+ * This is the multiplicative page cache hash function from Chuck
+ * Lever's paper, adapted to the inode hash table; its properties
+ * here differ somewhat from those described in the paper.
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ * iput() appears to be showing up in profiles, so I put what appears
+ * to be a theoretically sounder hash function here.
+ *
+ * Testing by Anton Blanchard and Rusty Russell has verified that this
+ * inode cache hash function distributes keys well under heavy stress.
+ *
+ * Anton, Rusty: please insert a comment here describing the results
+ * of that testing.
+ *
+ * -- wli
+ */
 static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
 {
-	unsigned long tmp = i_ino + ((unsigned long) sb / L1_CACHE_BYTES);
-	tmp = tmp + (tmp >> I_HASHBITS);
-	return tmp & I_HASHMASK;
-}
+	unsigned long hashval = i_ino + (unsigned long) sb;
+
+	hashval = (hashval * 2654435761UL) >> (BITS_PER_LONG - I_HASHBITS);
 
-/* Yeah, I know about quadratic hash. Maybe, later. */
+	return hashval & I_HASHMASK;
+}
 
 /**
  *	iunique - get a unique inode number
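
The old inode hash divided the super_block pointer by L1_CACHE_BYTES,
presumably to discard low pointer bits that alignment makes constant.
The multiplicative hash makes that scaling unnecessary: the multiply
spreads even a one-cache-line pointer delta across the high bits that
the shift extracts. A rough user-space illustration, with a
hypothetical table size and made-up pointer values:

	#include <stdio.h>
	#include <stdint.h>

	#define I_HASHBITS 14		/* illustrative; the kernel sizes this at boot */
	#define I_HASHMASK ((1U << I_HASHBITS) - 1)

	static uint32_t inode_hash(uint32_t sb, uint32_t i_ino)
	{
		uint32_t hashval = i_ino + sb;

		hashval = (hashval * 2654435761U) >> (32 - I_HASHBITS);
		return hashval & I_HASHMASK;
	}

	int main(void)
	{
		/* made-up, cache-line-aligned super_block addresses */
		uint32_t sb[] = { 0xc1000000, 0xc1000040, 0xc1000080 };
		int i;

		/* the same inode number on neighboring superblocks still
		 * lands in well-separated buckets */
		for (i = 0; i < 3; i++)
			printf("sb %#x ino 2 -> bucket %u\n",
			       (unsigned) sb[i],
			       (unsigned) inode_hash(sb[i], 2));
		return 0;
	}

Note also that the final & I_HASHMASK looks redundant here, since
shifting right by BITS_PER_LONG - I_HASHBITS already leaves only
I_HASHBITS bits.
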
diff -Nru linux/include/linux/dcache.h linux-2.4.19-pre5-mjc/include/linux/dcache.h
--- linux/include/linux/dcache.h	Sat Apr  6 08:40:34 2002
+++ linux-2.4.19-pre5-mjc/include/linux/dcache.h	Sat Apr  6 09:20:34 2002
@@ -36,17 +36,58 @@
 };
 extern struct dentry_stat_t dentry_stat;
 
-/* Name hashing routines. Initial hash value */
-/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash()		0
+/*
+ * Fowler, Noll, & Vo hash function
+ * -- wli
+ */
 
-/* partial hash update function. Assume roughly 4 bits per character */
-static __inline__ unsigned long partial_name_hash(unsigned long c, unsigned long prevhash)
+/*
+ * Initial hash value for the Fowler, Noll, & Vo hash function.
+ * FreeBSD appears to use 33554467UL decimal / 0x2000023UL hex.
+ * Other sources (Noll's web page) describe an offset basis of
+ * 2166136261UL decimal / 0x811C9DC5UL hex, the value used here.
+ * -- wli
+ */
+#define init_name_hash()		0x811C9DC5UL
+
+/*
+ * This is a multiplicative hash function using the prime 16777619
+ * (0x01000193).  The Fowler, Noll, and Vo hash function is rated
+ * the best in string hashing benchmarks published on the gcc-patches
+ * and NetBSD mailing lists.
+ * -- wli
+ */
+static __inline__ unsigned long partial_name_hash(unsigned long c,
+						unsigned long prevhash)
 {
-	return (prevhash + (c << 4) + (c >> 4)) * 11;
+	/*
+	 * The multiplicative form of the FNV-1 step:
+	 * -- wli
+	 */
+	return (prevhash * 0x01000193UL) ^ c;
+
+	/*
+	 * A more elaborate version would decode each set bit of
+	 * 0x01000193UL and expand the multiplication into explicit
+	 * shift and add operations, avoiding reliance on the compiler
+	 * for this.
+	 * The register pressure that generates may not be a win on
+	 * i386 versus an actual multiply, but results remain to be
+	 * seen.
+	 *
+	 * prevhash +=	  (prevhash << 24)
+	 *		+ (prevhash << 8)
+	 *		+ (prevhash << 7)
+	 *		+ (prevhash << 4)
+	 *		+ (prevhash << 1);
+	 * return prevhash ^ c;
+	 */
 }
 
-/* Finally: cut down the number of bits to a int value (and try to avoid losing bits) */
+/*
+ * Finally: cut the number of bits down to an int value (and try to
+ * avoid losing bits).
+ */
 static __inline__ unsigned long end_name_hash(unsigned long hash)
 {
 	return (unsigned int) hash;
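
Put together, the three helpers implement FNV-1 over the name bytes
(multiply by the prime first, then XOR in the character; FNV-1a does
the XOR first). A minimal sketch of the composition, with an arbitrary
file name:

	#include <stdio.h>
	#include <stdint.h>

	#define FNV_OFFSET_BASIS 0x811C9DC5U	/* init_name_hash() above */
	#define FNV_PRIME	 0x01000193U	/* the prime 16777619 */

	/* composes init_name_hash(), partial_name_hash(), end_name_hash() */
	static unsigned int fnv1_name_hash(const char *name)
	{
		uint32_t hash = FNV_OFFSET_BASIS;

		while (*name) {
			/* one partial_name_hash() step: multiply, then XOR */
			hash = (hash * FNV_PRIME) ^ (unsigned char) *name++;
		}
		return hash;	/* end_name_hash(): truncate to an int's width */
	}

	int main(void)
	{
		printf("%#x\n", fnv1_name_hash("vmlinux"));
		return 0;
	}
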
diff -Nru linux/kernel/user.c linux-2.4.19-pre5-mjc/kernel/user.c
--- linux/kernel/user.c	Wed Nov 29 01:43:39 2000
+++ linux-2.4.19-pre5-mjc/kernel/user.c	Sat Apr  6 09:20:34 2002
@@ -19,7 +19,14 @@
 #define UIDHASH_BITS		8
 #define UIDHASH_SZ		(1 << UIDHASH_BITS)
 #define UIDHASH_MASK		(UIDHASH_SZ - 1)
-#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) ^ uid) & UIDHASH_MASK)
+
+/*
+ * Multiplicative hash function borrowed from Chuck Lever's paper.
+ * The effects of this replacement have not been measured.
+ * -- wli
+ */
+#define __uidhashfn(uid) \
+	(((2654435761UL*(uid)) >> (BITS_PER_LONG-UIDHASH_BITS)) & UIDHASH_MASK)
 #define uidhashentry(uid)	(uidhash_table + __uidhashfn(uid))
 
 static kmem_cache_t *uid_cachep;
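
The same golden-ratio multiplier once more. A user-space sketch with
the macro transcribed for the i386 case (BITS_PER_LONG == 32); the uid
range is arbitrary:

	#include <stdio.h>
	#include <stdint.h>

	#define BITS_PER_LONG	32	/* assume i386 */
	#define UIDHASH_BITS	8
	#define UIDHASH_SZ	(1 << UIDHASH_BITS)
	#define UIDHASH_MASK	(UIDHASH_SZ - 1)
	#define __uidhashfn(uid) \
		((((uint32_t) 2654435761U * (uid)) \
			>> (BITS_PER_LONG - UIDHASH_BITS)) & UIDHASH_MASK)

	int main(void)
	{
		uint32_t uid;

		/* sequential uids, the common allocation pattern, fan
		 * out across the table instead of clustering */
		for (uid = 500; uid < 508; uid++)
			printf("uid %u -> bucket %u\n",
			       (unsigned) uid, (unsigned) __uidhashfn(uid));
		return 0;
	}

As in the inode hash, the trailing & UIDHASH_MASK is redundant: the
shift by BITS_PER_LONG - UIDHASH_BITS already leaves only UIDHASH_BITS
bits.
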
