Tweak a couple of rmap.c's parameters for a (theoretical) benefit: give NRPTE a floor of 7 pte slots per pte_chain node, so a single node can hold a whole mean-length chain, and hardware-cacheline-align the pte_chain slab cache.
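
For illustration (not part of the patch), here is a minimal userspace sketch
of the arithmetic behind the new NRPTE floor. The 16-byte L1 line and 4-byte
pte_addr_t are hypothetical stand-ins for the kernel's config-dependent
constants; the floor only changes anything where the line has room for fewer
than 7 slots after the next pointer.

	#include <stdio.h>

	/* Hypothetical stand-ins for config-dependent kernel constants. */
	#define L1_CACHE_BYTES	16
	typedef unsigned int pte_addr_t;	/* 4 bytes */

	#define ptemax(m,n)	((m) < (n) ? (n) : (m))
	#define NRPTE_OLD	((L1_CACHE_BYTES - sizeof(void *)) / sizeof(pte_addr_t))
	#define NRPTE_NEW	ptemax((size_t)7, NRPTE_OLD)

	int main(void)
	{
		/* On a 32-bit host this prints old = 3, new = 7: the floor
		 * guarantees one node can hold a mean-length (~6 PTE) chain. */
		printf("old NRPTE = %zu\n", NRPTE_OLD);
		printf("new NRPTE = %zu\n", NRPTE_NEW);
		return 0;
	}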

 rmap.c |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)


diff -urpN wli-2.5.51-bk1-13/mm/rmap.c wli-2.5.51-bk1-14/mm/rmap.c
--- wli-2.5.51-bk1-13/mm/rmap.c	2002-12-09 18:46:22.000000000 -0800
+++ wli-2.5.51-bk1-14/mm/rmap.c	2002-12-13 11:06:17.000000000 -0800
@@ -42,9 +42,11 @@
  * it belongs to and the offset within that process.
  *
  * We use an array of pte pointers in this structure to minimise cache misses
- * while traversing reverse maps.
+ * while traversing reverse maps. The length distribution of pte_chains
+ * (in PTEs) has a mean of around 6 and a long tail.
  */
-#define NRPTE ((L1_CACHE_BYTES - sizeof(void *))/sizeof(pte_addr_t))
+#define ptemax(m,n) ((m) < (n) ? (n) : (m))
+#define NRPTE ptemax(7,(L1_CACHE_BYTES - sizeof(void *))/sizeof(pte_addr_t))
 
 struct pte_chain {
 	struct pte_chain *next;
@@ -527,7 +529,7 @@ void __init pte_chain_init(void)
 	pte_chain_cache = kmem_cache_create(	"pte_chain",
 						sizeof(struct pte_chain),
 						0,
-						0,
+						SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
 						pte_chain_ctor,
 						NULL);
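
As a rough model of what the new slab flags buy (again with hypothetical
stand-in constants, and glossing over everything else the slab allocator
does): SLAB_HWCACHE_ALIGN rounds each object up to a cache-line multiple so
objects never start mid-line, and SLAB_MUST_HWCACHE_ALIGN makes that
alignment mandatory rather than a best-effort hint.

	#include <stdio.h>
	#include <stddef.h>

	#define L1_CACHE_BYTES	16	/* hypothetical, as above */
	#define NRPTE		7
	typedef unsigned int pte_addr_t;

	/* Userspace mirror of the kernel's struct pte_chain. */
	struct pte_chain {
		struct pte_chain *next;
		pte_addr_t ptes[NRPTE];
	};

	/* What HWCACHE_ALIGN does to the object stride: round up to a line. */
	static size_t hwcache_align(size_t sz)
	{
		return (sz + L1_CACHE_BYTES - 1) & ~((size_t)L1_CACHE_BYTES - 1);
	}

	int main(void)
	{
		size_t raw = sizeof(struct pte_chain);

		printf("raw pte_chain size:  %zu bytes\n", raw);
		printf("aligned slab stride: %zu bytes (multiple of %d)\n",
		       hwcache_align(raw), L1_CACHE_BYTES);
		return 0;
	}

The point is that without the alignment, objects packed at their raw size
would regularly straddle two lines, so a chain walk could touch two cache
lines per node instead of the minimum, defeating the purpose of the pte
array.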