
From: Andy Whitcroft <apw@shadowen.org>

ppc64 has constraints on the mixing of small and large pages, such that
they cannot be mixed in the same 256MB segment.  This patch provides an
architecture-specific arch_get_unmapped_area_topdown() which implements
these constraints.
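
The 256MB granularity means two addresses land in the same segment
exactly when they agree in the bits above SID_SHIFT (28, since
1UL << 28 is 256MB).  A minimal userspace sketch of that check
(same_segment is a name made up for this illustration; only SID_SHIFT
mirrors the kernel's ppc64 value):

	#include <stdio.h>

	/* ppc64 segment size: 1UL << 28 == 256MB (the kernel's SID_SHIFT) */
	#define SID_SHIFT	28

	/* illustrative helper: do two addresses share a 256MB segment? */
	static int same_segment(unsigned long a, unsigned long b)
	{
		return (a >> SID_SHIFT) == (b >> SID_SHIFT);
	}

	int main(void)
	{
		unsigned long huge  = 0x30000000UL; /* in a hugepage segment */
		unsigned long small = 0x30100000UL; /* candidate small mapping */

		/* prints 1: a small mapping here would share the segment */
		printf("same segment: %d\n", same_segment(huge, small));
		return 0;
	}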

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/ppc64/mm/hugetlbpage.c |  109 ++++++++++++++++++++++++++++++++++++
 25-akpm/include/asm-ppc64/pgtable.h |    1 
 2 files changed, 110 insertions(+)

diff -puN arch/ppc64/mm/hugetlbpage.c~ppc64-topdown-support-arch-specific-get_unmapped_area arch/ppc64/mm/hugetlbpage.c
--- 25/arch/ppc64/mm/hugetlbpage.c~ppc64-topdown-support-arch-specific-get_unmapped_area	Wed Sep  1 15:08:39 2004
+++ 25-akpm/arch/ppc64/mm/hugetlbpage.c	Wed Sep  1 15:08:39 2004
@@ -527,6 +527,115 @@ full_search:
 	return -ENOMEM;
 }
 
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ *
+ * Because we have an exclusive hugepage region which lies within the
+ * normal user address space, we have to take special measures to make
+ * non-huge mmap()s evade the hugepage reserved regions.
+ */
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma, *prev_vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long base = mm->mmap_base, addr = addr0;
+	int first_time = 1;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	/* don't allow allocations above current base */
+	if (mm->free_area_cache > base)
+		mm->free_area_cache = base;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+				(!vma || addr + len <= vma->vm_start)
+				&& !is_hugepage_only_range(addr, len))
+			return addr;
+	}
+
+try_again:
+	/* make sure it can fit in the remaining address space */
+	if (mm->free_area_cache < len)
+		goto fail;
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = (mm->free_area_cache - len) & PAGE_MASK;
+	do {
+hugepage_recheck:
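+		/*
+		 * If the candidate range overlaps a 256MB segment
+		 * reserved for hugepages, step down to end just below
+		 * that segment and recheck: the new address may land
+		 * in another reserved segment.
+		 */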
+		if (touches_hugepage_low_range(addr, len)) {
+			addr = (addr & ((~0) << SID_SHIFT)) - len;
+			goto hugepage_recheck;
+		} else if (touches_hugepage_high_range(addr, len)) {
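+			/* drop to just below the high hugepage area */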
+			addr = TASK_HPAGE_BASE - len;
+		}
+
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * i.e. return with success:
+		 */
+		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+			return addr;
+
+		/*
+		 * new region fits between prev_vma->vm_end and
+		 * vma->vm_start, use it:
+		 */
+		if (addr + len <= vma->vm_start &&
+				(!prev_vma || (addr >= prev_vma->vm_end)))
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		else
+			/* pull free_area_cache down to the first hole */
+			if (mm->free_area_cache == vma->vm_end)
+				mm->free_area_cache = vma->vm_start;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start - len;
+	} while (len <= vma->vm_start);
+
+fail:
+	/*
+	 * if hint left us with no space for the requested
+	 * mapping then try again:
+	 */
+	if (first_time) {
+		mm->free_area_cache = base;
+		first_time = 0;
+		goto try_again;
+	}
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = base;
+
+	return addr;
+}
+
 static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 {
 	unsigned long addr = 0;
diff -puN include/asm-ppc64/pgtable.h~ppc64-topdown-support-arch-specific-get_unmapped_area include/asm-ppc64/pgtable.h
--- 25/include/asm-ppc64/pgtable.h~ppc64-topdown-support-arch-specific-get_unmapped_area	Wed Sep  1 15:08:39 2004
+++ 25-akpm/include/asm-ppc64/pgtable.h	Wed Sep  1 15:08:39 2004
@@ -167,6 +167,7 @@ int hash_huge_page(struct mm_struct *mm,
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)	-1
_
