View | Details | Raw Unified | Return to bug 64652
Collapse All | Expand All

(-)linux-2.4.21/include/linux/mm.h (-3 / +13 lines)
Lines 722-735 static inline int expand_stack(struct vm Link Here
722
	int err = -ENOMEM;
722
	int err = -ENOMEM;
723
723
724
	/*
724
	/*
725
	 * vma->vm_start/vm_end cannot change under us because the caller is required
725
	 * vma->vm_start/vm_end cannot change under us because the caller
726
	 * to hold the mmap_sem in write mode. We need to get the spinlock only
726
	 * is required to hold the mmap_sem in read mode.  We need the
727
	 * before relocating the vma range ourself.
727
	 * page_table_lock lock to serialize against concurrent expand_stacks.
728
	 */
728
	 */
729
	address &= PAGE_MASK;
729
	address &= PAGE_MASK;
730
	if (prev_vma && prev_vma->vm_end + (heap_stack_gap << PAGE_SHIFT) > address)
730
	if (prev_vma && prev_vma->vm_end + (heap_stack_gap << PAGE_SHIFT) > address)
731
		goto out;
731
		goto out;
732
	spin_lock(&vma->vm_mm->page_table_lock);
732
	spin_lock(&vma->vm_mm->page_table_lock);
733
 
734
	/* already expanded while we were spinning? */
735
	if ((vma->vm_flags & VM_GROWSDOWN) && vma->vm_start <= address) {
736
		spin_unlock(&vma->vm_mm->page_table_lock);
737
		return 0;
738
	} else if ((vma->vm_flags & VM_GROWSUP) && vma->vm_start >= address) {
739
		spin_unlock(&vma->vm_mm->page_table_lock);
740
		return 0;
741
	}
742
733
	grow = (vma->vm_start - address) >> PAGE_SHIFT;
743
	grow = (vma->vm_start - address) >> PAGE_SHIFT;
734
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
744
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
735
	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur)
745
	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur)

Return to bug 64652