View | Details | Raw Unified | Return to bug 105108
Collapse All | Expand All

(-)file_not_specified_in_diff (-3 / +7 lines)
Line  Link Here
 [PATCH] x86_64: TASK_SIZE fixes for compatibility mode processes
 [PATCH] x86_64: TASK_SIZE fixes for compatibility mode processes
1
 
1
 
2
 A malicious 32bit app can have an elf section at 0xffffe000.  During
2
 A malicious 32bit app can have an elf section at 0xffffe000.  During
3
 exec of this app, we will have a memory leak as insert_vm_struct() is
3
 exec of this app, we will have a memory leak as insert_vm_struct() is
4
 not checking for return value in syscall32_setup_pages() and thus not
4
 not checking for return value in syscall32_setup_pages() and thus not
5
 freeing the vma allocated for the vsyscall page.
5
 freeing the vma allocated for the vsyscall page.
6
 
6
 
7
 Check the return value and free the vma in case of failure.
7
 Check the return value and free the vma in case of failure.
8
 
8
 
9
 Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
9
 Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
10
 Signed-off-by: Linus Torvalds <torvalds@osdl.org>
10
 Signed-off-by: Linus Torvalds <torvalds@osdl.org>
11
--- a/arch/x86_64/ia32/syscall32.c
11
+++ b/arch/x86_64/ia32/syscall32.c
12
Lines 57-62 int syscall32_setup_pages(struct linux_b Link Here
57
	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
57
	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
58
	struct vm_area_struct *vma;
58
	struct vm_area_struct *vma;
59
	struct mm_struct *mm = current->mm;
59
	struct mm_struct *mm = current->mm;
60
	int ret;
60
61
61
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
62
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
62
	if (!vma)
63
	if (!vma)
Lines 78-84 int syscall32_setup_pages(struct linux_b Link Here
78
	vma->vm_mm = mm;
79
	vma->vm_mm = mm;
79
80
80
	down_write(&mm->mmap_sem);
81
	down_write(&mm->mmap_sem);
81
	insert_vm_struct(mm, vma);
82
	if ((ret = insert_vm_struct(mm, vma))) {
83
		up_write(&mm->mmap_sem);
84
		kmem_cache_free(vm_area_cachep, vma);
85
		return ret;
86
	}
82
	mm->total_vm += npages;
87
	mm->total_vm += npages;
83
	up_write(&mm->mmap_sem);
88
	up_write(&mm->mmap_sem);
84
	return 0;
89
	return 0;

Return to bug 105108