From: Ingo Molnar
Date: Fri, 13 Feb 2009 08:47:32 +0000 (+0100)
Subject: Merge branches 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess...
X-Git-Url: http://www.pilppa.org/gitweb/gitweb.cgi?a=commitdiff_plain;h=7032e8696726354d6180d8a2d17191f958cd93ae;p=linux-2.6-omap-h63xx.git

Merge branches 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess' and 'x86/urgent' into x86/core
---

7032e8696726354d6180d8a2d17191f958cd93ae
diff --cc arch/x86/mm/pat.c
index 9127e31c726,8b08fb95527,aebbf67a79d..05f9aef6818
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@@@ -342,23 -333,11 -360,15 +369,15 @@@@ int reserve_memtype(u64 start, u64 end
 			req_type & _PAGE_CACHE_MASK);
 	}
 
-	is_range_ram = pagerange_is_ram(start, end);
+	if (new_type)
+		*new_type = actual_type;
+
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return reserve_ram_pages_type(start, end, req_type,
-					new_type);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
++	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type, new_type);
++		return reserve_ram_pages_type(start, end, req_type,
++				new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@@@ -455,19 -437,11 -465,11 +474,11 @@@@ int free_memtype(u64 start, u64 end
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return free_ram_pages_type(start, end);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
-	is_range_ram = pagerange_is_ram(start, end);
++	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
diff --cc mm/mlock.c
index 037161d61b4,2904a347e47,2b57f7e6039..cbe9e0581b7
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@@@ -310,11 -314,20 -310,8 +310,11 @@@@ long mlock_vma_pages_range(struct vm_ar
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
-		long error;
-		downgrade_write(&mm->mmap_sem);
-		error = __mlock_vma_pages_range(vma, start, end, 1);
-		return __mlock_vma_pages_range(vma, start, end, 1);
++		__mlock_vma_pages_range(vma, start, end, 1);
+
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		vma = find_vma(mm, start);
-		/* non-NULL vma must contain @start, but need to check @end */
-		if (!vma || end > vma->vm_end)
-			return -ENOMEM;
-
-		return 0;	/* hide other errors from mmap(), et al */
++		/* Hide errors from mmap() and other callers */
++		return 0;
 	}
 
 	/*
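
For context, the following is a minimal, self-contained C sketch of the control flow that the merged reserve_memtype()/free_memtype() hunks converge on: the range is classified once with pat_pagerange_is_ram(), a fully-RAM range goes to the per-page tracking path, a mixed range is rejected with -EINVAL, and everything else falls through to the linear memtype_list path. This is not the actual mm/pat.c code; the helpers below are placeholder stubs whose bodies and exact signatures are assumptions, only the branch structure mirrors the post-merge hunks above.

/*
 * Sketch only -- not the real mm/pat.c.  The stubs stand in for the
 * kernel helpers named in the diff; their bodies are invented here.
 */
#include <errno.h>
#include <stdio.h>

typedef unsigned long long u64;

/* Stub standing in for the real range-is-RAM check. */
static int pat_pagerange_is_ram(u64 start, u64 end)
{
	(void)start;
	(void)end;
	return 1;	/* pretend the whole range is RAM */
}

/* Stub standing in for the RAM-backed tracking path. */
static int reserve_ram_pages_type(u64 start, u64 end,
				  unsigned long req_type,
				  unsigned long *new_type)
{
	if (new_type)
		*new_type = req_type;
	printf("tracking RAM range [0x%llx, 0x%llx)\n", start, end);
	return 0;
}

/* Post-merge shape of the fast path: one unconditional RAM check. */
static int reserve_memtype_sketch(u64 start, u64 end,
				  unsigned long req_type,
				  unsigned long *new_type)
{
	int is_range_ram = pat_pagerange_is_ram(start, end);

	if (is_range_ram == 1)		/* entirely RAM: per-page tracking */
		return reserve_ram_pages_type(start, end, req_type, new_type);
	else if (is_range_ram < 0)	/* mixed RAM/non-RAM: reject */
		return -EINVAL;

	/* Non-RAM range: the real code continues with memtype_list here. */
	return 0;
}

int main(void)
{
	unsigned long new_type = 0;

	return reserve_memtype_sketch(0x100000, 0x200000, 0, &new_type);
}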