X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;ds=sidebyside;f=arch%2Fx86%2Fmm%2Fgup.c;fp=arch%2Fx86%2Fmm%2Fgup.c;h=738e6593799dcce1973d5d082b6c0a732107cbcd;hb=ab09809f2eee1dc2d8f8bea636e77d176ba6c648;hp=71da1bca13cbb9fde26f59100f1b9b3b30f3a8e5;hpb=1a45dcfe2525e9432cb4aba461d4994fc2befe42;p=karo-tx-linux.git

diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 71da1bca13cb..738e6593799d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 #else
 /*
  * With get_user_pages_fast, we walk down the pagetables without taking
- * any locks. For this we would like to load the pointers atoimcally,
+ * any locks. For this we would like to load the pointers atomically,
  * but that is not possible (without expensive cmpxchg8b) on PAE. What
  * we do have is the guarantee that a pte will only either go from not
  * present to present, or present to not present or both -- it will not
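
For context, the hunk above sits in the PAE variant of gup_get_pte(), and the comment it fixes motivates a retry-based lockless read: on 32-bit PAE a pte is 64 bits wide, so a plain load is two 32-bit reads that can tear. The following is a minimal userspace sketch of that technique under the comment's stated guarantee (a pte only transitions between present and not-present while interrupts are held off); the names my_pte_t, read_pte_lockless, and read_barrier are illustrative, not the kernel's API, though the kernel code in this file uses the same read-low, read-high, re-check-low pattern with smp_rmb().

#include <stdint.h>

/* Illustrative stand-in for the kernel's split 64-bit PAE pte. */
typedef struct {
    volatile uint32_t pte_low;
    volatile uint32_t pte_high;
} my_pte_t;

/* Read barrier between the two halves; the kernel uses smp_rmb() here. */
#define read_barrier() __atomic_thread_fence(__ATOMIC_ACQUIRE)

static inline my_pte_t read_pte_lockless(const my_pte_t *ptep)
{
    my_pte_t pte;

    do {
        pte.pte_low = ptep->pte_low;
        read_barrier();
        pte.pte_high = ptep->pte_high;
        read_barrier();
        /*
         * If the low half changed underneath us, the two reads may have
         * seen halves of different ptes: retry. Because a pte can only go
         * not-present -> present or present -> not-present (never jump to
         * a different present page without a TLB flush in between), a
         * stable low half means the pair we read was consistent.
         */
    } while (pte.pte_low != ptep->pte_low);

    return pte;
}

This sidesteps the expensive cmpxchg8b the comment mentions: the reader pays only two ordered 32-bit loads plus an occasional retry, which is why the fast-gup path can walk page tables without taking any locks.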