android_kernel_oneplus_msm8998/arch/m68k/include/asm/mcf_pgalloc.h
Greg Ungerer 8f2f46791e m68k: fix "bad page state" oops on ColdFire boot
[ Upstream commit ecd60532e060e45c63c57ecf1c8549b1d656d34d ]

Booting a ColdFire m68k core with MMU enabled causes a "bad page state"
oops since commit 1d40a5ea01d5 ("mm: mark pages in use for page tables"):

 BUG: Bad page state in process sh  pfn:01ce2
 page:004fefc8 count:0 mapcount:-1024 mapping:00000000 index:0x0
 flags: 0x0()
 raw: 00000000 00000000 00000000 fffffbff 00000000 00000100 00000200 00000000
 raw: 039c4000
 page dumped because: nonzero mapcount
 Modules linked in:
 CPU: 0 PID: 22 Comm: sh Not tainted 4.17.0-07461-g1d40a5ea01d5 #13

Fix by calling pgtable_page_dtor() in our __pte_free_tlb() code path,
so that the PG_table flag is cleared before we free the pte page.

Note that I had to change the declaration of pte_free() from extern to
static. Otherwise you get a lot of warnings like this:

./arch/m68k/include/asm/mcf_pgalloc.h:80:2: warning: ‘pgtable_page_dtor’ is static but used in inline function ‘pte_free’ which is not static
  pgtable_page_dtor(page);
  ^

Making it static is also consistent with the other m68k pgalloc
definitions of pte_free().

Signed-off-by: Greg Ungerer <gerg@linux-m68k.org>
CC: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2018-08-24 13:26:57 +02:00
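
The substance of the change, sketched as an approximate diff (reconstructed
from the description above, not the verbatim upstream patch): pte_free()
becomes static inline, and both it and __pte_free_tlb() now call
pgtable_page_dtor() before freeing the pte page.

 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
				   unsigned long address)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }

-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }

The resulting file is shown in full below.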

#ifndef M68K_MCF_PGALLOC_H
#define M68K_MCF_PGALLOC_H

#include <asm/tlb.h>
#include <asm/tlbflush.h>

extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);

	if (!page)
		return NULL;

	memset((void *)page, 0, PAGE_SIZE);
	return (pte_t *) (page);
}

extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })

#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)

#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
	(unsigned long)(page_address(page)))

#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))

#define pmd_pgtable(pmd) pmd_page(pmd)
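
/*
 * Undo pgtable_page_ctor() before handing the pte page back to the page
 * allocator; freeing a page still marked as a page-table page triggers
 * the "bad page state" oops described in the commit message above.
 */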
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
				  unsigned long address)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)

static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
	pte_t *pte;

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	pte = kmap(page);
	if (pte) {
		clear_page(pte);
		__flush_page_to_ram(pte);
		flush_tlb_kernel_page(pte);
		nocache_page(pte);
	}
	kunmap(page);

	return page;
}

static inline void pte_free(struct mm_struct *mm, struct page *page)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

/*
 * In our implementation, each pgd entry contains 1 pmd that is never allocated
 * or freed. pgd_present is always 1, so this should never be called. -NL
 */
#define pmd_free(mm, pmd) BUG()

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}
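
/*
 * A fresh pgd starts out as a copy of the kernel master page table
 * (swapper_pg_dir), with the user-space portion cleared.
 */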
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd;

	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
	if (!new_pgd)
		return NULL;
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
	return new_pgd;
}

#define pgd_populate(mm, pmd, pte) BUG()

#endif /* M68K_MCF_PGALLOC_H */