From 8c98573eaf8720b95da81faffd4225cf1519960f Mon Sep 17 00:00:00 2001
From: Shiraz Hashim
Date: Mon, 16 Jan 2017 17:52:43 +0530
Subject: [PATCH 1/3] ARM: dts: msm: configure default CMA region for sdm630

Configure the size of the default CMA region to 32 MB in the DT for
sdm630. Configuring the default CMA region in the DT is cleaner than
using CONFIG_CMA_SIZE_MBYTES, since the size is easier to set per
target and the allocation ranges can be specified in the DT as well.

Change-Id: I329ae0aa9cffeed7c30e44749c15fbb83c58b599
Signed-off-by: Shiraz Hashim
---
 arch/arm/boot/dts/qcom/sdm630.dtsi | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 47b40dc713dc..eebb27b8f196 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -320,6 +320,17 @@
 		alignment = <0x0 0x400000>;
 		size = <0x0 0x5c00000>;
 	};
+
+	/* global autoconfigured region for contiguous allocations */
+	linux,cma {
+		compatible = "shared-dma-pool";
+		alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+		reusable;
+		alignment = <0x0 0x400000>;
+		size = <0x0 0x2000000>;
+		linux,cma-default;
+	};
+
 };
 };
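Note on usage: with linux,cma-default present, a device that does not bind
its own memory-region falls back to this pool through the standard DMA API.
A minimal sketch of a consumer follows (example_cma_alloc and its device are
hypothetical, not part of the patch; only the fallback behaviour is being
illustrated):

	#include <linux/dma-mapping.h>

	/*
	 * With no per-device pool attached, a large coherent allocation
	 * is backed by the default 32 MB linux,cma region declared above.
	 */
	static void *example_cma_alloc(struct device *dev, size_t size,
				       dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}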
From 9c2442a7a9578e641f7b1c9de887225a02786c89 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Fri, 5 Apr 2013 14:12:53 -0700
Subject: [PATCH 2/3] arm: highmem: Add support for flushing kmap_atomic
 mappings

The highmem code provides kmap_flush_unused to ensure all kmap
mappings are really removed if they are unused. This code does not
handle kmap_atomic mappings since they are managed separately. This
presents an issue for any code which relies on having absolutely no
mappings for a particular page. Rather than pay the penalty of having
CONFIG_DEBUG_HIGHMEM on all the time, add functionality to remove the
kmap_atomic mappings in a similar way to kmap_flush_unused.

Change-Id: Ieb25da809b377b1fae1629e2cb75f8aebc1c1bca
Signed-off-by: Laura Abbott
---
 arch/arm/Kconfig      |  4 ++++
 arch/arm/mm/highmem.c | 56 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8d3d7a283eed..56961334bb7e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -230,6 +230,9 @@ config NEED_RET_TO_USER
 config ARCH_MTD_XIP
 	bool
 
+config ARCH_WANT_KMAP_ATOMIC_FLUSH
+	bool
+
 config VECTORS_BASE
 	hex
 	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -652,6 +655,7 @@ config ARCH_QCOM
 	select SPARSE_IRQ
 	select USE_OF
 	select PINCTRL
+	select ARCH_WANT_KMAP_ATOMIC_FLUSH
 	help
 	  Support for Qualcomm MSM/QSD based systems.  This runs on the
 	  apps processor of the MSM/QSD and depends on a shared memory
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index d02f8187b1cc..5d73327f8491 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
@@ -147,3 +148,58 @@ void *kmap_atomic_pfn(unsigned long pfn)
 
 	return (void *)vaddr;
 }
+
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+static void kmap_remove_unused_cpu(int cpu)
+{
+	int start_idx, idx, type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx();
+	start_idx = type + 1 + KM_TYPE_NR * cpu;
+
+	for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
+		unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		pte_t ptep;
+
+		ptep = get_top_pte(vaddr);
+		if (pte_val(ptep))
+			set_top_pte(vaddr, __pte(0));
+	}
+	pagefault_enable();
+}
+
+static void kmap_remove_unused(void *unused)
+{
+	kmap_remove_unused_cpu(smp_processor_id());
+}
+
+void kmap_atomic_flush_unused(void)
+{
+	on_each_cpu(kmap_remove_unused, NULL, 1);
+}
+
+static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_DYING:
+		kmap_remove_unused_cpu((long)hcpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block hotplug_kmap_atomic_notifier = {
+	.notifier_call = hotplug_kmap_atomic_callback,
+};
+
+static int __init init_kmap_atomic(void)
+{
+	return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
+}
+early_initcall(init_kmap_atomic);
+#endif
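Note on usage: kmap_atomic_flush_unused() is the entry point this patch
adds. A hedged sketch of a caller follows (example_release_secure_page is
hypothetical; the pairing with the existing kmap_flush_unused() illustrates
how code that must guarantee zero kernel aliases for a page could use both):

	#include <linux/highmem.h>

	/*
	 * Provided by the platform when ARCH_WANT_KMAP_ATOMIC_FLUSH is
	 * set; repeated here only to keep the sketch self-contained.
	 */
	extern void kmap_atomic_flush_unused(void);

	/*
	 * Drop every highmem alias of a page before handing it to an
	 * entity (e.g. secure firmware) that requires it to be unmapped.
	 */
	static void example_release_secure_page(struct page *page)
	{
		kmap_flush_unused();		/* unused persistent kmaps */
		kmap_atomic_flush_unused();	/* stale kmap_atomic slots */
	}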
From f39623177b1382c8177642e7509d131c6ba90c6f Mon Sep 17 00:00:00 2001
From: Shiraz Hashim
Date: Wed, 7 Oct 2015 18:59:01 +0530
Subject: [PATCH 3/3] arm: mm: program ptes for access restriction

CONFIG_RODATA enforces strict kernel mapping permissions, marking
regions read-only, non-executable, etc. as appropriate. For performance
reasons, however, it assumes that all affected memory regions are
SECTION_SIZE aligned and section mapped. With CONFIG_FORCE_PAGES all
kernel mappings are forced to be page mapped, which breaks
CONFIG_RODATA.

Add support for applying the permissions at page (pte) granularity
when CONFIG_RODATA does not find a section mapping.

Change-Id: I8dbf5c3741836bc63a231d8a471cf0306662993b
Signed-off-by: Shiraz Hashim
---
 arch/arm/mm/init.c | 58 ++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 54 insertions(+), 4 deletions(-)

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 107b5f1b864b..d3d718772381 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -625,6 +625,9 @@ struct section_perm {
 	pmdval_t mask;
 	pmdval_t prot;
 	pmdval_t clear;
+	pteval_t ptemask;
+	pteval_t pteprot;
+	pteval_t pteclear;
 };
 
 static struct section_perm nx_perms[] = {
@@ -634,6 +637,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)_stext,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make init RW (set NX). */
 	{
@@ -641,6 +646,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)_sdata,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
#ifdef CONFIG_DEBUG_RODATA
 	/* Make rodata NX (set RO in ro_perms below). */
@@ -649,6 +656,8 @@ static struct section_perm nx_perms[] = {
 		.end	= (unsigned long)__init_begin,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 #endif
 };
@@ -667,6 +676,8 @@ static struct section_perm ro_perms[] = {
 		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.clear	= PMD_SECT_AP_WRITE,
 #endif
+		.ptemask = ~L_PTE_RDONLY,
+		.pteprot = L_PTE_RDONLY,
 	},
 };
 #endif
@@ -676,6 +687,35 @@
  * copied into each mm). During startup, this is the init_mm. Is only
  * safe to be called with preemption disabled, as under stop_machine().
  */
+struct pte_data {
+	pteval_t mask;
+	pteval_t val;
+};
+
+static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *d)
+{
+	struct pte_data *data = d;
+	pte_t pte;
+
+	pte = __pte((pte_val(*ptep) & data->mask) | data->val);
+	set_pte_ext(ptep, pte, 0);
+
+	return 0;
+}
+
+static inline void pte_update(unsigned long addr, pteval_t mask,
+			      pteval_t prot, struct mm_struct *mm)
+{
+	struct pte_data data;
+
+	data.mask = mask;
+	data.val = prot;
+
+	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
+	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
 static inline void section_update(unsigned long addr, pmdval_t mask,
 				  pmdval_t prot, struct mm_struct *mm)
 {
@@ -724,11 +764,21 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
 		}
 
 		for (addr = perms[i].start;
 		     addr < perms[i].end;
-		     addr += SECTION_SIZE)
-			section_update(addr, perms[i].mask,
-				set ? perms[i].prot : perms[i].clear, mm);
-	}
+		     addr += SECTION_SIZE) {
+			pmd_t *pmd;
+
+			pmd = pmd_offset(pud_offset(pgd_offset(mm, addr),
+						addr), addr);
+			if (pmd_bad(*pmd))
+				section_update(addr, perms[i].mask,
+					set ? perms[i].prot : perms[i].clear, mm);
+			else
+				pte_update(addr, perms[i].ptemask,
+					set ? perms[i].pteprot : perms[i].pteclear,
+					mm);
+		}
+	}
 }
 
 static void update_sections_early(struct section_perm perms[], int n)
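Note on usage: when a region turns out to be page mapped, the new path boils
down to one pte_update() call per SECTION_SIZE step. A minimal sketch of the
equivalent direct call (example_mark_section_ro is hypothetical; addr is
assumed to be SECTION_SIZE aligned and pte-mapped, and init_mm is the
kernel's mm as in the patch):

	/*
	 * Make one section's worth of page-mapped kernel memory read-only
	 * using the pte helpers added above.
	 */
	static void example_mark_section_ro(unsigned long addr)
	{
		pte_update(addr, ~L_PTE_RDONLY, L_PTE_RDONLY, &init_mm);
	}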