iommu: io-pgtable-arm: Use correct bitmask for pgd entry

Ensure that address bits above cfg->ias are masked out before an address is
mapped in a pagetable. This is required in order to map a sign-extended
address through TTBR1; for all other use cases the bits above cfg->ias are
expected to be zero.
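
For illustration only (not part of the patch): a minimal userspace sketch of
the failure mode, assuming cfg->ias = 36 with a 4K granule, so the pgd
resolves pgd_bits = 6 index bits (64 entries) while lower levels resolve
bits_per_level = 9. A TTBR1 address is sign-extended above bit 35, and with
the old start-level mask those high bits spill into the pgd index.

/*
 * Standalone sketch, not kernel code.  Assumed configuration (chosen for
 * the example, not taken from the patch): ias = 36, 4K granule, 3 levels
 * starting at level 1, so the start-level shift is 30 and
 * pgd_bits = 36 - 30 = 6.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t iova = 0xfffffff800001000ULL;  /* sign-extended TTBR1 address */
        unsigned int lvl_shift = 30;            /* start-level (level 1) shift */
        unsigned int bits_per_level = 9;
        unsigned int pgd_bits = 6;

        /*
         * Old start-level mask: ARM_LPAE_PGD_IDX() is 0 for a sub-page pgd,
         * so effectively bits_per_level index bits are kept and the
         * sign-extension bits index past the 64-entry pgd.
         */
        uint64_t old_idx = (iova >> lvl_shift) & ((1u << bits_per_level) - 1);

        /* New start-level mask: only pgd_bits index bits survive. */
        uint64_t new_idx = (iova >> lvl_shift) & ((1u << pgd_bits) - 1);

        printf("old index: %llu (past the 64-entry pgd), new index: %llu\n",
               (unsigned long long)old_idx, (unsigned long long)new_idx);
        return 0;
}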

Change-Id: I2463cef7e0238cf887dcc682977375eb08d6973b
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>

@@ -69,9 +69,12 @@
 #define ARM_LPAE_PGD_IDX(l,d) \
        ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
 
+#define ARM_LPAE_LVL_MASK(l, d) \
+       ((l) == ARM_LPAE_START_LVL(d) ? (1 << (d)->pgd_bits) - 1 : \
+        (1 << (d)->bits_per_level) - 1)
 #define ARM_LPAE_LVL_IDX(a,l,d) \
        (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
-        ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+        ARM_LPAE_LVL_MASK(l, d))
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
 #define ARM_LPAE_BLOCK_SIZE(l,d) \
@@ -197,6 +200,7 @@ struct arm_lpae_io_pgtable {
        struct io_pgtable       iop;
 
        int                     levels;
        unsigned int            pgd_bits;
+       size_t                  pgd_size;
        unsigned long           pg_shift;
        unsigned long           bits_per_level;
@@ -913,6 +917,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
        /* Calculate the actual size of our pgd (without concatenation) */
        pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
        data->pgd_bits = pgd_bits;
+       data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
 
        data->iop.ops = (struct io_pgtable_ops) {
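
As a rough worked example of the sizing above (the configuration — ias = 36,
4K granule, 8-byte arm_lpae_iopte — is an assumption for illustration, and the
derivation of va_bits, bits_per_level and levels comes from the surrounding
setup code, not from this hunk):

/*
 * Standalone sketch, not kernel code: reproduces the pgd sizing for an
 * assumed configuration (ias = 36, 4K granule, 8-byte PTEs).
 */
#include <stdio.h>

int main(void)
{
        unsigned int ias = 36, pg_shift = 12, pte_shift = 3;    /* 8-byte PTEs */

        unsigned int va_bits = ias - pg_shift;                  /* 24 */
        unsigned int bits_per_level = pg_shift - pte_shift;     /* 9: 512 PTEs per table */
        unsigned int levels =
                (va_bits + bits_per_level - 1) / bits_per_level;        /* 3 */
        unsigned int pgd_bits =
                va_bits - bits_per_level * (levels - 1);                /* 6 */
        unsigned long pgd_size = 1UL << (pgd_bits + pte_shift);         /* 512 bytes */

        printf("levels=%u pgd_bits=%u pgd_size=%lu\n", levels, pgd_bits, pgd_size);
        return 0;
}

The resulting 512-byte pgd is smaller than a page, which is exactly the case
where the old start-level mask of bits_per_level bits lets address bits above
cfg->ias index past the end of the pgd.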