 arch/powerpc/kvm/book3s_hv_builtin.c |  4 ++--
 drivers/base/dma-contiguous.c        |  2 +-
 include/linux/cma.h                  |  6 +++---
 mm/cma.c                             | 13 +++++++------
 4 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 3960e0bceaf2..6cf498a9bc98 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -185,8 +185,8 @@ void __init kvm_cma_reserve(void)
align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
- cma_declare_contiguous(selected_size, 0, 0, align_size,
- KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, &kvm_cma, false);
+ cma_declare_contiguous(0, selected_size, 0, align_size,
+ KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
}
}
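Note that base and size share the type phys_addr_t, so the compiler cannot catch a caller that still passes them in the old order; every call site has to be converted by hand, as above. A minimal sketch of a converted caller follows; the names demo_cma and demo_cma_reserve and the 16 MiB size are hypothetical, not part of this patch:

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/types.h>

static struct cma *demo_cma;

/* Hypothetical early-boot caller showing the new argument order:
 * (base, size, limit, alignment, order_per_bit, fixed, res_cma). */
static int __init demo_cma_reserve(void)
{
	phys_addr_t size = 16 << 20;	/* 16 MiB, arbitrary for illustration */

	/* base = 0, limit = 0, fixed = false: let the early allocator
	 * place the area anywhere. */
	return cma_declare_contiguous(0, size, 0, 0, 0, false, &demo_cma);
}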
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0411c1c57005..6606abdf880c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -165,7 +165,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
{
int ret;
- ret = cma_declare_contiguous(size, base, limit, 0, 0, res_cma, fixed);
+ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
if (ret)
return ret;
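The dma_contiguous_reserve_area() wrapper keeps its historical (size, base, ...) ordering; only the forwarded call changes, so the wrapper and the core API now disagree on argument order. A hedged sketch of arch setup code using the wrapper, with hypothetical names and sizes:

#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *demo_dma_cma;

/* Hypothetical: carve out 64 MiB for DMA buffers. base = 0 and
 * fixed = false let memblock choose the placement; limit = 0 means
 * no upper bound on the address. */
static void __init demo_dma_reserve(void)
{
	if (dma_contiguous_reserve_area(SZ_64M, 0, 0,
					&demo_dma_cma, false))
		pr_warn("demo: CMA DMA area reservation failed\n");
}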
diff --git a/include/linux/cma.h b/include/linux/cma.h
index f6f7809acb98..371b93042520 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -21,7 +21,7 @@ extern unsigned long cma_get_size(struct cma *cma);
-extern int __init cma_declare_contiguous(phys_addr_t size,
-	phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+	phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
-	struct cma **res_cma, bool fixed);
+	bool fixed, struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
#endif
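The header also carries the allocation side of the API: pages come out of a declared (and later activated) area via cma_alloc() and go back via cma_release(). A minimal sketch against the prototypes above; the demo_* helpers are hypothetical:

#include <linux/cma.h>
#include <linux/printk.h>

/* Hypothetical consumer of an area declared at boot. */
static struct page *demo_get_buffer(struct cma *cma, int nr_pages)
{
	/* align is an order: 0 asks for no alignment beyond one page. */
	struct page *pages = cma_alloc(cma, nr_pages, 0);

	if (!pages)
		return NULL;
	/* ... use the physically contiguous range ... */
	return pages;
}

static void demo_put_buffer(struct cma *cma, struct page *pages, int nr_pages)
{
	if (!cma_release(cma, pages, nr_pages))
		pr_warn("demo: pages were not part of this CMA area\n");
}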
diff --git a/mm/cma.c b/mm/cma.c
index 103a6663b7c7..488e50810ed1 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -141,13 +141,13 @@ core_initcall(cma_init_reserved_areas);
/**
* cma_declare_contiguous() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
* @base: Base address of the reserved area optional, use 0 for any
+ * @size: Size of the reserved area (in bytes),
* @limit: End address of the reserved memory (optional, 0 for any).
* @alignment: Alignment for the CMA area, should be power of 2 or zero
* @order_per_bit: Order of pages represented by one bit on bitmap.
- * @res_cma: Pointer to store the created cma region.
* @fixed: hint about where to place the reserved area
+ * @res_cma: Pointer to store the created cma region.
*
* This function reserves memory from early allocator. It should be
* called by arch specific code once the early allocator (memblock or bootmem)
* has been activated and all other subsystems have allocated/reserved
@@ -157,12 +157,12 @@ core_initcall(cma_init_reserved_areas);
* If @fixed is true, reserve contiguous area at exactly @base. If false,
* reserve in range from @base to @limit.
*/
-int __init cma_declare_contiguous(phys_addr_t size,
- phys_addr_t base, phys_addr_t limit,
+int __init cma_declare_contiguous(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- struct cma **res_cma, bool fixed)
+ bool fixed, struct cma **res_cma)
{
- struct cma *cma = &cma_areas[cma_area_count];
+ struct cma *cma;
int ret = 0;
pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
@@ -218,6 +218,7 @@ int __init cma_declare_contiguous(phys_addr_t size,
* Each reserved area must be initialised later, when more kernel
* subsystems (like slab allocator) are available.
*/
+ cma = &cma_areas[cma_area_count];
cma->base_pfn = PFN_DOWN(base);
cma->count = size >> PAGE_SHIFT;
cma->order_per_bit = order_per_bit;
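These three fields later determine the per-area bitmap, at one bit per 2^order_per_bit pages. A rough sketch of the implied bitmap footprint, assuming it mirrors the sizing done when areas are activated; the helper below is hypothetical:

#include <linux/bitops.h>
#include <linux/mm.h>

/* Hypothetical helper: bitmap footprint implied by the fields above.
 * One bit covers 2^order_per_bit pages, so a 16 MiB area with 4 KiB
 * pages and order_per_bit = 0 needs 4096 bits = 512 bytes. */
static unsigned long demo_bitmap_bytes(phys_addr_t size,
				       unsigned int order_per_bit)
{
	unsigned long count = size >> PAGE_SHIFT;	/* pages in the area */
	unsigned long bits = count >> order_per_bit;	/* bitmap bits */

	return BITS_TO_LONGS(bits) * sizeof(long);
}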