Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/dma.c    41
-rw-r--r--  arch/arc/mm/fault.c  20
2 files changed, 15 insertions, 46 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index c75d5c3470e3..db203ff69ccf 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -84,29 +84,10 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
{
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = __phys_to_pfn(dma_addr);
- unsigned long off = vma->vm_pgoff;
- int ret = -ENXIO;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
+ return __phys_to_pfn(dma_addr);
}
/*
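
With arch_dma_mmap() gone, the only arch-specific step left is translating a coherent allocation back to a page frame number; the offset checks and the remap_pfn_range() call move into common code. Below is a minimal sketch of such a common mmap helper, reconstructed from the removed lines above — the function name is an illustrative assumption, not the exact kernel implementation:

#include <linux/dma-noncoherent.h>	/* arch_dma_coherent_to_pfn() */
#include <linux/mm.h>			/* vma_pages(), remap_pfn_range() */

/* Sketch of a generic mmap path built on the new PFN hook (illustrative). */
static int dma_mmap_sketch(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);

	/* coherent buffers must be mapped uncached into userspace */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* reject a mapping that runs past the end of the allocation */
	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       user_count << PAGE_SHIFT, vma->vm_page_prot);
}
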
@@ -167,7 +148,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
}
/*
- * Plug in coherent or noncoherent dma ops
+ * Plug in direct dma map ops.
*/
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
@@ -175,13 +156,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
* with memory - eliding need for any explicit cache maintenance of
- * DMA buffers - so we can use dma_direct cache ops.
+ * DMA buffers.
*/
- if (is_isa_arcv2() && ioc_enable && coherent) {
- set_dma_ops(dev, &dma_direct_ops);
- dev_info(dev, "use dma_direct_ops cache ops\n");
- } else {
- set_dma_ops(dev, &dma_noncoherent_ops);
- dev_info(dev, "use dma_noncoherent_ops cache ops\n");
- }
+ if (is_isa_arcv2() && ioc_enable && coherent)
+ dev->dma_coherent = true;
+
+ dev_info(dev, "use %sncoherent DMA ops\n",
+ dev->dma_coherent ? "" : "non");
}
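
Rather than swapping in a whole ops structure per device, the per-device dev->dma_coherent flag now tells the common DMA code whether explicit cache maintenance is needed. A simplified sketch of how generic code of this era consults the flag — dev_is_dma_coherent() mirrors the helper in <linux/dma-noncoherent.h>, while map_page_sketch() is only an illustration:

#include <linux/dma-direct.h>		/* phys_to_dma() */
#include <linux/dma-noncoherent.h>	/* arch_sync_dma_for_device() */

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}

/* illustrative map path: only non-coherent devices pay for cache writeback */
static dma_addr_t map_page_sketch(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);
	return phys_to_dma(dev, paddr);
}
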
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index db6913094be3..c9da6102eb4f 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- siginfo_t info;
+ int si_code;
int ret;
vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
- clear_siginfo(&info);
-
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
@@ -91,7 +89,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
return;
}
- info.si_code = SEGV_MAPERR;
+ si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
@@ -119,7 +117,7 @@ retry:
* we can handle it..
*/
good_area:
- info.si_code = SEGV_ACCERR;
+ si_code = SEGV_ACCERR;
/* Handle protection violation, execute on heap or stack */
@@ -199,11 +197,7 @@ bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.fault_address = address;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void __user *)address;
- force_sig_info(SIGSEGV, &info, tsk);
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
return;
}
@@ -238,9 +232,5 @@ do_sigbus:
goto no_context;
tsk->thread.fault_address = address;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, tsk);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
}
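
force_sig_fault() packages exactly the open-coded sequence deleted above; centralising it means every caller gets a fully initialised siginfo, which is also why the explicit clear_siginfo() call can disappear from do_page_fault(). A simplified sketch of its behaviour (the real helper lives in kernel/signal.c and additionally handles arch-specific siginfo fields):

#include <linux/sched/signal.h>	/* force_sig_info() */

static int force_sig_fault_sketch(int sig, int code, void __user *addr,
				  struct task_struct *t)
{
	siginfo_t info;

	clear_siginfo(&info);	/* zero the padding so none leaks to userspace */
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, t);
}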