#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/prom.h>
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
- return PCI_DMA_ERROR_CODE;
+ return DMA_ERROR_CODE;
}
if (handle && *handle)
goto again;
} else {
/* Third failure, give up */
- return PCI_DMA_ERROR_CODE;
+ return DMA_ERROR_CODE;
}
}
unsigned int npages, int direction)
{
unsigned long entry, flags;
- dma_addr_t ret = PCI_DMA_ERROR_CODE;
+ dma_addr_t ret = DMA_ERROR_CODE;
spin_lock_irqsave(&(tbl->it_lock), flags);
entry = iommu_range_alloc(tbl, npages, NULL);
- if (unlikely(entry == PCI_DMA_ERROR_CODE)) {
+ if (unlikely(entry == DMA_ERROR_CODE)) {
spin_unlock_irqrestore(&(tbl->it_lock), flags);
- return PCI_DMA_ERROR_CODE;
+ return DMA_ERROR_CODE;
}
entry += tbl->it_offset; /* Offset into real TCE table */
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */
- if (unlikely(entry == PCI_DMA_ERROR_CODE)) {
+ if (unlikely(entry == DMA_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
" npages %lx\n", tbl, vaddr, npages);
*/
if (outcount < nelems) {
outs++;
- outs->dma_address = PCI_DMA_ERROR_CODE;
+ outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
return outcount;
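
For reference, a driver-side sketch of how this return convention is
consumed (names like pdev, sglist and nelems are illustrative, not from
this patch): pci_map_sg() returns the number of DMA segments actually
mapped, with 0 signalling total failure; the DMA_ERROR_CODE/zero-length
entry written above terminates a partially mapped list.

	/* Illustrative only: caller of the scatter/gather mapping path. */
	int nmapped = pci_map_sg(pdev, sglist, nelems, PCI_DMA_TODEVICE);
	if (nmapped == 0)
		return -ENOMEM;	/* nothing was mapped */
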
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
if (order >= IOMAP_MAX_ORDER) {
printk("PCI_DMA: pci_alloc_consistent size too large: 0x%lx\n",
size);
- return (void *)PCI_DMA_ERROR_CODE;
+ return (void *)DMA_ERROR_CODE;
}
tbl = devnode_table(hwdev);
/* Set up tces to cover the allocated range */
mapping = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
- if (mapping == PCI_DMA_ERROR_CODE) {
+ if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
ret = NULL;
} else
size_t size, int direction)
{
struct iommu_table * tbl;
- dma_addr_t dma_handle = PCI_DMA_ERROR_CODE;
+ dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
unsigned int npages;
if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
- if (dma_handle == PCI_DMA_ERROR_CODE) {
+ if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
tbl, vaddr, npages);
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/dma.h>
size_t size, int direction )
{
struct iommu_table *tbl;
- dma_addr_t dma_handle = PCI_DMA_ERROR_CODE;
+ dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
unsigned int npages;
/* It is easier to debug here for the drivers than in the tce tables.*/
if(order >= IOMAP_MAX_ORDER) {
printk("VIO_DMA: vio_alloc_consistent size to large: 0x%lx \n", size);
- return (void *)PCI_DMA_ERROR_CODE;
+ return (void *)DMA_ERROR_CODE;
}
tbl = dev->iommu_table;
memset(ret, 0, npages << PAGE_SHIFT);
/* Set up tces to cover the allocated range */
tce = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
- if (tce == PCI_DMA_ERROR_CODE) {
+ if (tce == DMA_ERROR_CODE) {
PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: iommu_alloc failed\n" );
free_pages((unsigned long)ret, order);
ret = NULL;
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "ibmveth.h"
-#warning remove NO_TCE usage from ibmveth.c
-#define NO_TCE PCI_DMA_ERROR_CODE
-
#define DEBUG 1
#define ibmveth_printk(fmt, args...) \
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
if(adapter->buffer_list_addr != NULL) {
- if(adapter->buffer_list_dma != NO_TCE) {
+ if(!vio_dma_mapping_error(adapter->buffer_list_dma)) {
vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, PCI_DMA_BIDIRECTIONAL);
- adapter->buffer_list_dma = NO_TCE;
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
}
free_page((unsigned long)adapter->buffer_list_addr);
adapter->buffer_list_addr = NULL;
}
if(adapter->filter_list_addr != NULL) {
- if(adapter->filter_list_dma != NO_TCE) {
+ if(!vio_dma_mapping_error(adapter->filter_list_dma)) {
vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, PCI_DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = NO_TCE;
+ adapter->filter_list_dma = DMA_ERROR_CODE;
}
free_page((unsigned long)adapter->filter_list_addr);
adapter->filter_list_addr = NULL;
}
if(adapter->rx_queue.queue_addr != NULL) {
- if(adapter->rx_queue.queue_dma != NO_TCE) {
+ if(!vio_dma_mapping_error(adapter->rx_queue.queue_dma)) {
vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, PCI_DMA_BIDIRECTIONAL);
- adapter->rx_queue.queue_dma = NO_TCE;
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
}
kfree(adapter->rx_queue.queue_addr);
adapter->rx_queue.queue_addr = NULL;
adapter->filter_list_dma = vio_map_single(adapter->vdev, adapter->filter_list_addr, 4096, PCI_DMA_BIDIRECTIONAL);
adapter->rx_queue.queue_dma = vio_map_single(adapter->vdev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, PCI_DMA_BIDIRECTIONAL);
- if((adapter->buffer_list_dma == NO_TCE) ||
- (adapter->filter_list_dma == NO_TCE) ||
- (adapter->rx_queue.queue_dma == NO_TCE)) {
+ if((vio_dma_mapping_error(adapter->buffer_list_dma)) ||
+ (vio_dma_mapping_error(adapter->filter_list_dma)) ||
+ (vio_dma_mapping_error(adapter->rx_queue.queue_dma))) {
ibmveth_error_printk("unable to map filter or buffer list pages\n");
ibmveth_cleanup(adapter);
return -ENOMEM;
desc[0].fields.address = vio_map_single(adapter->vdev, skb->data, desc[0].fields.length, PCI_DMA_TODEVICE);
desc[0].fields.valid = 1;
- if(desc[0].fields.address == NO_TCE) {
+ if(vio_dma_mapping_error(desc[0].fields.address)) {
ibmveth_error_printk("tx: unable to map initial fragment\n");
adapter->tx_map_failed++;
adapter->stats.tx_dropped++;
desc[curfrag+1].fields.length = frag->size;
desc[curfrag+1].fields.valid = 1;
- if(desc[curfrag+1].fields.address == NO_TCE) {
+ if(vio_dma_mapping_error(desc[curfrag+1].fields.address)) {
ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
adapter->tx_map_failed++;
adapter->stats.tx_dropped++;
INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
- adapter->buffer_list_dma = NO_TCE;
- adapter->filter_list_dma = NO_TCE;
- adapter->rx_queue.queue_dma = NO_TCE;
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
atomic_set(&adapter->not_replenishing, 1);
#include <asm/scatterlist.h>
#include <asm/bug.h>
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
BUG();
}
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+ return (dma_addr == DMA_ERROR_CODE);
+}
+
#endif /* _ASM_DMA_MAPPING_H */
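
A minimal usage sketch for the new generic helper (dev, buf and len are
hypothetical): callers test the handle returned by dma_map_single() with
dma_mapping_error() instead of comparing against a bus-specific constant.

	/* Sketch only, assuming the single-argument 2.6-era interface. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -ENOMEM;	/* handle must not reach the hardware */
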
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
return 0;
}
-#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
{
- return (dma_addr == PCI_DMA_ERROR_CODE);
+ return dma_mapping_error(dma_addr);
}
extern int pci_domain_nr(struct pci_bus *bus);
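
The PCI spelling is now a thin wrapper, so both interfaces test the same
sentinel. A hedged sketch of the equivalent PCI-side check (pdev, buf and
len hypothetical):

	/* Sketch only: PCI caller, backed by dma_mapping_error() above. */
	dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(handle))
		return -ENOMEM;
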
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _VIO_H
-#define _VIO_H
+#ifndef _ASM_VIO_H
+#define _ASM_VIO_H
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/scatterlist.h>
return container_of(dev, struct vio_dev, dev);
}
-#endif /* _PHYP_H */
+static inline int vio_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_mapping_error(dma_addr);
+}
+
+#endif /* _ASM_VIO_H */
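
And the VIO-side pattern the ibmveth changes above adopt (vdev, buf and
len hypothetical): map, then test with vio_dma_mapping_error() before
handing the address to firmware.

	/* Sketch only: mirrors the ibmveth conversion above. */
	dma_addr_t handle = vio_map_single(vdev, buf, len, PCI_DMA_BIDIRECTIONAL);
	if (vio_dma_mapping_error(handle))
		return -ENOMEM;	/* do not use the handle */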