Exporting patches: # HG changeset patch # User Muli Ben-Yehuda # Node ID 0d0bd4eaf38aeb3dbc80dcbc1a5c71c1f9b964f2 # Parent c2d17a4d9ee74a612fceeff7271781a1b00ab048 - move swiotlb definitions to asm-generic - reindent swiotlb.h diff -r c2d17a4d9ee74a612fceeff7271781a1b00ab048 -r 0d0bd4eaf38aeb3dbc80dcbc1a5c71c1f9b964f2 include/asm-x86_64/swiotlb.h --- a/include/asm-x86_64/swiotlb.h Fri Nov 18 18:51:22 2005 +++ b/include/asm-x86_64/swiotlb.h Fri Nov 18 21:15:04 2005 @@ -1,43 +1,7 @@ #ifndef _ASM_SWIOTLB_H #define _ASM_SWTIOLB_H 1 -#include - -/* SWIOTLB interface */ - -extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, - int dir); -extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_for_device(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - unsigned long offset, - size_t size, int dir); -extern void swiotlb_sync_single_range_for_device(struct device *hwdev, - dma_addr_t dev_addr, - unsigned long offset, - size_t size, int dir); -extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); -extern void swiotlb_sync_sg_for_device(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); -extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); -extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); -extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); -extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags); -extern void swiotlb_free_coherent (struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); 
+#include <asm-generic/swiotlb.h> #ifdef CONFIG_SWIOTLB extern int swiotlb; @@ -45,4 +9,4 @@ #define swiotlb 0 #endif -#endif +#endif /* _ASM_SWTIOLB_H */ diff -r c2d17a4d9ee74a612fceeff7271781a1b00ab048 -r 0d0bd4eaf38aeb3dbc80dcbc1a5c71c1f9b964f2 include/asm-generic/swiotlb.h --- /dev/null Fri Nov 18 18:51:22 2005 +++ b/include/asm-generic/swiotlb.h Fri Nov 18 21:15:04 2005 @@ -0,0 +1,55 @@ +#ifndef _ASM_GENERIC_SWIOTLB_H +#define _ASM_GENERIC_SWIOTLB_H 1 + +extern dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, + enum dma_data_direction dir); + +extern void +swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir); + +extern void +swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir); + +extern void +swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, + enum dma_data_direction dir); + +extern void +swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); + +extern int +swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); + +extern void +swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); + +extern int +swiotlb_dma_mapping_error(dma_addr_t dma_addr); + +extern void* +swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, + gfp_t flags); + +extern void 
+swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle); + +#endif /* _ASM_GENERIC_SWIOTLB_H */ # HG changeset patch # User Muli Ben-Yehuda # Node ID c2aaf8d0370b658c39ab24d184b2d4f95334af7f # Parent 0d0bd4eaf38aeb3dbc80dcbc1a5c71c1f9b964f2 include asm-generic/swiotlb.h in pci-gart.c diff -r 0d0bd4eaf38aeb3dbc80dcbc1a5c71c1f9b964f2 -r c2aaf8d0370b658c39ab24d184b2d4f95334af7f arch/x86_64/kernel/pci-gart.c --- a/arch/x86_64/kernel/pci-gart.c Fri Nov 18 21:15:04 2005 +++ b/arch/x86_64/kernel/pci-gart.c Fri Nov 18 21:22:45 2005 @@ -30,6 +30,7 @@ #include #include #include +#include <asm-generic/swiotlb.h> dma_addr_t bad_dma_address; # HG changeset patch # User Muli Ben-Yehuda # Node ID 537492320b35b0a23bbcb2e8ebb2e116fc7e4320 # Parent c2aaf8d0370b658c39ab24d184b2d4f95334af7f - s/int dir/enum dma_data_direction dir/ diff -r c2aaf8d0370b658c39ab24d184b2d4f95334af7f -r 537492320b35b0a23bbcb2e8ebb2e116fc7e4320 lib/swiotlb.c --- a/lib/swiotlb.c Fri Nov 18 21:22:45 2005 +++ b/lib/swiotlb.c Fri Nov 18 21:25:28 2005 @@ -280,7 +280,7 @@ * Allocates bounce buffer and returns its kernel virtual address. */ static void * -map_single(struct device *hwdev, char *buffer, size_t size, int dir) +map_single(struct device *hwdev, char *buffer, size_t size, enum dma_data_direction dir) { unsigned long flags; char *dma_addr; @@ -363,7 +363,7 @@ * dma_addr is the kernel virtual address of the bounce buffer to unmap. 
*/ static void -unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) +unmap_single(struct device *hwdev, char *dma_addr, size_t size, enum dma_data_direction dir) { unsigned long flags; int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; @@ -408,7 +408,7 @@ static void sync_single(struct device *hwdev, char *dma_addr, size_t size, - int dir, int target) + enum dma_data_direction dir, int target) { int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; char *buffer = io_tlb_orig_addr[index]; @@ -497,7 +497,7 @@ } static void -swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) +swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, int do_panic) { /* * Ran out of IOMMU space for this operation. This is very bad. @@ -525,7 +525,7 @@ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ dma_addr_t -swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, enum dma_data_direction dir) { unsigned long dev_addr = virt_to_phys(ptr); void *map; @@ -589,7 +589,7 @@ */ void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, - int dir) + enum dma_data_direction dir) { char *dma_addr = phys_to_virt(dev_addr); @@ -613,7 +613,7 @@ */ static inline void swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir, int target) + size_t size, enum dma_data_direction dir, int target) { char *dma_addr = phys_to_virt(dev_addr); @@ -627,14 +627,14 @@ void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, 
SYNC_FOR_DEVICE); } @@ -645,7 +645,7 @@ static inline void swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, size_t size, - int dir, int target) + enum dma_data_direction dir, int target) { char *dma_addr = phys_to_virt(dev_addr) + offset; @@ -659,7 +659,8 @@ void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + unsigned long offset, size_t size, + enum dma_data_direction dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_CPU); @@ -667,7 +668,8 @@ void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + unsigned long offset, size_t size, + enum dma_data_direction dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_DEVICE); @@ -691,7 +693,7 @@ */ int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, - int dir) + enum dma_data_direction dir) { void *addr; unsigned long dev_addr; @@ -726,7 +728,7 @@ */ void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems, - int dir) + enum dma_data_direction dir) { int i; @@ -749,7 +751,7 @@ */ static inline void swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir, int target) + int nelems, enum dma_data_direction dir, int target) { int i; @@ -764,14 +766,14 @@ void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } # HG changeset patch # User Muli Ben-Yehuda # Node ID 4eb9af0bbcd47955c5b560586b011e912aaab9fc # Parent 537492320b35b0a23bbcb2e8ebb2e116fc7e4320 - s/int 
dir/enum dma_data_direction dir/ diff -r 537492320b35b0a23bbcb2e8ebb2e116fc7e4320 -r 4eb9af0bbcd47955c5b560586b011e912aaab9fc arch/x86_64/kernel/pci-gart.c --- a/arch/x86_64/kernel/pci-gart.c Fri Nov 18 21:25:28 2005 +++ b/arch/x86_64/kernel/pci-gart.c Fri Nov 18 21:28:20 2005 @@ -104,7 +104,7 @@ static unsigned long next_bit; /* protected by iommu_bitmap_lock */ static int need_flush; /* global flush state. set for each gart wrap */ static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem, - size_t size, int dir, int do_panic); + size_t size, enum dma_data_direction dir, int do_panic); /* Dummy device used for NULL arguments (normally ISA). Better would be probably a smaller DMA mask, but this is bug-to-bug compatible to i386. */ @@ -327,7 +327,8 @@ #define CLEAR_LEAK(x) #endif -static void iommu_full(struct device *dev, size_t size, int dir, int do_panic) +static void iommu_full(struct device *dev, size_t size, enum dma_data_direction dir, + int do_panic) { /* * Ran out of IOMMU space for this operation. This is very bad. @@ -387,7 +388,8 @@ * Caller needs to check if the iommu is needed and flush. */ static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem, - size_t size, int dir, int do_panic) + size_t size, enum dma_data_direction dir, + int do_panic) { unsigned long npages = to_pages(phys_mem, size); unsigned long iommu_page = alloc_iommu(npages); @@ -410,7 +412,8 @@ } /* Map a single area into the IOMMU */ -dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, int dir) +dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, + enum dma_data_direction dir) { unsigned long phys_mem, bus; @@ -432,7 +435,7 @@ /* Fallback for dma_map_sg in case of overflow */ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, - int nents, int dir) + int nents, enum dma_data_direction dir) { int i; @@ -516,7 +519,8 @@ * DMA map all entries in a scatterlist. 
* Merge chunks that have page aligned sizes into a continuous mapping. */ -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) +int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) { int i; int out; @@ -588,7 +592,7 @@ * Free a DMA mapping. */ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, int direction) + size_t size, enum dma_data_direction direction) { unsigned long iommu_page; int npages; @@ -614,7 +618,8 @@ /* * Wrapper for pci_unmap_single working with scatterlists. */ -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) +void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) { int i; if (swiotlb) { # HG changeset patch # User Muli Ben-Yehuda # Node ID 802ab0812fa5716d8c70a4e33907b95fd4c20956 # Parent 4eb9af0bbcd47955c5b560586b011e912aaab9fc s/int dir/enum dma_data_direction dir/ diff -r 4eb9af0bbcd47955c5b560586b011e912aaab9fc -r 802ab0812fa5716d8c70a4e33907b95fd4c20956 include/asm-x86_64/dma-mapping.h --- a/include/asm-x86_64/dma-mapping.h Fri Nov 18 21:28:20 2005 +++ b/include/asm-x86_64/dma-mapping.h Fri Nov 18 21:30:03 2005 @@ -24,16 +24,16 @@ #ifdef CONFIG_GART_IOMMU extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, - int direction); + enum dma_data_direction direction); extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, - int direction); + enum dma_data_direction direction); #else /* No IOMMU */ static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, - size_t size, int direction) + size_t size, enum dma_data_direction direction) { dma_addr_t addr; @@ -47,7 +47,7 @@ } static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr, - size_t size, int direction) + size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -61,7 +61,8 @@ static 
inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, - size_t size, int direction) + size_t size, + enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -74,7 +75,8 @@ static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, - size_t size, int direction) + size_t size, + enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -88,7 +90,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, - size_t size, int direction) + size_t size, + enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -102,7 +105,8 @@ static inline void dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, - size_t size, int direction) + size_t size, + enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -115,7 +119,8 @@ static inline void dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, int direction) + int nelems, + enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -128,7 +133,8 @@ static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, int direction) + int nelems, + enum dma_data_direction direction) { if (direction == DMA_NONE) out_of_line_bug(); @@ -140,9 +146,9 @@ } extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); + int nents, enum dma_data_direction direction); extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); + int nents, enum dma_data_direction direction); #define dma_unmap_page dma_unmap_single @@ -158,7 +164,8 @@ return 0; } -static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) +static inline void dma_cache_sync(void *vaddr, size_t size, + enum 
dma_data_direction dir) { flush_write_buffers(); }