patch-2.4.21 linux-2.4.21/arch/ia64/sn/io/pci_dma.c

diff -urN linux-2.4.20/arch/ia64/sn/io/pci_dma.c linux-2.4.21/arch/ia64/sn/io/pci_dma.c
@@ -9,6 +9,7 @@
  * a description of how these routines should be used.
  */
 
+#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
@@ -31,18 +32,6 @@
 #include <asm/sn/pci/pci_bus_cvlink.h>
 #include <asm/sn/nag.h>
 
-/* DMA, PIO, and memory allocation flags */
-#ifdef CONFIG_IA64_SGI_SN1
-#define DMA_DATA_FLAGS		( PCIIO_BYTE_STREAM | PCIIO_DMA_DATA )
-#define DMA_CONTROL_FLAGS	( PCIIO_BYTE_STREAM | PCIBR_BARRIER | \
-				  PCIIO_DMA_CMD )
-#elif defined(CONFIG_IA64_SGI_SN2)
-#define DMA_DATA_FLAGS		( PCIIO_DMA_DATA )
-#define DMA_CONTROL_FLAGS	( PCIBR_BARRIER | PCIIO_DMA_CMD )
-#else
-#error need to define DMA mapping flags for this platform
-#endif
-
 /*
  * For ATE allocations
  */
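
[Editor's note, not part of the patch] The DMA_DATA_FLAGS/DMA_CONTROL_FLAGS macros removed above are open-coded at every call site in the hunks below, with one behavioural addition on SN2: the PCIIO_BYTE_STREAM attribute is only requested for devices that are not PIC-attached. Purely as an illustration, the repeated data-flag selection is equivalent to a hypothetical helper like the following; IS_PIC_DEVICE() and the PCIIO_* flags are the identifiers actually used by the patch, sn_dma_data_flags() is made up:

/*
 * Hypothetical helper, for illustration only -- the patch open-codes this
 * expression at each pciio_dmatrans_addr()/pciio_dmamap_alloc() call site.
 */
static inline unsigned sn_dma_data_flags(struct pci_dev *hwdev)
{
#ifdef CONFIG_IA64_SGI_SN1
        /* SN1 always requests byte-stream ordering for data streams. */
        return PCIIO_BYTE_STREAM | PCIIO_DMA_DATA;
#elif defined(CONFIG_IA64_SGI_SN2)
        /* SN2: PIC-attached devices do not get the byte-stream attribute. */
        return (IS_PIC_DEVICE(hwdev) ? 0 : PCIIO_BYTE_STREAM) | PCIIO_DMA_DATA;
#else
#error need to define DMA mapping flags for this platform
#endif
}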
@@ -129,9 +118,6 @@
 		}
 	}
 
-	printk(KERN_WARNING "find_sn_dma_map: Unable find the corresponding "
-	       "dma map\n");
-
 	return NULL;
 }
 
@@ -151,12 +137,15 @@
 sn_dma_sync(struct pci_dev *hwdev)
 {
 
+#ifdef SN_DMA_SYNC
+
 	struct sn_device_sysdata *device_sysdata;
 	volatile unsigned long dummy;
 
 	/*
-	 * It is expected that on an IA64 platform, a DMA sync ensures that 
-	 * all the DMA from a particular device is complete and coherent.  We
+	 * A DMA sync is supposed to ensure that 
+	 * all the DMA from a particular device
+	 * is complete and coherent.  We
 	 * try to do this by
 	 *	1. flushing the write wuffers from Bridge
 	 *	2. flushing the Xbow port.
@@ -167,12 +156,13 @@
 	dummy = (volatile unsigned long ) *device_sysdata->dma_buf_sync;
 
 	/*
-	 * For the Xbow port flush, we maybe denied the request because 
+	 * For the Xbow port flush, we may be denied the request because 
 	 * someone else may be flushing the port .. try again.
 	 */
 	while((volatile unsigned long ) *device_sysdata->xbow_buf_sync) {
 		udelay(2);
 	}
+#endif
 }
 
 /**
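[Editor's note, not part of the patch] Note that the body of sn_dma_sync() is now compiled out unless SN_DMA_SYNC is defined. From the driver side, the guarantee described in the comment above is what the generic 2.4 streaming-DMA sync call (see Documentation/DMA-mapping.txt) is meant to provide. A hedged caller-side sketch, with made-up driver names:

#include <linux/pci.h>

/*
 * Illustration only.  my_card_rx_complete() and its arguments are
 * hypothetical; pci_dma_sync_single() is the standard 2.4 streaming-DMA
 * sync call, which on SN is presumably where the Bridge/Xbow flushing
 * described above would happen.
 */
static void my_card_rx_complete(struct pci_dev *pdev, void *buf,
                                dma_addr_t buf_dma, size_t len)
{
        /* The device has finished DMAing into buf; sync the mapping
         * before the CPU reads the data. */
        pci_dma_sync_single(pdev, buf_dma, len, PCI_DMA_FROMDEVICE);
        /* ... buf is now safe to inspect ... */
}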
@@ -183,13 +173,12 @@
  *
  * pci_alloc_consistent() returns a pointer to a memory region suitable for
  * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
- * that @dma_handle will have the PCIBR_BARRIER and PCIIO_DMA_CMD flags
- * set.
+ * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
  *
  * This interface is usually used for "command" streams (e.g. the command
  * queue for a SCSI controller).  See Documentation/DMA-mapping.txt for
- * more information.  Note that this routine should always put a 32 bit
- * DMA address into @dma_handle.  This is because most other platforms
+ * more information.  Note that this routine will always put a 32 bit
+ * DMA address into @dma_handle.  This is because most devices
  * that are capable of 64 bit PCI DMA transactions can't do 64 bit _coherent_
  * DMAs, and unfortunately this interface has to cater to the LCD.  Oh well.
  *
@@ -232,15 +221,40 @@
 	/* physical addr. of the memory we just got */
 	phys_addr = __pa(cpuaddr);
 
+	/*
+	 * This will try to use a Direct Map register to do the
+	 * 32 bit DMA mapping, but it may not succeed if another
+	 * device on the same bus is already mapped with different
+	 * attributes or to a different memory region.
+	 */
+#ifdef CONFIG_IA64_SGI_SN1
 	*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
-					  DMA_CONTROL_FLAGS);
+					  PCIIO_BYTE_STREAM |
+					  PCIIO_DMA_CMD);
+#elif defined(CONFIG_IA64_SGI_SN2)
+	*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
+			((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+					  PCIIO_DMA_CMD);
+#else
+#error unsupported platform
+#endif
+
 	/*
 	 * It is a 32 bit card and we cannot do direct mapping,
-	 * so we use an ATE.
+	 * so we try to use an ATE.
 	 */
 	if (!(*dma_handle)) {
+#ifdef CONFIG_IA64_SGI_SN1
 		dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
-					     DMA_CONTROL_FLAGS | PCIIO_FIXED);
+					     PCIIO_BYTE_STREAM |
+					     PCIIO_DMA_CMD);
+#elif defined(CONFIG_IA64_SGI_SN2)
+		dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
+				((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+					     PCIIO_DMA_CMD);
+#else
+#error unsupported platform
+#endif
 		if (!dma_map) {
 			printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "
 			       "allocate anymore 32 bit page map entries.\n");
@@ -250,13 +264,7 @@
 							     size);
 		sn_dma_map = (struct sn_dma_maps_s *)dma_map;
 		sn_dma_map->dma_addr = *dma_handle;
-		printk(KERN_INFO "%s: PMU mapping: %p\n", __FUNCTION__,
-		       (void *)*dma_handle);
 	}
-	else
-		printk(KERN_INFO "%s: direct mapping: %p\n", __FUNCTION__,
-		       (void *)*dma_handle);
-	
 
         return cpuaddr;
 }
@@ -359,7 +367,9 @@
 		if (IS_PCIA64(hwdev)) {
 			dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
 						       sg->length,
-						       DMA_DATA_FLAGS | PCIIO_DMA_A64 );
+			       ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+						       PCIIO_DMA_DATA |
+						       PCIIO_DMA_A64);
 			sg->address = (char *)dma_addr;
 			continue;
 		}
@@ -368,9 +378,19 @@
 		 * Handle 32-63 bit cards via direct mapping
 		 */
 		if (IS_PCI32G(hwdev)) {
+#ifdef CONFIG_IA64_SGI_SN1
 			dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
 						       sg->length,
-						       DMA_DATA_FLAGS);
+						       PCIIO_BYTE_STREAM |
+						       PCIIO_DMA_DATA);
+#elif defined(CONFIG_IA64_SGI_SN2)
+			dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
+						       sg->length,
+					((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+						       PCIIO_DMA_DATA);
+#else
+#error unsupported platform
+#endif
 			/*
 			 * See if we got a direct map entry
 			 */
@@ -386,8 +406,17 @@
 		 * so we use an ATE.
 		 */
 		dma_map = 0;
+#ifdef CONFIG_IA64_SGI_SN1
 		dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
-					     DMA_DATA_FLAGS);
+					     PCIIO_BYTE_STREAM |
+					     PCIIO_DMA_DATA);
+#elif defined(CONFIG_IA64_SGI_SN2)
+		dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
+				((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+					     PCIIO_DMA_DATA);
+#else
+#error unsupported platform
+#endif
 		if (!dma_map) {
 			printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
 			       "anymore 32 bit page map entries.\n");
@@ -395,7 +424,7 @@
 		}
 		dma_addr = pciio_dmamap_addr(dma_map, phys_addr, sg->length);
 		sg->address = (char *)dma_addr;
-		sg->page = (char *)dma_map;
+		sg->page = (struct page *)dma_map;
 		
 	}
 
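[Editor's note, not part of the patch] The three hunks above all belong to sn_pci_map_sg(): 64 bit capable devices get a direct 64 bit address, 32-63 bit devices first try a direct map register, and anything left falls back to an ATE (PMU) entry whose map handle is stashed in sg->page. A hedged caller-side sketch using the standard 2.4 scatter-gather API; my_issue_sg() and my_start_read() are made up:

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/scatterlist.h>

/* Illustration only.  my_issue_sg() is a hypothetical device-programming
 * routine; pci_map_sg()/pci_unmap_sg() and the sg_dma_*() accessors are
 * the standard 2.4 interfaces from Documentation/DMA-mapping.txt. */
extern void my_issue_sg(dma_addr_t addr, unsigned int len);

static int my_start_read(struct pci_dev *pdev, struct scatterlist *sglist,
                         int nents)
{
        int i, count;

        count = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
        if (count == 0)
                return -EIO;

        for (i = 0; i < count; i++)
                /* Bus address and length set up by sn_pci_map_sg() above. */
                my_issue_sg(sg_dma_address(&sglist[i]), sg_dma_len(&sglist[i]));

        /* ... after the I/O completes, unmap with the original nents ... */
        pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
        return 0;
}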
@@ -474,7 +503,8 @@
 		BUG();
 
 	/* SN cannot support DMA addresses smaller than 32 bits. */
-	if (IS_PCI32L(hwdev)) return 0;
+	if (IS_PCI32L(hwdev))
+		return 0;
 
 	/*
 	 * find vertex for the device
@@ -489,23 +519,32 @@
 	phys_addr = __pa(ptr);
 
 	if (IS_PCIA64(hwdev)) {
-		/*
-		 * This device supports 64 bit DMA addresses.
-		 */
+		/* This device supports 64 bit DMA addresses. */
 		dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
-					       DMA_DATA_FLAGS | PCIIO_DMA_A64);
+		       ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+					       PCIIO_DMA_DATA |
+					       PCIIO_DMA_A64);
 		return dma_addr;
 	}
 
 	/*
-	 * Devices that supports 32 bit to 63 bit DMA addresses get
+	 * Devices that support 32 bit to 63 bit DMA addresses get
 	 * 32 bit DMA addresses.
 	 *
 	 * First try to get a 32 bit direct map register.
 	 */
 	if (IS_PCI32G(hwdev)) {
+#ifdef CONFIG_IA64_SGI_SN1
 		dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
-					       DMA_DATA_FLAGS);
+					       PCIIO_BYTE_STREAM |
+					       PCIIO_DMA_DATA);
+#elif defined(CONFIG_IA64_SGI_SN2)
+		dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
+			((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+					       PCIIO_DMA_DATA);
+#else
+#error unsupported platform
+#endif
 		if (dma_addr)
 			return dma_addr;
 	}
@@ -515,11 +554,20 @@
 	 * let's use the PMU instead.
 	 */
 	dma_map = NULL;
-	dma_map = pciio_dmamap_alloc(vhdl, NULL, size, DMA_DATA_FLAGS);
+#ifdef CONFIG_IA64_SGI_SN1
+	dma_map = pciio_dmamap_alloc(vhdl, NULL, size, PCIIO_BYTE_STREAM |
+				     PCIIO_DMA_DATA);
+#elif defined(CONFIG_IA64_SGI_SN2)
+	dma_map = pciio_dmamap_alloc(vhdl, NULL, size, 
+			((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+			PCIIO_DMA_DATA);
+#else
+#error unsupported platform
+#endif
 
 	if (!dma_map) {
 		printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
-		       "32 bits page map entries.\n");
+		       "32 bit page map entries.\n");
 		BUG();
 	}
 
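[Editor's note, not part of the patch] sn_pci_map_single() follows the same ladder as the scatter-gather path: 64 bit direct, then a 32 bit direct map register, then a PMU/ATE entry. The caller-side pattern is the usual 2.4 streaming mapping; a hedged sketch with made-up driver names:

#include <linux/pci.h>

/* Illustration only.  my_tx_start()/my_tx_done() are hypothetical;
 * pci_map_single()/pci_unmap_single() are the standard 2.4 streaming-DMA
 * calls backed on SN by sn_pci_map_single() above. */
static dma_addr_t my_tx_start(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
        /* ... program 'handle' into the device and start the transfer ... */
        return handle;
}

static void my_tx_done(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
        pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
}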
@@ -628,7 +676,9 @@
  * properly.  For example, if your device can only drive the low 24-bits
  * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.  Of course, SN only supports devices that have 32 or more
- * address bits.
+ * address bits when using the PMU.  We could theoretically support <32 bit
+ * cards using direct mapping, but we'll worry about that later--on the off
+ * chance that someone actually wants to use such a card.
  */
 int
 sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
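
[Editor's note, not part of the patch] The mask discussed above reaches sn_pci_dma_supported() through the standard pci_set_dma_mask()/pci_dma_supported() path. A hedged probe-time sketch; my_probe() is made up:

#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Illustration only.  my_probe() is hypothetical; pci_set_dma_mask() is
 * the standard 2.4 call and consults the platform's dma_supported hook. */
static int my_probe(struct pci_dev *pdev)
{
        /* Prefer 64 bit addressing, fall back to 32 bit.  Anything
         * narrower (e.g. a 24 bit mask of 0x00ffffff) is refused on SN,
         * as the comment above explains. */
        if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) != 0 &&
            pci_set_dma_mask(pdev, 0xffffffffULL) != 0) {
                printk(KERN_ERR "my_probe: no usable DMA mask\n");
                return -EIO;
        }
        return 0;
}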
