www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
x86, AMD IOMMU: add functions to send IOMMU commands
author: Joerg Roedel <joerg.roedel@amd.com>
Thu, 26 Jun 2008 19:27:55 +0000 (21:27 +0200)
committer: Ingo Molnar <mingo@elte.hu>
Fri, 27 Jun 2008 08:12:15 +0000 (10:12 +0200)
This patch adds a generic command-queueing function, as well as all functions
needed to send specific commands to the IOMMU hardware, as required by this driver.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/amd_iommu.c

index 90392c7b253b6ad33113b3088612b44e3e179bd2..a24ee4a5203a38293df29bac3ec1ace71f6973e0 100644 (file)
@@ -37,4 +37,110 @@ struct command {
        u32 data[4];
 };
 
+/*
+ * Write one command into the IOMMU command ring and publish it to the
+ * hardware by advancing the tail pointer. Caller must hold iommu->lock.
+ *
+ * Returns 0 on success or -ENOMEM if the command ring is full.
+ */
+static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
+{
+       u32 tail, head, next_tail;
+       u8 *target;
+
+       tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+       head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+       next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
+
+       /* Check for a full ring _before_ writing into the buffer */
+       if (next_tail == head)
+               return -ENOMEM;
+
+       target = iommu->cmd_buf + tail;
+       memcpy_toio(target, cmd, sizeof(*cmd));
+
+       /* Moving the tail pointer makes the command visible to the IOMMU */
+       writel(next_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+       return 0;
+}
+
+/*
+ * Locked wrapper around __iommu_queue_command(): takes the per-IOMMU
+ * spinlock with interrupts disabled, queues one command, and restores
+ * the previous interrupt state.
+ */
+static int iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
+{
+       int rc;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&iommu->lock, irq_flags);
+       rc = __iommu_queue_command(iommu, cmd);
+       spin_unlock_irqrestore(&iommu->lock, irq_flags);
+
+       return rc;
+}
+
+/*
+ * Issue a COMPLETION_WAIT command and spin until the IOMMU has written
+ * the completion marker into 'ready', which proves that all previously
+ * queued commands have been processed.
+ *
+ * The spin is bounded so a wedged IOMMU cannot hang the CPU forever;
+ * on timeout a warning is printed and we carry on.
+ */
+static int iommu_completion_wait(struct amd_iommu *iommu)
+{
+       int ret;
+       unsigned long i = 0;
+       struct command cmd;
+       volatile u64 ready = 0; /* written by the IOMMU hardware */
+       unsigned long ready_phys = virt_to_phys(&ready);
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
+       cmd.data[1] = HIGH_U32(ready_phys);
+       cmd.data[2] = 1; /* value written to 'ready' */
+       CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
+
+       iommu->need_sync = 0;
+
+       ret = iommu_queue_command(iommu, &cmd);
+
+       if (ret)
+               return ret;
+
+       /* bounded busy-wait instead of a potentially endless loop */
+       while (!ready && i < (1UL << 24)) {
+               ++i;
+               cpu_relax();
+       }
+
+       if (!ready)
+               printk(KERN_WARNING "AMD IOMMU: completion wait timed out\n");
+
+       return 0;
+}
+
+/*
+ * Queue a command that invalidates the device table entry for the
+ * given device id.
+ */
+static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
+{
+       struct command cmd;
+
+       BUG_ON(iommu == NULL);
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.data[0] = devid;
+       CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
+
+       /* a completion wait is required before the change is guaranteed */
+       iommu->need_sync = 1;
+
+       return iommu_queue_command(iommu, &cmd);
+}
+
+/*
+ * Queue an INVALIDATE_IOMMU_PAGES command for one page-aligned address
+ * in the given protection domain. 's' selects a size interval rather
+ * than a single page, 'pde' also flushes page directory entries.
+ */
+static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
+               u64 address, u16 domid, int pde, int s)
+{
+       u32 addr_lo, addr_hi;
+       struct command cmd;
+
+       address &= PAGE_MASK;
+
+       /* assemble the two address words including the flag bits */
+       addr_lo = LOW_U32(address);
+       addr_hi = HIGH_U32(address);
+       if (s)
+               addr_lo |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       if (pde)
+               addr_lo |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+
+       memset(&cmd, 0, sizeof(cmd));
+       CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
+       cmd.data[1] |= domid;
+       cmd.data[2] = addr_lo;
+       cmd.data[3] = addr_hi;
+
+       /* a completion wait is required before the flush is guaranteed */
+       iommu->need_sync = 1;
+
+       return iommu_queue_command(iommu, &cmd);
+}
+
+/*
+ * Flush the IO/TLB entries covering 'size' bytes starting at 'address'
+ * for domain 'domid', one INVALIDATE_IOMMU_PAGES command per page.
+ *
+ * NOTE(review): the return values of the individual invalidation
+ * commands are deliberately ignored here; always returns 0.
+ */
+static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
+               u64 address, size_t size)
+{
+       unsigned i;
+       unsigned pages = to_pages(address, size);
+
+       address &= PAGE_MASK;
+
+       for (i = 0; i < pages; ++i) {
+               iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
+               address += PAGE_SIZE;
+       }
+
+       return 0;
+}