2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
26 * These routines are used by both DMA-remapping and Interrupt-remapping
29 #include <linux/pci.h>
30 #include <linux/dmar.h>
32 #include "intel-iommu.h"
35 #define PREFIX "DMAR:"
37 /* No locks are needed as DMA remapping hardware unit
38 * list is constructed at boot time and hotplug of
39 * these units are not supported by the architecture.
 */
/* All DRHD (DMA Remapping Hardware unit Definition) units parsed from DMAR. */
41 LIST_HEAD(dmar_drhd_units);
/* All RMRR (Reserved Memory Region Reporting) entries parsed from DMAR. */
42 LIST_HEAD(dmar_rmrr_units);
/* Mapped ACPI DMAR table header; set by early_dmar_detect(), read by parse_dmar_table(). */
44 static struct acpi_table_header * __initdata dmar_tbl;
/*
 * dmar_register_drhd_unit - add a parsed DRHD unit to the global
 * dmar_drhd_units list.  INCLUDE_ALL units are appended at the tail so
 * that a linear scan tries every specific unit first and only falls
 * back to the catch-all unit last.
 * NOTE(review): source appears truncated here (braces/else missing).
 */
46 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
49 * add INCLUDE_ALL at the tail, so scan the list will find it at
52 if (drhd->include_all)
53 list_add_tail(&drhd->list, &dmar_drhd_units);
/* non-INCLUDE_ALL units go at the head, ahead of any catch-all */
55 list_add(&drhd->list, &dmar_drhd_units);
/*
 * dmar_register_rmrr_unit - add a parsed RMRR entry to the global
 * dmar_rmrr_units list.  Ordering does not matter for RMRRs, so a
 * plain list_add() suffices.
 */
58 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
60 list_add(&rmrr->list, &dmar_rmrr_units);
/*
 * dmar_parse_one_dev_scope - resolve a single ACPI device-scope entry
 * to a struct pci_dev.
 * @scope:   the ACPI_DMAR device scope entry to resolve
 * @dev:     out parameter; receives the matched pci_dev on success
 * @segment: PCI segment (domain) number the scope belongs to
 *
 * The scope entry is followed in memory by a variable-length array of
 * acpi_dmar_pci_path (dev/fn pairs) that is walked bridge-by-bridge
 * from the scope's start bus down to the final device.
 * NOTE(review): source appears truncated (loop header/braces missing);
 * comments below describe only the visible lines.
 */
63 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
64 struct pci_dev **dev, u16 segment)
67 struct pci_dev *pdev = NULL;
68 struct acpi_dmar_pci_path *path;
/* locate the root bus of this scope within the given segment */
71 bus = pci_find_bus(segment, scope->bus);
/* the PCI path array immediately follows the fixed-size scope header */
72 path = (struct acpi_dmar_pci_path *)(scope + 1);
/* number of dev/fn hops = remaining bytes / sizeof one path element */
73 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
74 / sizeof(struct acpi_dmar_pci_path);
80 * Some BIOSes list non-exist devices in DMAR table, just
85 PREFIX "Device scope bus [%d] not found\n",
/* look up the device at this hop; pci_get_slot takes a reference */
89 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
91 printk(KERN_WARNING PREFIX
92 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
93 segment, bus->number, path->dev, path->fn);
/* descend to the secondary bus behind this bridge for the next hop */
98 bus = pdev->subordinate;
101 printk(KERN_WARNING PREFIX
102 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
103 segment, scope->bus, path->dev, path->fn);
/*
 * Sanity-check the BIOS-declared scope type against reality: an
 * ENDPOINT must not have a subordinate bus, a BRIDGE must have one.
 */
107 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
108 pdev->subordinate) || (scope->entry_type == \
109 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
111 printk(KERN_WARNING PREFIX
112 "Device scope type does not match for %s\n",
/*
 * dmar_parse_dev_scope - parse all device-scope entries in [start, end).
 * @start:   first byte of the device-scope area (after the DRHD/RMRR header)
 * @end:     one past the last byte of the scope area
 * @cnt:     out parameter; number of supported scope entries found
 * @devices: out parameter; kcalloc'd array of resolved pci_dev pointers
 * @segment: PCI segment number the scopes belong to
 *
 * Two-pass design: the first walk only counts supported entry types
 * (ENDPOINT and BRIDGE) so the array can be sized, the second walk
 * resolves each supported entry via dmar_parse_one_dev_scope().
 * NOTE(review): source appears truncated; comments describe only
 * the visible lines.
 */
120 static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
121 struct pci_dev ***devices, u16 segment)
123 struct acpi_dmar_device_scope *scope;
/* pass 1: count supported scope entries */
129 while (start < end) {
131 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
132 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
/* other scope types (e.g. IOAPIC/HPET) are not handled here */
135 printk(KERN_WARNING PREFIX
136 "Unsupported device scope\n");
/* advance by the entry's self-reported length */
137 start += scope->length;
/* allocate the pci_dev pointer array sized by pass 1 */
142 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
/* pass 2: resolve each supported entry into the array */
148 while (start < end) {
150 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152 ret = dmar_parse_one_dev_scope(scope,
153 &(*devices)[index], segment);
160 start += scope->length;
167 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
168 * structure which uniquely represent one DMA remapping hardware unit
169 * present in the platform
/*
 * NOTE(review): source appears truncated (braces, returns and some
 * error paths missing); comments describe only the visible lines.
 */
172 dmar_parse_one_drhd(struct acpi_dmar_header *header)
174 struct acpi_dmar_hardware_unit *drhd;
175 struct dmar_drhd_unit *dmaru;
/* tracks whether an INCLUDE_ALL unit was already seen across calls */
177 static int include_all;
179 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL)
183 drhd = (struct acpi_dmar_hardware_unit *)header;
184 dmaru->reg_base_addr = drhd->address;
185 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
/* an INCLUDE_ALL unit covers every device, so it carries no scope list */
187 if (!dmaru->include_all)
188 ret = dmar_parse_dev_scope((void *)(drhd + 1),
189 ((void *)drhd) + header->length,
190 &dmaru->devices_cnt, &dmaru->devices,
193 /* Only allow one INCLUDE_ALL */
195 printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
196 "device scope is allowed\n");
/* discard units that failed parsing or ended up with an empty scope */
202 if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
205 dmar_register_drhd_unit(dmaru);
/*
 * dmar_parse_one_rmrr - parse one Reserved Memory Region Reporting
 * (RMRR) structure from the DMAR table and register it.
 * NOTE(review): source appears truncated (braces/returns missing);
 * comments describe only the visible lines.
 */
210 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
212 struct acpi_dmar_reserved_memory *rmrr;
213 struct dmar_rmrr_unit *rmrru;
216 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL)
/* copy the reserved memory range out of the ACPI structure */
220 rmrr = (struct acpi_dmar_reserved_memory *)header;
221 rmrru->base_address = rmrr->base_address;
222 rmrru->end_address = rmrr->end_address;
/* the device scope list follows the fixed RMRR header in memory */
223 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
224 ((void *)rmrr) + header->length,
225 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
/* an RMRR without any resolved device scope is useless - drop it */
227 if (ret || (rmrru->devices_cnt == 0))
230 dmar_register_rmrr_unit(rmrru);
/*
 * dmar_table_print_dmar_entry - log a one-line summary of a DMAR
 * sub-table entry (DRHD register base/flags, or RMRR address range).
 * Purely informational; no state is modified.
 * NOTE(review): break statements appear missing from this view.
 */
235 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
237 struct acpi_dmar_hardware_unit *drhd;
238 struct acpi_dmar_reserved_memory *rmrr;
240 switch (header->type) {
241 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
242 drhd = (struct acpi_dmar_hardware_unit *)header;
243 printk (KERN_INFO PREFIX
244 "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
245 drhd->flags, drhd->address);
247 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
248 rmrr = (struct acpi_dmar_reserved_memory *)header;
250 printk (KERN_INFO PREFIX
251 "RMRR base: 0x%016Lx end: 0x%016Lx\n",
252 rmrr->base_address, rmrr->end_address);
258 * parse_dmar_table - parses the DMA reporting table
/*
 * Walks every sub-table entry of the mapped ACPI DMAR table (dmar_tbl),
 * dispatching DRHD entries to dmar_parse_one_drhd() and RMRR entries to
 * dmar_parse_one_rmrr().  Unknown entry types are skipped (ret = 0) for
 * forward compatibility with newer DMAR revisions.
 * NOTE(review): source appears truncated (returns/breaks missing);
 * comments describe only the visible lines.
 */
261 parse_dmar_table(void)
263 struct acpi_table_dmar *dmar;
264 struct acpi_dmar_header *entry_header;
267 dmar = (struct acpi_table_dmar *)dmar_tbl;
/* host address width sanity check: must cover at least a 4K page */
271 if (dmar->width < PAGE_SHIFT_4K - 1) {
272 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
276 printk (KERN_INFO PREFIX "Host address width %d\n",
/* sub-table entries start immediately after the fixed DMAR header */
279 entry_header = (struct acpi_dmar_header *)(dmar + 1);
280 while (((unsigned long)entry_header) <
281 (((unsigned long)dmar) + dmar_tbl->length)) {
282 dmar_table_print_dmar_entry(entry_header);
284 switch (entry_header->type) {
285 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
286 ret = dmar_parse_one_drhd(entry_header);
288 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
289 ret = dmar_parse_one_rmrr(entry_header);
292 printk(KERN_WARNING PREFIX
293 "Unknown DMAR structure type\n");
294 ret = 0; /* for forward compatibility */
/* advance by the entry's self-reported length */
300 entry_header = ((void *)entry_header + entry_header->length);
/*
 * dmar_pci_device_match - test whether @dev, or any PCI bridge above
 * it, appears in the given @devices array of @cnt entries.  Walking up
 * via dev->bus->self means a device behind a listed bridge matches too.
 * NOTE(review): enclosing loop/braces appear missing from this view.
 */
305 int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
311 for (index = 0; index < cnt; index++)
312 if (dev == devices[index])
315 /* Check our parent */
316 dev = dev->bus->self;
/*
 * dmar_find_matched_drhd_unit - find the DRHD unit responsible for @dev.
 * Scans dmar_drhd_units; a unit matches if it is INCLUDE_ALL or if the
 * device (or an upstream bridge) is in its scope list.  Registration
 * order guarantees the INCLUDE_ALL unit sits at the tail, so specific
 * units are always tried first.
 */
322 struct dmar_drhd_unit *
323 dmar_find_matched_drhd_unit(struct pci_dev *dev)
325 struct dmar_drhd_unit *drhd = NULL;
327 list_for_each_entry(drhd, &dmar_drhd_units, list) {
328 if (drhd->include_all || dmar_pci_device_match(drhd->devices,
329 drhd->devices_cnt, dev))
/*
 * dmar_table_init - parse the DMAR table and report what was found.
 * Called once at boot; logs a failure to parse, the absence of any
 * DRHD units, and the absence of any RMRR entries.
 * NOTE(review): return paths appear missing from this view.
 */
337 int __init dmar_table_init(void)
342 ret = parse_dmar_table();
344 printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
/* no DRHD units means no remapping hardware to drive */
348 if (list_empty(&dmar_drhd_units)) {
349 printk(KERN_INFO PREFIX "No DMAR devices found\n");
/* an empty RMRR list is legal - just informational */
353 if (list_empty(&dmar_rmrr_units)) {
354 printk(KERN_INFO PREFIX "No RMRR found\n");
362 * early_dmar_detect - checks to see if the platform supports DMAR devices
/*
 * Asks ACPI for the DMAR signature table and caches the mapping in
 * dmar_tbl for later parsing.  Returns 1 if the table was found and
 * mapped, 0 otherwise.
 */
364 int __init early_dmar_detect(void)
366 acpi_status status = AE_OK;
368 /* if we could find DMAR table, then there are DMAR devices */
369 status = acpi_get_table(ACPI_SIG_DMAR, 0,
370 (struct acpi_table_header **)&dmar_tbl);
/* ACPI may report success yet hand back a NULL mapping - treat as absent */
372 if (ACPI_SUCCESS(status) && !dmar_tbl) {
373 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
374 status = AE_NOT_FOUND;
377 return (ACPI_SUCCESS(status) ? 1 : 0);
/*
 * alloc_iommu - map and initialize one IOMMU's register space.
 * @iommu: the intel_iommu structure to fill in
 * @drhd:  the DRHD unit describing this IOMMU's register base address
 *
 * Maps one 4K page first to read CAP/ECAP, then remaps a larger region
 * if those registers say the register file extends beyond one page.
 * NOTE(review): error-handling paths and the return appear missing
 * from this view; comments describe only the visible lines.
 */
380 struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
381 struct dmar_drhd_unit *drhd)
/* initial single-page mapping, enough to reach CAP and ECAP */
386 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
388 printk(KERN_ERR "IOMMU: can't map the region\n");
391 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
392 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
394 /* the registers might be more than one page */
395 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
396 cap_max_fault_reg_offset(iommu->cap));
397 map_size = PAGE_ALIGN_4K(map_size);
/* remap with the full size when the register file spans >1 page */
398 if (map_size > PAGE_SIZE_4K) {
400 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
402 printk(KERN_ERR "IOMMU: can't map the region\n");
407 ver = readl(iommu->reg + DMAR_VER_REG);
408 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
409 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
410 iommu->cap, iommu->ecap);
412 spin_lock_init(&iommu->register_lock);
421 void free_iommu(struct intel_iommu *iommu)
427 free_dmar_iommu(iommu);