2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
26 * These routines are used by both DMA-remapping and Interrupt-remapping
29 #include <linux/pci.h>
30 #include <linux/dmar.h>
32 #include "intel-iommu.h"
35 #define PREFIX "DMAR:"
37 /* No locks are needed as DMA remapping hardware unit
38 * list is constructed at boot time and hotplug of
39 * these units are not supported by the architecture.
 */
/* Global list of DRHD (DMA remapping hardware unit) entries parsed from DMAR. */
41 LIST_HEAD(dmar_drhd_units);
/* Global list of RMRR (reserved memory region) entries parsed from DMAR. */
42 LIST_HEAD(dmar_rmrr_units);
/*
 * Mapped DMAR ACPI table header; set by early_dmar_detect() via
 * acpi_get_table() and consumed by parse_dmar_table().
 * NOTE(review): this listing has embedded line numbers and elided lines
 * (the numbering skips values); the closing "*" "/" of the comment above
 * was one such dropped line.
 */
44 static struct acpi_table_header * __initdata dmar_tbl;
/*
 * dmar_register_drhd_unit - add one parsed DRHD unit to dmar_drhd_units.
 *
 * NOTE(review): lines are elided from this listing (embedded numbering
 * skips values) -- the function braces and the "else" between the two
 * list_add calls are missing here; verify against the complete source.
 */
46 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end; specific units are therefore matched before the
	 * catch-all entry.
	 */
52 if (drhd->include_all)
53 list_add_tail(&drhd->list, &dmar_drhd_units);
	/* ordinary (non-INCLUDE_ALL) units go at the head of the list */
55 list_add(&drhd->list, &dmar_drhd_units);
/*
 * dmar_register_rmrr_unit - add one parsed RMRR entry to dmar_rmrr_units.
 * Ordering is irrelevant for RMRRs, so a plain list_add() suffices.
 */
58 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
60 list_add(&rmrr->list, &dmar_rmrr_units);
/*
 * dmar_parse_one_dev_scope - resolve a single ACPI device-scope entry to a
 * struct pci_dev.
 *
 * @scope:   the ACPI device-scope structure; a variable-length array of
 *           acpi_dmar_pci_path elements immediately follows it.
 * @dev:     out parameter receiving the resolved pci_dev (via pci_get_slot,
 *           so presumably with a reference held -- confirm the drop path in
 *           the elided lines).
 * @segment: PCI segment (domain) number used for the bus lookup.
 *
 * Walks the dev/fn path from the scope's start bus down through bridges
 * (bus = pdev->subordinate) to the final device, warning when the BIOS
 * references hardware that does not exist.
 *
 * NOTE(review): many lines are elided from this listing (declarations of
 * bus/count, the walk loop header, returns, and closing braces); verify
 * control flow against the complete source.
 */
63 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
64 struct pci_dev **dev, u16 segment)
67 struct pci_dev *pdev = NULL;
68 struct acpi_dmar_pci_path *path;
71 bus = pci_find_bus(segment, scope->bus);
	/* path array follows the fixed-size scope header in memory */
72 path = (struct acpi_dmar_pci_path *)(scope + 1);
	/* number of path elements = remaining bytes / element size */
73 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
74 / sizeof(struct acpi_dmar_pci_path);
	/*
	 * Some BIOSes list non-existent devices in the DMAR table; warn
	 * rather than fail hard. (Comment reconstructed from a fragment;
	 * surrounding lines were elided.)
	 */
85 PREFIX "Device scope bus [%d] not found\n",
	/* descend one hop: look up the dev/fn on the current bus */
89 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
91 printk(KERN_WARNING PREFIX
92 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
93 segment, bus->number, path->dev, path->fn);
	/* cross the bridge: continue the walk on its secondary bus */
98 bus = pdev->subordinate;
101 printk(KERN_WARNING PREFIX
102 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
103 segment, scope->bus, path->dev, path->fn);
	/*
	 * Sanity check: an ENDPOINT scope must not resolve to a bridge
	 * (pdev->subordinate set) and a BRIDGE scope must resolve to one.
	 */
107 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
108 pdev->subordinate) || (scope->entry_type == \
109 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
111 printk(KERN_WARNING PREFIX
112 "Device scope type does not match for %s\n",
/*
 * dmar_parse_dev_scope - resolve every supported device-scope entry in
 * [start, end) into an array of pci_dev pointers.
 *
 * @start/@end: byte range of the variable-length scope entries.
 * @cnt:        out: number of supported (ENDPOINT/BRIDGE) entries found.
 * @devices:    out: kcalloc'd array of *cnt pci_dev pointers; caller owns it.
 * @segment:    PCI segment passed through to dmar_parse_one_dev_scope().
 *
 * Two-pass algorithm: first pass counts supported entries so the array can
 * be allocated exactly, second pass resolves each entry in place.
 *
 * NOTE(review): this listing elides lines (scope assignment inside each
 * loop, (*cnt)++, the kcalloc NULL check, index increment, returns);
 * verify against the complete source.
 */
120 static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
121 struct pci_dev ***devices, u16 segment)
123 struct acpi_dmar_device_scope *scope;
	/* pass 1: count the entries we know how to handle */
129 while (start < end) {
131 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
132 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
135 printk(KERN_WARNING PREFIX
136 "Unsupported device scope\n");
	/* entries are variable-length; scope->length advances to the next */
137 start += scope->length;
	/* exact-size result array; zeroed by kcalloc */
142 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	/* pass 2: resolve each supported entry into the array */
148 while (start < end) {
150 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152 ret = dmar_parse_one_dev_scope(scope,
153 &(*devices)[index], segment);
160 start += scope->length;
/*
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform.
 *
 * Allocates a dmar_drhd_unit, records the register base address and the
 * INCLUDE_ALL flag from the ACPI structure, allocates the backing iommu
 * via alloc_iommu(), and registers the unit on the global list.
 *
 * NOTE(review): the return-type line ("static int __init") and several
 * body lines (kzalloc NULL check, hdr assignment, error returns) are
 * elided from this listing; verify against the complete source.
 */
172 dmar_parse_one_drhd(struct acpi_dmar_header *header)
174 struct acpi_dmar_hardware_unit *drhd;
175 struct dmar_drhd_unit *dmaru;
178 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
183 drhd = (struct acpi_dmar_hardware_unit *)header;
184 dmaru->reg_base_addr = drhd->address;
185 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	/* map registers and set up the per-unit intel_iommu */
187 ret = alloc_iommu(dmaru);
192 dmar_register_drhd_unit(dmaru);
/*
 * dmar_parse_dev - late (post-PCI) pass for one DRHD unit: resolve its
 * device-scope entries into pci_dev pointers.
 *
 * Uses a function-local static to enforce that at most one INCLUDE_ALL
 * unit exists. A unit that resolves to zero devices and is not
 * INCLUDE_ALL is useless and is removed from the global list.
 *
 * NOTE(review): elided lines include the return type, the segment
 * argument of dmar_parse_dev_scope(), the include_all bookkeeping, and
 * the kfree/return paths; verify against the complete source.
 */
197 dmar_parse_dev(struct dmar_drhd_unit *dmaru)
199 struct acpi_dmar_hardware_unit *drhd;
	/* tracks whether an INCLUDE_ALL unit was already seen across calls */
200 static int include_all;
203 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
	/* scope entries follow the fixed hardware-unit header */
205 if (!dmaru->include_all)
206 ret = dmar_parse_dev_scope((void *)(drhd + 1),
207 ((void *)drhd) + drhd->header.length,
208 &dmaru->devices_cnt, &dmaru->devices,
211 /* Only allow one INCLUDE_ALL */
213 printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
214 "device scope is allowed\n");
	/* drop units that matched nothing (unless they are INCLUDE_ALL) */
220 if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
221 list_del(&dmaru->list);
/*
 * dmar_parse_one_rmrr - parse one RMRR (reserved memory region) entry from
 * the DMAR table: allocate a dmar_rmrr_unit, copy the region's physical
 * address range, and register it on the global list.
 *
 * NOTE(review): the return-type line, kzalloc NULL check, hdr assignment
 * and return statement are elided from this listing.
 */
228 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
230 struct acpi_dmar_reserved_memory *rmrr;
231 struct dmar_rmrr_unit *rmrru;
233 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
238 rmrr = (struct acpi_dmar_reserved_memory *)header;
239 rmrru->base_address = rmrr->base_address;
240 rmrru->end_address = rmrr->end_address;
242 dmar_register_rmrr_unit(rmrru);
/*
 * rmrr_parse_dev - late (post-PCI) pass for one RMRR entry: resolve its
 * device-scope entries into pci_dev pointers. An RMRR that matched no
 * devices is useless and is removed from the global list.
 *
 * NOTE(review): the return type and the kfree/return lines are elided
 * from this listing.
 */
247 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
249 struct acpi_dmar_reserved_memory *rmrr;
252 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	/* scope entries follow the fixed reserved-memory header */
253 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
254 ((void *)rmrr) + rmrr->header.length,
255 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
257 if (ret || (rmrru->devices_cnt == 0)) {
258 list_del(&rmrru->list);
/*
 * dmar_table_print_dmar_entry - log one DMAR sub-table entry (DRHD flags
 * and register base, or RMRR address range) at KERN_INFO.
 *
 * NOTE(review): the "break;" after each case (and the return type /
 * braces) are among the lines elided from this listing -- without them
 * the switch would fall through; verify against the complete source.
 */
265 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
267 struct acpi_dmar_hardware_unit *drhd;
268 struct acpi_dmar_reserved_memory *rmrr;
270 switch (header->type) {
271 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
272 drhd = (struct acpi_dmar_hardware_unit *)header;
273 printk (KERN_INFO PREFIX
274 "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
275 drhd->flags, drhd->address);
277 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
278 rmrr = (struct acpi_dmar_reserved_memory *)header;
280 printk (KERN_INFO PREFIX
281 "RMRR base: 0x%016Lx end: 0x%016Lx\n",
282 rmrr->base_address, rmrr->end_address);
/*
 * parse_dmar_table - parses the DMAR ACPI reporting table.
 *
 * Validates the host address width, then walks the variable-length
 * sub-table entries following the DMAR header, dispatching DRHD entries
 * to dmar_parse_one_drhd() and RMRR entries to dmar_parse_one_rmrr().
 * Unknown entry types are skipped for forward compatibility.
 *
 * NOTE(review): elided lines include the return type, the NULL check on
 * dmar_tbl, "break;" statements, the error-exit on ret, and the final
 * return; verify against the complete source.
 */
291 parse_dmar_table(void)
293 struct acpi_table_dmar *dmar;
294 struct acpi_dmar_header *entry_header;
297 dmar = (struct acpi_table_dmar *)dmar_tbl;
	/* dmar->width is HAW-1; it must cover at least a 4K page */
301 if (dmar->width < PAGE_SHIFT_4K - 1) {
302 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
306 printk (KERN_INFO PREFIX "Host address width %d\n",
	/* sub-table entries start immediately after the DMAR header */
309 entry_header = (struct acpi_dmar_header *)(dmar + 1);
310 while (((unsigned long)entry_header) <
311 (((unsigned long)dmar) + dmar_tbl->length)) {
312 dmar_table_print_dmar_entry(entry_header);
314 switch (entry_header->type) {
315 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
316 ret = dmar_parse_one_drhd(entry_header);
318 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
319 ret = dmar_parse_one_rmrr(entry_header);
322 printk(KERN_WARNING PREFIX
323 "Unknown DMAR structure type\n");
324 ret = 0; /* for forward compatibility */
	/* entries are variable-length; advance by each entry's own length */
330 entry_header = ((void *)entry_header + entry_header->length);
/*
 * dmar_pci_device_match - return nonzero if @dev, or any bridge on its
 * path to the root, appears in @devices[0..cnt-1].
 *
 * Matching by ancestry means a scope entry naming a bridge covers every
 * device behind that bridge.
 *
 * NOTE(review): the "struct pci_dev *dev" parameter line, the outer
 * while loop over ancestors, and the return statements are elided from
 * this listing.
 */
335 int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
	/* direct match against the scope's device array */
341 for (index = 0; index < cnt; index++)
342 if (dev == devices[index])
345 /* Check our parent */
346 dev = dev->bus->self;
/*
 * dmar_find_matched_drhd_unit - find the DRHD unit whose device scope
 * covers @dev. Because dmar_register_drhd_unit() keeps the INCLUDE_ALL
 * unit at the tail of the list, specific units are tried first and the
 * catch-all matches last.
 *
 * NOTE(review): the return statements and closing braces are elided
 * from this listing.
 */
352 struct dmar_drhd_unit *
353 dmar_find_matched_drhd_unit(struct pci_dev *dev)
355 struct dmar_drhd_unit *drhd = NULL;
357 list_for_each_entry(drhd, &dmar_drhd_units, list) {
358 if (drhd->include_all || dmar_pci_device_match(drhd->devices,
359 drhd->devices_cnt, dev))
/*
 * dmar_dev_scope_init - second-stage init, run once the PCI subsystem is
 * up: resolve the device scopes of every registered DRHD and RMRR entry
 * into pci_dev pointers.
 *
 * NOTE(review): the ret checks/early exits inside each loop and the
 * final return are elided from this listing.
 */
366 int __init dmar_dev_scope_init(void)
368 struct dmar_drhd_unit *drhd;
369 struct dmar_rmrr_unit *rmrr;
372 for_each_drhd_unit(drhd) {
373 ret = dmar_parse_dev(drhd);
378 for_each_rmrr_units(rmrr) {
379 ret = rmrr_parse_dev(rmrr);
/*
 * dmar_table_init - parse the DMAR table and report what was found.
 *
 * Idempotent: a function-local static guards against repeated parsing,
 * so later callers get the result of the first invocation. An empty
 * DRHD or RMRR list is only reported at KERN_INFO (missing RMRRs are
 * legal; presumably a missing DRHD list is an error in the elided code
 * -- confirm the return paths against the complete source).
 */
388 int __init dmar_table_init(void)
390 static int dmar_table_initialized;
	/* already ran once; don't parse the table twice */
393 if (dmar_table_initialized)
396 dmar_table_initialized = 1;
398 ret = parse_dmar_table();
401 printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
405 if (list_empty(&dmar_drhd_units)) {
406 printk(KERN_INFO PREFIX "No DMAR devices found\n");
410 if (list_empty(&dmar_rmrr_units)) {
411 printk(KERN_INFO PREFIX "No RMRR found\n");
/*
 * early_dmar_detect - checks to see if the platform supports DMAR devices.
 *
 * Attempts to map the ACPI DMAR table into dmar_tbl. A success status
 * with a NULL table pointer is treated as "not found".
 *
 * Returns 1 when the DMAR table is present and mapped, 0 otherwise.
 */
421 int __init early_dmar_detect(void)
423 acpi_status status = AE_OK;
425 /* if we could find DMAR table, then there are DMAR devices */
426 status = acpi_get_table(ACPI_SIG_DMAR, 0,
427 (struct acpi_table_header **)&dmar_tbl);
	/* defensive: some firmware reports success but maps nothing */
429 if (ACPI_SUCCESS(status) && !dmar_tbl) {
430 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
431 status = AE_NOT_FOUND;
434 return (ACPI_SUCCESS(status) ? 1 : 0);
/*
 * alloc_iommu - allocate and initialize the intel_iommu for one DRHD unit:
 * map its register window, read the capability registers, and remap with
 * a larger window if the capabilities say the registers span more than
 * one 4K page.
 *
 * A function-local static counter hands out sequential iommu->seq_id
 * values (no locking visible here -- presumably callers are serialized
 * at boot; confirm).
 *
 * NOTE(review): elided lines include kzalloc/ioremap NULL checks, the
 * iounmap before the second ioremap, the drhd->iommu assignment, error
 * cleanup, and the returns; verify against the complete source.
 */
437 int alloc_iommu(struct dmar_drhd_unit *drhd)
439 struct intel_iommu *iommu;
442 static int iommu_allocated = 0;
444 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
448 iommu->seq_id = iommu_allocated++;
	/* initial mapping: assume one 4K page of registers */
450 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
452 printk(KERN_ERR "IOMMU: can't map the region\n");
455 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
456 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
458 /* the registers might be more than one page */
459 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
460 cap_max_fault_reg_offset(iommu->cap));
461 map_size = PAGE_ALIGN_4K(map_size);
462 if (map_size > PAGE_SIZE_4K) {
	/* remap with a window large enough for the whole register file */
464 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
466 printk(KERN_ERR "IOMMU: can't map the region\n");
471 ver = readl(iommu->reg + DMAR_VER_REG);
472 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
473 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
474 iommu->cap, iommu->ecap);
476 spin_lock_init(&iommu->register_lock);
485 void free_iommu(struct intel_iommu *iommu)
491 free_dmar_iommu(iommu);