2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
36 #include <linux/errno.h>
37 #include <linux/string.h>
39 #include <asm/byteorder.h>
/* Permitted key type for each key id */
#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
/*
 * Bitmask of legal value types, indexed by key id (0x00..0x2f).
 * csr1212_new_keyval() consults this table to reject an (id, type)
 * combination that IEEE 1212 does not permit.
 * NOTE(review): this excerpt omits several initializers; the full table
 * must supply all 0x30 entries (and the closing brace).
 */
static const u8 csr1212_key_id_type_map[0x30] = {
	__C,			/* used by Apple iSight */
	__D | __L,		/* Descriptor */
	__I | __D | __L,	/* Bus_Dependent_Info */
	__I | __D | __L,	/* Vendor */
	__I,			/* Hardware_Version */
	__D | __L | __I,	/* Module */
	__I, 0, 0, 0,		/* used by Apple iSight, Reserved */
	__I,			/* Node_Capabilities */
	0, 0, 0,		/* Reserved */
	__I,			/* Specifier_ID */
	__I | __C | __D | __L,	/* Dependent_Info */
	__L,			/* Unit_Location */
	__L,			/* Extended_ROM */
	__I,			/* Extended_Key_Specifier_ID */
	__I,			/* Extended_Key */
	__I | __C | __D | __L,	/* Extended_Data */
	__L,			/* Modifiable_Descriptor */
	__I,			/* Directory_ID */
/* Quadlet (32-bit word) <-> byte count conversions.
 * bytes_to_quads() rounds up to a whole quadlet. */
#define quads_to_bytes(_q) ((_q) * sizeof(u32))
#define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))
/* Free the payload owned by a keyval.  A leaf's data buffer is freed
 * unless it is an Extended ROM leaf, whose data points into a ROM cache
 * buffer owned by the cache (see csr1212_append_new_cache()), not by the
 * keyval itself. */
static void free_keyval(struct csr1212_keyval *kv)
	if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
	    (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
		CSR1212_FREE(kv->value.leaf.data);
/* IEEE 1212 CRC-16 over 'length' quadlets, consuming each quadlet in
 * big-endian byte order, one nibble at a time (MSB first).
 * NOTE(review): the local declarations, crc initialization, pointer
 * advance, and closing braces are elided from this excerpt. */
static u16 csr1212_crc16(const u32 *buffer, size_t length)
	for (; length; length--) {
		data = be32_to_cpu(*buffer);
		for (shift = 28; shift >= 0; shift -= 4 ) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
	/* Returned in big-endian byte order: callers (e.g.
	 * csr1212_fill_cache()) store it directly into the ROM image. */
	return cpu_to_be16(crc);
/* Microsoft computes the CRC with the bytes in reverse order.  Therefore we
 * have a special version of the CRC algorithm to account for their buggy
 * implementation: identical to csr1212_crc16() except each quadlet is
 * consumed in little-endian byte order. */
static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
	for (; length; length--) {
		data = le32_to_cpu(*buffer); /* only difference vs. csr1212_crc16() */
		for (shift = 28; shift >= 0; shift -= 4 ) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
	return cpu_to_be16(crc);
/* Linear scan of directory 'dir' for the dentry referencing keyval 'kv'.
 * (The match test and return inside the loop are elided in this excerpt;
 * presumably returns the matching dentry or NULL — confirm in full file.) */
static struct csr1212_dentry *
csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
	struct csr1212_dentry *pos;

	for (pos = dir->value.directory.dentries_head;
	     pos != NULL; pos = pos->next) {
/* Walk the circular keyval list starting after 'kv_list' looking for the
 * keyval located at ROM byte offset 'offset'.  Used while parsing to
 * detect when two directory entries point at the same leaf/directory. */
static struct csr1212_keyval *
csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
	struct csr1212_keyval *kv;

	for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
		if (kv->offset == offset)
166 /* Creation Routines */
/*
 * Allocate and initialize a CSR instance: the csr struct itself, the
 * initial cache covering the Config ROM space, and a root directory
 * keyval.  'bus_info_size' is in bytes; 'private' is opaque caller data
 * passed back through the bus ops.
 * NOTE(review): NULL-return error paths and the assignment of
 * csr->cache_head from csr1212_rom_cache_malloc() are elided here.
 */
struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
				       size_t bus_info_size, void *private)
	struct csr1212_csr *csr;

	csr = CSR1212_MALLOC(sizeof(*csr));
	csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
				 CSR1212_CONFIG_ROM_SPACE_SIZE);
	if (!csr->cache_head) {
	/* The keyval key id is not used for the root node, but a valid key id
	 * that can be used for a directory needs to be passed to
	 * csr1212_new_directory(). */
	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
		CSR1212_FREE(csr->cache_head);
	/* Bus info block lives at the head of the first cache. */
	csr->bus_info_data = csr->cache_head->data;
	csr->bus_info_len = bus_info_size;
	csr->crc_len = bus_info_size;
	csr->private = private;
	csr->cache_tail = csr->cache_head;
/* Initialize a locally hosted CSR: decode the 2-bit max_rom field into a
 * byte count (0->4, 1->64, 2->1024; 3 is reserved and maps to 0) and copy
 * the caller's bus info block into the cache buffer. */
void csr1212_init_local_csr(struct csr1212_csr *csr,
			    const u32 *bus_info_data, int max_rom)
	static const int mr_map[] = { 4, 64, 1024, 0 };

	/* max_rom must be a valid 2-bit field value. */
	BUG_ON(max_rom & ~0x3);
	csr->max_rom = mr_map[max_rom];
	memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
/* Allocate a fresh keyval of the given type/id.  Rejects (returns NULL,
 * per the elided error path) any key id below 0x30 whose permitted-type
 * bitmask in csr1212_key_id_type_map does not include 'type'; ids >= 0x30
 * (vendor-defined range) accept any type. */
static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
	struct csr1212_keyval *kv;

	if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
	kv = CSR1212_MALLOC(sizeof(*kv));
	kv->associate = NULL;
/* Create an immediate-value keyval holding the 24-bit 'value'.
 * (NULL check and return elided in this excerpt.) */
struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);

	kv->value.immediate = value;
/* Create a leaf keyval and copy 'data_len' bytes of 'data' into a freshly
 * allocated buffer.  The stored length is in quadlets, rounded up.
 * NOTE(review): the branch structure is elided here — the 'data = NULL'
 * assignment is presumably the path taken when data/data_len is empty;
 * confirm against the full file. */
static struct csr1212_keyval *
csr1212_new_leaf(u8 key, const void *data, size_t data_len)
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);

	kv->value.leaf.data = CSR1212_MALLOC(data_len);
	if (!kv->value.leaf.data) {
	memcpy(kv->value.leaf.data, data, data_len);
		kv->value.leaf.data = NULL;
	kv->value.leaf.len = bytes_to_quads(data_len);
/* Create a CSR-offset keyval referencing a register at 'csr_offset'
 * (quadlet offset within the CSR register space). */
static struct csr1212_keyval *
csr1212_new_csr_offset(u8 key, u32 csr_offset)
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);

	kv->value.csr_offset = csr_offset;
/* Create an empty directory keyval (no entries, zero image length). */
struct csr1212_keyval *csr1212_new_directory(u8 key)
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);

	kv->value.directory.len = 0;
	kv->value.directory.dentries_head = NULL;
	kv->value.directory.dentries_tail = NULL;
/*
 * Chain 'associate' onto 'kv' so the pair is laid out adjacently in the
 * generated ROM image.  The BUG_ON encodes the legal pairings from
 * IEEE 1212: a Descriptor may not be the anchor; an associate must be a
 * Descriptor/Dependent_Info/Extended_* key or a vendor-defined id
 * (>= 0x30); Extended_Key_Specifier_ID must pair with Extended_Key, and
 * Extended_Key with Extended_Data (checked in both directions).
 * Any previously attached associate is released first.
 */
void csr1212_associate_keyval(struct csr1212_keyval *kv,
			      struct csr1212_keyval *associate)
	BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
	       (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
		associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
		associate->key.id < 0x30) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));

	/* Drop the reference to any previous associate before replacing. */
	csr1212_release_keyval(kv->associate);
	kv->associate = associate;
/* Append keyval 'kv' to the tail of directory 'dir' via a new dentry.
 * Returns CSR1212_SUCCESS; allocation-failure return is elided from this
 * excerpt.  The dentry list is doubly linked with head/tail pointers. */
int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
				       struct csr1212_keyval *kv)
	struct csr1212_dentry *dentry;

	BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);

	dentry = CSR1212_MALLOC(sizeof(*dentry));

	/* Link at tail; handle the empty-directory case explicitly. */
	dentry->prev = dir->value.directory.dentries_tail;

	if (!dir->value.directory.dentries_head)
		dir->value.directory.dentries_head = dentry;
	if (dir->value.directory.dentries_tail)
		dir->value.directory.dentries_tail->next = dentry;
	dir->value.directory.dentries_tail = dentry;

	return CSR1212_SUCCESS;
/* Payload of a descriptor leaf starts after its one-quadlet header. */
#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
	(&((kv)->value.leaf.data[1]))

/* Rewrite the type field of the descriptor leaf header quadlet while
 * preserving the specifier_id field (and vice versa below).  The header
 * quadlet is stored big-endian in the leaf data. */
#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
		     ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
		      CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
		     ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
/* Create a descriptor leaf: a plain leaf with one extra header quadlet
 * (CSR1212_DESCRIPTOR_LEAF_OVERHEAD) carrying the descriptor type and
 * specifier_id, followed by 'data_len' bytes of payload. */
static struct csr1212_keyval *
csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
			    const void *data, size_t data_len)
	struct csr1212_keyval *kv;

	/* NULL data: allocate the space but defer filling the payload. */
	kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
			      data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);

	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
	memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
/* Read-modify-write helpers for the second header quadlet of a textual
 * descriptor leaf.  Each clears its own field (width / character set /
 * language) with a big-endian mask and ORs in the new value, leaving the
 * other two fields untouched. */
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
	((kv)->value.leaf.data[1] = \
	 ((kv)->value.leaf.data[1] & \
	  cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
			CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
	 cpu_to_be32(((width) & CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
		     CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))

#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
	((kv)->value.leaf.data[1] = \
	 ((kv)->value.leaf.data[1] & \
	  cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
			CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
	 cpu_to_be32(((char_set) & \
		      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
		     CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))

#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
	((kv)->value.leaf.data[1] = \
	 ((kv)->value.leaf.data[1] & \
	  cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
	 cpu_to_be32(((language) & \
		      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
/* Create a textual descriptor leaf (descriptor type 0, specifier 0) with
 * the given character width, character set and language, then copy in the
 * text.  The final quadlet is pre-zeroed so any bytes past data_len are
 * NUL padding in the image. */
static struct csr1212_keyval *
csr1212_new_textual_descriptor_leaf(u8 cwidth, u16 cset, u16 language,
				    const void *data, size_t data_len)
	struct csr1212_keyval *kv;

	kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
					 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);

	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);

	lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);

	/* make sure last quadlet is zeroed out */
	*((u32*)&(lstr[(data_len - 1) & ~0x3])) = 0;

	/* don't copy the NUL terminator */
	memcpy(lstr, data, data_len);
/*
 * Check that string 's' contains only characters from the IEEE 1212
 * "minimal ASCII" set (par. 7.5.4.1), the subset permitted in minimal
 * ASCII textual descriptor leaves.
 *
 * Returns 0 if every character conforms, -1 on the first that does not.
 * (This excerpt of the function was missing its loop scaffolding and
 * return 0; reconstructed here around the fully visible lookup table.)
 */
static int csr1212_check_minimal_ascii(const char *s)
{
	/* Identity map for permitted characters; 0x00 marks a character
	 * outside minimal ASCII.  NUL itself never reaches the lookup —
	 * it terminates the loop first. */
	static const char minimal_ascii_table[] = {
					/* 1   2   4   8  16  32  64  128 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
		0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
		0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
		0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
		0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
		0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
		0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
		0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
		0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
		0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
		0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
	};

	for (; *s; s++) {
		/* Mask to 7 bits so a (possibly signed) char >= 0x80 cannot
		 * index out of bounds; such a character then fails the
		 * identity comparison and is rejected. */
		if (minimal_ascii_table[*s & 0x7F] != *s)
			return -1; /* failed */
	}
	/* String conforms to minimal-ascii, as specified by IEEE 1212,
	 * par. 7.5.4.1 */
	return 0;
}
/* Create a minimal-ASCII textual descriptor leaf for string 's'.
 * Rejects (NULL-return path elided) strings containing characters outside
 * the minimal ASCII set. */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
	/* Check if string conforms to minimal_ascii format */
	if (csr1212_check_minimal_ascii(s))

	/* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII):
	 * width 0, character set 0, language 0. */
	return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
487 /* Destruction Routines */
/* Remove keyval 'kv' from directory 'dir': unlink and free its dentry,
 * fix up head/tail pointers, then drop the directory's reference to the
 * keyval.  Silently returns if the keyval is not in the directory. */
void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
					  struct csr1212_keyval *kv)
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)

	dentry = csr1212_find_keyval(dir, kv);

	/* Unlink from the doubly linked dentry list. */
	dentry->prev->next = dentry->next;
	dentry->next->prev = dentry->prev;
	if (dir->value.directory.dentries_head == dentry)
		dir->value.directory.dentries_head = dentry->next;
	if (dir->value.directory.dentries_tail == dentry)
		dir->value.directory.dentries_tail = dentry->prev;

	CSR1212_FREE(dentry);

	csr1212_release_keyval(kv);
/* This function is used to free the memory taken by a keyval.  If the given
 * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if their respective refcnts are 0.  By means of
 * list manipulation, this routine will descend a directory structure in a
 * non-recursive manner.
 * NOTE(review): the work-list setup and the main loop head are elided in
 * this excerpt; 'dentry' is the stack-allocated sentinel node, and
 * head/tail delimit the pending-destruction list. */
void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
	struct csr1212_keyval *k, *a;
	struct csr1212_dentry dentry;
	struct csr1212_dentry *head, *tail;

		if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
			/* If the current entry is a directory, then move all
			 * the entries to the destruction list. */
			if (k->value.directory.dentries_head) {
				tail->next = k->value.directory.dentries_head;
				k->value.directory.dentries_head->prev = tail;
				tail = k->value.directory.dentries_tail;
		/* Free already-processed dentries, but never the on-stack
		 * sentinel. */
		if (head->prev && head->prev != &dentry) {
			CSR1212_FREE(head->prev);
		} else if (tail != &dentry)
/* Tear down a CSR instance: release the root keyval tree; the cache and
 * cache-region cleanup loops (using c/oc and cr/ocr) are elided from this
 * excerpt. */
void csr1212_destroy_csr(struct csr1212_csr *csr)
	struct csr1212_csr_rom_cache *c, *oc;
	struct csr1212_cache_region *cr, *ocr;

	csr1212_release_keyval(csr->root_kv);
593 /* CSR Image Creation */
/*
 * Grow the CSR with one more ROM cache of at least 'romsize' bytes:
 * allocate an address range from the bus ops, create the cache, create an
 * Extended ROM leaf pointing into it, and attach that leaf to the root
 * directory.  Each failure path releases the address range (and keyval)
 * acquired so far; the error-return statements themselves are elided in
 * this excerpt.
 */
static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
	struct csr1212_csr_rom_cache *cache;

	BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
	       !csr->ops->release_addr || csr->max_rom < 1);

	/* ROM size must be a multiple of csr->max_rom */
	romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);

	csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
	if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
	if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
		/* Invalid address returned from allocate_addr_range(). */
		csr->ops->release_addr(csr_addr, csr->private);

	cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
		csr->ops->release_addr(csr_addr, csr->private);

	cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
	if (!cache->ext_rom) {
		csr->ops->release_addr(csr_addr, csr->private);

	if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
		csr1212_release_keyval(cache->ext_rom);
		csr->ops->release_addr(csr_addr, csr->private);

	/* len = -1 marks the leaf as not yet laid out; data aliases the
	 * cache buffer (see free_keyval()'s Extended ROM exception). */
	cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
	cache->ext_rom->value.leaf.len = -1;
	cache->ext_rom->value.leaf.data = cache->data;

	/* Add cache to tail of cache list */
	cache->prev = csr->cache_tail;
	csr->cache_tail->next = cache;
	csr->cache_tail = cache;
	return CSR1212_SUCCESS;
/* Unlink 'cache' from the CSR's cache list and, if it carried an Extended
 * ROM leaf, detach that leaf from the root directory and release it.
 * (Freeing of the cache itself is elided from this excerpt.) */
static void csr1212_remove_cache(struct csr1212_csr *csr,
				 struct csr1212_csr_rom_cache *cache)
	if (csr->cache_head == cache)
		csr->cache_head = cache->next;
	if (csr->cache_tail == cache)
		csr->cache_tail = cache->prev;

	cache->prev->next = cache->next;
	cache->next->prev = cache->prev;

	if (cache->ext_rom) {
		csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
		csr1212_release_keyval(cache->ext_rom);
/*
 * Append every leaf/directory referenced by 'dir' (including associate
 * chains) to the image layout list ending at *layout_tail, so the image
 * writer can emit them in order.  Immediates and CSR offsets occupy no
 * space of their own.  Repeated Extended_Key_Specifier_ID / Extended_Key
 * immediates with unchanged values are deduplicated so the directory
 * image does not repeat them.  Returns the directory's entry count
 * (return statement elided from this excerpt).
 */
static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
					  struct csr1212_keyval **layout_tail)
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *dkv;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;

	for (dentry = dir->value.directory.dentries_head; dentry;
	     dentry = dentry->next) {
		for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
			/* Special Case: Extended Key Specifier_ID */
			if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL) {
					last_extkey_spec = dkv;
				} else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
					last_extkey_spec = dkv;

			/* Special Case: Extended Key */
			} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL) {
				} else if (dkv->value.immediate != last_extkey->value.immediate) {

			switch(dkv->key.type) {
			case CSR1212_KV_TYPE_IMMEDIATE:
			case CSR1212_KV_TYPE_CSR_OFFSET:
			case CSR1212_KV_TYPE_LEAF:
			case CSR1212_KV_TYPE_DIRECTORY:
				/* Remove from list */
				if (dkv->prev && (dkv->prev->next == dkv))
					dkv->prev->next = dkv->next;
				if (dkv->next && (dkv->next->prev == dkv))
					dkv->next->prev = dkv->prev;
				//if (dkv == *layout_tail)
				//	*layout_tail = dkv->prev;

				/* Special case: Extended ROM leafs */
				if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
					dkv->value.leaf.len = -1;
					/* Don't add Extended ROM leafs in the layout list,
					 * they are handled differently. */

				/* Add to tail of list */
				dkv->prev = *layout_tail;
				(*layout_tail)->next = dkv;
/* Walk the layout list starting at 'kv', filling in each directory's
 * entry count, and accumulate the total image size (each leaf/directory
 * plus its one-quadlet length/CRC header).  Returns the total in bytes.
 * NOTE(review): the loop head and agg_size initialization are elided. */
static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
	struct csr1212_keyval *ltail = kv;

		switch(kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.leaf.len + 1;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.directory.len + 1;

	return quads_to_bytes(agg_size);
/*
 * Assign ROM byte offsets to layout-list entries starting at 'start_kv'
 * for as many as fit in 'cache' beginning at byte 'start_pos'.  Updates
 * the cache's layout head/tail and used length.  Returns the first keyval
 * that did NOT fit (to be positioned in the next cache), or NULL when all
 * fit (exact return behavior partially elided from this excerpt).
 */
static struct csr1212_keyval *
csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
			   struct csr1212_keyval *start_kv, int start_pos)
	struct csr1212_keyval *kv = start_kv;
	struct csr1212_keyval *okv = start_kv;	/* last keyval known to fit */

	int kv_len = 0, okv_len = 0;

	cache->layout_head = kv;

	while(kv && pos < cache->size) {
		/* Special case: Extended ROM leafs */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			kv->offset = cache->offset + pos;

		switch(kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			kv_len = kv->value.leaf.len;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv_len = kv->value.directory.len;

			/* Should never get here */

		/* One extra quadlet for the length/CRC header. */
		pos += quads_to_bytes(kv_len + 1);

		if (pos <= cache->size) {

	cache->layout_tail = okv;
	cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
/* Layout of a directory entry quadlet: top 8 bits are the key
 * (2-bit type + 6-bit id), low 24 bits the value. */
#define CSR1212_KV_KEY_SHIFT		24
#define CSR1212_KV_KEY_TYPE_SHIFT	6
#define CSR1212_KV_KEY_ID_MASK		0x3f
#define CSR1212_KV_KEY_TYPE_MASK	0x3	/* after shift */
/*
 * Emit the directory entry quadlets for 'dir' into data_buffer (one
 * big-endian quadlet per entry): immediates and CSR offsets carry their
 * value inline; leaves and directories carry the quadlet distance from
 * the entry to the target.  Duplicate consecutive Extended_Key /
 * Extended_Key_Specifier_ID immediates are skipped, mirroring
 * csr1212_generate_layout_subdir().
 */
csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;

	for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
		struct csr1212_keyval *a;

		for (a = dentry->kv; a; a = a->associate) {

			/* Special Case: Extended Key Specifier_ID */
			if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL) {
					last_extkey_spec = a;
				} else if (a->value.immediate != last_extkey_spec->value.immediate) {
					last_extkey_spec = a;

			/* Special Case: Extended Key */
			} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL) {
				} else if (a->value.immediate != last_extkey->value.immediate) {

			switch(a->key.type) {
			case CSR1212_KV_TYPE_IMMEDIATE:
				value = a->value.immediate;

			case CSR1212_KV_TYPE_CSR_OFFSET:
				value = a->value.csr_offset;

			case CSR1212_KV_TYPE_LEAF:
				/* Relative offset from this entry to the leaf,
				 * in quadlets. */
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);

			case CSR1212_KV_TYPE_DIRECTORY:
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);

				/* Should never get here */
				break; /* GDB breakpoint */

			/* Pack key id and type into the top byte. */
			value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
			value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
				 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
			data_buffer[index] = cpu_to_be32(value);
/* On-the-wire image of a leaf/directory: a length+CRC header quadlet
 * followed by the payload quadlets (header fields elided from this
 * excerpt). */
struct csr1212_keyval_img {
	u32 data[0];	/* older gcc can't handle [] which is standard */
/*
 * Serialize every keyval positioned in 'cache' (layout_head..layout_tail)
 * into the cache buffer: copy leaf payloads / emit directory entry
 * quadlets, then write each image's length and CRC-16 header.  The
 * layout-list links are cleared as entries are consumed.
 */
static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
	struct csr1212_keyval *kv, *nkv;
	struct csr1212_keyval_img *kvi;

	for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
		kvi = (struct csr1212_keyval_img *)
			(cache->data + bytes_to_quads(kv->offset - cache->offset));
		switch(kv->key.type) {
		case CSR1212_KV_TYPE_IMMEDIATE:
		case CSR1212_KV_TYPE_CSR_OFFSET:
			/* Should never get here */
			break; /* GDB breakpoint */

		case CSR1212_KV_TYPE_LEAF:
			/* Don't copy over Extended ROM areas, they are
			 * already filled out! */
			if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
				memcpy(kvi->data, kv->value.leaf.data,
				       quads_to_bytes(kv->value.leaf.len));

			kvi->length = cpu_to_be16(kv->value.leaf.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);

		case CSR1212_KV_TYPE_DIRECTORY:
			csr1212_generate_tree_subdir(kv, kvi->data);

			kvi->length = cpu_to_be16(kv->value.directory.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);

		/* Unlink the consumed entry from the layout list. */
		kv->prev->next = NULL;
		kv->next->prev = NULL;
/* Size of each appended Extended ROM cache: 64K quadlets (256 KiB). */
#define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u32))
/*
 * Build the complete Config ROM image: write the bus info block header
 * and CRC, lay out all keyvals across the cache list (appending Extended
 * ROM caches as needed and removing unused ones), then serialize every
 * cache and stamp the Extended ROM length/CRC headers.  Returns
 * CSR1212_SUCCESS, or propagates cache-append failures (some error paths
 * and local declarations are elided from this excerpt).
 */
int csr1212_generate_csr_image(struct csr1212_csr *csr)
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_csr_rom_cache *cache;
	struct csr1212_keyval *kv;

	cache = csr->cache_head;

	bi = (struct csr1212_bus_info_block_img*)cache->data;

	bi->length = bytes_to_quads(csr->bus_info_len) - 1;
	bi->crc_length = bi->length;
	bi->crc = csr1212_crc16(bi->data, bi->crc_length);

	/* Root keyval starts the layout list on its own. */
	csr->root_kv->next = NULL;
	csr->root_kv->prev = NULL;

	agg_size = csr1212_generate_layout_order(csr->root_kv);

	init_offset = csr->bus_info_len;

	for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
			/* Estimate approximate number of additional cache
			 * regions needed (it assumes that the cache holding
			 * the first 1K Config ROM space always exists). */
			int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
						(2 * sizeof(u32))) + 1;

			/* Add additional cache regions; extras will be
			 * removed later. */
			for (; est_c; est_c--) {
				ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
				if (ret != CSR1212_SUCCESS)

			/* Need to re-layout for additional cache regions */
			agg_size = csr1212_generate_layout_order(csr->root_kv);

			cache = csr->cache_head;
			init_offset = csr->bus_info_len;

		kv = csr1212_generate_positions(cache, kv, init_offset);
		agg_size -= cache->len;
		init_offset = sizeof(u32);

	/* Remove unused, excess cache regions */
		struct csr1212_csr_rom_cache *oc = cache;

		csr1212_remove_cache(csr, oc);

	/* Go through the list backward so that when done, the correct CRC
	 * will be calculated for the Extended ROM areas. */
	for(cache = csr->cache_tail; cache; cache = cache->prev) {
		/* Only Extended ROM caches should have this set. */
		if (cache->ext_rom) {

			/* Make sure the Extended ROM leaf is a multiple of
			 * max_rom in size. */
			BUG_ON(csr->max_rom < 1);
			leaf_size = (cache->len + (csr->max_rom - 1)) &
				~(csr->max_rom - 1);

			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       leaf_size - cache->len);

			/* Subtract leaf header */
			leaf_size -= sizeof(u32);

			/* Update the Extended ROM leaf length */
			cache->ext_rom->value.leaf.len =
				bytes_to_quads(leaf_size);

			/* Zero out the unused ROM region
			 * NOTE(review): in the full file this second memset
			 * presumably belongs to the non-ext_rom branch —
			 * confirm there. */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       cache->size - cache->len);

		/* Copy the data into the cache buffer */
		csr1212_fill_cache(cache);

		if (cache != csr->cache_head) {
			/* Set the length and CRC of the extended ROM. */
			struct csr1212_keyval_img *kvi =
				(struct csr1212_keyval_img*)cache->data;
			u16 len = bytes_to_quads(cache->len) - 1;

			kvi->length = cpu_to_be16(len);
			kvi->crc = csr1212_crc16(kvi->data, len);

	return CSR1212_SUCCESS;
/* Copy 'len' bytes at ROM byte offset 'offset' into 'buffer' from
 * whichever cache fully contains the range.  Returns CSR1212_SUCCESS on a
 * hit; the miss-return and the memcpy destination line are elided from
 * this excerpt. */
int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
	struct csr1212_csr_rom_cache *cache;

	for (cache = csr->cache_head; cache; cache = cache->next) {
		if (offset >= cache->offset &&
		    (offset + len) <= (cache->offset + cache->size)) {
			       &cache->data[bytes_to_quads(offset - cache->offset)],
			return CSR1212_SUCCESS;
1052 /* Parse a chunk of data as a Config ROM */
/*
 * Read and validate a remote node's bus info block: fetch it quadlet by
 * quadlet, check the ROM header's info_length, read any extra CRC-covered
 * quadlets, verify the CRC (accepting Microsoft's byte-reversed variant),
 * and record the covered range as the first filled cache region.
 */
static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_cache_region *cr;

	/* IEEE 1212 says that the entire bus info block should be readable in
	 * a single transaction regardless of the max_rom value.
	 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
	 * bus info block will be read 1 quadlet at a time.  The rest of the
	 * ConfigROM will be read according to the max_rom field. */
	for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
					 &csr->cache_head->data[bytes_to_quads(i)],
		if (ret != CSR1212_SUCCESS)

	/* check ROM header's info_length */
	    be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
	    bytes_to_quads(csr->bus_info_len) - 1)

	bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
	csr->crc_len = quads_to_bytes(bi->crc_length);

	/* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
	 * always the case, so read the rest of the crc area 1 quadlet at a time. */
	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
					 &csr->cache_head->data[bytes_to_quads(i)],
		if (ret != CSR1212_SUCCESS)

	/* Apparently there are too many different wrong implementations of the
	 * CRC algorithm that verifying them is moot. */
	if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
	    (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))

	/* Record the bus info block + CRC area as already read. */
	cr = CSR1212_MALLOC(sizeof(*cr));

	cr->offset_start = 0;
	cr->offset_end = csr->crc_len + 4;

	csr->cache_head->filled_head = cr;
	csr->cache_head->filled_tail = cr;

	return CSR1212_SUCCESS;
/* Decode a big-endian directory entry quadlet: top 8 bits are the key
 * (type in the upper 2, id in the lower 6), low 24 bits the value. */
#define CSR1212_KV_KEY(q)	(be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
#define CSR1212_KV_KEY_TYPE(q)	(CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
#define CSR1212_KV_KEY_ID(q)	(CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
#define CSR1212_KV_VAL_MASK	0xffffff
#define CSR1212_KV_VAL(q)	(be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
/*
 * Turn one directory entry quadlet 'ki' into a keyval attached to 'dir'.
 * Immediates/CSR offsets become new keyvals; leaf/directory entries are
 * resolved by offset (reusing an existing keyval if two entries point at
 * the same target) or created as unread placeholders.  Parsed keyvals get
 * refcnt 0 — the directory holds the only reference.  On failure the
 * partially created keyval is freed.
 */
static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
	int ret = CSR1212_SUCCESS;
	struct csr1212_keyval *k = NULL;

	switch(CSR1212_KV_KEY_TYPE(ki)) {
	case CSR1212_KV_TYPE_IMMEDIATE:
		k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
					  CSR1212_KV_VAL(ki));

		k->refcnt = 0;	/* Don't keep local reference when parsing. */

	case CSR1212_KV_TYPE_CSR_OFFSET:
		k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
					   CSR1212_KV_VAL(ki));

		k->refcnt = 0;	/* Don't keep local reference when parsing. */

		/* Compute the offset from 0xffff f000 0000. */
		offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
		if (offset == kv_pos) {
			/* Uh-oh.  Can't have a relative offset of 0 for Leaves
			 * or Directories.  The Config ROM image is most likely
			 * messed up, so we'll just abort here. */

		k = csr1212_find_keyval_offset(dir, offset);

			break;		/* Found it. */

		if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
			k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
			k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);

		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		k->valid = 0;	/* Contents not read yet so it's not valid. */

		/* Insert the placeholder into the circular offset list. */
		k->next = dir->next;
		dir->next->prev = k;

	ret = csr1212_attach_keyval_to_directory(dir, k);

	if (ret != CSR1212_SUCCESS && k != NULL)
/*
 * Parse the image of keyval 'kv' out of 'cache': verify the CRC
 * (accepting the Microsoft byte-reversed variant), then either parse each
 * directory entry via csr1212_parse_dir_entry() or copy the leaf payload
 * into a fresh buffer (Extended ROM leaves keep pointing at cache data).
 */
int csr1212_parse_keyval(struct csr1212_keyval *kv,
			 struct csr1212_csr_rom_cache *cache)
	struct csr1212_keyval_img *kvi;
	int ret = CSR1212_SUCCESS;

	kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
	kvi_len = be16_to_cpu(kvi->length);

	/* Apparently there are too many different wrong implementations of the
	 * CRC algorithm that verifying them is moot. */
	if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
	    (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {

	switch(kv->key.type) {
	case CSR1212_KV_TYPE_DIRECTORY:
		for (i = 0; i < kvi_len; i++) {
			u32 ki = kvi->data[i];

			/* Some devices put null entries in their unit
			 * directories.  If we come across such an entry,
			 * skip it (continue elided in this excerpt). */
			ret = csr1212_parse_dir_entry(kv, ki,
						      quads_to_bytes(i + 1)));
		kv->value.directory.len = kvi_len;

	case CSR1212_KV_TYPE_LEAF:
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
			if (!kv->value.leaf.data) {

			kv->value.leaf.len = kvi_len;
			memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
/*
 * Ensure the image of keyval 'kv' is present in a cache, reading from the
 * bus as needed, then parse it.  Three phases:
 *  1. Find the cache containing kv->offset; if none, create a new cache
 *     for an Extended ROM leaf (its header quadlet is read to size it).
 *  2. Check the cache's filled-region list; insert a new region if the
 *     keyval's bytes have not been read yet.
 *  3. Read max_rom-sized chunks until the whole keyval image is present,
 *     falling back to quadlet reads if max_rom reads fail, consolidating
 *     adjacent filled regions, and aborting if the claimed length would
 *     run past the cache.
 */
int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
	struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
	struct csr1212_keyval_img *kvi = NULL;
	struct csr1212_csr_rom_cache *cache;

	BUG_ON(!csr || !kv || csr->max_rom < 1);

	/* First find which cache the data should be in (or go in if not read
	 * yet). */
	for (cache = csr->cache_head; cache; cache = cache->next) {
		if (kv->offset >= cache->offset &&
		    kv->offset < (cache->offset + cache->size))

		/* Only create a new cache for Extended ROM leaves. */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)

		if (csr->ops->bus_read(csr,
				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
				       sizeof(u32), &q, csr->private)) {

		/* Leaf length lives in the top 16 bits of the header. */
		kv->value.leaf.len = be32_to_cpu(q) >> 16;

		/* Cache size rounded up to a multiple of max_rom. */
		cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
			      (csr->max_rom - 1)) & ~(csr->max_rom - 1);

		cache = csr1212_rom_cache_malloc(kv->offset, cache_size);

		kv->value.leaf.data = &cache->data[1];
		csr->cache_tail->next = cache;
		cache->prev = csr->cache_tail;

		csr->cache_tail = cache;
		cache->filled_head =
			CSR1212_MALLOC(sizeof(*cache->filled_head));
		if (!cache->filled_head) {

		cache->filled_head->offset_start = 0;
		cache->filled_head->offset_end = sizeof(u32);
		cache->filled_tail = cache->filled_head;
		cache->filled_head->next = NULL;
		cache->filled_head->prev = NULL;

		/* Don't read the entire extended ROM now.  Pieces of it will
		 * be read when entries inside it are read. */
		return csr1212_parse_keyval(kv, cache);

	cache_index = kv->offset - cache->offset;

	/* Now search read portions of the cache to see if it is there. */
	for (cr = cache->filled_head; cr; cr = cr->next) {
		if (cache_index < cr->offset_start) {
			/* Not read yet: start a new region before 'cr',
			 * aligned down to a max_rom boundary. */
			newcr = CSR1212_MALLOC(sizeof(*newcr));

			newcr->offset_start = cache_index & ~(csr->max_rom - 1);
			newcr->offset_end = newcr->offset_start;

			newcr->prev = cr->prev;

		} else if ((cache_index >= cr->offset_start) &&
			   (cache_index < cr->offset_end)) {
			/* Already read: the image header is available. */
			kvi = (struct csr1212_keyval_img*)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);

		} else if (cache_index == cr->offset_end)

		/* Past every region: append a new one at the tail. */
		cr = cache->filled_tail;
		newcr = CSR1212_MALLOC(sizeof(*newcr));

		newcr->offset_start = cache_index & ~(csr->max_rom - 1);
		newcr->offset_end = newcr->offset_start;

		newcr->next = cr->next;

		cache->filled_tail = newcr;

	while(!kvi || cr->offset_end < cache_index + kv_len) {
		cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
							~(csr->max_rom - 1))];

		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
			cr->offset_end) & ~(csr->max_rom - 1);

		if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
			if (csr->max_rom == 4)
				/* We've got problems! */

			/* Apparently the max_rom value was a lie, set it to
			 * do quadlet reads and try again. */

		cr->offset_end += csr->max_rom - (cr->offset_end &
						  (csr->max_rom - 1));

		if (!kvi && (cr->offset_end > cache_index)) {
			/* Header quadlet just arrived: learn the image size. */
			kvi = (struct csr1212_keyval_img*)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);

		if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
			/* The Leaf or Directory claims its length extends
			 * beyond the ConfigROM image region and thus beyond the
			 * end of our cache region.  Therefore, we abort now
			 * rather than seg faulting later. */

		if (ncr && (cr->offset_end >= ncr->offset_start)) {
			/* consolidate region entries */
			ncr->offset_start = cr->offset_start;

			cr->prev->next = cr->next;
			ncr->prev = cr->prev;
			if (cache->filled_head == cr)
				cache->filled_head = ncr;

	return csr1212_parse_keyval(kv, cache);
/*
 * Top-level remote Config ROM parser: read the bus info block, decode the
 * node's max_rom field (defaulting to quadlet reads when the bus ops
 * cannot report it), read and parse the root directory, then pre-read
 * every Extended ROM leaf referenced by the root so caches exist for
 * later lookups.  (Closing brace beyond this excerpt.)
 */
int csr1212_parse_csr(struct csr1212_csr *csr)
	static const int mr_map[] = { 4, 64, 1024, 0 };
	struct csr1212_dentry *dentry;

	BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);

	ret = csr1212_parse_bus_info_block(csr);
	if (ret != CSR1212_SUCCESS)

	if (!csr->ops->get_max_rom)
		csr->max_rom = mr_map[0];	/* default value */
		int i = csr->ops->get_max_rom(csr->bus_info_data,

		csr->max_rom = mr_map[i];

	csr->cache_head->layout_head = csr->root_kv;
	csr->cache_head->layout_tail = csr->root_kv;

	csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +

	/* Root starts as an unread placeholder in a one-element circular
	 * list. */
	csr->root_kv->valid = 0;
	csr->root_kv->next = csr->root_kv;
	csr->root_kv->prev = csr->root_kv;
	ret = _csr1212_read_keyval(csr, csr->root_kv);
	if (ret != CSR1212_SUCCESS)

	/* Scan through the Root directory finding all extended ROM regions
	 * and make cache regions for them */
	for (dentry = csr->root_kv->value.directory.dentries_head;
	     dentry; dentry = dentry->next) {
		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
			!dentry->kv->valid) {
			ret = _csr1212_read_keyval(csr, dentry->kv);
			if (ret != CSR1212_SUCCESS)

	return CSR1212_SUCCESS;