2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
36 #include <linux/errno.h>
37 #include <linux/string.h>
39 #include <asm/byteorder.h>
/* Permitted key type for each key id */
#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
/* Indexed by IEEE 1212 key id (0x00..0x2f); each entry is the bitmask of
 * entry types that are legal for that key id.  Consulted by
 * csr1212_new_keyval() to reject invalid (type, key) combinations.
 * NOTE(review): several reserved entries are elided in this excerpt. */
static const u8 csr1212_key_id_type_map[0x30] = {
	__C,			/* used by Apple iSight */
	__D | __L,		/* Descriptor */
	__I | __D | __L,	/* Bus_Dependent_Info */
	__I | __D | __L,	/* Vendor */
	__I,			/* Hardware_Version */
	__D | __L | __I,	/* Module */
	__I, 0, 0, 0,		/* used by Apple iSight, Reserved */
	__I,			/* Node_Capabilities */
	0, 0, 0,		/* Reserved */
	__I,			/* Specifier_ID */
	__I | __C | __D | __L,	/* Dependent_Info */
	__L,			/* Unit_Location */
	__L,			/* Extended_ROM */
	__I,			/* Extended_Key_Specifier_ID */
	__I,			/* Extended_Key */
	__I | __C | __D | __L,	/* Extended_Data */
	__L,			/* Modifiable_Descriptor */
	__I,			/* Directory_ID */
/* Convert between quadlet (32-bit word) counts and byte counts used
 * throughout this file; bytes_to_quads() rounds up to a whole quadlet. */
#define quads_to_bytes(_q) ((_q) * sizeof(u32))
#define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))
88 static void free_keyval(struct csr1212_keyval *kv)
90 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
91 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
92 CSR1212_FREE(kv->value.leaf.data);
97 static u16 csr1212_crc16(const u32 *buffer, size_t length)
103 for (; length; length--) {
104 data = be32_to_cpu(*buffer);
106 for (shift = 28; shift >= 0; shift -= 4 ) {
107 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
108 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
113 return cpu_to_be16(crc);
117 /* Microsoft computes the CRC with the bytes in reverse order. Therefore we
118 * have a special version of the CRC algorithm to account for their buggy
120 static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
126 for (; length; length--) {
127 data = le32_to_cpu(*buffer);
129 for (shift = 28; shift >= 0; shift -= 4 ) {
130 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
131 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
136 return cpu_to_be16(crc);
140 static struct csr1212_dentry *
141 csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
143 struct csr1212_dentry *pos;
145 for (pos = dir->value.directory.dentries_head;
146 pos != NULL; pos = pos->next) {
153 static struct csr1212_keyval *
154 csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
156 struct csr1212_keyval *kv;
158 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
159 if (kv->offset == offset)
166 /* Creation Routines */
/* Allocate and initialize a CSR instance: one cache covering the 1 KB
 * Config ROM region plus an empty root directory.  @bus_info_size is in
 * bytes; @private is an opaque cookie passed back through @ops callbacks.
 * NOTE(review): several statements (error-return paths, the assignment of
 * the allocated cache to csr->cache_head) are elided in this excerpt. */
struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
				       size_t bus_info_size, void *private)
{
	struct csr1212_csr *csr;

	csr = CSR1212_MALLOC(sizeof(*csr));
	/* First cache covers the architected Config ROM address space. */
	csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
				 CSR1212_CONFIG_ROM_SPACE_SIZE);
	if (!csr->cache_head) {
	/* The keyval key id is not used for the root node, but a valid key id
	 * that can be used for a directory needs to be passed to
	 * csr1212_new_directory(). */
	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
		CSR1212_FREE(csr->cache_head);
	/* Bus info block lives at the start of the first cache buffer. */
	csr->bus_info_data = csr->cache_head->data;
	csr->bus_info_len = bus_info_size;
	csr->crc_len = bus_info_size;	/* default: CRC covers the bus info block */
	csr->private = private;
	csr->cache_tail = csr->cache_head;
205 void csr1212_init_local_csr(struct csr1212_csr *csr,
206 const u32 *bus_info_data, int max_rom)
208 static const int mr_map[] = { 4, 64, 1024, 0 };
210 BUG_ON(max_rom & ~0x3);
211 csr->max_rom = mr_map[max_rom];
212 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
215 static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
217 struct csr1212_keyval *kv;
219 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
222 kv = CSR1212_MALLOC(sizeof(*kv));
229 kv->associate = NULL;
239 struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
241 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
246 kv->value.immediate = value;
251 static struct csr1212_keyval *
252 csr1212_new_leaf(u8 key, const void *data, size_t data_len)
254 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
260 kv->value.leaf.data = CSR1212_MALLOC(data_len);
261 if (!kv->value.leaf.data) {
267 memcpy(kv->value.leaf.data, data, data_len);
269 kv->value.leaf.data = NULL;
272 kv->value.leaf.len = bytes_to_quads(data_len);
279 static struct csr1212_keyval *
280 csr1212_new_csr_offset(u8 key, u32 csr_offset)
282 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
287 kv->value.csr_offset = csr_offset;
294 struct csr1212_keyval *csr1212_new_directory(u8 key)
296 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
301 kv->value.directory.len = 0;
303 kv->value.directory.dentries_head = NULL;
304 kv->value.directory.dentries_tail = NULL;
/* Attach @associate to @kv after validating the IEEE 1212 pairing rules
 * for extended keys: an Extended_Key_Specifier_ID may only be followed by
 * an Extended_Key, an Extended_Key only by Extended_Data, and a
 * Descriptor keyval may not itself gain associates.  Violations are
 * programming errors, hence BUG_ON rather than an error return. */
void csr1212_associate_keyval(struct csr1212_keyval *kv,
			      struct csr1212_keyval *associate)
{
	BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
	       (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
		associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
		associate->key.id < 0x30) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));

	/* Drop any previously attached associate before replacing it. */
	csr1212_release_keyval(kv->associate);

	kv->associate = associate;
/* Append @kv to directory @dir's dentry list (tail insertion) and return
 * CSR1212_SUCCESS.  NOTE(review): the dentry allocation-failure path and
 * the dentry->kv/next assignments are elided in this excerpt. */
int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
				       struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);

	dentry = CSR1212_MALLOC(sizeof(*dentry));

	/* Link the new dentry at the tail of the doubly-linked list. */
	dentry->prev = dir->value.directory.dentries_tail;

	if (!dir->value.directory.dentries_head)
		dir->value.directory.dentries_head = dentry;

	if (dir->value.directory.dentries_tail)
		dir->value.directory.dentries_tail->next = dentry;
	dir->value.directory.dentries_tail = dentry;

	return CSR1212_SUCCESS;
362 #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
363 (&((kv)->value.leaf.data[1]))
365 #define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
366 ((kv)->value.leaf.data[0] = \
367 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
368 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
369 #define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
370 ((kv)->value.leaf.data[0] = \
371 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
372 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
373 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
375 static struct csr1212_keyval *
376 csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
377 const void *data, size_t data_len)
379 struct csr1212_keyval *kv;
381 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
382 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
386 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
387 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
390 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
396 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
397 ((kv)->value.leaf.data[1] = \
398 ((kv)->value.leaf.data[1] & \
399 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
400 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
401 cpu_to_be32(((width) & CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
402 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
404 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
405 ((kv)->value.leaf.data[1] = \
406 ((kv)->value.leaf.data[1] & \
407 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
408 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
409 cpu_to_be32(((char_set) & \
410 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
411 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
413 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
414 ((kv)->value.leaf.data[1] = \
415 ((kv)->value.leaf.data[1] & \
416 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
417 cpu_to_be32(((language) & \
418 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
420 static struct csr1212_keyval *
421 csr1212_new_textual_descriptor_leaf(u8 cwidth, u16 cset, u16 language,
422 const void *data, size_t data_len)
424 struct csr1212_keyval *kv;
427 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
428 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
432 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
433 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
434 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
436 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
438 /* make sure last quadlet is zeroed out */
439 *((u32*)&(lstr[(data_len - 1) & ~0x3])) = 0;
441 /* don't copy the NUL terminator */
442 memcpy(lstr, data, data_len);
/* Verify that NUL-terminated string @s only contains characters from the
 * IEEE 1212 "minimal ASCII" character set (par. 7.4).  The lookup table
 * maps each 7-bit code to itself when the character is allowed and to 0
 * otherwise, so a mismatch means the character is not permitted.
 * Returns 0 if the string conforms, -1 otherwise. */
static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
		0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
		0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
		0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
		0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
		0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
		0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
		0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
		0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
		0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
		0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
	};

	for (; *s; s++) {
		if (minimal_ascii_table[*s & 0x7F] != *s)
			return -1; /* failed */
	}
	/* String conforms to minimal-ascii, as specified by IEEE 1212,
	 * par. 7.4 */
	return 0;
}
/* Build a minimal-ASCII textual descriptor leaf from NUL-terminated @s.
 * Returns NULL if @s contains characters outside the IEEE 1212 minimal
 * ASCII set or if the leaf cannot be allocated.  The NUL terminator is
 * not included in the leaf data (strlen length). */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	/* Check if string conforms to minimal_ascii format */
	if (csr1212_check_minimal_ascii(s))
		return NULL;

	/* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
	return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
}
487 /* Destruction Routines */
/* Remove @kv from directory @dir: unlink and free its dentry, then drop
 * the reference the directory held on @kv.  NOTE(review): the early
 * returns (invalid args, @kv not found in @dir) are elided in this
 * excerpt. */
void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
					  struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)

	dentry = csr1212_find_keyval(dir, kv);

		/* Unlink the dentry from its neighbours... */
		dentry->prev->next = dentry->next;
		dentry->next->prev = dentry->prev;
	/* ...and fix up the directory's head/tail pointers if needed. */
	if (dir->value.directory.dentries_head == dentry)
		dir->value.directory.dentries_head = dentry->next;
	if (dir->value.directory.dentries_tail == dentry)
		dir->value.directory.dentries_tail = dentry->prev;

	CSR1212_FREE(dentry);

	csr1212_release_keyval(kv);
/* This function is used to free the memory taken by a keyval. If the given
 * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if their respective refcnts are 0. By means of
 * list manipulation, this routine will descend a directory structure in a
 * non-recursive manner. */
static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *k, *a;
	/* @dentry is a stack-allocated sentinel seeding the worklist of
	 * entries still to be destroyed; @head/@tail track that list.
	 * NOTE(review): the worklist setup and main loop are partially
	 * elided in this excerpt. */
	struct csr1212_dentry dentry;
	struct csr1212_dentry *head, *tail;

		if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
			/* If the current entry is a directory, then move all
			 * the entries to the destruction list. */
			if (k->value.directory.dentries_head) {
				tail->next = k->value.directory.dentries_head;
				k->value.directory.dentries_head->prev = tail;
				tail = k->value.directory.dentries_tail;
		/* Free worklist nodes as they are consumed, but never the
		 * stack-allocated sentinel. */
		if (head->prev && head->prev != &dentry) {
			CSR1212_FREE(head->prev);
		} else if (tail != &dentry)
/* Drop one reference to @kv; the keyval is destroyed (including directory
 * contents, see csr1212_destroy_keyval()) when the last reference goes
 * away.  NOTE(review): the refcount decrement/branch is elided in this
 * excerpt. */
void csr1212_release_keyval(struct csr1212_keyval *kv)
		csr1212_destroy_keyval(kv);
577 void csr1212_destroy_csr(struct csr1212_csr *csr)
579 struct csr1212_csr_rom_cache *c, *oc;
580 struct csr1212_cache_region *cr, *ocr;
582 csr1212_release_keyval(csr->root_kv);
601 /* CSR Image Creation */
603 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
605 struct csr1212_csr_rom_cache *cache;
608 BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
609 !csr->ops->release_addr || csr->max_rom < 1);
611 /* ROM size must be a multiple of csr->max_rom */
612 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
614 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
615 if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
618 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
619 /* Invalid address returned from allocate_addr_range(). */
620 csr->ops->release_addr(csr_addr, csr->private);
624 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
626 csr->ops->release_addr(csr_addr, csr->private);
630 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
631 if (!cache->ext_rom) {
632 csr->ops->release_addr(csr_addr, csr->private);
637 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
638 csr1212_release_keyval(cache->ext_rom);
639 csr->ops->release_addr(csr_addr, csr->private);
643 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
644 cache->ext_rom->value.leaf.len = -1;
645 cache->ext_rom->value.leaf.data = cache->data;
647 /* Add cache to tail of cache list */
648 cache->prev = csr->cache_tail;
649 csr->cache_tail->next = cache;
650 csr->cache_tail = cache;
651 return CSR1212_SUCCESS;
/* Unlink @cache from the csr's cache list and release its Extended ROM
 * leaf, if any.  NOTE(review): the guards around the neighbour re-linking
 * and the final free of the cache are elided in this excerpt. */
static void csr1212_remove_cache(struct csr1212_csr *csr,
				 struct csr1212_csr_rom_cache *cache)
{
	if (csr->cache_head == cache)
		csr->cache_head = cache->next;
	if (csr->cache_tail == cache)
		csr->cache_tail = cache->prev;
		cache->prev->next = cache->next;
		cache->next->prev = cache->prev;

	if (cache->ext_rom) {
		/* The Extended ROM leaf was attached to the root directory
		 * when the cache was created; detach and release it here. */
		csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
		csr1212_release_keyval(cache->ext_rom);
675 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
676 struct csr1212_keyval **layout_tail)
678 struct csr1212_dentry *dentry;
679 struct csr1212_keyval *dkv;
680 struct csr1212_keyval *last_extkey_spec = NULL;
681 struct csr1212_keyval *last_extkey = NULL;
684 for (dentry = dir->value.directory.dentries_head; dentry;
685 dentry = dentry->next) {
686 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
687 /* Special Case: Extended Key Specifier_ID */
688 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
689 if (last_extkey_spec == NULL) {
690 last_extkey_spec = dkv;
691 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
692 last_extkey_spec = dkv;
696 /* Special Case: Extended Key */
697 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
698 if (last_extkey == NULL) {
700 } else if (dkv->value.immediate != last_extkey->value.immediate) {
709 switch(dkv->key.type) {
711 case CSR1212_KV_TYPE_IMMEDIATE:
712 case CSR1212_KV_TYPE_CSR_OFFSET:
714 case CSR1212_KV_TYPE_LEAF:
715 case CSR1212_KV_TYPE_DIRECTORY:
716 /* Remove from list */
717 if (dkv->prev && (dkv->prev->next == dkv))
718 dkv->prev->next = dkv->next;
719 if (dkv->next && (dkv->next->prev == dkv))
720 dkv->next->prev = dkv->prev;
721 //if (dkv == *layout_tail)
722 // *layout_tail = dkv->prev;
724 /* Special case: Extended ROM leafs */
725 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
726 dkv->value.leaf.len = -1;
727 /* Don't add Extended ROM leafs in the layout list,
728 * they are handled differently. */
732 /* Add to tail of list */
734 dkv->prev = *layout_tail;
735 (*layout_tail)->next = dkv;
744 static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
746 struct csr1212_keyval *ltail = kv;
750 switch(kv->key.type) {
751 case CSR1212_KV_TYPE_LEAF:
752 /* Add 1 quadlet for crc/len field */
753 agg_size += kv->value.leaf.len + 1;
756 case CSR1212_KV_TYPE_DIRECTORY:
757 kv->value.directory.len = csr1212_generate_layout_subdir(kv, <ail);
758 /* Add 1 quadlet for crc/len field */
759 agg_size += kv->value.directory.len + 1;
764 return quads_to_bytes(agg_size);
767 static struct csr1212_keyval *
768 csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
769 struct csr1212_keyval *start_kv, int start_pos)
771 struct csr1212_keyval *kv = start_kv;
772 struct csr1212_keyval *okv = start_kv;
774 int kv_len = 0, okv_len = 0;
776 cache->layout_head = kv;
778 while(kv && pos < cache->size) {
779 /* Special case: Extended ROM leafs */
780 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
781 kv->offset = cache->offset + pos;
784 switch(kv->key.type) {
785 case CSR1212_KV_TYPE_LEAF:
786 kv_len = kv->value.leaf.len;
789 case CSR1212_KV_TYPE_DIRECTORY:
790 kv_len = kv->value.directory.len;
794 /* Should never get here */
798 pos += quads_to_bytes(kv_len + 1);
800 if (pos <= cache->size) {
807 cache->layout_tail = okv;
808 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
813 #define CSR1212_KV_KEY_SHIFT 24
814 #define CSR1212_KV_KEY_TYPE_SHIFT 6
815 #define CSR1212_KV_KEY_ID_MASK 0x3f
816 #define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
819 csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
821 struct csr1212_dentry *dentry;
822 struct csr1212_keyval *last_extkey_spec = NULL;
823 struct csr1212_keyval *last_extkey = NULL;
826 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
827 struct csr1212_keyval *a;
829 for (a = dentry->kv; a; a = a->associate) {
832 /* Special Case: Extended Key Specifier_ID */
833 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
834 if (last_extkey_spec == NULL) {
835 last_extkey_spec = a;
836 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
837 last_extkey_spec = a;
841 /* Special Case: Extended Key */
842 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
843 if (last_extkey == NULL) {
845 } else if (a->value.immediate != last_extkey->value.immediate) {
852 switch(a->key.type) {
853 case CSR1212_KV_TYPE_IMMEDIATE:
854 value = a->value.immediate;
856 case CSR1212_KV_TYPE_CSR_OFFSET:
857 value = a->value.csr_offset;
859 case CSR1212_KV_TYPE_LEAF:
861 value -= dir->offset + quads_to_bytes(1+index);
862 value = bytes_to_quads(value);
864 case CSR1212_KV_TYPE_DIRECTORY:
866 value -= dir->offset + quads_to_bytes(1+index);
867 value = bytes_to_quads(value);
870 /* Should never get here */
871 break; /* GDB breakpoint */
874 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
875 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
876 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
877 data_buffer[index] = cpu_to_be32(value);
883 struct csr1212_keyval_img {
888 u32 data[0]; /* older gcc can't handle [] which is standard */
891 static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
893 struct csr1212_keyval *kv, *nkv;
894 struct csr1212_keyval_img *kvi;
896 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
897 kvi = (struct csr1212_keyval_img *)
898 (cache->data + bytes_to_quads(kv->offset - cache->offset));
899 switch(kv->key.type) {
901 case CSR1212_KV_TYPE_IMMEDIATE:
902 case CSR1212_KV_TYPE_CSR_OFFSET:
903 /* Should never get here */
904 break; /* GDB breakpoint */
906 case CSR1212_KV_TYPE_LEAF:
907 /* Don't copy over Extended ROM areas, they are
908 * already filled out! */
909 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
910 memcpy(kvi->data, kv->value.leaf.data,
911 quads_to_bytes(kv->value.leaf.len));
913 kvi->length = cpu_to_be16(kv->value.leaf.len);
914 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
917 case CSR1212_KV_TYPE_DIRECTORY:
918 csr1212_generate_tree_subdir(kv, kvi->data);
920 kvi->length = cpu_to_be16(kv->value.directory.len);
921 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
927 kv->prev->next = NULL;
929 kv->next->prev = NULL;
935 #define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u32))
937 int csr1212_generate_csr_image(struct csr1212_csr *csr)
939 struct csr1212_bus_info_block_img *bi;
940 struct csr1212_csr_rom_cache *cache;
941 struct csr1212_keyval *kv;
948 cache = csr->cache_head;
950 bi = (struct csr1212_bus_info_block_img*)cache->data;
952 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
953 bi->crc_length = bi->length;
954 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
956 csr->root_kv->next = NULL;
957 csr->root_kv->prev = NULL;
959 agg_size = csr1212_generate_layout_order(csr->root_kv);
961 init_offset = csr->bus_info_len;
963 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
965 /* Estimate approximate number of additional cache
966 * regions needed (it assumes that the cache holding
967 * the first 1K Config ROM space always exists). */
968 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
969 (2 * sizeof(u32))) + 1;
971 /* Add additional cache regions, extras will be
973 for (; est_c; est_c--) {
974 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
975 if (ret != CSR1212_SUCCESS)
978 /* Need to re-layout for additional cache regions */
979 agg_size = csr1212_generate_layout_order(csr->root_kv);
981 cache = csr->cache_head;
982 init_offset = csr->bus_info_len;
984 kv = csr1212_generate_positions(cache, kv, init_offset);
985 agg_size -= cache->len;
986 init_offset = sizeof(u32);
989 /* Remove unused, excess cache regions */
991 struct csr1212_csr_rom_cache *oc = cache;
994 csr1212_remove_cache(csr, oc);
997 /* Go through the list backward so that when done, the correct CRC
998 * will be calculated for the Extended ROM areas. */
999 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1000 /* Only Extended ROM caches should have this set. */
1001 if (cache->ext_rom) {
1004 /* Make sure the Extended ROM leaf is a multiple of
1005 * max_rom in size. */
1006 BUG_ON(csr->max_rom < 1);
1007 leaf_size = (cache->len + (csr->max_rom - 1)) &
1008 ~(csr->max_rom - 1);
1010 /* Zero out the unused ROM region */
1011 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1012 leaf_size - cache->len);
1014 /* Subtract leaf header */
1015 leaf_size -= sizeof(u32);
1017 /* Update the Extended ROM leaf length */
1018 cache->ext_rom->value.leaf.len =
1019 bytes_to_quads(leaf_size);
1021 /* Zero out the unused ROM region */
1022 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1023 cache->size - cache->len);
1026 /* Copy the data into the cache buffer */
1027 csr1212_fill_cache(cache);
1029 if (cache != csr->cache_head) {
1030 /* Set the length and CRC of the extended ROM. */
1031 struct csr1212_keyval_img *kvi =
1032 (struct csr1212_keyval_img*)cache->data;
1033 u16 len = bytes_to_quads(cache->len) - 1;
1035 kvi->length = cpu_to_be16(len);
1036 kvi->crc = csr1212_crc16(kvi->data, len);
1040 return CSR1212_SUCCESS;
/* Copy @len bytes of Config ROM data at byte @offset into @buffer, but
 * only when the requested range lies entirely within one already-cached
 * region; returns CSR1212_SUCCESS on a hit.  NOTE(review): the memcpy
 * call and the cache-miss error return are elided in this excerpt. */
int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
{
	struct csr1212_csr_rom_cache *cache;

	for (cache = csr->cache_head; cache; cache = cache->next) {
		if (offset >= cache->offset &&
		    (offset + len) <= (cache->offset + cache->size)) {
			/* copy source: position inside this cache buffer */
			&cache->data[bytes_to_quads(offset - cache->offset)],
			return CSR1212_SUCCESS;
1060 /* Parse a chunk of data as a Config ROM */
1062 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1064 struct csr1212_bus_info_block_img *bi;
1065 struct csr1212_cache_region *cr;
1069 /* IEEE 1212 says that the entire bus info block should be readable in
1070 * a single transaction regardless of the max_rom value.
1071 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1072 * bus info block will be read 1 quadlet at a time. The rest of the
1073 * ConfigROM will be read according to the max_rom field. */
1074 for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
1075 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1077 &csr->cache_head->data[bytes_to_quads(i)],
1079 if (ret != CSR1212_SUCCESS)
1082 /* check ROM header's info_length */
1084 be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
1085 bytes_to_quads(csr->bus_info_len) - 1)
1089 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1090 csr->crc_len = quads_to_bytes(bi->crc_length);
1092 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1093 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1094 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
1095 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1097 &csr->cache_head->data[bytes_to_quads(i)],
1099 if (ret != CSR1212_SUCCESS)
1104 /* Apparently there are too many different wrong implementations of the
1105 * CRC algorithm that verifying them is moot. */
1106 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1107 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1111 cr = CSR1212_MALLOC(sizeof(*cr));
1117 cr->offset_start = 0;
1118 cr->offset_end = csr->crc_len + 4;
1120 csr->cache_head->filled_head = cr;
1121 csr->cache_head->filled_tail = cr;
1123 return CSR1212_SUCCESS;
1126 #define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
1127 #define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1128 #define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1129 #define CSR1212_KV_VAL_MASK 0xffffff
1130 #define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
1132 static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1135 int ret = CSR1212_SUCCESS;
1136 struct csr1212_keyval *k = NULL;
1139 switch(CSR1212_KV_KEY_TYPE(ki)) {
1140 case CSR1212_KV_TYPE_IMMEDIATE:
1141 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1142 CSR1212_KV_VAL(ki));
1148 k->refcnt = 0; /* Don't keep local reference when parsing. */
1151 case CSR1212_KV_TYPE_CSR_OFFSET:
1152 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1153 CSR1212_KV_VAL(ki));
1158 k->refcnt = 0; /* Don't keep local reference when parsing. */
1162 /* Compute the offset from 0xffff f000 0000. */
1163 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1164 if (offset == kv_pos) {
1165 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1166 * or Directories. The Config ROM image is most likely
1167 * messed up, so we'll just abort here. */
1172 k = csr1212_find_keyval_offset(dir, offset);
1175 break; /* Found it. */
1177 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1178 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1180 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1186 k->refcnt = 0; /* Don't keep local reference when parsing. */
1187 k->valid = 0; /* Contents not read yet so it's not valid. */
1191 k->next = dir->next;
1192 dir->next->prev = k;
1195 ret = csr1212_attach_keyval_to_directory(dir, k);
1198 if (ret != CSR1212_SUCCESS && k != NULL)
1203 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1204 struct csr1212_csr_rom_cache *cache)
1206 struct csr1212_keyval_img *kvi;
1208 int ret = CSR1212_SUCCESS;
1211 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1213 kvi_len = be16_to_cpu(kvi->length);
1216 /* Apparently there are too many different wrong implementations of the
1217 * CRC algorithm that verifying them is moot. */
1218 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1219 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1225 switch(kv->key.type) {
1226 case CSR1212_KV_TYPE_DIRECTORY:
1227 for (i = 0; i < kvi_len; i++) {
1228 u32 ki = kvi->data[i];
1230 /* Some devices put null entries in their unit
1231 * directories. If we come across such an entry,
1235 ret = csr1212_parse_dir_entry(kv, ki,
1237 quads_to_bytes(i + 1)));
1239 kv->value.directory.len = kvi_len;
1242 case CSR1212_KV_TYPE_LEAF:
1243 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1244 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1245 if (!kv->value.leaf.data) {
1250 kv->value.leaf.len = kvi_len;
1251 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
1263 csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1265 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1266 struct csr1212_keyval_img *kvi = NULL;
1267 struct csr1212_csr_rom_cache *cache;
1273 BUG_ON(!csr || !kv || csr->max_rom < 1);
1275 /* First find which cache the data should be in (or go in if not read
1277 for (cache = csr->cache_head; cache; cache = cache->next) {
1278 if (kv->offset >= cache->offset &&
1279 kv->offset < (cache->offset + cache->size))
1286 /* Only create a new cache for Extended ROM leaves. */
1287 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1290 if (csr->ops->bus_read(csr,
1291 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1292 sizeof(u32), &q, csr->private)) {
1296 kv->value.leaf.len = be32_to_cpu(q) >> 16;
1298 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1299 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1301 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1305 kv->value.leaf.data = &cache->data[1];
1306 csr->cache_tail->next = cache;
1307 cache->prev = csr->cache_tail;
1309 csr->cache_tail = cache;
1310 cache->filled_head =
1311 CSR1212_MALLOC(sizeof(*cache->filled_head));
1312 if (!cache->filled_head) {
1316 cache->filled_head->offset_start = 0;
1317 cache->filled_head->offset_end = sizeof(u32);
1318 cache->filled_tail = cache->filled_head;
1319 cache->filled_head->next = NULL;
1320 cache->filled_head->prev = NULL;
1323 /* Don't read the entire extended ROM now. Pieces of it will
1324 * be read when entries inside it are read. */
1325 return csr1212_parse_keyval(kv, cache);
1328 cache_index = kv->offset - cache->offset;
1330 /* Now search read portions of the cache to see if it is there. */
1331 for (cr = cache->filled_head; cr; cr = cr->next) {
1332 if (cache_index < cr->offset_start) {
1333 newcr = CSR1212_MALLOC(sizeof(*newcr));
1337 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1338 newcr->offset_end = newcr->offset_start;
1340 newcr->prev = cr->prev;
1344 } else if ((cache_index >= cr->offset_start) &&
1345 (cache_index < cr->offset_end)) {
1346 kvi = (struct csr1212_keyval_img*)
1347 (&cache->data[bytes_to_quads(cache_index)]);
1348 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1350 } else if (cache_index == cr->offset_end)
1355 cr = cache->filled_tail;
1356 newcr = CSR1212_MALLOC(sizeof(*newcr));
1360 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1361 newcr->offset_end = newcr->offset_start;
1363 newcr->next = cr->next;
1366 cache->filled_tail = newcr;
1369 while(!kvi || cr->offset_end < cache_index + kv_len) {
1370 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1371 ~(csr->max_rom - 1))];
1373 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1374 cr->offset_end) & ~(csr->max_rom - 1);
1376 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1378 if (csr->max_rom == 4)
1379 /* We've got problems! */
1382 /* Apparently the max_rom value was a lie, set it to
1383 * do quadlet reads and try again. */
1388 cr->offset_end += csr->max_rom - (cr->offset_end &
1389 (csr->max_rom - 1));
1391 if (!kvi && (cr->offset_end > cache_index)) {
1392 kvi = (struct csr1212_keyval_img*)
1393 (&cache->data[bytes_to_quads(cache_index)]);
1394 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1397 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1398 /* The Leaf or Directory claims its length extends
1399 * beyond the ConfigROM image region and thus beyond the
1400 * end of our cache region. Therefore, we abort now
1401 * rather than seg faulting later. */
1407 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1408 /* consolidate region entries */
1409 ncr->offset_start = cr->offset_start;
1412 cr->prev->next = cr->next;
1413 ncr->prev = cr->prev;
1414 if (cache->filled_head == cr)
1415 cache->filled_head = ncr;
1421 return csr1212_parse_keyval(kv, cache);
/* Return @kv with its contents guaranteed read: if the keyval has not yet
 * been parsed from the bus, fetch it via csr1212_read_keyval() first.
 * NOTE(review): the validity check and the NULL return on read failure
 * are elided in this excerpt. */
struct csr1212_keyval *
csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
	if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
1435 int csr1212_parse_csr(struct csr1212_csr *csr)
1437 static const int mr_map[] = { 4, 64, 1024, 0 };
1438 struct csr1212_dentry *dentry;
1441 BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
1443 ret = csr1212_parse_bus_info_block(csr);
1444 if (ret != CSR1212_SUCCESS)
1447 if (!csr->ops->get_max_rom)
1448 csr->max_rom = mr_map[0]; /* default value */
1450 int i = csr->ops->get_max_rom(csr->bus_info_data,
1454 csr->max_rom = mr_map[i];
1457 csr->cache_head->layout_head = csr->root_kv;
1458 csr->cache_head->layout_tail = csr->root_kv;
1460 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1463 csr->root_kv->valid = 0;
1464 csr->root_kv->next = csr->root_kv;
1465 csr->root_kv->prev = csr->root_kv;
1466 ret = csr1212_read_keyval(csr, csr->root_kv);
1467 if (ret != CSR1212_SUCCESS)
1470 /* Scan through the Root directory finding all extended ROM regions
1471 * and make cache regions for them */
1472 for (dentry = csr->root_kv->value.directory.dentries_head;
1473 dentry; dentry = dentry->next) {
1474 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1475 !dentry->kv->valid) {
1476 ret = csr1212_read_keyval(csr, dentry->kv);
1477 if (ret != CSR1212_SUCCESS)
1482 return CSR1212_SUCCESS;