/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Functions for EQs, NEQs and interrupts
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM(1,1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM(8,31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM(2,7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM(8,31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM(8,31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32,63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32,63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM(1,1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM(2,7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM(8,15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52,63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM(0,7)
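/*
 * The field definitions above use EHCA_BMASK_IBM(from, to) from ehca_tools.h,
 * i.e. IBM bit numbering where bit 0 is the most significant bit of the
 * 64-bit (N)EQE word. EHCA_BMASK_GET() extracts such a field; for example,
 * EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe) yields the low-order 32 bits of the EQE,
 * which serve as the CQ token for the idr lookups below.
 */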
static void queue_comp_task(struct ehca_cq *__cq);

static struct ehca_comp_pool* pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif
static inline void comp_event_callback(struct ehca_cq *cq)
{
        if (!cq->ib_cq.comp_handler)
                return;

        spin_lock(&cq->cb_lock);
        cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
        spin_unlock(&cq->cb_lock);
}
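/*
 * Decode and dump the error data block returned by the hypervisor for a
 * broken resource. rblock[2] carries the resource type (QP or CQ), rblock[1]
 * the resource handle; the raw block is hex-dumped via ehca_dmp().
 */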
static void print_error_data(struct ehca_shca *shca, void *data,
                             u64 *rblock, int length)
{
        u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
        u64 resource = rblock[1];

        switch (type) {
        case 0x1: /* Queue Pair */
        {
                struct ehca_qp *qp = (struct ehca_qp *)data;

                /* only print error data if AER is set */
                if (rblock[6] == 0)
                        return;

                ehca_err(&shca->ib_device,
                         "QP 0x%x (resource=%lx) has errors.",
                         qp->ib_qp.qp_num, resource);
                break;
        }
        case 0x4: /* Completion Queue */
        {
                struct ehca_cq *cq = (struct ehca_cq *)data;

                ehca_err(&shca->ib_device,
                         "CQ 0x%x (resource=%lx) has errors.",
                         cq->cq_number, resource);
                break;
        }
        default:
                ehca_err(&shca->ib_device,
                         "Unknown error type: %lx on %s.",
                         type, shca->ib_device.name);
                break;
        }

        ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
        ehca_err(&shca->ib_device, "EHCA ----- error data begin "
                 "---------------------------------------------------");
        ehca_dmp(rblock, length, "resource=%lx", resource);
        ehca_err(&shca->ib_device, "EHCA ----- error data end "
                 "----------------------------------------------------");
}
int ehca_error_data(struct ehca_shca *shca, void *data,
                    u64 resource)
{
        unsigned long ret;
        u64 *rblock;
        unsigned long block_count;

        rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
                ret = -ENOMEM;
                goto error_data1;
        }

        /* rblock must be 4K aligned and should be 4K large */
        ret = hipz_h_error_data(shca->ipz_hca_handle, resource, rblock,
                                &block_count);

        if (ret == H_R_STATE)
                ehca_err(&shca->ib_device,
                         "No error data is available: %lx.", resource);
        else if (ret == H_SUCCESS) {
                int length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

                if (length > EHCA_PAGESIZE)
                        length = EHCA_PAGESIZE;

                print_error_data(shca, data, rblock, length);
        } else
                ehca_err(&shca->ib_device,
                         "Error data could not be fetched: %lx", resource);

        ehca_free_fw_ctrlblock(rblock);

error_data1:
        return ret;
}
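/*
 * Affiliated asynchronous QP events: the EQE carries the QP token that was
 * registered in ehca_qp_idr when the QP was set up (presumably in
 * ehca_create_qp()); resolve it under ehca_qp_idr_lock, fetch any firmware
 * error data, and forward an ib_event to the consumer's event handler.
 */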
static void qp_event_callback(struct ehca_shca *shca,
                              u64 eqe,
                              enum ib_event_type event_type)
{
        struct ib_event event;
        struct ehca_qp *qp;
        unsigned long flags;
        u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);

        spin_lock_irqsave(&ehca_qp_idr_lock, flags);
        qp = idr_find(&ehca_qp_idr, token);
        spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

        if (!qp)
                return;

        ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);

        if (!qp->ib_qp.event_handler)
                return;

        event.device     = &shca->ib_device;
        event.event      = event_type;
        event.element.qp = &qp->ib_qp;

        qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
}
static void cq_event_callback(struct ehca_shca *shca,
                              u64 eqe)
{
        struct ehca_cq *cq;
        unsigned long flags;
        u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
        cq = idr_find(&ehca_cq_idr, token);
        if (cq)
                atomic_inc(&cq->nr_events);
        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

        if (!cq)
                return;

        ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

        if (atomic_dec_and_test(&cq->nr_events))
                wake_up(&cq->wait_completion);
}
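/*
 * Dispatch an affiliated EQ entry by its event identifier. QP related
 * identifiers are turned into ib_events via qp_event_callback(), CQ errors
 * go through cq_event_callback() (which also dumps error data), everything
 * else is only logged.
 */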
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
        u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

        switch (identifier) {
        case 0x02: /* path migrated */
                qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
                break;
        case 0x03: /* communication established */
                qp_event_callback(shca, eqe, IB_EVENT_COMM_EST);
                break;
        case 0x04: /* send queue drained */
                qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED);
                break;
        case 0x05: /* QP error */
        case 0x06: /* QP error */
                qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL);
                break;
        case 0x07: /* CQ error */
        case 0x08: /* CQ error */
                cq_event_callback(shca, eqe);
                break;
        case 0x09: /* MRMWPTE error */
                ehca_err(&shca->ib_device, "MRMWPTE error.");
                break;
        case 0x0A: /* port event */
                ehca_err(&shca->ib_device, "Port event.");
                break;
        case 0x0B: /* MR access error */
                ehca_err(&shca->ib_device, "MR access error.");
                break;
        case 0x0C: /* EQ error */
                ehca_err(&shca->ib_device, "EQ error.");
                break;
        case 0x0D: /* P/Q_Key mismatch */
                ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
                break;
        case 0x10: /* sampling complete */
                ehca_err(&shca->ib_device, "Sampling complete.");
                break;
        case 0x11: /* unaffiliated access error */
                ehca_err(&shca->ib_device, "Unaffiliated access error.");
                break;
        case 0x12: /* path migrating error */
                ehca_err(&shca->ib_device, "Path migration error.");
                break;
        case 0x13: /* interface trace stopped */
                ehca_err(&shca->ib_device, "Interface trace stopped.");
                break;
        case 0x14: /* first error capture info available */
        default:
                ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
                         identifier, shca->ib_device.name);
                break;
        }
}
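/*
 * Handle notification (NEQ) event codes: 0x30 port availability change,
 * 0x31 disruptive port configuration change (LID, PKEY or SM change),
 * 0x32 adapter malfunction, 0x33 trace stopped. Port state changes are
 * reported to the IB midlayer via ib_dispatch_event().
 */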
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
        struct ib_event event;
        u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

        switch (ec) {
        case 0x30: /* port availability change */
                if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
                        ehca_info(&shca->ib_device,
                                  "port %x is active.", port);
                        event.device = &shca->ib_device;
                        event.event = IB_EVENT_PORT_ACTIVE;
                        event.element.port_num = port;
                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
                        ib_dispatch_event(&event);
                } else {
                        ehca_info(&shca->ib_device,
                                  "port %x is inactive.", port);
                        event.device = &shca->ib_device;
                        event.event = IB_EVENT_PORT_ERR;
                        event.element.port_num = port;
                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
                        ib_dispatch_event(&event);
                }
                break;
        case 0x31:
                /* port configuration change
                 * disruptive change is caused by
                 * LID, PKEY or SM change
                 */
                ehca_warn(&shca->ib_device,
                          "disruptive port %x configuration change", port);

                ehca_info(&shca->ib_device,
                          "port %x is inactive.", port);
                event.device = &shca->ib_device;
                event.event = IB_EVENT_PORT_ERR;
                event.element.port_num = port;
                shca->sport[port - 1].port_state = IB_PORT_DOWN;
                ib_dispatch_event(&event);

                ehca_info(&shca->ib_device,
                          "port %x is active.", port);
                event.device = &shca->ib_device;
                event.event = IB_EVENT_PORT_ACTIVE;
                event.element.port_num = port;
                shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
                ib_dispatch_event(&event);
                break;
        case 0x32: /* adapter malfunction */
                ehca_err(&shca->ib_device, "Adapter malfunction.");
                break;
        case 0x33: /* trace stopped */
                ehca_err(&shca->ib_device, "Trace stopped.");
                break;
        default:
                ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
                         ec, shca->ib_device.name);
                break;
        }
}
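/*
 * reset_eq_pending() clears the CQx_EP register of the CQ through its kernel
 * galpa mapping so the adapter can generate further event queue entries for
 * this CQ; the load afterwards reads the register back, presumably to make
 * sure the MMIO store has reached the adapter before new completions are
 * handled.
 */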
static inline void reset_eq_pending(struct ehca_cq *cq)
{
        u64 CQx_EP;
        struct h_galpa gal = cq->galpas.kernel;

        hipz_galpa_store_cq(gal, cqx_ep, 0x0);
        CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
}
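/*
 * NEQ handling: the hard interrupt handler only schedules the NEQ tasklet;
 * the tasklet then drains all pending NEQ entries via ehca_poll_eq(), parses
 * the event codes and finally resets the event with hipz_h_reset_event() so
 * the adapter can signal the next notification interrupt.
 */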
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
        struct ehca_shca *shca = (struct ehca_shca*)dev_id;

        tasklet_hi_schedule(&shca->neq.interrupt_task);

        return IRQ_HANDLED;
}
void ehca_tasklet_neq(unsigned long data)
{
        struct ehca_shca *shca = (struct ehca_shca*)data;
        struct ehca_eqe *eqe;
        u64 ret;

        eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);

        while (eqe) {
                if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
                        parse_ec(shca, eqe->entry);

                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
        }

        ret = hipz_h_reset_event(shca->ipz_hca_handle,
                                 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

        if (ret != H_SUCCESS)
                ehca_err(&shca->ib_device, "Can't clear notification events.");
}
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
        struct ehca_shca *shca = (struct ehca_shca*)dev_id;

        tasklet_hi_schedule(&shca->eq.interrupt_task);

        return IRQ_HANDLED;
}
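/*
 * Process a single EQ entry. For completion events the CQ is looked up by
 * token under ehca_cq_idr_lock and its nr_events count is raised first, so
 * the CQ cannot be freed while the completion handler runs; destroy_cq is
 * expected to wait on wait_completion until nr_events drops back to zero.
 * The completion is then either queued to the per-CPU completion pool
 * (scaling code) or handled inline. Non-completion events are passed on to
 * parse_identifier().
 */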
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
        u64 eqe_value;
        u32 token;
        unsigned long flags;
        struct ehca_cq *cq;

        eqe_value = eqe->entry;
        ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
        if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                ehca_dbg(&shca->ib_device, "Got completion event");
                token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
                cq = idr_find(&ehca_cq_idr, token);
                if (cq)
                        atomic_inc(&cq->nr_events);
                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                if (!cq) {
                        ehca_err(&shca->ib_device,
                                 "Invalid eqe for non-existing cq token=%x",
                                 token);
                        return;
                }
                reset_eq_pending(cq);
                if (ehca_scaling_code)
                        queue_comp_task(cq);
                else {
                        comp_event_callback(cq);
                        if (atomic_dec_and_test(&cq->nr_events))
                                wake_up(&cq->wait_completion);
                }
        } else {
                ehca_dbg(&shca->ib_device, "Got non completion event");
                parse_identifier(shca, eqe_value);
        }
}
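/*
 * Main EQ processing, used both from the EQ tasklet (is_irq != 0) and from
 * the periodic "deadman" poll. Up to EHCA_EQE_CACHE_SIZE entries are read
 * into eq->eqe_cache and their CQs pinned via nr_events; then the pending
 * bits are reset to re-enable interrupts for new completions, and only
 * afterwards are the completion handlers run (or queued to the comp pool).
 * If the EQ was not empty at that point, the remaining entries are polled
 * and handled one by one through process_eqe().
 */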
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
        struct ehca_eq *eq = &shca->eq;
        struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
        u64 eqe_value;
        unsigned long flags;
        int eqe_cnt, i;
        int eq_empty = 0;

        spin_lock_irqsave(&eq->irq_spinlock, flags);
        if (is_irq) {
                const int max_query_cnt = 100;
                int query_cnt = 0;
                int int_state = 1;
                do {
                        int_state = hipz_h_query_int_state(
                                shca->ipz_hca_handle, eq->ist);
                        query_cnt++;
                        iosync();
                } while (int_state && query_cnt < max_query_cnt);
                if (unlikely((query_cnt == max_query_cnt)))
                        ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
                                 int_state, query_cnt);
        }

        /* read out all eqes */
        eqe_cnt = 0;
        do {
                u32 token;
                eqe_cache[eqe_cnt].eqe =
                        (struct ehca_eqe *)ehca_poll_eq(shca, eq);
                if (!eqe_cache[eqe_cnt].eqe)
                        break;
                eqe_value = eqe_cache[eqe_cnt].eqe->entry;
                if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                        token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                        spin_lock(&ehca_cq_idr_lock);
                        eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
                        if (eqe_cache[eqe_cnt].cq)
                                atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
                        spin_unlock(&ehca_cq_idr_lock);
                        if (!eqe_cache[eqe_cnt].cq) {
                                ehca_err(&shca->ib_device,
                                         "Invalid eqe for non-existing cq "
                                         "token=%x", token);
                                continue;
                        }
                } else
                        eqe_cache[eqe_cnt].cq = NULL;
                eqe_cnt++;
        } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
        if (!eqe_cnt) {
                if (is_irq)
                        ehca_dbg(&shca->ib_device,
                                 "No eqe found for irq event");
                goto unlock_irq_spinlock;
        } else if (!is_irq)
                ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
        if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
                ehca_dbg(&shca->ib_device, "too many eqes for one irq event");

        /* enable irq for new packets */
        for (i = 0; i < eqe_cnt; i++) {
                if (eq->eqe_cache[i].cq)
                        reset_eq_pending(eq->eqe_cache[i].cq);
        }

        /* check eq */
        spin_lock(&eq->spinlock);
        eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
        spin_unlock(&eq->spinlock);

        /* call completion handler for cached eqes */
        for (i = 0; i < eqe_cnt; i++)
                if (eq->eqe_cache[i].cq) {
                        if (ehca_scaling_code)
                                queue_comp_task(eq->eqe_cache[i].cq);
                        else {
                                struct ehca_cq *cq = eq->eqe_cache[i].cq;
                                comp_event_callback(cq);
                                if (atomic_dec_and_test(&cq->nr_events))
                                        wake_up(&cq->wait_completion);
                        }
                } else {
                        ehca_dbg(&shca->ib_device, "Got non completion event");
                        parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
                }

        /* poll eq if not empty */
        if (eq_empty)
                goto unlock_irq_spinlock;
        do {
                struct ehca_eqe *eqe;
                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
                if (!eqe)
                        break;
                process_eqe(shca, eqe);
        } while (1);

unlock_irq_spinlock:
        spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}
void ehca_tasklet_eq(unsigned long data)
{
        ehca_process_eq((struct ehca_shca*)data, 1);
}
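/*
 * CQ callback scaling code: when the ehca_scaling_code flag (a module
 * parameter) is set, completion callbacks are not run in the EQ tasklet but
 * handed to a pool of per-CPU kernel threads ("ehca_comp/N").
 * queue_comp_task() picks the next online CPU in round-robin fashion and
 * appends the CQ to that CPU's work list; the comp_task thread drains the
 * list and invokes the consumer's completion handler.
 */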
static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
{
        int cpu;
        unsigned long flags;

        WARN_ON_ONCE(!in_interrupt());
        if (ehca_debug_level)
                ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

        spin_lock_irqsave(&pool->last_cpu_lock, flags);
        cpu = next_cpu(pool->last_cpu, cpu_online_map);
        if (cpu >= NR_CPUS)
                cpu = first_cpu(cpu_online_map);
        pool->last_cpu = cpu;
        spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

        return cpu;
}
static void __queue_comp_task(struct ehca_cq *__cq,
                              struct ehca_cpu_comp_task *cct)
{
        unsigned long flags;

        spin_lock_irqsave(&cct->task_lock, flags);
        spin_lock(&__cq->task_lock);

        if (__cq->nr_callbacks == 0) {
                __cq->nr_callbacks++;
                list_add_tail(&__cq->entry, &cct->cq_list);
                cct->cq_jobs++;
                wake_up(&cct->wait_queue);
        } else
                __cq->nr_callbacks++;

        spin_unlock(&__cq->task_lock);
        spin_unlock_irqrestore(&cct->task_lock, flags);
}
static void queue_comp_task(struct ehca_cq *__cq)
{
        int cpu_id;
        struct ehca_cpu_comp_task *cct;
        int cq_jobs;
        unsigned long flags;

        cpu_id = find_next_online_cpu(pool);
        BUG_ON(!cpu_online(cpu_id));

        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
        BUG_ON(!cct);

        spin_lock_irqsave(&cct->task_lock, flags);
        cq_jobs = cct->cq_jobs;
        spin_unlock_irqrestore(&cct->task_lock, flags);
        if (cq_jobs > 0) {
                cpu_id = find_next_online_cpu(pool);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
                BUG_ON(!cct);
        }

        __queue_comp_task(__cq, cct);
}
static void run_comp_task(struct ehca_cpu_comp_task* cct)
{
        struct ehca_cq *cq;
        unsigned long flags;

        spin_lock_irqsave(&cct->task_lock, flags);

        while (!list_empty(&cct->cq_list)) {
                cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
                spin_unlock_irqrestore(&cct->task_lock, flags);

                comp_event_callback(cq);
                if (atomic_dec_and_test(&cq->nr_events))
                        wake_up(&cq->wait_completion);

                spin_lock_irqsave(&cct->task_lock, flags);
                spin_lock(&cq->task_lock);
                cq->nr_callbacks--;
                if (!cq->nr_callbacks) {
                        list_del_init(cct->cq_list.next);
                        cct->cq_jobs--;
                }
                spin_unlock(&cq->task_lock);
        }

        spin_unlock_irqrestore(&cct->task_lock, flags);
}
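/*
 * Per-CPU completion thread: sleep on the cct wait queue while the CQ list
 * is empty, otherwise call run_comp_task() to work off the queued CQs. The
 * thread is created by create_comp_task(), bound to its CPU and stopped via
 * kthread_stop() in destroy_comp_task().
 */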
static int comp_task(void *__cct)
{
        struct ehca_cpu_comp_task* cct = __cct;
        int cql_empty;
        DECLARE_WAITQUEUE(wait, current);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cct->wait_queue, &wait);

                spin_lock_irq(&cct->task_lock);
                cql_empty = list_empty(&cct->cq_list);
                spin_unlock_irq(&cct->task_lock);
                if (cql_empty)
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);

                remove_wait_queue(&cct->wait_queue, &wait);

                spin_lock_irq(&cct->task_lock);
                cql_empty = list_empty(&cct->cq_list);
                spin_unlock_irq(&cct->task_lock);
                if (!cql_empty)
                        run_comp_task(__cct);

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}
static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
                                            int cpu)
{
        struct ehca_cpu_comp_task *cct;

        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
        spin_lock_init(&cct->task_lock);
        INIT_LIST_HEAD(&cct->cq_list);
        init_waitqueue_head(&cct->wait_queue);
        cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

        return cct->task;
}
static void destroy_comp_task(struct ehca_comp_pool *pool,
                              int cpu)
{
        struct ehca_cpu_comp_task *cct;
        struct task_struct *task;
        unsigned long flags_cct;

        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

        spin_lock_irqsave(&cct->task_lock, flags_cct);

        task = cct->task;
        cct->task = NULL;
        cct->cq_jobs = 0;

        spin_unlock_irqrestore(&cct->task_lock, flags_cct);

        if (task)
                kthread_stop(task);
}
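/*
 * CPU hotplug support: when a CPU goes offline, its completion thread is
 * torn down and any CQs still queued on it are handed over to the CPU that
 * runs the notifier via take_over_work(). The notifier block is registered
 * in ehca_create_comp_pool() below.
 */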
#ifdef CONFIG_HOTPLUG_CPU
static void take_over_work(struct ehca_comp_pool *pool,
                           int cpu)
{
        struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
        LIST_HEAD(list);
        struct ehca_cq *cq;
        unsigned long flags_cct;

        spin_lock_irqsave(&cct->task_lock, flags_cct);

        list_splice_init(&cct->cq_list, &list);

        while (!list_empty(&list)) {
                cq = list_entry(list.next, struct ehca_cq, entry);

                list_del(&cq->entry);
                __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
                                                  smp_processor_id()));
        }

        spin_unlock_irqrestore(&cct->task_lock, flags_cct);
}
static int comp_pool_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct ehca_cpu_comp_task *cct;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
                if (!create_comp_task(pool, cpu)) {
                        ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
                        return NOTIFY_BAD;
                }
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
                kthread_bind(cct->task, any_online_cpu(cpu_online_map));
                destroy_comp_task(pool, cpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
                kthread_bind(cct->task, cpu);
                wake_up_process(cct->task);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
                destroy_comp_task(pool, cpu);
                take_over_work(pool, cpu);
                break;
        }

        return NOTIFY_OK;
}
#endif
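/*
 * Set up the completion pool when the driver is initialized: allocate the
 * per-CPU comp task array, start one bound "ehca_comp/N" thread per online
 * CPU and, with CPU hotplug enabled, register the notifier above.
 * ehca_destroy_comp_pool() undoes all of this on driver removal.
 */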
int ehca_create_comp_pool(void)
{
        int cpu;
        struct task_struct *task;

        if (!ehca_scaling_code)
                return 0;

        pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
        if (pool == NULL)
                return -ENOMEM;

        spin_lock_init(&pool->last_cpu_lock);
        pool->last_cpu = any_online_cpu(cpu_online_map);

        pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
        if (pool->cpu_comp_tasks == NULL) {
                kfree(pool);
                return -EINVAL;
        }

        for_each_online_cpu(cpu) {
                task = create_comp_task(pool, cpu);
                if (task) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        comp_pool_callback_nb.notifier_call = comp_pool_callback;
        comp_pool_callback_nb.priority = 0;
        register_cpu_notifier(&comp_pool_callback_nb);
#endif

        printk(KERN_INFO "eHCA scaling code enabled\n");

        return 0;
}
void ehca_destroy_comp_pool(void)
{
        int i;

        if (!ehca_scaling_code)
                return;

#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&comp_pool_callback_nb);
#endif

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i))
                        destroy_comp_task(pool, i);
        }
        free_percpu(pool->cpu_comp_tasks);
        kfree(pool);
}