/*
 * libata-eh.c - libata error handling
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"
enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable. It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting. Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,
};
/* The following table determines how we sequence resets. Each entry
 * represents timeout for that try. The first try can be soft or
 * hardreset. All others are hardreset if available. In most cases
 * the first reset w/ 10sec timeout should succeed. Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};
static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};
struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};
/* The following table determines timeouts to use for EH internal
 * commands. Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used. If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
};
#undef CMDS
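/* For example, with the table above an IDENTIFY (ATA_CMD_ID_ATA) issued
 * by EH would see escalating timeouts roughly like this (sketch of the
 * intended behavior, assuming EH reissues the command after each
 * timeout):
 *
 *	ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	->  5000ms
 *	(times out) ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *	ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	-> 10000ms
 *	(times out again, index bumped once more)
 *	ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	-> 30000ms
 *
 * Commands which don't appear in the table simply get
 * ATA_EH_CMD_DFL_TIMEOUT every time.
 */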
static void __ata_port_freeze(struct ata_port *ap);

#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}
/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
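/* A typical LLDD use, e.g. from an interrupt handler that already holds
 * the host lock (irq_stat is a hypothetical controller register value):
 *
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "unexpected FIS");
 *
 * would yield a description like "irq_stat 0xdeadbeef, unexpected FIS"
 * in the subsequent EH report.
 */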
/**
 * ata_ehi_clear_desc - clean error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description. If port description is not empty, " " is added
 * in-between. This function is to be used while initializing
 * ata_host. The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description. If @offset is zero or
 * positive, only the name and the offset address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
			      start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */
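/* Sketch of typical use during host initialization, e.g. from a PCI
 * LLDD's probe routine (the BAR number and names below are made up for
 * illustration):
 *
 *	ata_port_desc(ap, "irq %d", pdev->irq);
 *	ata_port_pbar_desc(ap, 5, -1, "mmio");
 *	ata_port_pbar_desc(ap, 5, 0x100 + ap->port_no * 0x80, "port");
 *
 * which would print something like
 * "irq 16 mmio m2048@0xfe6ad000 port 0xfe6ad100" on host registration.
 */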
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}
/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out. This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (!ent->err_mask)
		return NULL;
	return ent;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}
static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
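/* ata_ering_map() walks entries from the most recent one backwards
 * until it hits an unused slot, wraps around, or the callback returns
 * non-zero. A minimal callback that counts entries newer than some
 * timestamp might look like this (hypothetical helper; see
 * speed_down_verdict_cb() below for the real in-tree user):
 *
 *	static int count_recent_cb(struct ata_ering_entry *ent, void *arg)
 *	{
 *		struct recent_arg { u64 since; int nr; } *p = arg;
 *
 *		if (ent->timestamp < p->since)
 *			return -1;	(stop the walk)
 *		p->nr++;
 *		return 0;		(keep walking)
 *	}
 */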
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_link_for_each_dev(tdev, link)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_link_for_each_dev(tdev, link)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout. We race with normal completion of
 * the qc for @cmd. If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
 * timed out and EH should be invoked. Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout. When normal
	 * completion wins, the qc never reaches EH. When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point. In such cases, both types of
	 * completions are honored. A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown. Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs. This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);
 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		__ata_port_for_each_link(link, ap) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_link_for_each_dev(dev, link) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}

			/* set last reset timestamp to some time in the past */
			ehc->last_reset = jiffies - 60 * HZ;
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point. Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		__ata_port_for_each_link(link, ap)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}
	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}
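/* Typical use: after kicking EH, wait for it to finish before touching
 * the port again, e.g. (sketch):
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 *	ata_port_wait_eh(ap);
 *
 * The retry loop above is what makes this reliable: EH may be
 * re-entered between our wakeup and SCSI EH fully completing.
 */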
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before. Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc. EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}
/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap. EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all active qc's on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
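/* A minimal sketch of an LLDD error-interrupt path using the helpers
 * above (the register name irq_stat and the error bit are hypothetical;
 * the host lock is held):
 *
 *	if (irq_stat & MY_IRQ_ERROR) {
 *		struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *		ata_ehi_clear_desc(ehi);
 *		ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *		ata_port_abort(ap);	(all active qcs go to EH)
 *	}
 */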
/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port. Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	nr_aborted = ata_port_abort(ap);
	__ata_port_freeze(ap);

	return nr_aborted;
}
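/* ata_port_freeze() is the usual response when the port state can no
 * longer be trusted, e.g. on an HSM violation seen in the interrupt
 * handler (sketch; qc and ehi obtained as usual, host lock held):
 *
 *	qc->err_mask |= AC_ERR_HSM;
 *	ehi->action |= ATA_EH_RESET;
 *	ata_port_freeze(ap);	(aborts qcs and calls ->freeze())
 *
 * The port then stays frozen until EH thaws it, normally after a
 * successful reset.
 */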
/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received. This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached. Check whether ATAPI
			 * AN is configured. If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used. The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_port_for_each_link(link, ap) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
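/* LLDDs with SDB-FIS based async notification support simply forward
 * the event here, e.g. from their interrupt handler (sketch; how the
 * notify condition is detected is controller specific):
 *
 *	if (irq_stat & MY_IRQ_SDB_NOTIFY)
 *		sata_async_notification(ap);
 *
 * The helper then sorts out ATAPI AN vs. PMP PHY status change itself.
 */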
/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed. To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried. To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 * ata_eh_done - EH action complete
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string. Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}
/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
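/* For reference, the byte offsets of log page 10h consumed above
 * (first sector only; layout per the SATA NCQ error log):
 *
 *	buf[0]     bit 7: NQ (non-queued error), bits 4:0: failed tag
 *	buf[2]     status           buf[3]   error
 *	buf[4..6]  LBA low/mid/high buf[7]   device
 *	buf[8..10] hob LBA low/mid/high
 *	buf[12]    sector count     buf[13]  hob sector count
 */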
/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE. This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred. Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links. For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF. For NCQ device errors, all an LLDD has to do
 * is set AC_ERR_DEV in ehi->err_mask. This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
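/* All an NCQ-aware LLDD has to do on a device error is flag it and let
 * EH run, e.g. (sketch):
 *
 *	ehi->err_mask |= AC_ERR_DEV;
 *	ata_port_abort(ap);
 *
 * ata_eh_analyze_ncq_error() then reads log page 10h, moves the blame
 * from the link to the one offending qc and clears AC_ERR_DEV from
 * ehc->i.err_mask so only the guilty command is failed.
 */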
/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure. This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  known supported commands.
 *
 * ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF		: Turn off NCQ.
 *
 * SPEED_DOWN	: Speed down transfer speed but don't fall back
 *		  to PIO.
 *
 * FALLBACK_TO_PIO	: Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error. An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The following are the speed down rules. #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
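/* Worked example: a freshly configured drive that throws two
 * DUBIOUS_TOUT_HSM errors within 5 minutes trips both rule #1 and
 * rule #2, yielding SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF |
 * KEEP_ERRORS (the error ring is preserved). A drive accumulating
 * three TOUT_HSM errors plus one UNK_DEV error spread over the last
 * 10 minutes gets only NCQ_OFF (rule #4 fires; rule #5 does not, as
 * ATA_BUS + TOUT_HSM is not more than 3).
 */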
/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary. It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO? Slowing down to PIO is meaningless for
	 * SATA ATA devices. Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
/**
 * ata_eh_link_autopsy - analyze error and determine recovery action
 * @link: host link to perform autopsy on
 *
 * Analyze why @link failed and determine which recovery actions
 * are needed. This function also sets more detailed AC_ERR_*
 * values and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (!(qc->err_mask & AC_ERR_INVALID) &&
		    ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}

	DPRINTK("EXIT\n");
}
/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: host port to perform autopsy on
 *
 * Analyze all links of @ap and determine why they failed and
 * which recovery actions are needed.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_port_for_each_link(link, ap)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link. Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		ata_eh_link_autopsy(ap->slave_link);

		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
/**
 * ata_eh_link_report - report error handling to user
 * @link: ATA link EH is going on
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			       ehc->i.err_mask, link->sactive, ehc->i.serror,
			       ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
				ehc->i.err_mask, link->sactive, ehc->i.serror,
				ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}
	if (ehc->i.serror)
		ata_link_printk(link, KERN_ERR,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		const u8 *cdb = qc->cdb;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol))
			snprintf(cdb_buf, sizeof(cdb_buf),
				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
				 cdb[0], cdb[1], cdb[2], cdb[3],
				 cdb[4], cdb[5], cdb[6], cdb[7],
				 cdb[8], cdb[9], cdb[10], cdb[11],
				 cdb[12], cdb[13], cdb[14], cdb[15]);

		ata_dev_printk(qc->dev, KERN_ERR,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s%s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_printk(qc->dev, KERN_ERR,
					       "status: { Busy }\n");
			else
				ata_dev_printk(qc->dev, KERN_ERR,
					       "status: { %s%s%s%s}\n",
					       res->command & ATA_DRDY ? "DRDY " : "",
					       res->command & ATA_DF ? "DF " : "",
					       res->command & ATA_DRQ ? "DRQ " : "",
					       res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
				     ATA_ABORTED)))
			ata_dev_printk(qc->dev, KERN_ERR,
				       "error: { %s%s%s%s}\n",
				       res->feature & ATA_ICRC ? "ICRC " : "",
				       res->feature & ATA_UNC ? "UNC " : "",
				       res->feature & ATA_IDNF ? "IDNF " : "",
				       res->feature & ATA_ABORTED ? "ABRT " : "");
	}
}
/**
 * ata_eh_report - report error handling to user
 * @ap: ATA port to report EH about
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	__ata_port_for_each_link(link, ap)
		ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_link_for_each_dev(dev, link)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}
static int ata_eh_followup_srst_needed(struct ata_link *link,
				       int rc, const unsigned int *classes)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = &slave->eh_context;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	now = jiffies;
	deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
	if (time_before(now, deadline))
		schedule_timeout_uninterruptible(deadline - now);

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;

	ata_link_for_each_dev(dev, link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}
	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too. Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc == -ENOENT) {
			ata_link_printk(link, KERN_DEBUG,
					"port disabled. ignoring.\n");
			ehc->i.action &= ~ATA_EH_RESET;

			ata_link_for_each_dev(dev, link)
				classes[dev->devno] = ATA_DEV_NONE;

			rc = 0;
		} else if (rc) {
			ata_link_printk(link, KERN_ERR,
					"prereset failed (errno=%d)\n", rc);
			goto out;
		}
	}

	/* prereset() might have cleared ATA_EH_RESET. If so,
	 * bang classes and return.
	 */
	if (reset && !(ehc->i.action & ATA_EH_RESET)) {
		ata_link_for_each_dev(dev, link)
			classes[dev->devno] = ATA_DEV_NONE;

		rc = 0;
		goto out;
	}
2334 ehc->last_reset = jiffies;
2335 if (ata_is_host_link(link))
2336 ata_eh_freeze_port(ap);
2338 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2342 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2343 reset == softreset ? "soft" : "hard");
2345 /* mark that this EH session started with reset */
2346 if (reset == hardreset)
2347 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2349 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2351 rc = ata_do_reset(link, reset, classes, deadline, true);
2352 if (rc && rc != -EAGAIN) {
2357 /* hardreset slave link if existent */
2358 if (slave && reset == hardreset) {
2362 ata_link_printk(slave, KERN_INFO,
2363 "hard resetting link\n");
2365 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2366 tmp = ata_do_reset(slave, reset, classes, deadline,
2374 failed_link = slave;
2380 /* perform follow-up SRST if necessary */
2381 if (reset == hardreset &&
2382 ata_eh_followup_srst_needed(link, rc, classes)) {
2386 ata_link_printk(link, KERN_ERR,
2387 "follow-up softreset required "
2388 "but no softreset avaliable\n");
2394 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2395 rc = ata_do_reset(link, reset, classes, deadline, true);
2399 ata_link_printk(link, KERN_INFO, "no reset method "
2400 "available, skipping reset\n");
2401 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2402 lflags |= ATA_LFLAG_ASSUME_ATA;
2406 * Post-reset processing
2408 ata_link_for_each_dev(dev, link) {
2409 /* After the reset, the device state is PIO 0 and the
2410 * controller state is undefined. Reset also wakes up
2411 * drives from sleeping mode.
2413 dev->pio_mode = XFER_PIO_0;
2414 dev->flags &= ~ATA_DFLAG_SLEEPING;
2416 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2419 /* apply class override */
2420 if (lflags & ATA_LFLAG_ASSUME_ATA)
2421 classes[dev->devno] = ATA_DEV_ATA;
2422 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2423 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
2426 /* record current link speed */
2427 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2428 link->sata_spd = (sstatus >> 4) & 0xf;
2429 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2430 slave->sata_spd = (sstatus >> 4) & 0xf;
2433 if (ata_is_host_link(link))
2434 ata_eh_thaw_port(ap);
2436 /* postreset() should clear hardware SError. Although SError
2437 * is cleared during link resume, clearing SError here is
2438 * necessary as some PHYs raise hotplug events after SRST.
2439 * This introduces race condition where hotplug occurs between
2440 * reset and here. This race is mediated by cross checking
2441 * link onlineness and classification result later.
2444 postreset(link, classes);
2446 postreset(slave, classes);
2449 /* clear cached SError */
2450 spin_lock_irqsave(link->ap->lock, flags);
2451 link->eh_info.serror = 0;
2453 slave->eh_info.serror = 0;
2454 spin_unlock_irqrestore(link->ap->lock, flags);
2456 /* Make sure onlineness and classification result correspond.
2457 * Hotplug could have happened during reset and some
2458 * controllers fail to wait while a drive is spinning up after
2459 * being hotplugged causing misdetection. By cross checking
2460 * link onlineness and classification result, those conditions
2461 * can be reliably detected and retried.
2464 ata_link_for_each_dev(dev, link) {
2465 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
2466 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2467 classes[dev->devno] = ATA_DEV_NONE;
2468 if (ata_phys_link_online(ata_dev_phys_link(dev)))
2473 if (classify && nr_unknown) {
2474 if (try < max_tries) {
2475 ata_link_printk(link, KERN_WARNING, "link online but "
2476 "device misclassified, retrying\n");
2481 ata_link_printk(link, KERN_WARNING,
2482 "link online but device misclassified, "
2483 "device detection might fail\n");
2486 /* reset successful, schedule revalidation */
2487 ata_eh_done(link, NULL, ATA_EH_RESET);
2489 ata_eh_done(slave, NULL, ATA_EH_RESET);
2490 ehc->last_reset = jiffies;
2491 ehc->i.action |= ATA_EH_REVALIDATE;
2495 /* clear hotplug flag */
2496 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2498 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2500 spin_lock_irqsave(ap->lock, flags);
2501 ap->pflags &= ~ATA_PFLAG_RESETTING;
2502 spin_unlock_irqrestore(ap->lock, flags);
2507 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2508 if (!ata_is_host_link(link) &&
2509 sata_scr_read(link, SCR_STATUS, &sstatus))
2512 if (rc == -ERESTART || try >= max_tries)
2516 if (time_before(now, deadline)) {
2517 unsigned long delta = deadline - now;
2519 ata_link_printk(failed_link, KERN_WARNING,
2520 "reset failed (errno=%d), retrying in %u secs\n",
2521 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2524 delta = schedule_timeout_uninterruptible(delta);
2527 if (try == max_tries - 1) {
2528 sata_down_spd_limit(link);
2530 sata_down_spd_limit(slave);
2531 } else if (rc == -EPIPE)
2532 sata_down_spd_limit(failed_link);
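
/*
 * Illustrative sketch, not part of the original source: the prereset
 * method wired into ata_eh_reset() usually only waits for the link to
 * settle and decides whether a reset is needed at all.  A hypothetical
 * driver (foo_* is an assumed name) can simply wrap the generic helper:
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		return ata_std_prereset(link, deadline);
 *	}
 *
 * As handled above, returning -ENOENT marks the port disabled, and
 * clearing ATA_EH_RESET from ehc->i.action skips the reset entirely.
 */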

static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling.  Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through INIT_COMPLETION() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs.  If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	INIT_COMPLETION(ap->park_req_pending);
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
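
/*
 * Sketch of the producer side for context (paraphrased from
 * ata_scsi_park_store() in libata-scsi.c, so the exact details are
 * best checked there): a park request is queued roughly as
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
 *	dev->unpark_deadline = ata_deadline(jiffies, msecs);
 *	ata_port_schedule_eh(ap);
 *	complete_all(&ap->park_req_pending);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 * which is the counterpart that makes the INIT_COMPLETION() /
 * wait_for_completion_timeout() handshake described above work.
 */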

static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
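
/*
 * The unload taskfile above uses the ATA head-unload convention:
 * IDLE IMMEDIATE with FEATURE 0x44 and LBA 0x554e4c, answered with
 * LBA low 0xc4 on success (the 0xc4 check is visible above; the
 * request constants are reconstructed from the standard and should be
 * treated as assumptions).  CHECK POWER MODE serves as the harmless
 * no-op command issued when unparking.
 */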

static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_link_for_each_dev_reverse(dev, link) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);
			switch (rc) {
			case 0:
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and kill the device.
				 */
				ata_eh_thaw_port(ap);
				dev->class = ATA_DEV_UNKNOWN;
				break;
			default:
				dev->class = ATA_DEV_UNKNOWN;
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_link_for_each_dev(dev, link) {
		if (!(new_mask & (1 << dev->devno)) ||
		    dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto err;

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
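
/*
 * Summary of the two attach paths above: devices that were already
 * enabled are revalidated in place via ata_dev_revalidate(), while
 * slots that became occupied after reset go through sata_pmp_attach()
 * or a fresh ata_dev_read_id() and are remembered in new_mask, so that
 * ata_dev_configure() later runs, in forward order, only for the
 * newcomers.
 */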

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_link_for_each_dev(dev, link) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_link_for_each_dev(dev, link) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
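
/*
 * Illustrative sketch, not part of the original source: a controller
 * with non-standard timing registers supplies its own ->set_mode,
 * which takes precedence above.  A hypothetical implementation (the
 * foo_* names are assumptions) could wrap the generic helper and then
 * fix up the chipset:
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed_dev)
 *	{
 *		int rc = ata_do_set_mode(link, r_failed_dev);
 *
 *		if (rc == 0)
 *			foo_program_timings(link->ap);
 *		return rc;
 *	}
 */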

static int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_link_for_each_dev(dev, link)
		if (ata_dev_enabled(dev))
			cnt++;
	return cnt;
}

static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_link_for_each_dev(dev, link)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}

static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_link_for_each_dev(dev, link) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
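
/*
 * In short: recovery is skipped only for an enabled link on an
 * unfrozen port that has no devices attached, no reset still pending,
 * and no vacant slot whose class code claims otherwise, i.e. when EH
 * would have nothing to do but re-probe emptiness it has already
 * probed.
 */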

static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	return 1;
}
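
/*
 * ehc->i.probe_mask is set by whoever requests probing (for instance
 * the hotplug path), while did_probe_mask remembers what this EH
 * session has already attempted, so a device is torn down and
 * re-probed at most once per session.  Hedged sketch of a typical
 * requesting side in a driver's interrupt handler:
 *
 *	ata_ehi_hotplugged(&ap->link.eh_info);
 *	ata_port_freeze(ap);
 */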

static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
	case -EIO:
		if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev));
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
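
/*
 * The switch above relies on deliberate fall-through: -ENODEV
 * schedules probing and then, like -EINVAL, is limited to a single
 * remaining try; both fall into the -EIO case, whose last-chance path
 * drops the link speed and forces PIO before the device is finally
 * disabled.
 */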

/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, yin and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retries and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int nr_failed_devs;
	int rc;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_link_for_each_dev(dev, link) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;
	nr_failed_devs = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_link_for_each_dev(dev, link)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
	} while (deadline);
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto dev_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto dev_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		if (ehc->i.action & ATA_EH_LPM)
			ata_link_for_each_dev(dev, link)
				ata_dev_enable_pm(dev, ap->pm_policy);

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

 dev_fail:
		nr_failed_devs++;
		ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_failed_devs)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
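
/*
 * Design note on the retry loop above: each pass re-runs reset,
 * revalidation, mode setting and head unparking for every link on the
 * port.  ata_eh_handle_dev_fail() decides per device whether a failure
 * burns a try, slows the device down or disables it, and a non-zero
 * nr_failed_devs simply sends the whole port around the loop again
 * until every link recovers or runs out of tries.
 */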

/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_link_for_each_dev(dev, &ap->link)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
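
/*
 * Illustrative sketch, not part of the original source: a typical LLD
 * does not call ata_eh_recover() directly.  It points ->error_handler
 * at ata_std_error_handler(), usually by inheriting from a generic ops
 * table, and overrides only the reset methods it needs (the foo_*
 * names are assumptions):
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= foo_hardreset,
 *	};
 */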

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */