2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * Copyright IBM Corp. 1999, 2009
11 #define KMSG_COMPONENT "dasd"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 #include <linux/kmod.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/ctype.h>
18 #include <linux/major.h>
19 #include <linux/slab.h>
20 #include <linux/buffer_head.h>
21 #include <linux/hdreg.h>
22 #include <linux/async.h>
24 #include <asm/ccwdev.h>
25 #include <asm/ebcdic.h>
26 #include <asm/idals.h>
30 #define PRINTK_HEADER "dasd:"
34 * SECTION: Constant definitions to be used within this file
36 #define DASD_CHANQ_MAX_SIZE 4
39 * SECTION: exported variables of dasd.c
41 debug_info_t *dasd_debug_area;
42 struct dasd_discipline *dasd_diag_discipline_pointer;
43 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
45 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
46 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
47 " Copyright 2000 IBM Corporation");
48 MODULE_SUPPORTED_DEVICE("dasd");
49 MODULE_LICENSE("GPL");
52 * SECTION: prototypes for static functions of dasd.c
54 static int dasd_alloc_queue(struct dasd_block *);
55 static void dasd_setup_queue(struct dasd_block *);
56 static void dasd_free_queue(struct dasd_block *);
57 static void dasd_flush_request_queue(struct dasd_block *);
58 static int dasd_flush_block_queue(struct dasd_block *);
59 static void dasd_device_tasklet(struct dasd_device *);
60 static void dasd_block_tasklet(struct dasd_block *);
61 static void do_kick_device(struct work_struct *);
62 static void do_restore_device(struct work_struct *);
63 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
64 static void dasd_device_timeout(unsigned long);
65 static void dasd_block_timeout(unsigned long);
68 * SECTION: Operations on the device structure.
70 static wait_queue_head_t dasd_init_waitq;
71 static wait_queue_head_t dasd_flush_wq;
72 static wait_queue_head_t generic_waitq;
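/*
 * dasd_init_waitq is woken from dasd_change_state once a device has reached
 * its target state (see dasd_enable_device), dasd_flush_wq is woken when a
 * CLEAR_PENDING request becomes CLEARED so the flush routines can continue,
 * and generic_waitq is the completion wait queue for the dasd_sleep_on*
 * functions.
 */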
75 * Allocate memory for a new device structure.
77 struct dasd_device *dasd_alloc_device(void)
79 struct dasd_device *device;
81 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
83 return ERR_PTR(-ENOMEM);
85 /* Get two pages for normal block device operations. */
86 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
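/* order-1 allocation: two contiguous pages, matching the PAGE_SIZE*2
 * chunklist below; GFP_DMA keeps the CCW memory below 2 GB so the channel
 * subsystem can address it. */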
87 if (!device->ccw_mem) {
89 return ERR_PTR(-ENOMEM);
91 /* Get one page for error recovery. */
92 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
93 if (!device->erp_mem) {
94 free_pages((unsigned long) device->ccw_mem, 1);
96 return ERR_PTR(-ENOMEM);
99 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
100 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
101 spin_lock_init(&device->mem_lock);
102 atomic_set(&device->tasklet_scheduled, 0);
103 tasklet_init(&device->tasklet,
104 (void (*)(unsigned long)) dasd_device_tasklet,
105 (unsigned long) device);
106 INIT_LIST_HEAD(&device->ccw_queue);
107 init_timer(&device->timer);
108 device->timer.function = dasd_device_timeout;
109 device->timer.data = (unsigned long) device;
110 INIT_WORK(&device->kick_work, do_kick_device);
111 INIT_WORK(&device->restore_device, do_restore_device);
112 device->state = DASD_STATE_NEW;
113 device->target = DASD_STATE_NEW;
119 * Free memory of a device structure.
121 void dasd_free_device(struct dasd_device *device)
123 kfree(device->private);
124 free_page((unsigned long) device->erp_mem);
125 free_pages((unsigned long) device->ccw_mem, 1);
130 * Allocate memory for a new block device structure.
132 struct dasd_block *dasd_alloc_block(void)
134 struct dasd_block *block;
136 block = kzalloc(sizeof(*block), GFP_ATOMIC);
138 return ERR_PTR(-ENOMEM);
139 /* open_count = 0 means device online but not in use */
140 atomic_set(&block->open_count, -1);
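/* Starting at -1 leaves room for the internal blkdev_get done during
 * partition scanning, which raises the count to 0 (online but unused);
 * see the max_count handling in dasd_generic_set_offline below. */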
142 spin_lock_init(&block->request_queue_lock);
143 atomic_set(&block->tasklet_scheduled, 0);
144 tasklet_init(&block->tasklet,
145 (void (*)(unsigned long)) dasd_block_tasklet,
146 (unsigned long) block);
147 INIT_LIST_HEAD(&block->ccw_queue);
148 spin_lock_init(&block->queue_lock);
149 init_timer(&block->timer);
150 block->timer.function = dasd_block_timeout;
151 block->timer.data = (unsigned long) block;
157 * Free memory of a block device structure.
159 void dasd_free_block(struct dasd_block *block)
165 * Make a new device known to the system.
167 static int dasd_state_new_to_known(struct dasd_device *device)
172 * As long as the device is not in state DASD_STATE_NEW we want to
173 * keep the reference count > 0.
175 dasd_get_device(device);
178 rc = dasd_alloc_queue(device->block);
180 dasd_put_device(device);
184 device->state = DASD_STATE_KNOWN;
189 * Let the system forget about a device.
191 static int dasd_state_known_to_new(struct dasd_device *device)
193 /* Disable extended error reporting for this device. */
194 dasd_eer_disable(device);
195 /* Forget the discipline information. */
196 if (device->discipline) {
197 if (device->discipline->uncheck_device)
198 device->discipline->uncheck_device(device);
199 module_put(device->discipline->owner);
201 device->discipline = NULL;
202 if (device->base_discipline)
203 module_put(device->base_discipline->owner);
204 device->base_discipline = NULL;
205 device->state = DASD_STATE_NEW;
208 dasd_free_queue(device->block);
210 /* Give up reference we took in dasd_state_new_to_known. */
211 dasd_put_device(device);
216 * Bring the device from the known to the basic state: allocate the gendisk and register the device debug area.
218 static int dasd_state_known_to_basic(struct dasd_device *device)
222 /* Allocate and register gendisk structure. */
224 rc = dasd_gendisk_alloc(device->block);
228 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
229 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
231 debug_register_view(device->debug_area, &debug_sprintf_view);
232 debug_set_level(device->debug_area, DBF_WARNING);
233 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
235 device->state = DASD_STATE_BASIC;
240 * Go back from basic to known: free the gendisk, terminate any running i/o and remove the debug area.
242 static int dasd_state_basic_to_known(struct dasd_device *device)
246 dasd_gendisk_free(device->block);
247 dasd_block_clear_timer(device->block);
249 rc = dasd_flush_device_queue(device);
252 dasd_device_clear_timer(device);
254 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
255 if (device->debug_area != NULL) {
256 debug_unregister(device->debug_area);
257 device->debug_area = NULL;
259 device->state = DASD_STATE_KNOWN;
264 * Do the initial analysis. The do_analysis function may return
265 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
266 * until the discipline decides to continue the startup sequence
267 * by calling the function dasd_change_state. The eckd discipline
268 * uses this to start a ccw that detects the format. The completion
269 * interrupt for this detection ccw uses the kernel event daemon to
270 * trigger the call to dasd_change_state. All this is done in the
271 * discipline code, see dasd_eckd.c.
272 * After the analysis ccw is done (do_analysis returned 0) the block device is set up.
274 * In case the analysis returns an error, the device setup is stopped
275 * (a fake disk was already added to allow formatting).
277 static int dasd_state_basic_to_ready(struct dasd_device *device)
280 struct dasd_block *block;
283 block = device->block;
284 /* make disk known with correct capacity */
286 if (block->base->discipline->do_analysis != NULL)
287 rc = block->base->discipline->do_analysis(block);
290 device->state = DASD_STATE_UNFMT;
293 dasd_setup_queue(block);
294 set_capacity(block->gdp,
295 block->blocks << block->s2b_shift);
296 device->state = DASD_STATE_READY;
297 rc = dasd_scan_partitions(block);
299 device->state = DASD_STATE_BASIC;
301 device->state = DASD_STATE_READY;
307 * Remove device from block device layer. Destroy dirty buffers.
308 * Forget format information. Check if the target level is basic
309 * and if it is, create a fake disk for formatting.
311 static int dasd_state_ready_to_basic(struct dasd_device *device)
315 device->state = DASD_STATE_BASIC;
317 struct dasd_block *block = device->block;
318 rc = dasd_flush_block_queue(block);
320 device->state = DASD_STATE_READY;
323 dasd_destroy_partitions(block);
324 dasd_flush_request_queue(block);
327 block->s2b_shift = 0;
335 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
337 device->state = DASD_STATE_BASIC;
342 * Make the device online and schedule the bottom half to start
343 * the requeueing of requests from the linux request queue to the ccw queue.
347 dasd_state_ready_to_online(struct dasd_device * device)
350 struct gendisk *disk;
351 struct disk_part_iter piter;
352 struct hd_struct *part;
354 if (device->discipline->ready_to_online) {
355 rc = device->discipline->ready_to_online(device);
359 device->state = DASD_STATE_ONLINE;
361 dasd_schedule_block_bh(device->block);
362 disk = device->block->bdev->bd_disk;
363 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
364 while ((part = disk_part_iter_next(&piter)))
365 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
366 disk_part_iter_exit(&piter);
372 * Stop the requeueing of requests again.
374 static int dasd_state_online_to_ready(struct dasd_device *device)
377 struct gendisk *disk;
378 struct disk_part_iter piter;
379 struct hd_struct *part;
381 if (device->discipline->online_to_ready) {
382 rc = device->discipline->online_to_ready(device);
386 device->state = DASD_STATE_READY;
388 disk = device->block->bdev->bd_disk;
389 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
390 while ((part = disk_part_iter_next(&piter)))
391 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
392 disk_part_iter_exit(&piter);
398 * Device startup state changes.
400 static int dasd_increase_state(struct dasd_device *device)
405 if (device->state == DASD_STATE_NEW &&
406 device->target >= DASD_STATE_KNOWN)
407 rc = dasd_state_new_to_known(device);
410 device->state == DASD_STATE_KNOWN &&
411 device->target >= DASD_STATE_BASIC)
412 rc = dasd_state_known_to_basic(device);
415 device->state == DASD_STATE_BASIC &&
416 device->target >= DASD_STATE_READY)
417 rc = dasd_state_basic_to_ready(device);
420 device->state == DASD_STATE_UNFMT &&
421 device->target > DASD_STATE_UNFMT)
425 device->state == DASD_STATE_READY &&
426 device->target >= DASD_STATE_ONLINE)
427 rc = dasd_state_ready_to_online(device);
433 * Device shutdown state changes.
435 static int dasd_decrease_state(struct dasd_device *device)
440 if (device->state == DASD_STATE_ONLINE &&
441 device->target <= DASD_STATE_READY)
442 rc = dasd_state_online_to_ready(device);
445 device->state == DASD_STATE_READY &&
446 device->target <= DASD_STATE_BASIC)
447 rc = dasd_state_ready_to_basic(device);
450 device->state == DASD_STATE_UNFMT &&
451 device->target <= DASD_STATE_BASIC)
452 rc = dasd_state_unfmt_to_basic(device);
455 device->state == DASD_STATE_BASIC &&
456 device->target <= DASD_STATE_KNOWN)
457 rc = dasd_state_basic_to_known(device);
460 device->state == DASD_STATE_KNOWN &&
461 device->target <= DASD_STATE_NEW)
462 rc = dasd_state_known_to_new(device);
468 * This is the main startup/shutdown routine.
470 static void dasd_change_state(struct dasd_device *device)
474 if (device->state == device->target)
475 /* Already where we want to go today... */
477 if (device->state < device->target)
478 rc = dasd_increase_state(device);
480 rc = dasd_decrease_state(device);
484 device->target = device->state;
486 if (device->state == device->target) {
487 wake_up(&dasd_init_waitq);
488 dasd_put_device(device);
491 /* let user-space know that the device status changed */
492 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
496 * Kick starter for devices that did not complete the startup/shutdown
497 * procedure or were sleeping because of a pending state.
498 * dasd_kick_device will schedule a call to do_kick_device to the kernel event daemon.
501 static void do_kick_device(struct work_struct *work)
503 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
504 dasd_change_state(device);
505 dasd_schedule_device_bh(device);
506 dasd_put_device(device);
509 void dasd_kick_device(struct dasd_device *device)
511 dasd_get_device(device);
512 /* queue call to dasd_kick_device to the kernel event daemon. */
513 schedule_work(&device->kick_work);
517 * dasd_restore_device will schedule a call to do_restore_device to the kernel event daemon.
520 static void do_restore_device(struct work_struct *work)
522 struct dasd_device *device = container_of(work, struct dasd_device,
524 device->cdev->drv->restore(device->cdev);
525 dasd_put_device(device);
528 void dasd_restore_device(struct dasd_device *device)
530 dasd_get_device(device);
531 /* queue call to dasd_restore_device to the kernel event daemon. */
532 schedule_work(&device->restore_device);
536 * Set the target state for a device and start the state change.
538 void dasd_set_target_state(struct dasd_device *device, int target)
540 dasd_get_device(device);
541 /* If we are in probeonly mode stop at DASD_STATE_READY. */
542 if (dasd_probeonly && target > DASD_STATE_READY)
543 target = DASD_STATE_READY;
544 if (device->target != target) {
545 if (device->state == target) {
546 wake_up(&dasd_init_waitq);
547 dasd_put_device(device);
549 device->target = target;
551 if (device->state != device->target)
552 dasd_change_state(device);
556 * Enable a device and wait until it has reached its target state.
558 static inline int _wait_for_device(struct dasd_device *device)
560 return (device->state == device->target);
563 void dasd_enable_device(struct dasd_device *device)
565 dasd_set_target_state(device, DASD_STATE_ONLINE);
566 if (device->state <= DASD_STATE_KNOWN)
567 /* No discipline for device found. */
568 dasd_set_target_state(device, DASD_STATE_NEW);
569 /* Now wait for the devices to come up. */
570 wait_event(dasd_init_waitq, _wait_for_device(device));
574 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
576 #ifdef CONFIG_DASD_PROFILE
578 struct dasd_profile_info_t dasd_global_profile;
579 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
582 * Increments counter in global and local profiling structures.
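* The value is binned logarithmically: index ends up as the smallest i for
* which value < 2^(i+2), so bucket 0 counts values below 4, bucket 1 values
* below 8, and so on (capped at index 31).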
584 #define dasd_profile_counter(value, counter, block) \
587 for (index = 0; index < 31 && value >> (2+index); index++); \
588 dasd_global_profile.counter[index]++; \
589 block->profile.counter[index]++; \
593 * Add profiling information for cqr before execution.
595 static void dasd_profile_start(struct dasd_block *block,
596 struct dasd_ccw_req *cqr,
600 unsigned int counter;
602 if (dasd_profile_level != DASD_PROFILE_ON)
605 /* count the length of the chanq for statistics */
607 list_for_each(l, &block->ccw_queue)
610 dasd_global_profile.dasd_io_nr_req[counter]++;
611 block->profile.dasd_io_nr_req[counter]++;
615 * Add profiling information for cqr after execution.
617 static void dasd_profile_end(struct dasd_block *block,
618 struct dasd_ccw_req *cqr,
621 long strtime, irqtime, endtime, tottime; /* in microseconds */
622 long tottimeps, sectors;
624 if (dasd_profile_level != DASD_PROFILE_ON)
627 sectors = blk_rq_sectors(req);
628 if (!cqr->buildclk || !cqr->startclk ||
629 !cqr->stopclk || !cqr->endclk ||
633 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
634 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
635 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
636 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
637 tottimeps = tottime / sectors;
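/* buildclk/startclk/stopclk/endclk are s390 TOD clock values; shifting
 * right by 12 converts them to microseconds (TOD bit 51 ticks once per
 * microsecond). */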
639 if (!dasd_global_profile.dasd_io_reqs)
640 memset(&dasd_global_profile, 0,
641 sizeof(struct dasd_profile_info_t));
642 dasd_global_profile.dasd_io_reqs++;
643 dasd_global_profile.dasd_io_sects += sectors;
645 if (!block->profile.dasd_io_reqs)
646 memset(&block->profile, 0,
647 sizeof(struct dasd_profile_info_t));
648 block->profile.dasd_io_reqs++;
649 block->profile.dasd_io_sects += sectors;
651 dasd_profile_counter(sectors, dasd_io_secs, block);
652 dasd_profile_counter(tottime, dasd_io_times, block);
653 dasd_profile_counter(tottimeps, dasd_io_timps, block);
654 dasd_profile_counter(strtime, dasd_io_time1, block);
655 dasd_profile_counter(irqtime, dasd_io_time2, block);
656 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
657 dasd_profile_counter(endtime, dasd_io_time3, block);
660 #define dasd_profile_start(block, cqr, req) do {} while (0)
661 #define dasd_profile_end(block, cqr, req) do {} while (0)
662 #endif /* CONFIG_DASD_PROFILE */
665 * Allocate memory for a channel program with 'cplength' channel
666 * command words and 'datasize' additional space. There are two
667 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
668 * memory and 2) dasd_smalloc_request uses the static ccw memory
669 * that gets allocated for each device.
671 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
673 struct dasd_device *device)
675 struct dasd_ccw_req *cqr;
678 BUG_ON(datasize > PAGE_SIZE ||
679 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
681 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
683 return ERR_PTR(-ENOMEM);
686 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
687 GFP_ATOMIC | GFP_DMA);
688 if (cqr->cpaddr == NULL) {
690 return ERR_PTR(-ENOMEM);
695 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
696 if (cqr->data == NULL) {
699 return ERR_PTR(-ENOMEM);
703 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
704 dasd_get_device(device);
708 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
710 struct dasd_device *device)
713 struct dasd_ccw_req *cqr;
718 BUG_ON(datasize > PAGE_SIZE ||
719 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
721 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
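/* round the struct size up to a multiple of 8 so that the CCWs placed
 * directly behind it keep their required doubleword alignment */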
723 size += cplength * sizeof(struct ccw1);
726 spin_lock_irqsave(&device->mem_lock, flags);
727 cqr = (struct dasd_ccw_req *)
728 dasd_alloc_chunk(&device->ccw_chunks, size);
729 spin_unlock_irqrestore(&device->mem_lock, flags);
731 return ERR_PTR(-ENOMEM);
732 memset(cqr, 0, sizeof(struct dasd_ccw_req));
733 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
736 cqr->cpaddr = (struct ccw1 *) data;
737 data += cplength*sizeof(struct ccw1);
738 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
743 memset(cqr->data, 0, datasize);
746 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
747 dasd_get_device(device);
752 * Free memory of a channel program. This function needs to free all the
753 * idal lists that might have been created by dasd_set_cda and the
754 * struct dasd_ccw_req itself.
756 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
761 /* Clear any idals used for the request. */
764 clear_normalized_cda(ccw);
765 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
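/* the loop above ends at the last CCW of the chain, i.e. the first one
 * without command or data chaining set */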
770 dasd_put_device(device);
773 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
777 spin_lock_irqsave(&device->mem_lock, flags);
778 dasd_free_chunk(&device->ccw_chunks, cqr);
779 spin_unlock_irqrestore(&device->mem_lock, flags);
780 dasd_put_device(device);
784 * Check discipline magic in cqr.
786 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
788 struct dasd_device *device;
792 device = cqr->startdev;
793 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
794 DBF_DEV_EVENT(DBF_WARNING, device,
795 " dasd_ccw_req 0x%08x magic doesn't match"
796 " discipline 0x%08x",
798 *(unsigned int *) device->discipline->name);
805 * Terminate the current i/o and set the request to clear_pending.
806 * Timer keeps device running.
807 * ccw_device_clear can fail if the i/o subsystem is in a bad mood.
810 int dasd_term_IO(struct dasd_ccw_req *cqr)
812 struct dasd_device *device;
814 char errorstring[ERRORLENGTH];
817 rc = dasd_check_cqr(cqr);
821 device = (struct dasd_device *) cqr->startdev;
822 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
823 rc = ccw_device_clear(device->cdev, (long) cqr);
825 case 0: /* termination successful */
827 cqr->status = DASD_CQR_CLEAR_PENDING;
828 cqr->stopclk = get_clock();
830 DBF_DEV_EVENT(DBF_DEBUG, device,
831 "terminate cqr %p successful",
835 DBF_DEV_EVENT(DBF_ERR, device, "%s",
836 "device gone, retry");
839 DBF_DEV_EVENT(DBF_ERR, device, "%s",
844 DBF_DEV_EVENT(DBF_ERR, device, "%s",
845 "device busy, retry later");
848 /* internal error 10 - unknown rc*/
849 snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
850 dev_err(&device->cdev->dev, "An error occurred in the "
851 "DASD device driver, reason=%s\n", errorstring);
857 dasd_schedule_device_bh(device);
862 * Start the i/o. This start_IO can fail if the channel is really busy.
863 * In that case set up a timer to start the request later.
865 int dasd_start_IO(struct dasd_ccw_req *cqr)
867 struct dasd_device *device;
869 char errorstring[ERRORLENGTH];
872 rc = dasd_check_cqr(cqr);
877 device = (struct dasd_device *) cqr->startdev;
878 if (cqr->retries < 0) {
879 /* internal error 14 - start_IO ran out of retries */
880 sprintf(errorstring, "14 %p", cqr);
881 dev_err(&device->cdev->dev, "An error occurred in the DASD "
882 "device driver, reason=%s\n", errorstring);
883 cqr->status = DASD_CQR_ERROR;
886 cqr->startclk = get_clock();
887 cqr->starttime = jiffies;
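/* cpmode == 1 means the channel program was built in transport mode
 * (TCWs), so it has to be started with the _tm variant below */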
889 if (cqr->cpmode == 1) {
890 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
891 (long) cqr, cqr->lpm);
893 rc = ccw_device_start(device->cdev, cqr->cpaddr,
894 (long) cqr, cqr->lpm, 0);
898 cqr->status = DASD_CQR_IN_IO;
901 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
902 "start_IO: device busy, retry later");
905 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
906 "start_IO: request timeout, retry later");
909 /* -EACCES indicates that the request used only a
910 * subset of the available paths and all these paths are gone.
912 * Do a retry with all available paths.
914 cqr->lpm = LPM_ANYPATH;
915 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
916 "start_IO: selected pathes gone,"
917 " retry on all pathes");
920 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
921 "start_IO: -ENODEV device gone, retry");
924 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
925 "start_IO: -EIO device gone, retry");
928 /* most likely caused in power management context */
929 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
930 "start_IO: -EINVAL device currently "
934 /* internal error 11 - unknown rc */
935 snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
936 dev_err(&device->cdev->dev,
937 "An error occurred in the DASD device driver, "
938 "reason=%s\n", errorstring);
947 * Timeout function for dasd devices. This is used for different purposes
948 * 1) missing interrupt handler for normal operation
949 * 2) delayed start of request where start_IO failed with -EBUSY
950 * 3) timeout for missing state change interrupts
951 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
952 * DASD_CQR_QUEUED for 2) and 3).
954 static void dasd_device_timeout(unsigned long ptr)
957 struct dasd_device *device;
959 device = (struct dasd_device *) ptr;
960 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
961 /* re-activate request queue */
962 device->stopped &= ~DASD_STOPPED_PENDING;
963 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
964 dasd_schedule_device_bh(device);
968 * Setup timeout for a device in jiffies.
970 void dasd_device_set_timer(struct dasd_device *device, int expires)
973 del_timer(&device->timer);
975 mod_timer(&device->timer, jiffies + expires);
979 * Clear timeout for a device.
981 void dasd_device_clear_timer(struct dasd_device *device)
983 del_timer(&device->timer);
986 static void dasd_handle_killed_request(struct ccw_device *cdev,
987 unsigned long intparm)
989 struct dasd_ccw_req *cqr;
990 struct dasd_device *device;
994 cqr = (struct dasd_ccw_req *) intparm;
995 if (cqr->status != DASD_CQR_IN_IO) {
997 "invalid status in handle_killed_request: "
998 "bus_id %s, status %02x",
999 dev_name(&cdev->dev), cqr->status);
1003 device = (struct dasd_device *) cqr->startdev;
1004 if (device == NULL ||
1005 device != dasd_device_from_cdev_locked(cdev) ||
1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1007 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
1008 "bus_id %s", dev_name(&cdev->dev));
1012 /* Schedule request to be retried. */
1013 cqr->status = DASD_CQR_QUEUED;
1015 dasd_device_clear_timer(device);
1016 dasd_schedule_device_bh(device);
1017 dasd_put_device(device);
1020 void dasd_generic_handle_state_change(struct dasd_device *device)
1022 /* First of all start sense subsystem status request. */
1023 dasd_eer_snss(device);
1025 device->stopped &= ~DASD_STOPPED_PENDING;
1026 dasd_schedule_device_bh(device);
1028 dasd_schedule_block_bh(device->block);
1032 * Interrupt handler for "normal" ssch-io based dasd devices.
1034 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1037 struct dasd_ccw_req *cqr, *next;
1038 struct dasd_device *device;
1039 unsigned long long now;
1043 switch (PTR_ERR(irb)) {
1047 DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
1048 __func__, dev_name(&cdev->dev));
1051 DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
1052 __func__, dev_name(&cdev->dev), PTR_ERR(irb));
1054 dasd_handle_killed_request(cdev, intparm);
1060 /* check for unsolicited interrupts */
1061 cqr = (struct dasd_ccw_req *) intparm;
1062 if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
1063 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1064 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
1065 if (cqr && cqr->status == DASD_CQR_IN_IO)
1066 cqr->status = DASD_CQR_QUEUED;
1067 device = dasd_device_from_cdev_locked(cdev);
1068 if (!IS_ERR(device)) {
1069 dasd_device_clear_timer(device);
1070 device->discipline->handle_unsolicited_interrupt(device,
1072 dasd_put_device(device);
1077 device = (struct dasd_device *) cqr->startdev;
1079 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1080 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
1081 "bus_id %s", dev_name(&cdev->dev));
1085 /* Check for clear pending */
1086 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1087 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1088 cqr->status = DASD_CQR_CLEARED;
1089 dasd_device_clear_timer(device);
1090 wake_up(&dasd_flush_wq);
1091 dasd_schedule_device_bh(device);
1095 /* check status - the request might have been killed by dyn detach */
1096 if (cqr->status != DASD_CQR_IN_IO) {
1097 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1098 "status %02x", dev_name(&cdev->dev), cqr->status);
1104 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1105 scsw_cstat(&irb->scsw) == 0) {
1106 /* request was completed successfully */
1107 cqr->status = DASD_CQR_SUCCESS;
1109 /* Start first request on queue if possible -> fast_io. */
1110 if (cqr->devlist.next != &device->ccw_queue) {
1111 next = list_entry(cqr->devlist.next,
1112 struct dasd_ccw_req, devlist);
1114 } else { /* error */
1115 memcpy(&cqr->irb, irb, sizeof(struct irb));
1116 /* log sense for every failed I/O to s390 debugfeature */
1117 dasd_log_sense_dbf(cqr, irb);
1118 if (device->features & DASD_FEATURE_ERPLOG) {
1119 dasd_log_sense(cqr, irb);
1123 * If we don't want complex ERP for this request, then just
1124 * reset this and retry it in the fastpath
1126 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1128 if (cqr->lpm == LPM_ANYPATH)
1129 DBF_DEV_EVENT(DBF_DEBUG, device,
1130 "default ERP in fastpath "
1131 "(%i retries left)",
1133 cqr->lpm = LPM_ANYPATH;
1134 cqr->status = DASD_CQR_QUEUED;
1137 cqr->status = DASD_CQR_ERROR;
1139 if (next && (next->status == DASD_CQR_QUEUED) &&
1140 (!device->stopped)) {
1141 if (device->discipline->start_IO(next) == 0)
1142 expires = next->expires;
1145 dasd_device_set_timer(device, expires);
1147 dasd_device_clear_timer(device);
1148 dasd_schedule_device_bh(device);
1152 * If we have an error on a dasd_block layer request then we cancel
1153 * and return all further requests from the same dasd_block as well.
1155 static void __dasd_device_recovery(struct dasd_device *device,
1156 struct dasd_ccw_req *ref_cqr)
1158 struct list_head *l, *n;
1159 struct dasd_ccw_req *cqr;
1162 * only requeue requests that came from the dasd_block layer
1164 if (!ref_cqr->block)
1167 list_for_each_safe(l, n, &device->ccw_queue) {
1168 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1169 if (cqr->status == DASD_CQR_QUEUED &&
1170 ref_cqr->block == cqr->block) {
1171 cqr->status = DASD_CQR_CLEARED;
1177 * Remove those ccw requests from the queue that need to be returned
1178 * to the upper layer.
1180 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1181 struct list_head *final_queue)
1183 struct list_head *l, *n;
1184 struct dasd_ccw_req *cqr;
1186 /* Process request with final status. */
1187 list_for_each_safe(l, n, &device->ccw_queue) {
1188 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1190 /* Stop list processing at the first non-final request. */
1191 if (cqr->status == DASD_CQR_QUEUED ||
1192 cqr->status == DASD_CQR_IN_IO ||
1193 cqr->status == DASD_CQR_CLEAR_PENDING)
1195 if (cqr->status == DASD_CQR_ERROR) {
1196 __dasd_device_recovery(device, cqr);
1198 /* Rechain finished requests to final queue */
1199 list_move_tail(&cqr->devlist, final_queue);
1204 * the cqrs from the final queue are returned to the upper layer
1205 * by setting a dasd_block state and calling the callback function
1207 static void __dasd_device_process_final_queue(struct dasd_device *device,
1208 struct list_head *final_queue)
1210 struct list_head *l, *n;
1211 struct dasd_ccw_req *cqr;
1212 struct dasd_block *block;
1213 void (*callback)(struct dasd_ccw_req *, void *data);
1214 void *callback_data;
1215 char errorstring[ERRORLENGTH];
1217 list_for_each_safe(l, n, final_queue) {
1218 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1219 list_del_init(&cqr->devlist);
1221 callback = cqr->callback;
1222 callback_data = cqr->callback_data;
1224 spin_lock_bh(&block->queue_lock);
1225 switch (cqr->status) {
1226 case DASD_CQR_SUCCESS:
1227 cqr->status = DASD_CQR_DONE;
1229 case DASD_CQR_ERROR:
1230 cqr->status = DASD_CQR_NEED_ERP;
1232 case DASD_CQR_CLEARED:
1233 cqr->status = DASD_CQR_TERMINATED;
1236 /* internal error 12 - wrong cqr status*/
1237 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1238 dev_err(&device->cdev->dev,
1239 "An error occurred in the DASD device driver, "
1240 "reason=%s\n", errorstring);
1243 if (cqr->callback != NULL)
1244 (callback)(cqr, callback_data);
1246 spin_unlock_bh(&block->queue_lock);
1251 * Take a look at the first request on the ccw queue and check
1252 * if it reached its expire time. If so, terminate the IO.
1254 static void __dasd_device_check_expire(struct dasd_device *device)
1256 struct dasd_ccw_req *cqr;
1258 if (list_empty(&device->ccw_queue))
1260 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1261 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1262 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1263 if (device->discipline->term_IO(cqr) != 0) {
1264 /* Hmpf, try again in 5 sec */
1265 dev_err(&device->cdev->dev,
1266 "cqr %p timed out (%is) but cannot be "
1267 "ended, retrying in 5 s\n",
1268 cqr, (cqr->expires/HZ));
1269 cqr->expires += 5*HZ;
1270 dasd_device_set_timer(device, 5*HZ);
1272 dev_err(&device->cdev->dev,
1273 "cqr %p timed out (%is), %i retries "
1274 "remaining\n", cqr, (cqr->expires/HZ),
1281 * Take a look at the first request on the ccw queue and check
1282 * if it needs to be started.
1284 static void __dasd_device_start_head(struct dasd_device *device)
1286 struct dasd_ccw_req *cqr;
1289 if (list_empty(&device->ccw_queue))
1291 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1292 if (cqr->status != DASD_CQR_QUEUED)
1294 /* when device is stopped, return request to previous layer */
1295 if (device->stopped) {
1296 cqr->status = DASD_CQR_CLEARED;
1297 dasd_schedule_device_bh(device);
1301 rc = device->discipline->start_IO(cqr);
1303 dasd_device_set_timer(device, cqr->expires);
1304 else if (rc == -EACCES) {
1305 dasd_schedule_device_bh(device);
1307 /* Hmpf, try again in 1/2 sec */
1308 dasd_device_set_timer(device, 50);
1312 * Go through all requests on the dasd_device request queue,
1313 * terminate them on the cdev if necessary, and return them to the
1314 * submitting layer via callback.
1316 * Make sure that all 'submitting layers' still exist when
1317 * this function is called! In other words, when 'device' is a base
1318 * device then all block layer requests must have been removed beforehand
1319 * via dasd_flush_block_queue.
1321 int dasd_flush_device_queue(struct dasd_device *device)
1323 struct dasd_ccw_req *cqr, *n;
1325 struct list_head flush_queue;
1327 INIT_LIST_HEAD(&flush_queue);
1328 spin_lock_irq(get_ccwdev_lock(device->cdev));
1330 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1331 /* Check status and move request to flush_queue */
1332 switch (cqr->status) {
1333 case DASD_CQR_IN_IO:
1334 rc = device->discipline->term_IO(cqr);
1336 /* unable to terminate request */
1337 dev_err(&device->cdev->dev,
1338 "Flushing the DASD request queue "
1339 "failed for request %p\n", cqr);
1340 /* stop flush processing */
1344 case DASD_CQR_QUEUED:
1345 cqr->stopclk = get_clock();
1346 cqr->status = DASD_CQR_CLEARED;
1348 default: /* no need to modify the others */
1351 list_move_tail(&cqr->devlist, &flush_queue);
1354 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1356 * After this point all requests must be in state CLEAR_PENDING,
1357 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1358 * one of the others.
1360 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1361 wait_event(dasd_flush_wq,
1362 (cqr->status != DASD_CQR_CLEAR_PENDING));
1364 * Now set each request back to TERMINATED, DONE or NEED_ERP
1365 * and call the callback function of flushed requests
1367 __dasd_device_process_final_queue(device, &flush_queue);
1372 * Acquire the device lock and process queues for the device.
1374 static void dasd_device_tasklet(struct dasd_device *device)
1376 struct list_head final_queue;
1378 atomic_set (&device->tasklet_scheduled, 0);
1379 INIT_LIST_HEAD(&final_queue);
1380 spin_lock_irq(get_ccwdev_lock(device->cdev));
1381 /* Check expire time of first request on the ccw queue. */
1382 __dasd_device_check_expire(device);
1383 /* find final requests on ccw queue */
1384 __dasd_device_process_ccw_queue(device, &final_queue);
1385 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1386 /* Now call the callback function of requests with final status */
1387 __dasd_device_process_final_queue(device, &final_queue);
1388 spin_lock_irq(get_ccwdev_lock(device->cdev));
1389 /* Now check if the head of the ccw queue needs to be started. */
1390 __dasd_device_start_head(device);
1391 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1392 dasd_put_device(device);
1396 * Schedules a call to dasd_device_tasklet over the device tasklet.
1398 void dasd_schedule_device_bh(struct dasd_device *device)
1400 /* Protect against rescheduling. */
1401 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1403 dasd_get_device(device);
1404 tasklet_hi_schedule(&device->tasklet);
1408 * Queue a request to the head of the device ccw_queue.
1409 * Start the I/O if possible.
1411 void dasd_add_request_head(struct dasd_ccw_req *cqr)
1413 struct dasd_device *device;
1414 unsigned long flags;
1416 device = cqr->startdev;
1417 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1418 cqr->status = DASD_CQR_QUEUED;
1419 list_add(&cqr->devlist, &device->ccw_queue);
1420 /* let the bh start the request to keep them in order */
1421 dasd_schedule_device_bh(device);
1422 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1426 * Queue a request to the tail of the device ccw_queue.
1427 * Start the I/O if possible.
1429 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1431 struct dasd_device *device;
1432 unsigned long flags;
1434 device = cqr->startdev;
1435 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1436 cqr->status = DASD_CQR_QUEUED;
1437 list_add_tail(&cqr->devlist, &device->ccw_queue);
1438 /* let the bh start the request to keep them in order */
1439 dasd_schedule_device_bh(device);
1440 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1444 * Wakeup helper for the 'sleep_on' functions.
1446 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1448 wake_up((wait_queue_head_t *) data);
1451 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1453 struct dasd_device *device;
1456 device = cqr->startdev;
1457 spin_lock_irq(get_ccwdev_lock(device->cdev));
1458 rc = ((cqr->status == DASD_CQR_DONE ||
1459 cqr->status == DASD_CQR_NEED_ERP ||
1460 cqr->status == DASD_CQR_TERMINATED) &&
1461 list_empty(&cqr->devlist));
1462 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1467 * Queue a request to the tail of the device ccw_queue and wait for its completion.
1470 int dasd_sleep_on(struct dasd_ccw_req *cqr)
1472 struct dasd_device *device;
1475 device = cqr->startdev;
1477 cqr->callback = dasd_wakeup_cb;
1478 cqr->callback_data = (void *) &generic_waitq;
1479 dasd_add_request_tail(cqr);
1480 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1482 if (cqr->status == DASD_CQR_DONE)
1484 else if (cqr->intrc)
1492 * Queue a request to the tail of the device ccw_queue and wait
1493 * interruptibly for its completion.
1495 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1497 struct dasd_device *device;
1500 device = cqr->startdev;
1501 cqr->callback = dasd_wakeup_cb;
1502 cqr->callback_data = (void *) &generic_waitq;
1503 dasd_add_request_tail(cqr);
1504 rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
1505 if (rc == -ERESTARTSYS) {
1506 dasd_cancel_req(cqr);
1507 /* wait (non-interruptible) for final status */
1508 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1512 if (cqr->status == DASD_CQR_DONE)
1514 else if (cqr->intrc)
1522 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1523 * for eckd devices) the currently running request has to be terminated
1524 * and be put back to status queued, before the special request is added
1525 * to the head of the queue. Then the special request is waited on normally.
1527 static inline int _dasd_term_running_cqr(struct dasd_device *device)
1529 struct dasd_ccw_req *cqr;
1531 if (list_empty(&device->ccw_queue))
1533 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1534 return device->discipline->term_IO(cqr);
1537 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1539 struct dasd_device *device;
1542 device = cqr->startdev;
1543 spin_lock_irq(get_ccwdev_lock(device->cdev));
1544 rc = _dasd_term_running_cqr(device);
1546 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1550 cqr->callback = dasd_wakeup_cb;
1551 cqr->callback_data = (void *) &generic_waitq;
1552 cqr->status = DASD_CQR_QUEUED;
1553 list_add(&cqr->devlist, &device->ccw_queue);
1555 /* let the bh start the request to keep them in order */
1556 dasd_schedule_device_bh(device);
1558 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1560 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1562 if (cqr->status == DASD_CQR_DONE)
1564 else if (cqr->intrc)
1572 * Cancels a request that was started with dasd_sleep_on_req.
1573 * This is useful to timeout requests. The request will be
1574 * terminated if it is currently in i/o.
1575 * Returns 1 if the request has been terminated,
1576 * 0 if there was no need to terminate the request (not started yet),
1577 * or a negative error code if termination failed.
1578 * Cancellation of a request is an asynchronous operation! The calling
1579 * function has to wait until the request is properly returned via callback.
1581 int dasd_cancel_req(struct dasd_ccw_req *cqr)
1583 struct dasd_device *device = cqr->startdev;
1584 unsigned long flags;
1588 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1589 switch (cqr->status) {
1590 case DASD_CQR_QUEUED:
1591 /* request was not started - just set to cleared */
1592 cqr->status = DASD_CQR_CLEARED;
1594 case DASD_CQR_IN_IO:
1595 /* request in IO - terminate IO and release again */
1596 rc = device->discipline->term_IO(cqr);
1598 dev_err(&device->cdev->dev,
1599 "Cancelling request %p failed with rc=%d\n",
1602 cqr->stopclk = get_clock();
1606 default: /* already finished or clear pending - do nothing */
1609 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1610 dasd_schedule_device_bh(device);
1616 * SECTION: Operations of the dasd_block layer.
1620 * Timeout function for dasd_block. This is used when the block layer
1621 * is waiting for something that may not come reliably (e.g. a state change interrupt).
1624 static void dasd_block_timeout(unsigned long ptr)
1626 unsigned long flags;
1627 struct dasd_block *block;
1629 block = (struct dasd_block *) ptr;
1630 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1631 /* re-activate request queue */
1632 block->base->stopped &= ~DASD_STOPPED_PENDING;
1633 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1634 dasd_schedule_block_bh(block);
1638 * Setup timeout for a dasd_block in jiffies.
1640 void dasd_block_set_timer(struct dasd_block *block, int expires)
1643 del_timer(&block->timer);
1645 mod_timer(&block->timer, jiffies + expires);
1649 * Clear timeout for a dasd_block.
1651 void dasd_block_clear_timer(struct dasd_block *block)
1653 del_timer(&block->timer);
1657 * Process finished error recovery ccw.
1659 static inline void __dasd_block_process_erp(struct dasd_block *block,
1660 struct dasd_ccw_req *cqr)
1662 dasd_erp_fn_t erp_fn;
1663 struct dasd_device *device = block->base;
1665 if (cqr->status == DASD_CQR_DONE)
1666 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1668 dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1669 erp_fn = device->discipline->erp_postaction(cqr);
1674 * Fetch requests from the block device queue.
1676 static void __dasd_process_request_queue(struct dasd_block *block)
1678 struct request_queue *queue;
1679 struct request *req;
1680 struct dasd_ccw_req *cqr;
1681 struct dasd_device *basedev;
1682 unsigned long flags;
1683 queue = block->request_queue;
1684 basedev = block->base;
1685 /* No queue ? Then there is nothing to do. */
1690 * We requeue requests from the block device queue to the ccw
1691 * queue only in two states. In state DASD_STATE_READY the
1692 * partition detection is done and we need to requeue requests
1693 * for that. State DASD_STATE_ONLINE is normal block device operation.
1696 if (basedev->state < DASD_STATE_READY) {
1697 while ((req = blk_fetch_request(block->request_queue)))
1698 __blk_end_request_all(req, -EIO);
1701 /* Now we try to fetch requests from the request queue */
1702 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1703 if (basedev->features & DASD_FEATURE_READONLY &&
1704 rq_data_dir(req) == WRITE) {
1705 DBF_DEV_EVENT(DBF_ERR, basedev,
1706 "Rejecting write request %p",
1708 blk_start_request(req);
1709 __blk_end_request_all(req, -EIO);
1712 cqr = basedev->discipline->build_cp(basedev, block, req);
1714 if (PTR_ERR(cqr) == -EBUSY)
1715 break; /* normal end condition */
1716 if (PTR_ERR(cqr) == -ENOMEM)
1717 break; /* terminate request queue loop */
1718 if (PTR_ERR(cqr) == -EAGAIN) {
1720 * The current request cannot be built right
1721 * now, we have to try later. If this request
1722 * is the head-of-queue we stop the device for a while.
1725 if (!list_empty(&block->ccw_queue))
1727 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1728 basedev->stopped |= DASD_STOPPED_PENDING;
1729 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1730 dasd_block_set_timer(block, HZ/2);
1733 DBF_DEV_EVENT(DBF_ERR, basedev,
1734 "CCW creation failed (rc=%ld) "
1737 blk_start_request(req);
1738 __blk_end_request_all(req, -EIO);
1742 * Note: callback is set to dasd_return_cqr_cb in
1743 * __dasd_block_start_head to cover erp requests as well
1745 cqr->callback_data = (void *) req;
1746 cqr->status = DASD_CQR_FILLED;
1747 blk_start_request(req);
1748 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1749 dasd_profile_start(block, cqr, req);
1753 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1755 struct request *req;
1759 req = (struct request *) cqr->callback_data;
1760 dasd_profile_end(cqr->block, cqr, req);
1761 status = cqr->block->base->discipline->free_cp(cqr, req);
1763 error = status ? status : -EIO;
1764 __blk_end_request_all(req, error);
1768 * Process ccw request queue.
1770 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1771 struct list_head *final_queue)
1773 struct list_head *l, *n;
1774 struct dasd_ccw_req *cqr;
1775 dasd_erp_fn_t erp_fn;
1776 unsigned long flags;
1777 struct dasd_device *base = block->base;
1780 /* Process request with final status. */
1781 list_for_each_safe(l, n, &block->ccw_queue) {
1782 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1783 if (cqr->status != DASD_CQR_DONE &&
1784 cqr->status != DASD_CQR_FAILED &&
1785 cqr->status != DASD_CQR_NEED_ERP &&
1786 cqr->status != DASD_CQR_TERMINATED)
1789 if (cqr->status == DASD_CQR_TERMINATED) {
1790 base->discipline->handle_terminated_request(cqr);
1794 /* Process requests that may be recovered */
1795 if (cqr->status == DASD_CQR_NEED_ERP) {
1796 erp_fn = base->discipline->erp_action(cqr);
1801 /* log sense for fatal error */
1802 if (cqr->status == DASD_CQR_FAILED) {
1803 dasd_log_sense(cqr, &cqr->irb);
1806 /* First of all call extended error reporting. */
1807 if (dasd_eer_enabled(base) &&
1808 cqr->status == DASD_CQR_FAILED) {
1809 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1811 /* restart request */
1812 cqr->status = DASD_CQR_FILLED;
1814 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1815 base->stopped |= DASD_STOPPED_QUIESCE;
1816 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1821 /* Process finished ERP request. */
1823 __dasd_block_process_erp(block, cqr);
1827 /* Rechain finished requests to final queue */
1828 cqr->endclk = get_clock();
1829 list_move_tail(&cqr->blocklist, final_queue);
1833 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1835 dasd_schedule_block_bh(cqr->block);
1838 static void __dasd_block_start_head(struct dasd_block *block)
1840 struct dasd_ccw_req *cqr;
1842 if (list_empty(&block->ccw_queue))
1844 /* We always begin with the first requests on the queue, as some
1845 * of the previously started requests have to be enqueued on a
1846 * dasd_device again for error recovery.
1848 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1849 if (cqr->status != DASD_CQR_FILLED)
1851 /* Non-temporary stop condition will trigger fail fast */
1852 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1853 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1854 (!dasd_eer_enabled(block->base))) {
1855 cqr->status = DASD_CQR_FAILED;
1856 dasd_schedule_block_bh(block);
1859 /* Don't try to start requests if device is stopped */
1860 if (block->base->stopped)
1863 /* just a fail safe check, should not happen */
1865 cqr->startdev = block->base;
1867 /* make sure that the requests we submit find their way back */
1868 cqr->callback = dasd_return_cqr_cb;
1870 dasd_add_request_tail(cqr);
1875 * Central dasd_block layer routine. Takes requests from the generic
1876 * block layer request queue, creates ccw requests, enqueues them on
1877 * a dasd_device and processes ccw requests that have been returned.
1879 static void dasd_block_tasklet(struct dasd_block *block)
1881 struct list_head final_queue;
1882 struct list_head *l, *n;
1883 struct dasd_ccw_req *cqr;
1885 atomic_set(&block->tasklet_scheduled, 0);
1886 INIT_LIST_HEAD(&final_queue);
1887 spin_lock(&block->queue_lock);
1888 /* Finish off requests on ccw queue */
1889 __dasd_process_block_ccw_queue(block, &final_queue);
1890 spin_unlock(&block->queue_lock);
1891 /* Now call the callback function of requests with final status */
1892 spin_lock_irq(&block->request_queue_lock);
1893 list_for_each_safe(l, n, &final_queue) {
1894 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1895 list_del_init(&cqr->blocklist);
1896 __dasd_cleanup_cqr(cqr);
1898 spin_lock(&block->queue_lock);
1899 /* Get new request from the block device request queue */
1900 __dasd_process_request_queue(block);
1901 /* Now check if the head of the ccw queue needs to be started. */
1902 __dasd_block_start_head(block);
1903 spin_unlock(&block->queue_lock);
1904 spin_unlock_irq(&block->request_queue_lock);
1905 dasd_put_device(block->base);
1908 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1910 wake_up(&dasd_flush_wq);
1914 * Go through all requests on the dasd_block request queue, cancel them
1915 * on the respective dasd_device, and return them to the generic block layer.
1918 static int dasd_flush_block_queue(struct dasd_block *block)
1920 struct dasd_ccw_req *cqr, *n;
1922 struct list_head flush_queue;
1924 INIT_LIST_HEAD(&flush_queue);
1925 spin_lock_bh(&block->queue_lock);
1928 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1929 /* if this request currently owned by a dasd_device cancel it */
1930 if (cqr->status >= DASD_CQR_QUEUED)
1931 rc = dasd_cancel_req(cqr);
1934 /* Rechain request (including erp chain) so it won't be
1935 * touched by the dasd_block_tasklet anymore.
1936 * Replace the callback so we notice when the request
1937 * is returned from the dasd_device layer.
1939 cqr->callback = _dasd_wake_block_flush_cb;
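/* walk the refers chain so that any ERP requests built for this cqr are
 * moved to the flush queue together with the original request */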
1940 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1941 list_move_tail(&cqr->blocklist, &flush_queue);
1943 /* moved more than one request - need to restart */
1946 spin_unlock_bh(&block->queue_lock);
1947 /* Now call the callback function of flushed requests */
1949 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1950 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1951 /* Process finished ERP request. */
1953 spin_lock_bh(&block->queue_lock);
1954 __dasd_block_process_erp(block, cqr);
1955 spin_unlock_bh(&block->queue_lock);
1956 /* restart list_for_xx loop since dasd_process_erp
1957 * might remove multiple elements */
1960 /* call the callback function */
1961 spin_lock_irq(&block->request_queue_lock);
1962 cqr->endclk = get_clock();
1963 list_del_init(&cqr->blocklist);
1964 __dasd_cleanup_cqr(cqr);
1965 spin_unlock_irq(&block->request_queue_lock);
1971 * Schedules a call to dasd_block_tasklet over the block tasklet.
1973 void dasd_schedule_block_bh(struct dasd_block *block)
1975 /* Protect against rescheduling. */
1976 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1978 /* life cycle of block is bound to its base device */
1979 dasd_get_device(block->base);
1980 tasklet_hi_schedule(&block->tasklet);
1985 * SECTION: external block device operations
1986 * (request queue handling, open, release, etc.)
1990 * Dasd request queue function. Called from ll_rw_blk.c
1992 static void do_dasd_request(struct request_queue *queue)
1994 struct dasd_block *block;
1996 block = queue->queuedata;
1997 spin_lock(&block->queue_lock);
1998 /* Get new request from the block device request queue */
1999 __dasd_process_request_queue(block);
2000 /* Now check if the head of the ccw queue needs to be started. */
2001 __dasd_block_start_head(block);
2002 spin_unlock(&block->queue_lock);
2006 * Allocate and initialize request queue and default I/O scheduler.
2008 static int dasd_alloc_queue(struct dasd_block *block)
2012 block->request_queue = blk_init_queue(do_dasd_request,
2013 &block->request_queue_lock);
2014 if (block->request_queue == NULL)
2017 block->request_queue->queuedata = block;
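/* replace the elevator chosen by the block layer with deadline, the
 * default I/O scheduler for DASD mentioned in the comment above */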
2019 elevator_exit(block->request_queue->elevator);
2020 block->request_queue->elevator = NULL;
2021 rc = elevator_init(block->request_queue, "deadline");
2023 blk_cleanup_queue(block->request_queue);
2030 * Set up the request queue limits and features.
2032 static void dasd_setup_queue(struct dasd_block *block)
2036 blk_queue_logical_block_size(block->request_queue, block->bp_block);
2037 max = block->base->discipline->max_blocks << block->s2b_shift;
2038 blk_queue_max_sectors(block->request_queue, max);
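/* max_blocks is in device blocks; shifting by s2b_shift converts it to
 * 512-byte sectors, the unit expected by blk_queue_max_sectors */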
2039 blk_queue_max_phys_segments(block->request_queue, -1L);
2040 blk_queue_max_hw_segments(block->request_queue, -1L);
2041 /* with page sized segments we can translate each segment into one idaw/tidaw */
2044 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2045 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
2046 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
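/* QUEUE_ORDERED_DRAIN: ordered (barrier) requests are honoured by simply
 * draining the queue, no explicit cache flush command is sent to the device */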
2050 * Deactivate and free request queue.
2052 static void dasd_free_queue(struct dasd_block *block)
2054 if (block->request_queue) {
2055 blk_cleanup_queue(block->request_queue);
2056 block->request_queue = NULL;
2061 * Flush requests on the request queue.
2063 static void dasd_flush_request_queue(struct dasd_block *block)
2065 struct request *req;
2067 if (!block->request_queue)
2070 spin_lock_irq(&block->request_queue_lock);
2071 while ((req = blk_fetch_request(block->request_queue)))
2072 __blk_end_request_all(req, -EIO);
2073 spin_unlock_irq(&block->request_queue_lock);
2076 static int dasd_open(struct block_device *bdev, fmode_t mode)
2078 struct dasd_block *block = bdev->bd_disk->private_data;
2079 struct dasd_device *base = block->base;
2082 atomic_inc(&block->open_count);
2083 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2088 if (!try_module_get(base->discipline->owner)) {
2093 if (dasd_probeonly) {
2094 dev_info(&base->cdev->dev,
2095 "Accessing the DASD failed because it is in "
2096 "probeonly mode\n");
2101 if (base->state <= DASD_STATE_BASIC) {
2102 DBF_DEV_EVENT(DBF_ERR, base, " %s",
2103 " Cannot open unrecognized device");
2111 module_put(base->discipline->owner);
2113 atomic_dec(&block->open_count);
2117 static int dasd_release(struct gendisk *disk, fmode_t mode)
2119 struct dasd_block *block = disk->private_data;
2121 atomic_dec(&block->open_count);
2122 module_put(block->base->discipline->owner);
2127 * Return disk geometry.
2129 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2131 struct dasd_block *block;
2132 struct dasd_device *base;
2134 block = bdev->bd_disk->private_data;
2139 if (!base->discipline ||
2140 !base->discipline->fill_geometry)
2143 base->discipline->fill_geometry(block, geo);
2144 geo->start = get_start_sect(bdev) >> block->s2b_shift;
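/* get_start_sect returns 512-byte sectors; shift right by s2b_shift to
 * report the partition start in device blocks */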
2148 const struct block_device_operations
2149 dasd_device_operations = {
2150 .owner = THIS_MODULE,
2152 .release = dasd_release,
2153 .ioctl = dasd_ioctl,
2154 .compat_ioctl = dasd_ioctl,
2155 .getgeo = dasd_getgeo,
2158 /*******************************************************************************
2159 * end of block device operations
2165 #ifdef CONFIG_PROC_FS
2169 if (dasd_page_cache != NULL) {
2170 kmem_cache_destroy(dasd_page_cache);
2171 dasd_page_cache = NULL;
2173 dasd_gendisk_exit();
2175 if (dasd_debug_area != NULL) {
2176 debug_unregister(dasd_debug_area);
2177 dasd_debug_area = NULL;
2182 * SECTION: common functions for ccw_driver use
2185 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2187 struct ccw_device *cdev = data;
2190 ret = ccw_device_set_online(cdev);
2192 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2193 dev_name(&cdev->dev), ret);
2195 struct dasd_device *device = dasd_device_from_cdev(cdev);
2196 wait_event(dasd_init_waitq, _wait_for_device(device));
2197 dasd_put_device(device);
2202 * Initial attempt at a probe function. This can be simplified once
2203 * the other detection code is gone.
2205 int dasd_generic_probe(struct ccw_device *cdev,
2206 struct dasd_discipline *discipline)
2210 ret = dasd_add_sysfs_files(cdev);
2212 DBF_EVENT(DBF_WARNING,
2213 "dasd_generic_probe: could not add sysfs entries "
2214 "for %s\n", dev_name(&cdev->dev));
2217 cdev->handler = &dasd_int_handler;
2220 * Automatically online either all dasd devices (dasd_autodetect)
2221 * or all devices specified with dasd= parameters during the initial probe.
2224 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2225 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2226 async_schedule(dasd_generic_auto_online, cdev);
2231 * This will one day be called from a global not_oper handler.
2232 * It is also used by driver_unregister during module unload.
2234 void dasd_generic_remove(struct ccw_device *cdev)
2236 struct dasd_device *device;
2237 struct dasd_block *block;
2239 cdev->handler = NULL;
2241 dasd_remove_sysfs_files(cdev);
2242 device = dasd_device_from_cdev(cdev);
2245 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2246 /* Already doing offline processing */
2247 dasd_put_device(device);
2251 * This device is removed unconditionally. Set the offline
2252 * flag to prevent dasd_open from opening it while it is
2253 * not quite down yet.
2255 dasd_set_target_state(device, DASD_STATE_NEW);
2256 /* dasd_delete_device destroys the device reference. */
2257 block = device->block;
2258 device->block = NULL;
2259 dasd_delete_device(device);
2261 * The life cycle of the block is bound to the device, so delete it
2262 * after the device has been safely removed.
2265 dasd_free_block(block);
2269 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2270 * the device is detected for the first time and is supposed to be used
2271 * or the user has started activation through sysfs.
2273 int dasd_generic_set_online(struct ccw_device *cdev,
2274 struct dasd_discipline *base_discipline)
2276 struct dasd_discipline *discipline;
2277 struct dasd_device *device;
2280 /* first online clears initial online feature flag */
2281 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2282 device = dasd_create_device(cdev);
2284 return PTR_ERR(device);
2286 discipline = base_discipline;
2287 if (device->features & DASD_FEATURE_USEDIAG) {
2288 if (!dasd_diag_discipline_pointer) {
2289 pr_warning("%s: Setting the DASD online failed because "
2290 "of the missing DIAG discipline\n",
2291 dev_name(&cdev->dev));
2292 dasd_delete_device(device);
2295 discipline = dasd_diag_discipline_pointer;
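/*
 * Pin both the base discipline and the discipline that is actually
 * used (they differ when the DIAG access method is selected). The
 * references are dropped again on the failure paths below and when the
 * device is later torn down, i.e. set back to DASD_STATE_NEW.
 */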
2297 if (!try_module_get(base_discipline->owner)) {
2298 dasd_delete_device(device);
2301 if (!try_module_get(discipline->owner)) {
2302 module_put(base_discipline->owner);
2303 dasd_delete_device(device);
2306 device->base_discipline = base_discipline;
2307 device->discipline = discipline;
2309 /* check_device will allocate block device if necessary */
2310 rc = discipline->check_device(device);
2312 pr_warning("%s: Setting the DASD online with discipline %s "
2313 "failed with rc=%i\n",
2314 dev_name(&cdev->dev), discipline->name, rc);
2315 module_put(discipline->owner);
2316 module_put(base_discipline->owner);
2317 dasd_delete_device(device);
2321 dasd_set_target_state(device, DASD_STATE_ONLINE);
2322 if (device->state <= DASD_STATE_KNOWN) {
2323 pr_warning("%s: Setting the DASD online failed because of a "
2324 "missing discipline\n", dev_name(&cdev->dev));
2326 dasd_set_target_state(device, DASD_STATE_NEW);
2328 dasd_free_block(device->block);
2329 dasd_delete_device(device);
2331 pr_debug("dasd_generic device %s found\n",
2332 dev_name(&cdev->dev));
2333 dasd_put_device(device);
2337 int dasd_generic_set_offline(struct ccw_device *cdev)
2339 struct dasd_device *device;
2340 struct dasd_block *block;
2341 int max_count, open_count;
2343 device = dasd_device_from_cdev(cdev);
2345 return PTR_ERR(device);
2346 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2347 /* Already doing offline processing */
2348 dasd_put_device(device);
2352 * We must make sure that this device is currently not in use.
2353 * The open_count is increased for every opener, which includes
2354 * the blkdev_get in dasd_scan_partitions. We are only interested
2355 * in the other openers.
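/*
 * Note (assuming the initialisation done in dasd_alloc_block): the
 * open_count starts out at -1 and the internal opener created by the
 * partition scan brings it to 0. That is why max_count is 0 when an
 * internal bdev reference exists and -1 otherwise; anything above
 * max_count therefore indicates a real user of the device.
 */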
2357 if (device->block) {
2358 max_count = device->block->bdev ? 0 : -1;
2359 open_count = atomic_read(&device->block->open_count);
2360 if (open_count > max_count) {
2362 pr_warning("%s: The DASD cannot be set offline "
2363 "with open count %i\n",
2364 dev_name(&cdev->dev), open_count);
2366 pr_warning("%s: The DASD cannot be set offline "
2367 "while it is in use\n",
2368 dev_name(&cdev->dev));
2369 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2370 dasd_put_device(device);
2374 dasd_set_target_state(device, DASD_STATE_NEW);
2375 /* dasd_delete_device destroys the device reference. */
2376 block = device->block;
2377 device->block = NULL;
2378 dasd_delete_device(device);
2380 * The life cycle of the block is bound to the device, so delete it
2381 * after the device has been safely removed.
2384 dasd_free_block(block);
2388 int dasd_generic_notify(struct ccw_device *cdev, int event)
2390 struct dasd_device *device;
2391 struct dasd_ccw_req *cqr;
2394 device = dasd_device_from_cdev_locked(cdev);
2402 /* First of all call extended error reporting. */
2403 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2405 if (device->state < DASD_STATE_BASIC)
2407 /* Device is active. We want to keep it. */
2408 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2409 if (cqr->status == DASD_CQR_IN_IO) {
2410 cqr->status = DASD_CQR_QUEUED;
2413 device->stopped |= DASD_STOPPED_DC_WAIT;
2414 dasd_device_clear_timer(device);
2415 dasd_schedule_device_bh(device);
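/*
 * Requests that were in flight have been reset to DASD_CQR_QUEUED
 * above. The DASD_STOPPED_DC_WAIT bit keeps the device quiescent until
 * a channel path becomes operational again; the handling further below
 * clears the bit and the bottom halves then restart the queued I/O.
 */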
2419 /* FIXME: add a sanity check. */
2420 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2421 if (device->stopped & DASD_UNRESUMED_PM) {
2422 device->stopped &= ~DASD_UNRESUMED_PM;
2423 dasd_restore_device(device);
2427 dasd_schedule_device_bh(device);
2429 dasd_schedule_block_bh(device->block);
2433 dasd_put_device(device);
2437 int dasd_generic_pm_freeze(struct ccw_device *cdev)
2439 struct dasd_ccw_req *cqr, *n;
2441 struct list_head freeze_queue;
2442 struct dasd_device *device = dasd_device_from_cdev(cdev);
2445 return PTR_ERR(device);
2446 /* disallow new I/O */
2447 device->stopped |= DASD_STOPPED_PM;
2448 /* clear active requests */
2449 INIT_LIST_HEAD(&freeze_queue);
2450 spin_lock_irq(get_ccwdev_lock(cdev));
2452 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2453 /* Check status and move request to freeze_queue */
2454 if (cqr->status == DASD_CQR_IN_IO) {
2455 rc = device->discipline->term_IO(cqr);
2457 /* unable to terminate request */
2458 dev_err(&device->cdev->dev,
2459 "Unable to terminate request %p "
2460 "on suspend\n", cqr);
2461 spin_unlock_irq(get_ccwdev_lock(cdev));
2462 dasd_put_device(device);
2466 list_move_tail(&cqr->devlist, &freeze_queue);
2469 spin_unlock_irq(get_ccwdev_lock(cdev));
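/*
 * Requests that had to be terminated are in clear-pending state until
 * the clear interrupt arrives. Wait for that, mark them as queued again
 * and put them back on the ccw_queue so they are restarted on resume.
 */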
2471 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
2472 wait_event(dasd_flush_wq,
2473 (cqr->status != DASD_CQR_CLEAR_PENDING));
2474 if (cqr->status == DASD_CQR_CLEARED)
2475 cqr->status = DASD_CQR_QUEUED;
2477 /* re-queue the requests from the freeze_queue onto the ccw_queue */
2478 spin_lock_irq(get_ccwdev_lock(cdev));
2479 list_splice_tail(&freeze_queue, &device->ccw_queue);
2480 spin_unlock_irq(get_ccwdev_lock(cdev));
2482 if (device->discipline->freeze)
2483 rc = device->discipline->freeze(device);
2485 dasd_put_device(device);
2488 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2490 int dasd_generic_restore_device(struct ccw_device *cdev)
2492 struct dasd_device *device = dasd_device_from_cdev(cdev);
2496 return PTR_ERR(device);
2498 /* allow new I/O again */
2499 device->stopped &= ~DASD_STOPPED_PM;
2500 device->stopped &= ~DASD_UNRESUMED_PM;
2502 dasd_schedule_device_bh(device);
2504 if (device->discipline->restore)
2505 rc = device->discipline->restore(device);
2508 * If the resume failed for the DASD, put it into
2509 * an UNRESUMED stop state.
2511 device->stopped |= DASD_UNRESUMED_PM;
2514 dasd_schedule_block_bh(device->block);
2516 dasd_put_device(device);
2519 EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2521 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2523 int rdc_buffer_size,
2526 struct dasd_ccw_req *cqr;
2528 unsigned long *idaw;
2530 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2533 /* internal error 13 - Allocating the RDC request failed */
2534 dev_err(&device->cdev->dev,
2535 "An error occurred in the DASD device driver, "
2536 "reason=%s\n", "13");
2541 ccw->cmd_code = CCW_CMD_RDC;
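/*
 * A CCW carries only a 31-bit data address. If the caller's buffer
 * cannot be addressed directly (idal_is_needed), build an indirect
 * data address list in the request's data area and let the CCW point
 * to that list instead of the buffer itself.
 */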
2542 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2543 idaw = (unsigned long *) (cqr->data);
2544 ccw->cda = (__u32)(addr_t) idaw;
2545 ccw->flags = CCW_FLAG_IDA;
2546 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2548 ccw->cda = (__u32)(addr_t) rdc_buffer;
2552 ccw->count = rdc_buffer_size;
2553 cqr->startdev = device;
2554 cqr->memdev = device;
2555 cqr->expires = 10*HZ;
2556 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2558 cqr->buildclk = get_clock();
2559 cqr->status = DASD_CQR_FILLED;
2564 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2565 void *rdc_buffer, int rdc_buffer_size)
2568 struct dasd_ccw_req *cqr;
2570 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2573 return PTR_ERR(cqr);
2575 ret = dasd_sleep_on(cqr);
2576 dasd_sfree_request(cqr, cqr->memdev);
2579 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
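/*
 * Illustrative use (a sketch, not taken from this file): a discipline
 * typically reads its device characteristics from its check_device()
 * callback, for example:
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &private->rdc_data,
 *					 sizeof(private->rdc_data));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Read device characteristics failed, rc=%d\n", rc);
 *
 * DASD_ECKD_MAGIC and private->rdc_data belong to the ECKD discipline
 * and are used here only as an example.
 */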
2582 * In command mode and transport mode we need to look for sense
2583 * data in different places. The sense data itself is always
2584 * an array of 32 bytes, so we can unify the sense data access.
2587 char *dasd_get_sense(struct irb *irb)
2589 struct tsb *tsb = NULL;
2592 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2593 if (irb->scsw.tm.tcw)
2594 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2596 if (tsb && tsb->length == 64 && tsb->flags)
2597 switch (tsb->flags & 0x07) {
2598 case 1: /* tsa_iostat */
2599 sense = tsb->tsa.iostat.sense;
2601 case 2: /* tsa_ddpc */
2602 sense = tsb->tsa.ddpc.sense;
2605 /* currently we don't use interrogate data */
2608 } else if (irb->esw.esw0.erw.cons) {
2613 EXPORT_SYMBOL_GPL(dasd_get_sense);
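/*
 * Illustrative use (sketch only): callers can handle command-mode and
 * transport-mode interrupts uniformly, for example:
 *
 *	char *sense = dasd_get_sense(irb);
 *	if (sense)
 *		... inspect the 32 sense bytes ...
 *	else
 *		... no sense data was provided ...
 */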
2615 static int __init dasd_init(void)
2619 init_waitqueue_head(&dasd_init_waitq);
2620 init_waitqueue_head(&dasd_flush_wq);
2621 init_waitqueue_head(&generic_waitq);
2623 /* register 'common' DASD debug area, used for all DBF_XXX calls */
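/* one debug area of one page, with 8 * sizeof(long) bytes per entry,
 * which the sprintf view uses for the format pointer and its arguments */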
2624 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2625 if (dasd_debug_area == NULL) {
2629 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2630 debug_set_level(dasd_debug_area, DBF_WARNING);
2632 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2634 dasd_diag_discipline_pointer = NULL;
2636 rc = dasd_devmap_init();
2639 rc = dasd_gendisk_init();
2645 rc = dasd_eer_init();
2648 #ifdef CONFIG_PROC_FS
2649 rc = dasd_proc_init();
2656 pr_info("The DASD device driver could not be initialized\n");
2661 module_init(dasd_init);
2662 module_exit(dasd_exit);
2664 EXPORT_SYMBOL(dasd_debug_area);
2665 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2667 EXPORT_SYMBOL(dasd_add_request_head);
2668 EXPORT_SYMBOL(dasd_add_request_tail);
2669 EXPORT_SYMBOL(dasd_cancel_req);
2670 EXPORT_SYMBOL(dasd_device_clear_timer);
2671 EXPORT_SYMBOL(dasd_block_clear_timer);
2672 EXPORT_SYMBOL(dasd_enable_device);
2673 EXPORT_SYMBOL(dasd_int_handler);
2674 EXPORT_SYMBOL(dasd_kfree_request);
2675 EXPORT_SYMBOL(dasd_kick_device);
2676 EXPORT_SYMBOL(dasd_kmalloc_request);
2677 EXPORT_SYMBOL(dasd_schedule_device_bh);
2678 EXPORT_SYMBOL(dasd_schedule_block_bh);
2679 EXPORT_SYMBOL(dasd_set_target_state);
2680 EXPORT_SYMBOL(dasd_device_set_timer);
2681 EXPORT_SYMBOL(dasd_block_set_timer);
2682 EXPORT_SYMBOL(dasd_sfree_request);
2683 EXPORT_SYMBOL(dasd_sleep_on);
2684 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2685 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2686 EXPORT_SYMBOL(dasd_smalloc_request);
2687 EXPORT_SYMBOL(dasd_start_IO);
2688 EXPORT_SYMBOL(dasd_term_IO);
2690 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2691 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2692 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2693 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2694 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2695 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2696 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2697 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2698 EXPORT_SYMBOL_GPL(dasd_free_block);