[go: nahoru, domu]

device.c revision 60e4dac1abdf49ccdb7545ec406325f08423d848
1/*
2 *  drivers/s390/cio/device.c
3 *  bus driver for ccw devices
4 *
5 *    Copyright IBM Corp. 2002,2008
6 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
8 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/spinlock.h>
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/slab.h>
16#include <linux/list.h>
17#include <linux/device.h>
18#include <linux/workqueue.h>
19#include <linux/timer.h>
20
21#include <asm/ccwdev.h>
22#include <asm/cio.h>
23#include <asm/param.h>		/* HZ */
24#include <asm/cmb.h>
25#include <asm/isc.h>
26
27#include "chp.h"
28#include "cio.h"
29#include "cio_debug.h"
30#include "css.h"
31#include "device.h"
32#include "ioasm.h"
33#include "io_sch.h"
34#include "blacklist.h"
35
/* State for the delayed ccw device recovery mechanism. */
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);	/* protects recovery_phase/timer */
static int recovery_phase;		/* index into recovery_delay[] */
/* Escalating retry delays; presumably seconds (scaled by HZ where the
 * timer is armed) — TODO confirm at the recovery_func() call sites. */
static const unsigned long recovery_delay[] = { 3, 30, 300 };
40
41/******************* bus type handling ***********************/
42
43/* The Linux driver model distinguishes between a bus type and
44 * the bus itself. Of course we only have one channel
45 * subsystem driver and one channel system per machine, but
46 * we still use the abstraction. T.R. says it's a good idea. */
47static int
48ccw_bus_match (struct device * dev, struct device_driver * drv)
49{
50	struct ccw_device *cdev = to_ccwdev(dev);
51	struct ccw_driver *cdrv = to_ccwdrv(drv);
52	const struct ccw_device_id *ids = cdrv->ids, *found;
53
54	if (!ids)
55		return 0;
56
57	found = ccw_device_id_match(ids, &cdev->id);
58	if (!found)
59		return 0;
60
61	cdev->id.driver_info = found->driver_info;
62
63	return 1;
64}
65
66/* Store modalias string delimited by prefix/suffix string into buffer with
67 * specified size. Return length of resulting string (excluding trailing '\0')
68 * even if string doesn't fit buffer (snprintf semantics). */
69static int snprint_alias(char *buf, size_t size,
70			 struct ccw_device_id *id, const char *suffix)
71{
72	int len;
73
74	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
75	if (len > size)
76		return len;
77	buf += len;
78	size -= len;
79
80	if (id->dev_type != 0)
81		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
82				id->dev_model, suffix);
83	else
84		len += snprintf(buf, size, "dtdm%s", suffix);
85
86	return len;
87}
88
89/* Set up environment variables for ccw device uevent. Return 0 on success,
90 * non-zero otherwise. */
91static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
92{
93	struct ccw_device *cdev = to_ccwdev(dev);
94	struct ccw_device_id *id = &(cdev->id);
95	int ret;
96	char modalias_buf[30];
97
98	/* CU_TYPE= */
99	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
100	if (ret)
101		return ret;
102
103	/* CU_MODEL= */
104	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
105	if (ret)
106		return ret;
107
108	/* The next two can be zero, that's ok for us */
109	/* DEV_TYPE= */
110	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
111	if (ret)
112		return ret;
113
114	/* DEV_MODEL= */
115	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
116	if (ret)
117		return ret;
118
119	/* MODALIAS=  */
120	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
121	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
122	return ret;
123}
124
/* The one ccw bus type instance; filled in at the bottom of this file. */
struct bus_type ccw_bus_type;

/* Forward declarations for the io subchannel css driver callbacks. */
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);
/* Workqueue and bookkeeping for asynchronous ccw device handling. */
struct workqueue_struct *ccw_device_work;
wait_queue_head_t ccw_device_init_wq;
/* Number of devices currently undergoing recognition. */
atomic_t ccw_device_init_count;

/* This driver handles I/O subchannels only. */
static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);
144
145static int io_subchannel_prepare(struct subchannel *sch)
146{
147	struct ccw_device *cdev;
148	/*
149	 * Don't allow suspend while a ccw device registration
150	 * is still outstanding.
151	 */
152	cdev = sch_get_cdev(sch);
153	if (cdev && !device_is_registered(&cdev->dev))
154		return -EAGAIN;
155	return 0;
156}
157
/* Wait until all outstanding device recognitions have finished and all
 * queued ccw device work has been processed. */
static void io_subchannel_settle(void)
{
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(ccw_device_work);
}
164
/* css driver registration for I/O subchannels; the callbacks are
 * defined throughout this file. */
static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = io_subchannel_ids,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};
178
179int __init io_subchannel_init(void)
180{
181	int ret;
182
183	init_waitqueue_head(&ccw_device_init_wq);
184	atomic_set(&ccw_device_init_count, 0);
185	setup_timer(&recovery_timer, recovery_func, 0);
186
187	ccw_device_work = create_singlethread_workqueue("cio");
188	if (!ccw_device_work)
189		return -ENOMEM;
190	slow_path_wq = create_singlethread_workqueue("kslowcrw");
191	if (!slow_path_wq) {
192		ret = -ENOMEM;
193		goto out_err;
194	}
195	if ((ret = bus_register (&ccw_bus_type)))
196		goto out_err;
197
198	ret = css_driver_register(&io_subchannel_driver);
199	if (ret)
200		goto out_err;
201
202	return 0;
203out_err:
204	if (ccw_device_work)
205		destroy_workqueue(ccw_device_work);
206	if (slow_path_wq)
207		destroy_workqueue(slow_path_wq);
208	return ret;
209}
210
211
212/************************ device handling **************************/
213
214/*
215 * A ccw_device has some interfaces in sysfs in addition to the
216 * standard ones.
217 * The following entries are designed to export the information which
218 * resided in 2.4 in /proc/subchannels. Subchannel and device number
219 * are obvious, so they don't have an entry :)
220 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
221 */
222static ssize_t
223chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
224{
225	struct subchannel *sch = to_subchannel(dev);
226	struct chsc_ssd_info *ssd = &sch->ssd_info;
227	ssize_t ret = 0;
228	int chp;
229	int mask;
230
231	for (chp = 0; chp < 8; chp++) {
232		mask = 0x80 >> chp;
233		if (ssd->path_mask & mask)
234			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
235		else
236			ret += sprintf(buf + ret, "00 ");
237	}
238	ret += sprintf (buf+ret, "\n");
239	return min((ssize_t)PAGE_SIZE, ret);
240}
241
242static ssize_t
243pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
244{
245	struct subchannel *sch = to_subchannel(dev);
246	struct pmcw *pmcw = &sch->schib.pmcw;
247
248	return sprintf (buf, "%02x %02x %02x\n",
249			pmcw->pim, pmcw->pam, pmcw->pom);
250}
251
252static ssize_t
253devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
254{
255	struct ccw_device *cdev = to_ccwdev(dev);
256	struct ccw_device_id *id = &(cdev->id);
257
258	if (id->dev_type != 0)
259		return sprintf(buf, "%04x/%02x\n",
260				id->dev_type, id->dev_model);
261	else
262		return sprintf(buf, "n/a\n");
263}
264
265static ssize_t
266cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
267{
268	struct ccw_device *cdev = to_ccwdev(dev);
269	struct ccw_device_id *id = &(cdev->id);
270
271	return sprintf(buf, "%04x/%02x\n",
272		       id->cu_type, id->cu_model);
273}
274
275static ssize_t
276modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
277{
278	struct ccw_device *cdev = to_ccwdev(dev);
279	struct ccw_device_id *id = &(cdev->id);
280	int len;
281
282	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
283
284	return len > PAGE_SIZE ? PAGE_SIZE : len;
285}
286
287static ssize_t
288online_show (struct device *dev, struct device_attribute *attr, char *buf)
289{
290	struct ccw_device *cdev = to_ccwdev(dev);
291
292	return sprintf(buf, cdev->online ? "1\n" : "0\n");
293}
294
/* A ccw device is an orphan if its parent is the pseudo subchannel,
 * i.e. it currently has no real subchannel attached. */
int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}
299
/* Undo ccw_device_register(): remove the device from the driver core and
 * drop the initial reference. Bit 1 of private->registered guards
 * against doing this twice. */
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (test_and_clear_bit(1, &cdev->private->registered)) {
		device_del(&cdev->dev);
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
308
/* Work item: deregister an orphaned ccw device. Scheduled by
 * ccw_device_remove_disconnected() which took an extra reference. */
static void ccw_device_remove_orphan_cb(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	ccw_device_unregister(cdev);
	/* Release cdev reference for workqueue processing. */
	put_device(&cdev->dev);
}
320
/* Remove a disconnected ccw device: orphans are deregistered via a work
 * item (we may be called from a sysfs attribute method and must not
 * deregister directly); otherwise the subchannel is unregistered, which
 * takes the ccw device with it. */
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
	unsigned long flags;

	/*
	 * Forced offline in disconnected state means
	 * 'throw away device'.
	 */
	if (ccw_device_is_orphan(cdev)) {
		/*
		 * Deregister ccw device.
		 * Unfortunately, we cannot do this directly from the
		 * attribute method.
		 */
		/* Get cdev reference for workqueue processing. */
		if (!get_device(&cdev->dev))
			return;
		spin_lock_irqsave(cdev->ccwlock, flags);
		cdev->private->state = DEV_STATE_NOT_OPER;
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		PREPARE_WORK(&cdev->private->kick_work,
				ccw_device_remove_orphan_cb);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		/* Deregister subchannel, which will kill the ccw device. */
		ccw_device_schedule_sch_unregister(cdev);
}
349
350/**
351 * ccw_device_set_offline() - disable a ccw device for I/O
352 * @cdev: target ccw device
353 *
354 * This function calls the driver's set_offline() function for @cdev, if
355 * given, and then disables @cdev.
356 * Returns:
357 *   %0 on success and a negative error value on failure.
358 * Context:
359 *  enabled, ccw device lock not held
360 */
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		/* Drop the lock while sleeping; recheck under the lock. */
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret = ccw_device_offline(cdev);
	if (ret)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	/* Wait for the offline processing started above to complete. */
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
		      ret, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	/* Force the device into a known offline state. */
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
406
407/**
408 * ccw_device_set_online() - enable a ccw device for I/O
409 * @cdev: target ccw device
410 *
411 * This function first enables @cdev and then calls the driver's set_online()
412 * function for @cdev, if given. If set_online() returns an error, @cdev is
413 * disabled again.
414 * Returns:
415 *   %0 on success and a negative error value on failure.
416 * Context:
417 *  enabled, ccw device lock not held
418 */
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		/* Wait for the online FSM to reach a final state. */
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Let the driver do its part; roll back on failure. */
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;
	cdev->online = 1;
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	/* Report the set_online() error, not the rollback result. */
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
494
495static int online_store_handle_offline(struct ccw_device *cdev)
496{
497	if (cdev->private->state == DEV_STATE_DISCONNECTED)
498		ccw_device_remove_disconnected(cdev);
499	else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500		return ccw_device_set_offline(cdev);
501	return 0;
502}
503
/* Run device recognition if needed (boxed device), then set the device
 * online via its driver. Returns non-zero only on recognition failure.
 * NOTE(review): the return value of ccw_device_set_online() is
 * deliberately(?) ignored here — confirm callers do not rely on it. */
static int online_store_recog_and_online(struct ccw_device *cdev)
{
	int ret;

	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_recognition(cdev);
		if (ret) {
			CIO_MSG_EVENT(0, "Couldn't start recognition "
				      "for device 0.%x.%04x (ret=%d)\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			return ret;
		}
		/* Recognition runs asynchronously; wait for completion. */
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		ccw_device_set_online(cdev);
	return 0;
}
528
529static int online_store_handle_online(struct ccw_device *cdev, int force)
530{
531	int ret;
532
533	ret = online_store_recog_and_online(cdev);
534	if (ret && !force)
535		return ret;
536	if (force && cdev->private->state == DEV_STATE_BOXED) {
537		ret = ccw_device_stlck(cdev);
538		if (ret)
539			return ret;
540		if (cdev->id.cu_type == 0)
541			cdev->private->state = DEV_STATE_NOT_OPER;
542		ret = online_store_recog_and_online(cdev);
543		if (ret)
544			return ret;
545	}
546	return 0;
547}
548
/* sysfs store for the "online" attribute: "0" takes the device offline,
 * "1" brings it online, "force\n" forces onlining of a boxed device. */
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Only act from a stable state, and only if no other
	 * online/offline processing is in flight (onoff is used as a
	 * single-bit lock via cmpxchg). */
	if ((cdev->private->state != DEV_STATE_OFFLINE &&
	     cdev->private->state != DEV_STATE_ONLINE &&
	     cdev->private->state != DEV_STATE_BOXED &&
	     cdev->private->state != DEV_STATE_DISCONNECTED) ||
	    atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	/* Pin the driver module while calling into it. */
	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		/* NOTE(review): value parsed with base 16 — confirm this
		 * is intentional (only 0 and 1 are accepted below). */
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	/* Release the onoff "lock". */
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
593
594static ssize_t
595available_show (struct device *dev, struct device_attribute *attr, char *buf)
596{
597	struct ccw_device *cdev = to_ccwdev(dev);
598	struct subchannel *sch;
599
600	if (ccw_device_is_orphan(cdev))
601		return sprintf(buf, "no device\n");
602	switch (cdev->private->state) {
603	case DEV_STATE_BOXED:
604		return sprintf(buf, "boxed\n");
605	case DEV_STATE_DISCONNECTED:
606	case DEV_STATE_DISCONNECTED_SENSE_ID:
607	case DEV_STATE_NOT_OPER:
608		sch = to_subchannel(dev->parent);
609		if (!sch->lpm)
610			return sprintf(buf, "no path\n");
611		else
612			return sprintf(buf, "no device\n");
613	default:
614		/* All other states considered fine. */
615		return sprintf(buf, "good\n");
616	}
617}
618
/* sysfs attributes; all read-only except "online". */
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

/* Attributes attached to the subchannel device. */
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

/* Attributes attached to the ccw device itself. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};
655
656/* this is a simple abstraction for device_register that sets the
657 * correct bus type and adds the bus specific files */
658static int ccw_device_register(struct ccw_device *cdev)
659{
660	struct device *dev = &cdev->dev;
661	int ret;
662
663	dev->bus = &ccw_bus_type;
664	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
665			   cdev->private->dev_id.devno);
666	if (ret)
667		return ret;
668	ret = device_add(dev);
669	if (ret)
670		return ret;
671
672	set_bit(1, &cdev->private->registered);
673	return ret;
674}
675
/* Search key for match_devno(): find a disconnected ccw device with
 * the given dev_id, skipping @sibling. */
struct match_data {
	struct ccw_dev_id dev_id;
	struct ccw_device * sibling;
};
680
681static int
682match_devno(struct device * dev, void * data)
683{
684	struct match_data * d = data;
685	struct ccw_device * cdev;
686
687	cdev = to_ccwdev(dev);
688	if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
689	    !ccw_device_is_orphan(cdev) &&
690	    ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
691	    (cdev != d->sibling))
692		return 1;
693	return 0;
694}
695
696static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
697						     struct ccw_device *sibling)
698{
699	struct device *dev;
700	struct match_data data;
701
702	data.dev_id = *dev_id;
703	data.sibling = sibling;
704	dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
705
706	return dev ? to_ccwdev(dev) : NULL;
707}
708
709static int match_orphan(struct device *dev, void *data)
710{
711	struct ccw_dev_id *dev_id;
712	struct ccw_device *cdev;
713
714	dev_id = data;
715	cdev = to_ccwdev(dev);
716	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
717}
718
719static struct ccw_device *
720get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
721			      struct ccw_dev_id *dev_id)
722{
723	struct device *dev;
724
725	dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
726				match_orphan);
727
728	return dev ? to_ccwdev(dev) : NULL;
729}
730
731void ccw_device_do_unbind_bind(struct work_struct *work)
732{
733	struct ccw_device_private *priv;
734	struct ccw_device *cdev;
735	struct subchannel *sch;
736	int ret;
737
738	priv = container_of(work, struct ccw_device_private, kick_work);
739	cdev = priv->cdev;
740	sch = to_subchannel(cdev->dev.parent);
741
742	if (test_bit(1, &cdev->private->registered)) {
743		device_release_driver(&cdev->dev);
744		ret = device_attach(&cdev->dev);
745		WARN_ON(ret == -ENODEV);
746	}
747}
748
749static void
750ccw_device_release(struct device *dev)
751{
752	struct ccw_device *cdev;
753
754	cdev = to_ccwdev(dev);
755	/* Release reference of parent subchannel. */
756	put_device(cdev->dev.parent);
757	kfree(cdev->private);
758	kfree(cdev);
759}
760
761static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
762{
763	struct ccw_device *cdev;
764
765	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
766	if (cdev) {
767		cdev->private = kzalloc(sizeof(struct ccw_device_private),
768					GFP_KERNEL | GFP_DMA);
769		if (cdev->private)
770			return cdev;
771	}
772	kfree(cdev);
773	return ERR_PTR(-ENOMEM);
774}
775
/* First half of device_register() for a freshly allocated ccw device:
 * wire up private data, parent and release callback, then
 * device_initialize() it and take a reference on the subchannel.
 * On failure the initial cdev reference is dropped (freeing it). */
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->kick_work, NULL);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	return 0;
}
794
/* Allocate and initialize a ccw device for the given subchannel.
 * Returns an ERR_PTR on failure. */
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_allocate_dev(sch);
	int ret;

	if (IS_ERR(cdev))
		return cdev;
	ret = io_subchannel_initialize_dev(sch, cdev);
	if (ret)
		return ERR_PTR(ret);
	return cdev;
}
808
809static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
810
/* Attach an existing ccw device to a subchannel and trigger a reprobe.
 * The subchannel lock is held while the cdev is wired up so the fsm
 * sees a consistent state. */
static void sch_attach_device(struct subchannel *sch,
			      struct ccw_device *cdev)
{
	css_update_ssd_info(sch);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	ccw_device_trigger_reprobe(cdev);
	spin_unlock_irq(sch->lock);
}
822
/* Move a disconnected ccw device from its old subchannel to @sch and
 * attach it there. The old subchannel is unregistered since a
 * subchannel without a ccw device is of no use. */
static void sch_attach_disconnected_device(struct subchannel *sch,
					   struct ccw_device *cdev)
{
	struct subchannel *other_sch;
	int ret;

	/* Get reference for new parent. */
	if (!get_device(&sch->dev))
		return;
	other_sch = to_subchannel(cdev->dev.parent);
	/* Note: device_move() changes cdev->dev.parent */
	ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		/* Put reference for new parent. */
		put_device(&sch->dev);
		return;
	}
	sch_set_cdev(other_sch, NULL);
	/* No need to keep a subchannel without ccw device around. */
	css_sch_device_unregister(other_sch);
	sch_attach_device(sch, cdev);
	/* Put reference for old parent. */
	put_device(&other_sch->dev);
}
850
/* Move a ccw device out of the orphanage (pseudo subchannel) to @sch
 * and attach it there. */
static void sch_attach_orphaned_device(struct subchannel *sch,
				       struct ccw_device *cdev)
{
	int ret;
	struct subchannel *pseudo_sch;

	/* Get reference for new parent. */
	if (!get_device(&sch->dev))
		return;
	pseudo_sch = to_subchannel(cdev->dev.parent);
	/*
	 * Try to move the ccw device to its new subchannel.
	 * Note: device_move() changes cdev->dev.parent
	 */
	ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
			      "failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		/* Put reference for new parent. */
		put_device(&sch->dev);
		return;
	}
	sch_attach_device(sch, cdev);
	/* Put reference on pseudo subchannel. */
	put_device(&pseudo_sch->dev);
}
879
/* Create a fresh ccw device for @sch and start recognition. If either
 * step fails, the subchannel is unregistered and all references taken
 * so far are dropped. */
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	if (io_subchannel_recog(cdev, sch)) {
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		css_sch_device_unregister(sch);
		/* Put reference from io_subchannel_create_ccwdev(). */
		put_device(&sch->dev);
		/* Give up initial reference. */
		put_device(&cdev->dev);
	}
}
903
904
/* Work item: move a ccw device off its subchannel into the orphanage
 * and find a replacement device for the subchannel — either a matching
 * disconnected device, a matching orphan, or a newly created one. */
void ccw_device_move_to_orphanage(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct ccw_device *replacing_cdev;
	struct subchannel *sch;
	int ret;
	struct channel_subsystem *css;
	struct ccw_dev_id dev_id;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css = to_css(sch->dev.parent);
	/* The id the replacement device must match: what the subchannel
	 * currently reports, not what the old cdev had. */
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;

	/* Increase refcount for pseudo subchannel. */
	get_device(&css->pseudo_subchannel->dev);
	/*
	 * Move the orphaned ccw device to the orphanage so the replacing
	 * ccw device can take its place on the subchannel.
	 * Note: device_move() changes cdev->dev.parent
	 */
	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
		DPM_ORDER_NONE);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		/* Decrease refcount for pseudo subchannel again. */
		put_device(&css->pseudo_subchannel->dev);
		return;
	}
	/* The orphan now uses the pseudo subchannel's lock. */
	cdev->ccwlock = css->pseudo_subchannel->lock;
	/*
	 * Search for the replacing ccw device
	 * - among the disconnected devices
	 * - in the orphanage
	 */
	replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
	if (replacing_cdev) {
		sch_attach_disconnected_device(sch, replacing_cdev);
		/* Release reference from get_disc_ccwdev_by_dev_id() */
		put_device(&replacing_cdev->dev);
		/* Release reference of subchannel from old cdev. */
		put_device(&sch->dev);
		return;
	}
	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
	if (replacing_cdev) {
		sch_attach_orphaned_device(sch, replacing_cdev);
		/* Release reference from get_orphaned_ccwdev_by_dev_id() */
		put_device(&replacing_cdev->dev);
		/* Release reference of subchannel from old cdev. */
		put_device(&sch->dev);
		return;
	}
	sch_create_and_recog_new_device(sch);
	/* Release reference of subchannel from old cdev. */
	put_device(&sch->dev);
}
967
968/*
969 * Register recognized device.
970 */
971static void
972io_subchannel_register(struct work_struct *work)
973{
974	struct ccw_device_private *priv;
975	struct ccw_device *cdev;
976	struct subchannel *sch;
977	int ret;
978	unsigned long flags;
979
980	priv = container_of(work, struct ccw_device_private, kick_work);
981	cdev = priv->cdev;
982	sch = to_subchannel(cdev->dev.parent);
983	/*
984	 * Check if subchannel is still registered. It may have become
985	 * unregistered if a machine check hit us after finishing
986	 * device recognition but before the register work could be
987	 * queued.
988	 */
989	if (!device_is_registered(&sch->dev))
990		goto out_err;
991	css_update_ssd_info(sch);
992	/*
993	 * io_subchannel_register() will also be called after device
994	 * recognition has been done for a boxed device (which will already
995	 * be registered). We need to reprobe since we may now have sense id
996	 * information.
997	 */
998	if (device_is_registered(&cdev->dev)) {
999		if (!cdev->drv) {
1000			ret = device_reprobe(&cdev->dev);
1001			if (ret)
1002				/* We can't do much here. */
1003				CIO_MSG_EVENT(0, "device_reprobe() returned"
1004					      " %d for 0.%x.%04x\n", ret,
1005					      cdev->private->dev_id.ssid,
1006					      cdev->private->dev_id.devno);
1007		}
1008		goto out;
1009	}
1010	/*
1011	 * Now we know this subchannel will stay, we can throw
1012	 * our delayed uevent.
1013	 */
1014	dev_set_uevent_suppress(&sch->dev, 0);
1015	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1016	/* make it known to the system */
1017	ret = ccw_device_register(cdev);
1018	if (ret) {
1019		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
1020			      cdev->private->dev_id.ssid,
1021			      cdev->private->dev_id.devno, ret);
1022		spin_lock_irqsave(sch->lock, flags);
1023		sch_set_cdev(sch, NULL);
1024		spin_unlock_irqrestore(sch->lock, flags);
1025		/* Release initial device reference. */
1026		put_device(&cdev->dev);
1027		goto out_err;
1028	}
1029out:
1030	cdev->private->flags.recog_done = 1;
1031	wake_up(&cdev->private->wait_q);
1032out_err:
1033	/* Release reference for workqueue processing. */
1034	put_device(&cdev->dev);
1035	if (atomic_dec_and_test(&ccw_device_init_count))
1036		wake_up(&ccw_device_init_wq);
1037}
1038
/* Work item: unregister the subchannel a ccw device sits on (which
 * also removes the ccw device). Scheduled by
 * ccw_device_schedule_sch_unregister(), which took a cdev reference. */
static void ccw_device_call_sch_unregister(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release cdev reference for workqueue processing.*/
	put_device(&cdev->dev);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}
1057
/* Schedule unregistration of the subchannel a ccw device sits on.
 * Takes a cdev reference which ccw_device_call_sch_unregister()
 * releases. */
void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
{
	/* Get cdev reference for workqueue processing. */
	if (!get_device(&cdev->dev))
		return;
	PREPARE_WORK(&cdev->private->kick_work,
		     ccw_device_call_sch_unregister);
	queue_work(slow_path_wq, &cdev->private->kick_work);
}
1067
1068/*
1069 * subchannel recognition done. Called from the state machine.
1070 */
1071void
1072io_subchannel_recog_done(struct ccw_device *cdev)
1073{
1074	if (css_init_done == 0) {
1075		cdev->private->flags.recog_done = 1;
1076		return;
1077	}
1078	switch (cdev->private->state) {
1079	case DEV_STATE_BOXED:
1080		/* Device did not respond in time. */
1081	case DEV_STATE_NOT_OPER:
1082		cdev->private->flags.recog_done = 1;
1083		ccw_device_schedule_sch_unregister(cdev);
1084		if (atomic_dec_and_test(&ccw_device_init_count))
1085			wake_up(&ccw_device_init_wq);
1086		break;
1087	case DEV_STATE_OFFLINE:
1088		/*
1089		 * We can't register the device in interrupt context so
1090		 * we schedule a work item.
1091		 */
1092		if (!get_device(&cdev->dev))
1093			break;
1094		PREPARE_WORK(&cdev->private->kick_work,
1095			     io_subchannel_register);
1096		queue_work(slow_path_wq, &cdev->private->kick_work);
1097		break;
1098	}
1099}
1100
/*
 * Start asynchronous device recognition for @cdev on subchannel @sch.
 * Initializes the per-device private data, increments the global
 * recognition counter (decremented again here on failure, otherwise when
 * recognition completes) and starts sensing under the subchannel lock.
 * Returns 0 or the error from ccw_device_recognition().
 */
static int
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;
	struct ccw_device_private *priv;

	/* The device shares the subchannel's lock. */
	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	rc = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (rc) {
		/* Recognition never started - undo the counter bump. */
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
	}
	return rc;
}
1133
/*
 * Work function: move a disconnected/orphaned ccw device to a newly
 * appeared subchannel (priv->sch). On success the former parent
 * subchannel is unregistered; on failure the new subchannel is
 * unregistered instead. Drops the cdev reference taken by the scheduler
 * of this work item and the old-parent reference in all cases.
 */
static void ccw_device_move_to_sch(struct work_struct *work)
{
	struct ccw_device_private *priv;
	int rc;
	struct subchannel *sch;
	struct ccw_device *cdev;
	struct subchannel *former_parent;

	priv = container_of(work, struct ccw_device_private, kick_work);
	sch = priv->sch;
	cdev = priv->cdev;
	former_parent = to_subchannel(cdev->dev.parent);
	/* Get reference for new parent. */
	if (!get_device(&sch->dev))
		return;	/* NOTE(review): old-parent/cdev refs not dropped here - confirm */
	mutex_lock(&sch->reg_mutex);
	/*
	 * Try to move the ccw device to its new subchannel.
	 * Note: device_move() changes cdev->dev.parent
	 */
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
			      "0.%x.%04x failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schid.sch_no, rc);
		css_sch_device_unregister(sch);
		/* Put reference for new parent again. */
		put_device(&sch->dev);
		goto out;
	}
	if (!sch_is_pseudo_sch(former_parent)) {
		/* Detach the device from its old subchannel and retire it. */
		spin_lock_irq(former_parent->lock);
		sch_set_cdev(former_parent, NULL);
		spin_unlock_irq(former_parent->lock);
		css_sch_device_unregister(former_parent);
		/* Reset intparm to zeroes. */
		former_parent->config.intparm = 0;
		cio_commit_config(former_parent);
	}
	sch_attach_device(sch, cdev);
out:
	/* Put reference for old parent. */
	put_device(&former_parent->dev);
	/* Put cdev reference taken for workqueue processing. */
	put_device(&cdev->dev);
}
1182
1183static void io_subchannel_irq(struct subchannel *sch)
1184{
1185	struct ccw_device *cdev;
1186
1187	cdev = sch_get_cdev(sch);
1188
1189	CIO_TRACE_EVENT(6, "IRQ");
1190	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1191	if (cdev)
1192		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1193}
1194
1195void io_subchannel_init_config(struct subchannel *sch)
1196{
1197	memset(&sch->config, 0, sizeof(sch->config));
1198	sch->config.csense = 1;
1199	/* Use subchannel mp mode when there is more than 1 installed CHPID. */
1200	if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
1201		sch->config.mp = 1;
1202}
1203
/*
 * Derive the subchannel's operational path masks and interruption
 * subclass from its schib, then apply the default configuration.
 * The console subchannel gets all paths enabled and its own ISC.
 */
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	/* Last-path-used mask = physically available paths & operational. */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
1221
1222static void io_subchannel_do_unreg(struct work_struct *work)
1223{
1224	struct subchannel *sch;
1225
1226	sch = container_of(work, struct subchannel, work);
1227	css_sch_device_unregister(sch);
1228	put_device(&sch->dev);
1229}
1230
/* Schedule unregister if we have no cdev. */
static void io_subchannel_schedule_removal(struct subchannel *sch)
{
	/* Reference is dropped by io_subchannel_do_unreg(). */
	get_device(&sch->dev);
	INIT_WORK(&sch->work, io_subchannel_do_unreg);
	queue_work(slow_path_wq, &sch->work);
}
1238
1239/*
1240 * Note: We always return 0 so that we bind to the device even on error.
1241 * This is needed so that our remove function is called on unregister.
1242 */
1243static int io_subchannel_probe(struct subchannel *sch)
1244{
1245	struct ccw_device *cdev;
1246	int rc;
1247	unsigned long flags;
1248	struct ccw_dev_id dev_id;
1249
1250	if (cio_is_console(sch->schid)) {
1251		rc = sysfs_create_group(&sch->dev.kobj,
1252					&io_subchannel_attr_group);
1253		if (rc)
1254			CIO_MSG_EVENT(0, "Failed to create io subchannel "
1255				      "attributes for subchannel "
1256				      "0.%x.%04x (rc=%d)\n",
1257				      sch->schid.ssid, sch->schid.sch_no, rc);
1258		/*
1259		 * The console subchannel already has an associated ccw_device.
1260		 * Throw the delayed uevent for the subchannel, register
1261		 * the ccw_device and exit.
1262		 */
1263		dev_set_uevent_suppress(&sch->dev, 0);
1264		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1265		cdev = sch_get_cdev(sch);
1266		cdev->dev.groups = ccwdev_attr_groups;
1267		device_initialize(&cdev->dev);
1268		ccw_device_register(cdev);
1269		/*
1270		 * Check if the device is already online. If it is
1271		 * the reference count needs to be corrected since we
1272		 * didn't obtain a reference in ccw_device_set_online.
1273		 */
1274		if (cdev->private->state != DEV_STATE_NOT_OPER &&
1275		    cdev->private->state != DEV_STATE_OFFLINE &&
1276		    cdev->private->state != DEV_STATE_BOXED)
1277			get_device(&cdev->dev);
1278		return 0;
1279	}
1280	io_subchannel_init_fields(sch);
1281	rc = cio_commit_config(sch);
1282	if (rc)
1283		goto out_schedule;
1284	rc = sysfs_create_group(&sch->dev.kobj,
1285				&io_subchannel_attr_group);
1286	if (rc)
1287		goto out_schedule;
1288	/* Allocate I/O subchannel private data. */
1289	sch->private = kzalloc(sizeof(struct io_subchannel_private),
1290			       GFP_KERNEL | GFP_DMA);
1291	if (!sch->private)
1292		goto out_schedule;
1293	/*
1294	 * First check if a fitting device may be found amongst the
1295	 * disconnected devices or in the orphanage.
1296	 */
1297	dev_id.devno = sch->schib.pmcw.dev;
1298	dev_id.ssid = sch->schid.ssid;
1299	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1300	if (!cdev)
1301		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
1302						     &dev_id);
1303	if (cdev) {
1304		/*
1305		 * Schedule moving the device until when we have a registered
1306		 * subchannel to move to and succeed the probe. We can
1307		 * unregister later again, when the probe is through.
1308		 */
1309		cdev->private->sch = sch;
1310		PREPARE_WORK(&cdev->private->kick_work,
1311			     ccw_device_move_to_sch);
1312		queue_work(slow_path_wq, &cdev->private->kick_work);
1313		return 0;
1314	}
1315	cdev = io_subchannel_create_ccwdev(sch);
1316	if (IS_ERR(cdev))
1317		goto out_schedule;
1318	rc = io_subchannel_recog(cdev, sch);
1319	if (rc) {
1320		spin_lock_irqsave(sch->lock, flags);
1321		io_subchannel_recog_done(cdev);
1322		spin_unlock_irqrestore(sch->lock, flags);
1323	}
1324	return 0;
1325
1326out_schedule:
1327	io_subchannel_schedule_removal(sch);
1328	return 0;
1329}
1330
/*
 * Tear down an I/O subchannel: detach and unregister its ccw device (if
 * any), free the I/O subchannel private data and remove the sysfs group
 * created during probe. Always returns 0.
 */
static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	ccw_device_unregister(cdev);
out_free:
	kfree(sch->private);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}
1351
/*
 * Forward a channel event to the ccw device's driver, if a device is
 * attached. Returns the driver's verdict, or 0 when there is no device.
 */
static int io_subchannel_notify(struct subchannel *sch, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);

	return cdev ? ccw_device_notify(cdev, event) : 0;
}
1361
1362static void io_subchannel_verify(struct subchannel *sch)
1363{
1364	struct ccw_device *cdev;
1365
1366	cdev = sch_get_cdev(sch);
1367	if (cdev)
1368		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1369}
1370
1371static int check_for_io_on_path(struct subchannel *sch, int mask)
1372{
1373	if (cio_update_schib(sch))
1374		return 0;
1375	if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1376		return 1;
1377	return 0;
1378}
1379
/*
 * Terminate internal (cio-initiated) I/O on @sch by clearing the
 * subchannel, then notify the driver with -EIO and request a retry of
 * the interrupted internal operation.
 */
static void terminate_internal_io(struct subchannel *sch,
				  struct ccw_device *cdev)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (cdev->online)
			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		else
			css_schedule_eval(sch->schid);
		return;
	}
	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
	/* Request retry of internal operation. */
	cdev->private->flags.intretry = 1;
	/* Call handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}
1400
/*
 * React to a path becoming unusable: if I/O is pending on that path,
 * kill it (driver I/O) or terminate it (internal I/O); in any case
 * trigger path verification afterwards.
 */
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (check_for_io_on_path(sch, mask)) {
		if (cdev->private->state == DEV_STATE_ONLINE)
			ccw_device_kill_io(cdev);
		else {
			terminate_internal_io(sch, cdev);
			/* Re-start path verification. */
			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		}
	} else
		/* trigger path verification. */
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);

}
1421
/*
 * Handle a channel-path event for this subchannel: adjust the
 * operational (opm) and usable (lpm) path masks and either terminate
 * I/O on the affected path or trigger path verification.
 * Returns 0, or -ENODEV if the subchannel vanished.
 */
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		/* Event does not concern any path of this subchannel. */
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		/* Only paths that are also operational become usable. */
		sch->lpm |= mask & sch->opm;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}
1455
/*
 * Quiesce a subchannel at shutdown: try to disable it, and if I/O is
 * still pending (-EBUSY), notify the driver, cancel/halt/clear the
 * pending operation and wait for the state machine to settle before
 * disabling again. The console subchannel is left untouched.
 */
static void
io_subchannel_shutdown(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = sch_get_cdev(sch);

	if (cio_is_console(sch->schid))
		return;
	if (!sch->schib.pmcw.ena)
		/* Nothing to do. */
		return;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		/* Subchannel is disabled, we're done. */
		return;
	/* NOTE(review): cdev is dereferenced without a NULL check below;
	 * presumably an enabled, busy subchannel always has a cdev - confirm. */
	cdev->private->state = DEV_STATE_QUIESCE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	}
	cio_disable_subchannel(sch);
}
1484
1485static int io_subchannel_get_status(struct subchannel *sch)
1486{
1487	struct schib schib;
1488
1489	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1490		return CIO_GONE;
1491	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1492		return CIO_REVALIDATE;
1493	if (!sch->lpm)
1494		return CIO_NO_PATH;
1495	return CIO_OPER;
1496}
1497
1498static int device_is_disconnected(struct ccw_device *cdev)
1499{
1500	if (!cdev)
1501		return 0;
1502	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1503		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1504}
1505
/*
 * bus_for_each_dev() callback for device recovery: kick path
 * verification on disconnected devices and flag via *@data (redo) that
 * another recovery pass is needed. Always returns 0 so iteration
 * continues over all devices.
 */
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		/* Recovery already in progress - check again later. */
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
1528
/*
 * Recovery work function: run one recovery pass over all ccw devices.
 * If any device is still disconnected, re-arm the recovery timer with
 * an increasing back-off (recovery_delay steps); otherwise recovery
 * ends.
 */
static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			/* Back off, capped at the last delay step. */
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);
1548
/* Timer callback for the device recovery timer. */
static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}
1557
/*
 * (Re-)start device recovery: reset the back-off phase and arm the
 * recovery timer with the shortest delay, unless a phase-0 timer is
 * already pending.
 */
static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
1570
/*
 * bus_for_each_dev() callback for ccw_purge_blacklisted(): schedule
 * unregistration for devices that are both offline and blacklisted.
 * Returns -EINTR to abort the iteration on a pending signal, 0 else.
 */
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_private *priv = cdev->private;
	int unreg;

	spin_lock_irq(cdev->ccwlock);
	unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
		(priv->state == DEV_STATE_OFFLINE);
	spin_unlock_irq(cdev->ccwlock);
	if (!unreg)
		goto out;
	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
		      priv->dev_id.devno);
	ccw_device_schedule_sch_unregister(cdev);

out:
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}
1594
1595/**
1596 * ccw_purge_blacklisted - purge unused, blacklisted devices
1597 *
1598 * Unregister all ccw devices that are offline and on the blacklist.
1599 */
1600int ccw_purge_blacklisted(void)
1601{
1602	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1603	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1604	return 0;
1605}
1606
/*
 * Put @cdev (may be NULL) into the disconnected state: stop its timer,
 * clear the fake-irb flag and, if the device was online, kick the
 * recovery machinery so it gets re-verified later.
 */
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}
1617
/*
 * Put @cdev into the not-operational state: stop its timer and disable
 * the underlying subchannel.
 */
void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
1628
/*
 * Subchannel event handler: re-evaluate a subchannel after a machine
 * check or status change. Disconnected devices are handled on the fast
 * path only, all others on the slow path (-EAGAIN defers to it). Based
 * on the new status the device is unregistered, reprobed or marked
 * disconnected. Note the lock drop/reacquire around
 * css_sch_device_unregister(), which takes the subchannel lock itself.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
	struct ccw_device *cdev;

	spin_lock_irqsave(sch->lock, flags);
	cdev = sch_get_cdev(sch);
	disc = device_is_disconnected(cdev);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only. */
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	if (cdev)
		ccw_device_set_timeout(cdev, 0);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = io_subchannel_get_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Ask driver what to do with device. */
		if (io_subchannel_notify(sch, event))
			action = DISC;
		else
			action = UNREGISTER;
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		ccw_device_set_notoper(cdev);
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);
		break;
	case REPROBE:
		ccw_device_trigger_reprobe(cdev);
		break;
	case DISC:
		ccw_device_set_disconnected(cdev);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}
1715
#ifdef CONFIG_CCW_CONSOLE
/* Statically allocated console device - there is at most one. */
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

/* Return the lock protecting the console ccw device. */
spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}
1727
1728static int ccw_device_console_enable(struct ccw_device *cdev,
1729				     struct subchannel *sch)
1730{
1731	int rc;
1732
1733	/* Attach subchannel private data. */
1734	sch->private = cio_get_console_priv();
1735	memset(sch->private, 0, sizeof(struct io_subchannel_private));
1736	io_subchannel_init_fields(sch);
1737	rc = cio_commit_config(sch);
1738	if (rc)
1739		return rc;
1740	sch->driver = &io_subchannel_driver;
1741	/* Initialize the ccw_device structure. */
1742	cdev->dev.parent= &sch->dev;
1743	rc = io_subchannel_recog(cdev, sch);
1744	if (rc)
1745		return rc;
1746
1747	/* Now wait for the async. recognition to come to an end. */
1748	spin_lock_irq(cdev->ccwlock);
1749	while (!dev_fsm_final_state(cdev))
1750		wait_cons_dev();
1751	rc = -EIO;
1752	if (cdev->private->state != DEV_STATE_OFFLINE)
1753		goto out_unlock;
1754	ccw_device_online(cdev);
1755	while (!dev_fsm_final_state(cdev))
1756		wait_cons_dev();
1757	if (cdev->private->state != DEV_STATE_ONLINE)
1758		goto out_unlock;
1759	rc = 0;
1760out_unlock:
1761	spin_unlock_irq(cdev->ccwlock);
1762	return 0;
1763}
1764
/*
 * Probe and enable the console ccw device. Only one console may exist;
 * a second call returns -EBUSY. On success the statically allocated
 * console_cdev is returned online; on failure an ERR_PTR is returned
 * and the console resources are released.
 */
struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	/* Atomically claim the single console slot. */
	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
1791
static int ccw_device_pm_restore(struct device *dev);

/*
 * Force re-detection of the console device, e.g. after hibernation.
 * Returns -ENODEV if no console device has been set up.
 */
int ccw_device_force_console(void)
{
	if (!console_cdev_in_use)
		return -ENODEV;
	return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
1801#endif
1802
1803/*
1804 * get ccw_device matching the busid, but only if owned by cdrv
1805 */
1806static int
1807__ccwdev_check_busid(struct device *dev, void *id)
1808{
1809	char *bus_id;
1810
1811	bus_id = id;
1812
1813	return (strcmp(bus_id, dev_name(dev)) == 0);
1814}
1815
1816
1817/**
1818 * get_ccwdev_by_busid() - obtain device from a bus id
1819 * @cdrv: driver the device is owned by
1820 * @bus_id: bus id of the device to be searched
1821 *
1822 * This function searches all devices owned by @cdrv for a device with a bus
1823 * id matching @bus_id.
1824 * Returns:
1825 *  If a match is found, its reference count of the found device is increased
1826 *  and it is returned; else %NULL is returned.
1827 */
1828struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1829				       const char *bus_id)
1830{
1831	struct device *dev;
1832	struct device_driver *drv;
1833
1834	drv = get_driver(&cdrv->driver);
1835	if (!drv)
1836		return NULL;
1837
1838	dev = driver_find_device(drv, NULL, (void *)bus_id,
1839				 __ccwdev_check_busid);
1840	put_driver(drv);
1841
1842	return dev ? to_ccwdev(dev) : NULL;
1843}
1844
1845/************************** device driver handling ************************/
1846
1847/* This is the implementation of the ccw_driver class. The probe, remove
1848 * and release methods are initially very similar to the device_driver
1849 * implementations, with the difference that they have ccw_device
1850 * arguments.
1851 *
1852 * A ccw driver also contains the information that is needed for
1853 * device matching.
1854 */
1855static int
1856ccw_device_probe (struct device *dev)
1857{
1858	struct ccw_device *cdev = to_ccwdev(dev);
1859	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1860	int ret;
1861
1862	cdev->drv = cdrv; /* to let the driver call _set_online */
1863
1864	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1865
1866	if (ret) {
1867		cdev->drv = NULL;
1868		return ret;
1869	}
1870
1871	return 0;
1872}
1873
/*
 * Bus remove callback: call the driver's remove hook, take the device
 * offline if it was online (dropping the reference obtained in
 * ccw_device_set_online()), stop its timer and unbind the driver.
 * Always returns 0.
 */
static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}
1903
1904static void ccw_device_shutdown(struct device *dev)
1905{
1906	struct ccw_device *cdev;
1907
1908	cdev = to_ccwdev(dev);
1909	if (cdev->drv && cdev->drv->shutdown)
1910		cdev->drv->shutdown(cdev);
1911	disable_cmf(cdev);
1912}
1913
/*
 * PM prepare callback: refuse to suspend (-EAGAIN) while work is still
 * pending for the device or while it is being set online/offline;
 * otherwise forward to the driver's prepare hook for online devices.
 */
static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->kick_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}
1929
1930static void ccw_device_pm_complete(struct device *dev)
1931{
1932	struct ccw_device *cdev = to_ccwdev(dev);
1933
1934	if (cdev->online && cdev->drv && cdev->drv->complete)
1935		cdev->drv->complete(cdev);
1936}
1937
/*
 * PM freeze callback: let the driver freeze the device, disable channel
 * measurement (so the css stops writing to memory) and finally disable
 * the subchannel to cut off all device driver I/O.
 */
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}
1971
/*
 * PM thaw callback: re-enable the subchannel (allowing device driver
 * I/O again), re-enable channel measurement if it was active, and
 * forward to the driver's thaw hook.
 */
static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}
2000
/*
 * Common restore helper: start device re-recognition after resume (the
 * device may have disappeared or changed while suspended) and wait for
 * it to finish. The resuming flag is set for the duration so the state
 * machine knows not to notify drivers prematurely. The console
 * subchannel skips recognition entirely.
 */
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret;

	if (cio_is_console(sch->schid))
		goto out;
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	spin_lock_irq(sch->lock);
	cdev->private->flags.resuming = 1;
	ret = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (ret) {
		CIO_MSG_EVENT(0, "Couldn't start recognition for device "
			      "0.%x.%04x (ret=%d)\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irq(sch->lock);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		spin_unlock_irq(sch->lock);
		/* notify driver after the resume cb */
		goto out;
	}
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);

out:
	cdev->private->flags.resuming = 0;
}
2033
2034static int resume_handle_boxed(struct ccw_device *cdev)
2035{
2036	cdev->private->state = DEV_STATE_BOXED;
2037	if (ccw_device_notify(cdev, CIO_BOXED))
2038		return 0;
2039	ccw_device_schedule_sch_unregister(cdev);
2040	return -ENODEV;
2041}
2042
2043static int resume_handle_disc(struct ccw_device *cdev)
2044{
2045	cdev->private->state = DEV_STATE_DISCONNECTED;
2046	if (ccw_device_notify(cdev, CIO_GONE))
2047		return 0;
2048	ccw_device_schedule_sch_unregister(cdev);
2049	return -ENODEV;
2050}
2051
/*
 * PM restore callback: re-recognize the device after hibernation,
 * verify it is still the same device (devno and sense data), bring it
 * back online and re-enable channel measurement if needed, then call
 * the driver's restore hook. Error exits: out_disc_unlock (device
 * gone/disconnected), out_unreg_unlock (device replaced - unregister).
 * Note: every *_unlock label is entered with sch->lock held.
 */
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret = 0, cm_enabled;

	__ccw_device_pm_restore(cdev);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		spin_unlock_irq(sch->lock);
		goto out_restore;
	}
	cdev->private->flags.donotify = 0;
	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		spin_unlock_irq(sch->lock);
		if (ret)
			goto out;
		goto out_restore;
	case DEV_STATE_DISCONNECTED:
		goto out_disc_unlock;
	default:
		goto out_unreg_unlock;
	}
	/* check if the device id has changed */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
			      "changed from %04x to %04x)\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      cdev->private->dev_id.devno,
			      sch->schib.pmcw.dev);
		goto out_unreg_unlock;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		/* Different device - rebind it to the matching driver. */
		ccw_device_update_sense_data(cdev);
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unbind_bind);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online) {
		ret = 0;
		goto out_unlock;
	}
	ret = ccw_device_online(cdev);
	if (ret)
		goto out_disc_unlock;

	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);

	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	if (cdev->private->state != DEV_STATE_ONLINE) {
		spin_lock_irq(sch->lock);
		goto out_disc_unlock;
	}
	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret) {
			/* cmf failure is not fatal for the resume. */
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
out:
	return ret;

out_disc_unlock:
	ret = resume_handle_disc(cdev);
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;
	goto out_restore;

out_unreg_unlock:
	ccw_device_schedule_sch_unregister(cdev);
	ret = -ENODEV;
out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}
2145
2146static struct dev_pm_ops ccw_pm_ops = {
2147	.prepare = ccw_device_pm_prepare,
2148	.complete = ccw_device_pm_complete,
2149	.freeze = ccw_device_pm_freeze,
2150	.thaw = ccw_device_pm_thaw,
2151	.restore = ccw_device_pm_restore,
2152};
2153
/* The ccw bus type - connects ccw devices with ccw drivers. */
struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};
2163
2164/**
2165 * ccw_driver_register() - register a ccw driver
2166 * @cdriver: driver to be registered
2167 *
2168 * This function is mainly a wrapper around driver_register().
2169 * Returns:
2170 *   %0 on success and a negative error value on failure.
2171 */
2172int ccw_driver_register(struct ccw_driver *cdriver)
2173{
2174	struct device_driver *drv = &cdriver->driver;
2175
2176	drv->bus = &ccw_bus_type;
2177	drv->name = cdriver->name;
2178	drv->owner = cdriver->owner;
2179
2180	return driver_register(drv);
2181}
2182
2183/**
2184 * ccw_driver_unregister() - deregister a ccw driver
2185 * @cdriver: driver to be deregistered
2186 *
2187 * This function is mainly a wrapper around driver_unregister().
2188 */
2189void ccw_driver_unregister(struct ccw_driver *cdriver)
2190{
2191	driver_unregister(&cdriver->driver);
2192}
2193
2194/* Helper func for qdio. */
2195struct subchannel_id
2196ccw_device_get_subchannel_id(struct ccw_device *cdev)
2197{
2198	struct subchannel *sch;
2199
2200	sch = to_subchannel(cdev->dev.parent);
2201	return sch->schid;
2202}
2203
MODULE_LICENSE("GPL");
/* Public interface of the ccw bus driver. */
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
2213