/* drivers/s390/cio/device.c, revision 390935acac21f3ea1a130bdca8eb9397cb293643 */
/*
 *  drivers/s390/cio/device.c
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/spinlock.h>
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/slab.h>
16#include <linux/list.h>
17#include <linux/device.h>
18#include <linux/workqueue.h>
19#include <linux/timer.h>
20
21#include <asm/ccwdev.h>
22#include <asm/cio.h>
23#include <asm/param.h>		/* HZ */
24#include <asm/cmb.h>
25#include <asm/isc.h>
26
27#include "chp.h"
28#include "cio.h"
29#include "cio_debug.h"
30#include "css.h"
31#include "device.h"
32#include "ioasm.h"
33#include "io_sch.h"
34#include "blacklist.h"
35
36static struct timer_list recovery_timer;
37static DEFINE_SPINLOCK(recovery_lock);
38static int recovery_phase;
39static const unsigned long recovery_delay[] = { 3, 30, 300 };
40
41/******************* bus type handling ***********************/
42
43/* The Linux driver model distinguishes between a bus type and
44 * the bus itself. Of course we only have one channel
45 * subsystem driver and one channel system per machine, but
46 * we still use the abstraction. T.R. says it's a good idea. */
47static int
48ccw_bus_match (struct device * dev, struct device_driver * drv)
49{
50	struct ccw_device *cdev = to_ccwdev(dev);
51	struct ccw_driver *cdrv = to_ccwdrv(drv);
52	const struct ccw_device_id *ids = cdrv->ids, *found;
53
54	if (!ids)
55		return 0;
56
57	found = ccw_device_id_match(ids, &cdev->id);
58	if (!found)
59		return 0;
60
61	cdev->id.driver_info = found->driver_info;
62
63	return 1;
64}
65
66/* Store modalias string delimited by prefix/suffix string into buffer with
67 * specified size. Return length of resulting string (excluding trailing '\0')
68 * even if string doesn't fit buffer (snprintf semantics). */
69static int snprint_alias(char *buf, size_t size,
70			 struct ccw_device_id *id, const char *suffix)
71{
72	int len;
73
74	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
75	if (len > size)
76		return len;
77	buf += len;
78	size -= len;
79
80	if (id->dev_type != 0)
81		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
82				id->dev_model, suffix);
83	else
84		len += snprintf(buf, size, "dtdm%s", suffix);
85
86	return len;
87}
88
89/* Set up environment variables for ccw device uevent. Return 0 on success,
90 * non-zero otherwise. */
91static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
92{
93	struct ccw_device *cdev = to_ccwdev(dev);
94	struct ccw_device_id *id = &(cdev->id);
95	int ret;
96	char modalias_buf[30];
97
98	/* CU_TYPE= */
99	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
100	if (ret)
101		return ret;
102
103	/* CU_MODEL= */
104	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
105	if (ret)
106		return ret;
107
108	/* The next two can be zero, that's ok for us */
109	/* DEV_TYPE= */
110	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
111	if (ret)
112		return ret;
113
114	/* DEV_MODEL= */
115	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
116	if (ret)
117		return ret;
118
119	/* MODALIAS=  */
120	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
121	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
122	return ret;
123}
124
struct bus_type ccw_bus_type;

/* Subchannel driver callbacks, implemented below. */
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);
/* Work queue for ccw device (un)registration work items. */
struct workqueue_struct *ccw_device_work;
/* Woken whenever ccw_device_init_count drops to zero. */
wait_queue_head_t ccw_device_init_wq;
/* Number of ccw devices currently undergoing recognition. */
atomic_t ccw_device_init_count;

/* This driver handles all I/O type subchannels. */
static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);
144
145static int io_subchannel_prepare(struct subchannel *sch)
146{
147	struct ccw_device *cdev;
148	/*
149	 * Don't allow suspend while a ccw device registration
150	 * is still outstanding.
151	 */
152	cdev = sch_get_cdev(sch);
153	if (cdev && !device_is_registered(&cdev->dev))
154		return -EAGAIN;
155	return 0;
156}
157
/* Wait until all outstanding device recognitions have finished, then
 * drain the queued device registration/unregistration work items. */
static void io_subchannel_settle(void)
{
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(ccw_device_work);
}
164
/* The css driver handling I/O subchannels. */
static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = io_subchannel_ids,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};
178
179int __init io_subchannel_init(void)
180{
181	int ret;
182
183	init_waitqueue_head(&ccw_device_init_wq);
184	atomic_set(&ccw_device_init_count, 0);
185	setup_timer(&recovery_timer, recovery_func, 0);
186
187	ccw_device_work = create_singlethread_workqueue("cio");
188	if (!ccw_device_work)
189		return -ENOMEM;
190	slow_path_wq = create_singlethread_workqueue("kslowcrw");
191	if (!slow_path_wq) {
192		ret = -ENOMEM;
193		goto out_err;
194	}
195	if ((ret = bus_register (&ccw_bus_type)))
196		goto out_err;
197
198	ret = css_driver_register(&io_subchannel_driver);
199	if (ret)
200		goto out_err;
201
202	return 0;
203out_err:
204	if (ccw_device_work)
205		destroy_workqueue(ccw_device_work);
206	if (slow_path_wq)
207		destroy_workqueue(slow_path_wq);
208	return ret;
209}
210
211
212/************************ device handling **************************/
213
214/*
215 * A ccw_device has some interfaces in sysfs in addition to the
216 * standard ones.
217 * The following entries are designed to export the information which
218 * resided in 2.4 in /proc/subchannels. Subchannel and device number
219 * are obvious, so they don't have an entry :)
220 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
221 */
222static ssize_t
223chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
224{
225	struct subchannel *sch = to_subchannel(dev);
226	struct chsc_ssd_info *ssd = &sch->ssd_info;
227	ssize_t ret = 0;
228	int chp;
229	int mask;
230
231	for (chp = 0; chp < 8; chp++) {
232		mask = 0x80 >> chp;
233		if (ssd->path_mask & mask)
234			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
235		else
236			ret += sprintf(buf + ret, "00 ");
237	}
238	ret += sprintf (buf+ret, "\n");
239	return min((ssize_t)PAGE_SIZE, ret);
240}
241
242static ssize_t
243pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
244{
245	struct subchannel *sch = to_subchannel(dev);
246	struct pmcw *pmcw = &sch->schib.pmcw;
247
248	return sprintf (buf, "%02x %02x %02x\n",
249			pmcw->pim, pmcw->pam, pmcw->pom);
250}
251
252static ssize_t
253devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
254{
255	struct ccw_device *cdev = to_ccwdev(dev);
256	struct ccw_device_id *id = &(cdev->id);
257
258	if (id->dev_type != 0)
259		return sprintf(buf, "%04x/%02x\n",
260				id->dev_type, id->dev_model);
261	else
262		return sprintf(buf, "n/a\n");
263}
264
265static ssize_t
266cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
267{
268	struct ccw_device *cdev = to_ccwdev(dev);
269	struct ccw_device_id *id = &(cdev->id);
270
271	return sprintf(buf, "%04x/%02x\n",
272		       id->cu_type, id->cu_model);
273}
274
275static ssize_t
276modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
277{
278	struct ccw_device *cdev = to_ccwdev(dev);
279	struct ccw_device_id *id = &(cdev->id);
280	int len;
281
282	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
283
284	return len > PAGE_SIZE ? PAGE_SIZE : len;
285}
286
287static ssize_t
288online_show (struct device *dev, struct device_attribute *attr, char *buf)
289{
290	struct ccw_device *cdev = to_ccwdev(dev);
291
292	return sprintf(buf, cdev->online ? "1\n" : "0\n");
293}
294
/* Return non-zero if @cdev currently hangs off the pseudo subchannel,
 * i.e. it has no real subchannel ("orphaned"). */
int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}
299
/* Remove @cdev from the driver core, but only if it was actually
 * registered before (bit 1 of private->registered serves as the
 * "device_add() done" marker set by ccw_device_register()). */
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (test_and_clear_bit(1, &cdev->private->registered)) {
		device_del(&cdev->dev);
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
308
/* Work item: unregister an orphaned ccw device.  Scheduled by
 * ccw_device_remove_disconnected() on slow_path_wq. */
static void ccw_device_remove_orphan_cb(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	ccw_device_unregister(cdev);
	/* Release cdev reference for workqueue processing. */
	put_device(&cdev->dev);
}
320
/* Remove a ccw device that was forced offline while disconnected.
 * Orphans are unregistered via a work item (we may be running inside a
 * sysfs attribute method); otherwise the whole subchannel is scheduled
 * for deregistration, which takes the device with it. */
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
	unsigned long flags;

	/*
	 * Forced offline in disconnected state means
	 * 'throw away device'.
	 */
	if (ccw_device_is_orphan(cdev)) {
		/*
		 * Deregister ccw device.
		 * Unfortunately, we cannot do this directly from the
		 * attribute method.
		 */
		/* Get cdev reference for workqueue processing. */
		if (!get_device(&cdev->dev))
			return;
		spin_lock_irqsave(cdev->ccwlock, flags);
		cdev->private->state = DEV_STATE_NOT_OPER;
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		PREPARE_WORK(&cdev->private->kick_work,
				ccw_device_remove_orphan_cb);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		/* Deregister subchannel, which will kill the ccw device. */
		ccw_device_schedule_sch_unregister(cdev);
}
349
350/**
351 * ccw_device_set_offline() - disable a ccw device for I/O
352 * @cdev: target ccw device
353 *
354 * This function calls the driver's set_offline() function for @cdev, if
355 * given, and then disables @cdev.
356 * Returns:
357 *   %0 on success and a negative error value on failure.
358 * Context:
359 *  enabled, ccw device lock not held
360 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	/* Let the driver veto/prepare going offline first. */
	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret = ccw_device_offline(cdev);
	if (ret)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	/* Wait for the state machine to finish offline processing. */
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
		      ret, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	/* Force the state machine into the not-operational path. */
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
406
407/**
408 * ccw_device_set_online() - enable a ccw device for I/O
409 * @cdev: target ccw device
410 *
411 * This function first enables @cdev and then calls the driver's set_online()
412 * function for @cdev, if given. If set_online() returns an error, @cdev is
413 * disabled again.
414 * Returns:
415 *   %0 on success and a negative error value on failure.
416 * Context:
417 *  enabled, ccw device lock not held
418 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	/* ret is 0 at this point; the driver callback may overwrite it. */
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;
	cdev->online = 1;
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	/* Report the driver's set_online() error, not the rollback's. */
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
494
495static int online_store_handle_offline(struct ccw_device *cdev)
496{
497	if (cdev->private->state == DEV_STATE_DISCONNECTED)
498		ccw_device_remove_disconnected(cdev);
499	else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500		return ccw_device_set_offline(cdev);
501	return 0;
502}
503
/* Helper for the online attribute: run device recognition first if the
 * device is still boxed, then set the device online. */
static int online_store_recog_and_online(struct ccw_device *cdev)
{
	int ret;

	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_recognition(cdev);
		if (ret) {
			CIO_MSG_EVENT(0, "Couldn't start recognition "
				      "for device 0.%x.%04x (ret=%d)\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			return ret;
		}
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		/* NOTE(review): the return value of ccw_device_set_online()
		 * is ignored here, so online failures are not reported to
		 * the caller - confirm whether this is intentional. */
		ccw_device_set_online(cdev);
	return 0;
}
528
/* Handle writing '1' or "force" to the online attribute.  With @force
 * set, a device still boxed after the first attempt gets a steal-lock
 * operation (ccw_device_stlck()) and a second recognition/online try. */
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		/* cu_type == 0 presumably means the id data is not valid -
		 * force a fresh recognition.  TODO: confirm. */
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}
548
/* Store method of the "online" attribute: accepts "0", "1" and "force".
 * Online/offline processing is serialized per device through the
 * private->onoff atomic. */
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Reject while the device is in a transitional state or another
	 * online/offline operation is already running. */
	if ((cdev->private->state != DEV_STATE_OFFLINE &&
	     cdev->private->state != DEV_STATE_ONLINE &&
	     cdev->private->state != DEV_STATE_BOXED &&
	     cdev->private->state != DEV_STATE_DISCONNECTED) ||
	    atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	/* Keep the driver module alive while its callbacks may run. */
	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	/* NOTE(review): only the first 'count' bytes are compared, so any
	 * prefix of "force\n" (even an empty write) matches the force
	 * path - confirm whether this is intended. */
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
593
/* Show device availability: "good", "boxed", "no path" or "no device". */
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		/* Distinguish "no usable path left" from "device gone". */
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}
618
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

/* Sysfs attributes added to every I/O subchannel. */
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

/* Sysfs attributes added to every ccw device. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};
655
656/* this is a simple abstraction for device_register that sets the
657 * correct bus type and adds the bus specific files */
658static int ccw_device_register(struct ccw_device *cdev)
659{
660	struct device *dev = &cdev->dev;
661	int ret;
662
663	dev->bus = &ccw_bus_type;
664	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
665			   cdev->private->dev_id.devno);
666	if (ret)
667		return ret;
668	ret = device_add(dev);
669	if (ret)
670		return ret;
671
672	set_bit(1, &cdev->private->registered);
673	return ret;
674}
675
676static int match_dev_id(struct device *dev, void *data)
677{
678	struct ccw_device *cdev = to_ccwdev(dev);
679	struct ccw_dev_id *dev_id = data;
680
681	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
682}
683
684static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
685{
686	struct device *dev;
687
688	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
689
690	return dev ? to_ccwdev(dev) : NULL;
691}
692
693void ccw_device_do_unbind_bind(struct work_struct *work)
694{
695	struct ccw_device_private *priv;
696	struct ccw_device *cdev;
697	struct subchannel *sch;
698	int ret;
699
700	priv = container_of(work, struct ccw_device_private, kick_work);
701	cdev = priv->cdev;
702	sch = to_subchannel(cdev->dev.parent);
703
704	if (test_bit(1, &cdev->private->registered)) {
705		device_release_driver(&cdev->dev);
706		ret = device_attach(&cdev->dev);
707		WARN_ON(ret == -ENODEV);
708	}
709}
710
/* Release callback of the embedded struct device: drop the parent
 * subchannel reference taken at initialization time and free the ccw
 * device including its private data. */
static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}
722
723static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
724{
725	struct ccw_device *cdev;
726
727	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
728	if (cdev) {
729		cdev->private = kzalloc(sizeof(struct ccw_device_private),
730					GFP_KERNEL | GFP_DMA);
731		if (cdev->private)
732			return cdev;
733	}
734	kfree(cdev);
735	return ERR_PTR(-ENOMEM);
736}
737
/* Do the first half of device_register() for @cdev below @sch: set up
 * the release callback, parent linkage and sysfs groups, and obtain the
 * parent reference that ccw_device_release() will drop later. */
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->kick_work, NULL);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	return 0;
}
756
/* Allocate and initialize a new ccw device for @sch.  Returns the
 * device or an ERR_PTR() on failure. */
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;

	cdev = io_subchannel_allocate_dev(sch);
	if (IS_ERR(cdev))
		return cdev;
	rc = io_subchannel_initialize_dev(sch, cdev);
	if (rc)
		return ERR_PTR(rc);
	return cdev;
}
770
771static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
772
/* Allocate a ccw device for @sch and start device recognition on it.
 * On failure the subchannel is unregistered again and all references
 * taken so far are dropped. */
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	if (io_subchannel_recog(cdev, sch)) {
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		css_sch_device_unregister(sch);
		/* Put reference from io_subchannel_create_ccwdev(). */
		put_device(&sch->dev);
		/* Give up initial reference. */
		put_device(&cdev->dev);
	}
}
796
/*
 * Register recognized device.  Runs as a work item on slow_path_wq,
 * scheduled from io_subchannel_recog_done().  On exit, the work queue
 * reference on the ccw device is dropped and the recognition counter
 * decremented.
 */
static void
io_subchannel_register(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	/* Release reference for workqueue processing. */
	put_device(&cdev->dev);
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
867
/* Work item: unregister the subchannel a ccw device is attached to,
 * which in turn removes the ccw device itself. */
static void ccw_device_call_sch_unregister(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release cdev reference for workqueue processing.*/
	put_device(&cdev->dev);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}
886
/* Schedule deregistration of the subchannel @cdev sits on via the slow
 * path work queue.  Does nothing if no device reference can be taken. */
void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
{
	/* Get cdev reference for workqueue processing. */
	if (!get_device(&cdev->dev))
		return;
	PREPARE_WORK(&cdev->private->kick_work,
		     ccw_device_call_sch_unregister);
	queue_work(slow_path_wq, &cdev->private->kick_work);
}
896
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		/* css initialization not yet complete; just record that
		 * recognition is done. */
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
		/* fallthrough */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_schedule_sch_unregister(cdev);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		if (!get_device(&cdev->dev))
			break;
		PREPARE_WORK(&cdev->private->kick_work,
			     io_subchannel_register);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		break;
	}
}
929
/* Set up @cdev's private data for @sch and start asynchronous device
 * recognition.  Returns 0 if recognition was started, non-zero
 * otherwise (the recognition counter is undone in that case). */
static int
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;
	struct ccw_device_private *priv;

	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	rc = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (rc) {
		/* Recognition never started - undo the counter increase. */
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
	}
	return rc;
}
962
/* Move @cdev from its current parent subchannel to @sch, taking care
 * of child references for both parents and re-initializing the device/
 * subchannel linkage.  Returns 0 on success or a negative errno. */
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;
	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}
1006
/* Move @cdev to the orphanage, i.e. re-parent it to the channel
 * subsystem's pseudo subchannel. */
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
1014
1015static void io_subchannel_irq(struct subchannel *sch)
1016{
1017	struct ccw_device *cdev;
1018
1019	cdev = sch_get_cdev(sch);
1020
1021	CIO_TRACE_EVENT(6, "IRQ");
1022	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1023	if (cdev)
1024		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1025}
1026
/* Set up the desired subchannel configuration: enable concurrent sense
 * and, where useful, multipath mode. */
void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
	/* Use subchannel mp mode when there is more than 1 installed CHPID. */
	/* pim & (pim - 1) is non-zero iff more than one bit is set. */
	if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
		sch->config.mp = 1;
}
1035
/* Derive the subchannel's path masks (opm/lpm) and interruption
 * subclass from its schib and the channel path state, then set up the
 * subchannel configuration. */
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
1053
/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected since we
		 * didn't obtain a reference in ccw_device_set_online.
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private)
		goto out_schedule;
	/* Trigger subchannel evaluation. */
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	/* Schedule deregistration of the subchannel; still bind (return 0)
	 * so our remove function will run. */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}
1115
/*
 * Counterpart to io_subchannel_probe(): detach and unregister the ccw
 * device (if any) and release the subchannel's private data.
 * Always returns 0.
 */
static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	ccw_device_unregister(cdev);
out_free:
	kfree(sch->private);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}
1136
1137static void io_subchannel_verify(struct subchannel *sch)
1138{
1139	struct ccw_device *cdev;
1140
1141	cdev = sch_get_cdev(sch);
1142	if (cdev)
1143		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1144}
1145
1146static int check_for_io_on_path(struct subchannel *sch, int mask)
1147{
1148	if (cio_update_schib(sch))
1149		return 0;
1150	if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1151		return 1;
1152	return 0;
1153}
1154
/*
 * Abort an internally generated I/O request by clearing the subchannel
 * and report -EIO to the driver's handler. If the clear itself fails,
 * give up on all paths and trigger re-verification (online device) or
 * re-evaluation (offline device) instead.
 */
static void terminate_internal_io(struct subchannel *sch,
				  struct ccw_device *cdev)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (cdev->online)
			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		else
			css_schedule_eval(sch->schid);
		return;
	}
	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
	/* Request retry of internal operation. */
	cdev->private->flags.intretry = 1;
	/* Call handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}
1175
1176static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1177{
1178	struct ccw_device *cdev;
1179
1180	cdev = sch_get_cdev(sch);
1181	if (!cdev)
1182		return;
1183	if (check_for_io_on_path(sch, mask)) {
1184		if (cdev->private->state == DEV_STATE_ONLINE)
1185			ccw_device_kill_io(cdev);
1186		else {
1187			terminate_internal_io(sch, cdev);
1188			/* Re-start path verification. */
1189			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1190		}
1191	} else
1192		/* trigger path verification. */
1193		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1194
1195}
1196
1197static int io_subchannel_chp_event(struct subchannel *sch,
1198				   struct chp_link *link, int event)
1199{
1200	int mask;
1201
1202	mask = chp_ssd_get_mask(&sch->ssd_info, link);
1203	if (!mask)
1204		return 0;
1205	switch (event) {
1206	case CHP_VARY_OFF:
1207		sch->opm &= ~mask;
1208		sch->lpm &= ~mask;
1209		io_subchannel_terminate_path(sch, mask);
1210		break;
1211	case CHP_VARY_ON:
1212		sch->opm |= mask;
1213		sch->lpm |= mask;
1214		io_subchannel_verify(sch);
1215		break;
1216	case CHP_OFFLINE:
1217		if (cio_update_schib(sch))
1218			return -ENODEV;
1219		io_subchannel_terminate_path(sch, mask);
1220		break;
1221	case CHP_ONLINE:
1222		if (cio_update_schib(sch))
1223			return -ENODEV;
1224		sch->lpm |= mask & sch->opm;
1225		io_subchannel_verify(sch);
1226		break;
1227	}
1228	return 0;
1229}
1230
/*
 * Quiesce the subchannel on shutdown: try to disable it and, if I/O is
 * still in progress (-EBUSY), cancel/halt/clear outstanding requests,
 * wait for the device state machine to settle and disable again.
 */
static void
io_subchannel_shutdown(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = sch_get_cdev(sch);

	if (cio_is_console(sch->schid))
		return;
	if (!sch->schib.pmcw.ena)
		/* Nothing to do. */
		return;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		/* Subchannel is disabled, we're done. */
		return;
	/* NOTE(review): cdev is dereferenced below without a NULL check -
	 * presumably a busy, enabled subchannel always has a cdev; confirm. */
	cdev->private->state = DEV_STATE_QUIESCE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Allow some time for the halt/clear to be processed. */
		ccw_device_set_timeout(cdev, HZ/10);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	}
	cio_disable_subchannel(sch);
}
1259
1260static int device_is_disconnected(struct ccw_device *cdev)
1261{
1262	if (!cdev)
1263		return 0;
1264	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1265		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1266}
1267
/*
 * bus_for_each_dev() callback of the recovery mechanism: trigger path
 * verification for disconnected devices. @data points to a flag that is
 * set whenever at least one device is still disconnected, i.e. another
 * recovery pass is needed.
 */
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		/* Check again in a later recovery pass. */
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
1290
1291static void recovery_work_func(struct work_struct *unused)
1292{
1293	int redo = 0;
1294
1295	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1296	if (redo) {
1297		spin_lock_irq(&recovery_lock);
1298		if (!timer_pending(&recovery_timer)) {
1299			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1300				recovery_phase++;
1301			mod_timer(&recovery_timer, jiffies +
1302				  recovery_delay[recovery_phase] * HZ);
1303		}
1304		spin_unlock_irq(&recovery_lock);
1305	} else
1306		CIO_MSG_EVENT(4, "recovery: end\n");
1307}
1308
/* Deferred recovery handler, scheduled from the recovery timer. */
static DECLARE_WORK(recovery_work, recovery_work_func);
1310
/* Recovery timer callback; @data is unused. */
static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}
1319
1320static void ccw_device_schedule_recovery(void)
1321{
1322	unsigned long flags;
1323
1324	CIO_MSG_EVENT(4, "recovery: schedule\n");
1325	spin_lock_irqsave(&recovery_lock, flags);
1326	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1327		recovery_phase = 0;
1328		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1329	}
1330	spin_unlock_irqrestore(&recovery_lock, flags);
1331}
1332
1333static int purge_fn(struct device *dev, void *data)
1334{
1335	struct ccw_device *cdev = to_ccwdev(dev);
1336	struct ccw_device_private *priv = cdev->private;
1337	int unreg;
1338
1339	spin_lock_irq(cdev->ccwlock);
1340	unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
1341		(priv->state == DEV_STATE_OFFLINE);
1342	spin_unlock_irq(cdev->ccwlock);
1343	if (!unreg)
1344		goto out;
1345	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1346		      priv->dev_id.devno);
1347	ccw_device_schedule_sch_unregister(cdev);
1348
1349out:
1350	/* Abort loop in case of pending signal. */
1351	if (signal_pending(current))
1352		return -EINTR;
1353
1354	return 0;
1355}
1356
1357/**
1358 * ccw_purge_blacklisted - purge unused, blacklisted devices
1359 *
1360 * Unregister all ccw devices that are offline and on the blacklist.
1361 */
1362int ccw_purge_blacklisted(void)
1363{
1364	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1365	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1366	return 0;
1367}
1368
/*
 * Mark @cdev as disconnected and, if it was online, schedule the path
 * recovery mechanism. Safe to call with @cdev == NULL (no-op).
 */
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	/* Stop any pending device timeout. */
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}
1379
/*
 * Mark @cdev as not operational: cancel its timeout and disable the
 * underlying subchannel.
 */
void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
1390
/* Actions to take for a subchannel event, decided by sch_get_action()
 * and carried out in io_subchannel_sch_event(). */
enum io_sch_action {
	IO_SCH_UNREG,		/* Unregister the subchannel. */
	IO_SCH_ORPH_UNREG,	/* Move device to orphanage, unregister sch. */
	IO_SCH_ATTACH,		/* Attach or create a ccw device. */
	IO_SCH_UNREG_ATTACH,	/* Unregister old device, attach new one. */
	IO_SCH_ORPH_ATTACH,	/* Orphan old device, attach new one. */
	IO_SCH_REPROBE,		/* Trigger device recognition. */
	IO_SCH_VERIFY,		/* Trigger path verification. */
	IO_SCH_DISC,		/* Mark device disconnected. */
	IO_SCH_NOP,		/* Nothing to do. */
};
1402
/*
 * Inspect the current state of the subchannel and its device and decide
 * which io_sch_action to take. Note: this may notify the device driver
 * (CIO_GONE/CIO_NO_PATH); the chosen action depends on whether the
 * driver wants to keep the device.
 */
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (!ccw_device_notify(cdev, CIO_GONE))
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* Device number changed - a different device is present. */
		if (!ccw_device_notify(cdev, CIO_GONE))
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		/* No usable path left. */
		if (!ccw_device_notify(cdev, CIO_NO_PATH))
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	return IO_SCH_NOP;
}
1435
1436/**
1437 * io_subchannel_sch_event - process subchannel event
1438 * @sch: subchannel
1439 * @process: non-zero if function is called in process context
1440 *
1441 * An unspecified event occurred for this subchannel. Adjust data according
1442 * to the current operational state of the subchannel and device. Return
1443 * zero when the event has been handled sufficiently or -EAGAIN when this
1444 * function should be called again in process context.
1445 */
1446static int io_subchannel_sch_event(struct subchannel *sch, int process)
1447{
1448	unsigned long flags;
1449	struct ccw_device *cdev;
1450	struct ccw_dev_id dev_id;
1451	enum io_sch_action action;
1452	int rc = -EAGAIN;
1453
1454	spin_lock_irqsave(sch->lock, flags);
1455	if (!device_is_registered(&sch->dev))
1456		goto out_unlock;
1457	if (work_pending(&sch->todo_work))
1458		goto out_unlock;
1459	action = sch_get_action(sch);
1460	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1461		      sch->schid.ssid, sch->schid.sch_no, process,
1462		      action);
1463	/* Perform immediate actions while holding the lock. */
1464	cdev = sch_get_cdev(sch);
1465	switch (action) {
1466	case IO_SCH_REPROBE:
1467		/* Trigger device recognition. */
1468		ccw_device_trigger_reprobe(cdev);
1469		rc = 0;
1470		goto out_unlock;
1471	case IO_SCH_VERIFY:
1472		/* Trigger path verification. */
1473		io_subchannel_verify(sch);
1474		rc = 0;
1475		goto out_unlock;
1476	case IO_SCH_DISC:
1477		ccw_device_set_disconnected(cdev);
1478		rc = 0;
1479		goto out_unlock;
1480	case IO_SCH_ORPH_UNREG:
1481	case IO_SCH_ORPH_ATTACH:
1482		ccw_device_set_disconnected(cdev);
1483		break;
1484	case IO_SCH_UNREG_ATTACH:
1485	case IO_SCH_UNREG:
1486		if (cdev)
1487			ccw_device_set_notoper(cdev);
1488		break;
1489	case IO_SCH_NOP:
1490		rc = 0;
1491		goto out_unlock;
1492	default:
1493		break;
1494	}
1495	spin_unlock_irqrestore(sch->lock, flags);
1496	/* All other actions require process context. */
1497	if (!process)
1498		goto out;
1499	/* Handle attached ccw device. */
1500	switch (action) {
1501	case IO_SCH_ORPH_UNREG:
1502	case IO_SCH_ORPH_ATTACH:
1503		/* Move ccw device to orphanage. */
1504		rc = ccw_device_move_to_orph(cdev);
1505		if (rc)
1506			goto out;
1507		break;
1508	case IO_SCH_UNREG_ATTACH:
1509		/* Unregister ccw device. */
1510		ccw_device_unregister(cdev);
1511		break;
1512	default:
1513		break;
1514	}
1515	/* Handle subchannel. */
1516	switch (action) {
1517	case IO_SCH_ORPH_UNREG:
1518	case IO_SCH_UNREG:
1519		css_sch_device_unregister(sch);
1520		break;
1521	case IO_SCH_ORPH_ATTACH:
1522	case IO_SCH_UNREG_ATTACH:
1523	case IO_SCH_ATTACH:
1524		dev_id.ssid = sch->schid.ssid;
1525		dev_id.devno = sch->schib.pmcw.dev;
1526		cdev = get_ccwdev_by_dev_id(&dev_id);
1527		if (!cdev) {
1528			sch_create_and_recog_new_device(sch);
1529			break;
1530		}
1531		rc = ccw_device_move_to_sch(cdev, sch);
1532		if (rc) {
1533			/* Release reference from get_ccwdev_by_dev_id() */
1534			put_device(&cdev->dev);
1535			goto out;
1536		}
1537		spin_lock_irqsave(sch->lock, flags);
1538		ccw_device_trigger_reprobe(cdev);
1539		spin_unlock_irqrestore(sch->lock, flags);
1540		/* Release reference from get_ccwdev_by_dev_id() */
1541		put_device(&cdev->dev);
1542		break;
1543	default:
1544		break;
1545	}
1546	return 0;
1547
1548out_unlock:
1549	spin_unlock_irqrestore(sch->lock, flags);
1550out:
1551	return rc;
1552}
1553
1554#ifdef CONFIG_CCW_CONSOLE
/* Statically allocated console device - there is at most one. */
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
/* Guards against a second console probe; set via xchg(). */
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

/* Return the lock serializing access to the console ccw device. */
spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}
1565
1566static int ccw_device_console_enable(struct ccw_device *cdev,
1567				     struct subchannel *sch)
1568{
1569	int rc;
1570
1571	/* Attach subchannel private data. */
1572	sch->private = cio_get_console_priv();
1573	memset(sch->private, 0, sizeof(struct io_subchannel_private));
1574	io_subchannel_init_fields(sch);
1575	rc = cio_commit_config(sch);
1576	if (rc)
1577		return rc;
1578	sch->driver = &io_subchannel_driver;
1579	/* Initialize the ccw_device structure. */
1580	cdev->dev.parent= &sch->dev;
1581	rc = io_subchannel_recog(cdev, sch);
1582	if (rc)
1583		return rc;
1584
1585	/* Now wait for the async. recognition to come to an end. */
1586	spin_lock_irq(cdev->ccwlock);
1587	while (!dev_fsm_final_state(cdev))
1588		wait_cons_dev();
1589	rc = -EIO;
1590	if (cdev->private->state != DEV_STATE_OFFLINE)
1591		goto out_unlock;
1592	ccw_device_online(cdev);
1593	while (!dev_fsm_final_state(cdev))
1594		wait_cons_dev();
1595	if (cdev->private->state != DEV_STATE_ONLINE)
1596		goto out_unlock;
1597	rc = 0;
1598out_unlock:
1599	spin_unlock_irq(cdev->ccwlock);
1600	return 0;
1601}
1602
/*
 * Set up the statically allocated console ccw device and bring it online.
 * Returns a pointer to the console ccw_device on success or an ERR_PTR
 * (-EBUSY if a console device has already been set up).
 */
struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	/* Only one console device may ever be set up. */
	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		/* Undo console setup and allow another probe attempt. */
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
1629
1630static int ccw_device_pm_restore(struct device *dev);
1631
/*
 * Force the console ccw device back into operation by running the power
 * management restore path on it. Returns -ENODEV if no console device
 * has been set up.
 */
int ccw_device_force_console(void)
{
	if (!console_cdev_in_use)
		return -ENODEV;
	return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
1639#endif
1640
1641/*
1642 * get ccw_device matching the busid, but only if owned by cdrv
1643 */
1644static int
1645__ccwdev_check_busid(struct device *dev, void *id)
1646{
1647	char *bus_id;
1648
1649	bus_id = id;
1650
1651	return (strcmp(bus_id, dev_name(dev)) == 0);
1652}
1653
1654
1655/**
1656 * get_ccwdev_by_busid() - obtain device from a bus id
1657 * @cdrv: driver the device is owned by
1658 * @bus_id: bus id of the device to be searched
1659 *
1660 * This function searches all devices owned by @cdrv for a device with a bus
1661 * id matching @bus_id.
1662 * Returns:
1663 *  If a match is found, its reference count of the found device is increased
1664 *  and it is returned; else %NULL is returned.
1665 */
1666struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1667				       const char *bus_id)
1668{
1669	struct device *dev;
1670	struct device_driver *drv;
1671
1672	drv = get_driver(&cdrv->driver);
1673	if (!drv)
1674		return NULL;
1675
1676	dev = driver_find_device(drv, NULL, (void *)bus_id,
1677				 __ccwdev_check_busid);
1678	put_driver(drv);
1679
1680	return dev ? to_ccwdev(dev) : NULL;
1681}
1682
1683/************************** device driver handling ************************/
1684
1685/* This is the implementation of the ccw_driver class. The probe, remove
1686 * and release methods are initially very similar to the device_driver
1687 * implementations, with the difference that they have ccw_device
1688 * arguments.
1689 *
1690 * A ccw driver also contains the information that is needed for
1691 * device matching.
1692 */
1693static int
1694ccw_device_probe (struct device *dev)
1695{
1696	struct ccw_device *cdev = to_ccwdev(dev);
1697	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1698	int ret;
1699
1700	cdev->drv = cdrv; /* to let the driver call _set_online */
1701
1702	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1703
1704	if (ret) {
1705		cdev->drv = NULL;
1706		return ret;
1707	}
1708
1709	return 0;
1710}
1711
/*
 * Bus remove callback: run the driver's remove function, then take the
 * device offline (if it was online) and wait for the state machine to
 * settle. Drops the reference obtained in ccw_device_set_online().
 * Always returns 0.
 */
static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}
1741
1742static void ccw_device_shutdown(struct device *dev)
1743{
1744	struct ccw_device *cdev;
1745
1746	cdev = to_ccwdev(dev);
1747	if (cdev->drv && cdev->drv->shutdown)
1748		cdev->drv->shutdown(cdev);
1749	disable_cmf(cdev);
1750}
1751
1752static int ccw_device_pm_prepare(struct device *dev)
1753{
1754	struct ccw_device *cdev = to_ccwdev(dev);
1755
1756	if (work_pending(&cdev->private->kick_work))
1757		return -EAGAIN;
1758	/* Fail while device is being set online/offline. */
1759	if (atomic_read(&cdev->private->onoff))
1760		return -EAGAIN;
1761
1762	if (cdev->online && cdev->drv && cdev->drv->prepare)
1763		return cdev->drv->prepare(cdev);
1764
1765	return 0;
1766}
1767
1768static void ccw_device_pm_complete(struct device *dev)
1769{
1770	struct ccw_device *cdev = to_ccwdev(dev);
1771
1772	if (cdev->online && cdev->drv && cdev->drv->complete)
1773		cdev->drv->complete(cdev);
1774}
1775
/*
 * PM freeze callback: give the driver a chance to freeze, stop channel
 * measurements (so the css does not write to memory) and disable the
 * subchannel so no new driver I/O can be started.
 */
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}
1809
/*
 * PM thaw callback: re-enable the subchannel (allowing driver I/O again),
 * re-enable channel measurements if they were active, then let the
 * driver thaw.
 */
static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}
1838
/*
 * Common restore helper: re-run device recognition after resume and wait
 * for it to finish. Sets the 'resuming' flag for the duration; on failure
 * to start recognition the device is marked disconnected (the driver is
 * notified later, after its resume callback).
 */
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret;

	if (cio_is_console(sch->schid))
		goto out;
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	spin_lock_irq(sch->lock);
	cdev->private->flags.resuming = 1;
	ret = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (ret) {
		CIO_MSG_EVENT(0, "Couldn't start recognition for device "
			      "0.%x.%04x (ret=%d)\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irq(sch->lock);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		spin_unlock_irq(sch->lock);
		/* notify driver after the resume cb */
		goto out;
	}
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);

out:
	cdev->private->flags.resuming = 0;
}
1871
1872static int resume_handle_boxed(struct ccw_device *cdev)
1873{
1874	cdev->private->state = DEV_STATE_BOXED;
1875	if (ccw_device_notify(cdev, CIO_BOXED))
1876		return 0;
1877	ccw_device_schedule_sch_unregister(cdev);
1878	return -ENODEV;
1879}
1880
1881static int resume_handle_disc(struct ccw_device *cdev)
1882{
1883	cdev->private->state = DEV_STATE_DISCONNECTED;
1884	if (ccw_device_notify(cdev, CIO_GONE))
1885		return 0;
1886	ccw_device_schedule_sch_unregister(cdev);
1887	return -ENODEV;
1888}
1889
/*
 * PM restore callback: re-run device recognition and bring the device
 * back online if it was online before hibernation. Handles devices that
 * disappeared, got boxed, changed their device number or changed their
 * sense data while the system was down.
 */
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret = 0, cm_enabled;

	__ccw_device_pm_restore(cdev);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		/* Console: just re-enable the subchannel and notify. */
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		spin_unlock_irq(sch->lock);
		goto out_restore;
	}
	cdev->private->flags.donotify = 0;
	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		spin_unlock_irq(sch->lock);
		if (ret)
			goto out;
		goto out_restore;
	case DEV_STATE_DISCONNECTED:
		goto out_disc_unlock;
	default:
		goto out_unreg_unlock;
	}
	/* check if the device id has changed */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
			      "changed from %04x to %04x)\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      cdev->private->dev_id.devno,
			      sch->schib.pmcw.dev);
		goto out_unreg_unlock;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		/* Different device: rebind asynchronously via kick_work. */
		ccw_device_update_sense_data(cdev);
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unbind_bind);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online) {
		ret = 0;
		goto out_unlock;
	}
	ret = ccw_device_online(cdev);
	if (ret)
		goto out_disc_unlock;

	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);

	/* Wait (unlocked) for the online transition to finish. */
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	if (cdev->private->state != DEV_STATE_ONLINE) {
		spin_lock_irq(sch->lock);
		goto out_disc_unlock;
	}
	if (cm_enabled) {
		/* Re-enable channel measurements; failure is non-fatal. */
		ret = ccw_set_cmf(cdev, 1);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
out:
	return ret;

out_disc_unlock:
	/* Called with sch->lock held; resume_handle_disc() needs it. */
	ret = resume_handle_disc(cdev);
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;
	goto out_restore;

out_unreg_unlock:
	ccw_device_schedule_sch_unregister(cdev);
	ret = -ENODEV;
out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}
1983
1984static struct dev_pm_ops ccw_pm_ops = {
1985	.prepare = ccw_device_pm_prepare,
1986	.complete = ccw_device_pm_complete,
1987	.freeze = ccw_device_pm_freeze,
1988	.thaw = ccw_device_pm_thaw,
1989	.restore = ccw_device_pm_restore,
1990};
1991
/* The ccw bus type: matches drivers via ccw_device_id tables and wires up
 * uevent, probe/remove, shutdown and power management callbacks. */
struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};
2001
2002/**
2003 * ccw_driver_register() - register a ccw driver
2004 * @cdriver: driver to be registered
2005 *
2006 * This function is mainly a wrapper around driver_register().
2007 * Returns:
2008 *   %0 on success and a negative error value on failure.
2009 */
2010int ccw_driver_register(struct ccw_driver *cdriver)
2011{
2012	struct device_driver *drv = &cdriver->driver;
2013
2014	drv->bus = &ccw_bus_type;
2015	drv->name = cdriver->name;
2016	drv->owner = cdriver->owner;
2017
2018	return driver_register(drv);
2019}
2020
2021/**
2022 * ccw_driver_unregister() - deregister a ccw driver
2023 * @cdriver: driver to be deregistered
2024 *
2025 * This function is mainly a wrapper around driver_unregister().
2026 */
2027void ccw_driver_unregister(struct ccw_driver *cdriver)
2028{
2029	driver_unregister(&cdriver->driver);
2030}
2031
2032/* Helper func for qdio. */
2033struct subchannel_id
2034ccw_device_get_subchannel_id(struct ccw_device *cdev)
2035{
2036	struct subchannel *sch;
2037
2038	sch = to_subchannel(cdev->dev.parent);
2039	return sch->schid;
2040}
2041
2042MODULE_LICENSE("GPL");
2043EXPORT_SYMBOL(ccw_device_set_online);
2044EXPORT_SYMBOL(ccw_device_set_offline);
2045EXPORT_SYMBOL(ccw_driver_register);
2046EXPORT_SYMBOL(ccw_driver_unregister);
2047EXPORT_SYMBOL(get_ccwdev_by_busid);
2048EXPORT_SYMBOL(ccw_bus_type);
2049EXPORT_SYMBOL(ccw_device_work);
2050EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
2051