/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"

struct after_state_chg_work {
	struct drbd_work w;
	struct drbd_device *device;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};

static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn);

static inline bool is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}

bool conn_all_vols_unconf(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = true;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.disk != D_DISKLESS ||
		    device->state.conn != C_STANDALONE ||
		    device->state.role != R_SECONDARY) {
			rv = false;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

/* Unfortunately the states were not correctly ordered when they were
   defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}

static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}

enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		role = max_role(role, device->state.role);
	}
	rcu_read_unlock();

	return role;
}

enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		peer = max_role(peer, device->state.peer);
	}
	rcu_read_unlock();

	return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
	enum drbd_conns conn = C_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		conn = min_t(enum drbd_conns, conn, device->state.conn);
	}
	rcu_read_unlock();

	return conn;
}

static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;
	bool rv = true;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
			rv = false;
			break;
		}
	rcu_read_unlock();

	return rv;
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @device:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_device *device,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
		(os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
}
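
/*
 * Examples (derived from the checks above): promoting to Primary while
 * connected, entering StartingSyncS/T, detaching (disk -> Failed) while
 * connected, disconnecting, or starting online verify all need cluster
 * wide agreement, while a purely local change such as attaching while
 * StandAlone does not.
 */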

static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}
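
/*
 * Sketch of the mask/val convention (illustrative): a caller that only
 * wants to change the connection state builds a mask with all .conn
 * bits set and a val carrying the new value, e.g. via the NS() helper
 * from drbd_int.h, which expands to such a mask/val pair:
 *
 *	ns = apply_mask_val(os, NS(conn, C_DISCONNECTING));
 *
 * All fields not covered by mask.i are taken unchanged from os.
 */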

enum drbd_state_rv
drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	ns = apply_mask_val(drbd_read_state(device), mask, val);
	rv = _drbd_set_state(device, ns, f, NULL);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_device *device,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(device, CS_HARD, mask, val);
}
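
/*
 * Typical use (illustrative): a backing device I/O error is a fact the
 * state machine may not refuse, so it is imposed as a hard change:
 *
 *	drbd_force_state(device, NS(disk, D_FAILED));
 */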

static enum drbd_state_rv
_req_st_cond(struct drbd_device *device, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv >= SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */

	if (!cl_wide_st_chg(device, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(device, ns);
		if (rv >= SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
			if (rv >= SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_device *device, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(device->state_mutex);

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(device, os, ns)) {
		rv = is_valid_state(device, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(first_peer_device(device), mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		wait_event(device->state_wait,
			(rv = _req_st_cond(device, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&device->resource->req_lock, flags);
		ns = apply_mask_val(drbd_read_state(device), mask, val);
		rv = _drbd_set_state(device, ns, f, &done);
	} else {
		rv = _drbd_set_state(device, ns, f, &done);
	}

	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(device->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_device *device, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(device->state_wait,
		   (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
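
/*
 * Sketch of a caller (illustrative, not part of this file): a user
 * triggered promotion would be requested as
 *
 *	rv = _drbd_request_state(device, NS(role, R_PRIMARY),
 *				 CS_VERBOSE | CS_WAIT_COMPLETE | CS_SERIALIZE);
 *
 * and may come back with e.g. SS_TWO_PRIMARIES or SS_NO_UP_TO_DATE_DISK
 * from the validation functions below.
 */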

static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
{
	drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_device *device, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(device, " state", os);
	print_st(device, "wanted", ns);
}

static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp;
	pbp = pb;
	*pbp = 0;

	if (ns.role != os.role && flags & CS_DC_ROLE)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer && flags & CS_DC_PEER)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && flags & CS_DC_CONN)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk && flags & CS_DC_DISK)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));

	return pbp - pb;
}
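
/*
 * A rendered state change message looks like, for example:
 *
 *	role( Secondary -> Primary ) disk( Inconsistent -> UpToDate )
 *
 * Only fields whose CS_DC_* bit is set in flags are included.
 */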

static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);

	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);

	if (pbp != pb)
		drbd_info(device, "%s\n", pb);
}

static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags);

	if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));

	if (pbp != pb)
		drbd_info(connection, "%s\n", pb);
}

/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @device:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_device *device, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;
	struct net_conf *nc;

	rcu_read_lock();
	fp = FP_DONT_CARE;
	if (get_ldev(device)) {
		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
		put_ldev(device);
	}

	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (nc) {
		if (!nc->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && device->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (nc->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  first_peer_device(device)->connection->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
		 ns.pdsk == D_UNKNOWN)
		rv = SS_NEED_CONNECTION;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	rcu_read_unlock();

	return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * @os:		old state.
 * @ns:		new state.
 * @connection:	DRBD connection.
 *
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
	   rv = SS_IN_TRANSIENT_STATE; */

	/* While establishing a connection only allow cstate to change.
	   Delay/refuse role changes, detach attach etc... */
	if (test_bit(STATE_SENT, &connection->flags) &&
	    !(os.conn == C_WF_REPORT_PARAMS ||
	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
	    os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
		rv = SS_OUTDATE_WO_CONN;

	return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	/* no change -> nothing to do, at least for the connection part */
	if (oc == nc)
		return SS_NOTHING_TO_DO;

	/* disconnect of an unconfigured connection does not make sense */
	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
		return SS_ALREADY_STANDALONE;

	/* from C_STANDALONE, we start with C_UNCONNECTED */
	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
		return SS_NEED_CONNECTION;

	/* When establishing a connection we need to go through WF_REPORT_PARAMS!
	   Necessary to do the right thing upon invalidate-remote on a disconnected resource */
	if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
		return SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		return SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		return SS_IN_TRANSIENT_STATE;

	return SS_SUCCESS;
}
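
/*
 * Taken together, the rules above enforce this connection life cycle
 * (sketch):
 *
 *	C_STANDALONE -> C_UNCONNECTED -> C_WF_CONNECTION
 *		-> C_WF_REPORT_PARAMS -> C_CONNECTED and beyond,
 *
 * and after a network error (C_TIMEOUT .. C_TEAR_DOWN) only
 * C_UNCONNECTED (retry) or C_DISCONNECTING -> C_STANDALONE.
 */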

/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * @os:		old state.
 * @ns:		new state.
 *
 * This limits hard state transitions. Hard state transitions are facts
 * that are imposed on DRBD by the environment, e.g. the disk broke or the
 * network broke down. But even those hard state transitions are not
 * allowed to do everything.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	return rv;
}

static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
{
	static const char *msg_table[] = {
		[NO_WARNING] = "",
		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
		[ABORTED_RESYNC] = "Resync aborted.",
		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
	};

	if (warn != NO_WARNING)
		drbd_warn(device, "%s\n", msg_table[warn]);
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @device:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn:	optional warning, reported back to the caller for logging.
 *
 * When we lose connection, we have to set the state of the peer's disk
 * (pdsk) to D_UNKNOWN. This rule and many more along those lines are in
 * this function.
 */
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	if (warn)
		*warn = NO_WARNING;

	fp = FP_DONT_CARE;
	if (get_ldev(device)) {
		rcu_read_lock();
		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
		rcu_read_unlock();
		put_ldev(device);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn)
			*warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(device, D_NEGOTIATING)) {
		if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = device->new_state_tmp.disk;
			ns.pdsk = device->new_state_tmp.pdsk;
		} else {
			if (warn)
				*warn = CONNECTION_LOST_NEGOTIATING;
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(device);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_DISK;
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_PDSK;
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
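
/*
 * Worked example (illustrative): starting from
 * { cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate }, a hard
 * transition to conn = C_TIMEOUT additionally sets peer = R_UNKNOWN,
 * peer_isp = 0 and pdsk = D_UNKNOWN, since nothing is known about the
 * peer without a connection; with fencing "resource-and-stonith"
 * (FP_STONITH) it also sets susp_fen = 1 while the fence-peer handler
 * runs.
 */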

void drbd_resume_al(struct drbd_device *device)
{
	if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
		drbd_info(device, "Resumed AL updates\n");
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
{
	if (first_peer_device(device)->connection->agreed_pro_version < 90)
		device->ov_start_sector = 0;
	device->rs_total = drbd_bm_bits(device);
	device->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		device->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
		if (bit >= device->rs_total) {
			device->ov_start_sector =
				BM_BIT_TO_SECT(device->rs_total - 1);
			device->rs_total = 1;
		} else
			device->rs_total -= bit;
		device->ov_position = device->ov_start_sector;
	}
	device->ov_left = device->rs_total;
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @device:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion; completed after after_state_ch() has finished.
 *
 * Caller needs to hold req_lock and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_device *device, union drbd_state ns,
	         enum chg_state_flags flags, struct completion *done)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	enum sanitize_state_warnings ssw;
	struct after_state_chg_work *ascw;

	os = drbd_read_state(device);

	ns = sanitize_state(device, os, ns, &ssw);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(device, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(device, os) == rv)
				rv = is_valid_soft_transition(os, ns, connection);
		} else
			rv = is_valid_soft_transition(os, ns, connection);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(device, os, ns, rv);
		return rv;
	}

	print_sanitize_warnings(device, ssw);

	drbd_pr_state_change(device, os, ns, flags);

	/* Display changes to the susp* flags that were caused by the call to
	   sanitize_state(). Only display it here if we were not called from
	   _conn_request_state() */
	if (!(flags & CS_DC_SUSP))
		conn_pr_state_change(connection, os, ns,
				     (flags & ~CS_DC_MASK) | CS_DC_SUSP);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&device->local_cnt);

	if (!is_sync_state(os.conn) && is_sync_state(ns.conn))
		clear_bit(RS_DONE, &device->flags);

	/* changes to local_cnt and device flags should be visible before
	 * changes to state, which again should be visible before anything else
	 * depending on that change happens. */
	smp_wmb();
	device->state.i = ns.i;
	device->resource->susp = ns.susp;
	device->resource->susp_nod = ns.susp_nod;
	device->resource->susp_fen = ns.susp_fen;
	smp_wmb();

	/* put replicated vs not-replicated requests in separate epochs */
	if (drbd_should_do_remote((union drbd_dev_state)os.i) !=
	    drbd_should_do_remote((union drbd_dev_state)ns.i))
		start_new_tl_epoch(connection);

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(device, "attached to UUIDs");

	/* Wake up role changes that were delayed while the connection
	   was being established */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
	    no_peer_wf_report_params(connection))
		clear_bit(STATE_SENT, &connection->flags);

	wake_up(&device->misc_wait);
	wake_up(&device->state_wait);
	wake_up(&connection->ping_wait);

	/* Aborted verify run, or we reached the stop sector.
	 * Log the last position, unless end-of-device. */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn <= C_CONNECTED) {
		device->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
		if (device->ov_left)
			drbd_info(device, "Online Verify reached sector %llu\n",
				(unsigned long long)device->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		drbd_info(device, "Syncer continues.\n");
		device->rs_paused += (long)jiffies
				  -(long)device->rs_mark_time[device->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&device->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		drbd_info(device, "Resync suspended\n");
		device->rs_mark_time[device->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(device, ns.conn);
		device->rs_start = now;
		device->rs_last_events = 0;
		device->rs_last_sect_ev = 0;
		device->ov_last_oos_size = 0;
		device->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			device->rs_mark_left[i] = device->ov_left;
			device->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(device);

		if (ns.conn == C_VERIFY_S) {
			drbd_info(device, "Starting Online Verify from sector %llu\n",
					(unsigned long long)device->ov_position);
			mod_timer(&device->resync_timer, jiffies);
		}
	}

	if (get_ldev(device)) {
		u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		mdf &= ~MDF_AL_CLEAN;
		if (test_bit(CRASHED_PRIMARY, &device->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (device->state.role == R_PRIMARY ||
		    (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (device->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (device->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (device->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != device->ldev->md.flags) {
			device->ldev->md.flags = mdf;
			drbd_md_mark_dirty(device);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
		put_ldev(device);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &device->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&connection->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&connection->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_WF_CONNECTION &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&connection->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
		drbd_resume_al(device);
		connection->connect_cnt++;
	}

	/* remember last attach time so request_timer_fn() won't
	 * kill newly established sessions while we are still trying to thaw
	 * previously frozen IO */
	if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
	    ns.disk > D_NEGOTIATING)
		device->last_reattach_jif = jiffies;

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->device = device;
		ascw->done = done;
		drbd_queue_work(&connection->sender_work,
				&ascw->w);
	} else {
		drbd_err(device, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_device *device = ascw->device;

	after_state_ch(device, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE)
		complete(ascw->done);
	kfree(ascw);

	return 0;
}

static void abw_start_sync(struct drbd_device *device, int rv)
{
	if (rv) {
		drbd_err(device, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (device->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(device, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	/* open coded non-blocking drbd_suspend_io(device); */
	set_bit(SUSPEND_IO, &device->flags);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	drbd_resume_io(device);

	return rv;
}
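
/*
 * Used from worker context in after_state_ch() below, e.g. to flush
 * the bitmap to stable storage on demote:
 *
 *	drbd_bitmap_io_from_worker(device, &drbd_bm_write,
 *			"demote", BM_LOCKED_TEST_ALLOWED);
 */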

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @device:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	struct sib_info sib;

	sib.sib_reason = SIB_STATE_CHANGE;
	sib.os = os;
	sib.ns = ns;

	if ((os.disk != D_UP_TO_DATE || os.pdsk != D_UP_TO_DATE)
	&&  (ns.disk == D_UP_TO_DATE && ns.pdsk == D_UP_TO_DATE)) {
		clear_bit(CRASHED_PRIMARY, &device->flags);
		if (device->p_uuid)
			device->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	/* Inform userspace about the change... */
	drbd_bcast_event(device, &sib);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(device, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (ns.susp_nod) {
		enum drbd_req_event what = NOTHING;

		spin_lock_irq(&device->resource->req_lock);
		if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
			what = RESEND;

		if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
		    conn_lowest_disk(connection) > D_NEGOTIATING)
			what = RESTART_FROZEN_DISK_IO;

		if (resource->susp_nod && what != NOTHING) {
			_tl_restart(connection, what);
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_nod = 1 } },
					    (union drbd_state) { { .susp_nod = 0 } },
					    CS_VERBOSE);
		}
		spin_unlock_irq(&device->resource->req_lock);
	}

	if (ns.susp_fen) {
		spin_lock_irq(&device->resource->req_lock);
		if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
			/* case2: The connection was established again: */
			struct drbd_peer_device *peer_device;
			int vnr;

			rcu_read_lock();
			idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
				clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
			rcu_read_unlock();
			_tl_restart(connection, RESEND);
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
		}
		spin_unlock_irq(&device->resource->req_lock);
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    connection->agreed_pro_version >= 96 && get_ldev(device)) {
		drbd_gen_and_send_sync_uuid(peer_device);
		put_ldev(device);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS &&
	    ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) {      /* attach on the peer */
		/* we probably will start a resync soon.
		 * make sure those things are properly reset. */
		device->rs_total = 0;
		device->rs_failed = 0;
		atomic_set(&device->rs_pending_cnt, 0);
		drbd_rs_cancel_all(device);

		drbd_send_uuids(peer_device);
		drbd_send_state(peer_device, ns);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    device->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(device)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (drbd_suspended(device)) {
					set_bit(NEW_CUR_UUID, &device->flags);
				} else {
					drbd_uuid_new_current(device);
					drbd_send_uuids(peer_device);
				}
			}
			put_ldev(device);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
		if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
		    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
			drbd_uuid_new_current(device);
			drbd_send_uuids(peer_device);
		}
		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(device, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(device);
	}

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
		device->state.conn <= C_CONNECTED && get_ldev(device)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(device, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(device);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(peer_device, 0, 0);  /* to start sync... */
		drbd_send_uuids(peer_device);
		drbd_send_state(peer_device, ns);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(peer_device, ns);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(device);

	/* Make sure the peer gets informed about state changes (ISP bits)
	   that happened while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(peer_device, ns);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(peer_device, ns);

	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(device,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh = EP_PASS_ON;
		int was_io_error = 0;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS.
		 * But it is still not safe to dereference ldev here, since
		 * we might come from a failed Attach before ldev was set. */
		if (device->ldev) {
			rcu_read_lock();
			eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
			rcu_read_unlock();

			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);

			if (was_io_error && eh == EP_CALL_HELPER)
				drbd_khelper(device, "local-io-error");

			/* Immediately allow completion of all application IO,
			 * that waits for completion from the local disk,
			 * if this was a force-detach due to disk_timeout
			 * or administrator request (drbdsetup detach --force).
			 * Do NOT abort otherwise.
			 * Aborting local requests may cause serious problems,
			 * if requests are completed to upper layers already,
			 * and then later the already submitted local bio completes.
			 * This can cause DMA into former bio pages that meanwhile
			 * have been re-used for other things.
			 * So aborting local requests may cause crashes,
			 * or even worse, silent data corruption.
			 */
			if (test_and_clear_bit(FORCE_DETACH, &device->flags))
				tl_abort_disk_io(device);

			/* current state still has to be D_FAILED,
			 * there is only one way out: to D_DISKLESS,
			 * and that may only happen after our put_ldev below. */
			if (device->state.disk != D_FAILED)
				drbd_err(device,
					"ASSERT FAILED: disk is %s during detach\n",
					drbd_disk_str(device->state.disk));

			if (ns.conn >= C_CONNECTED)
				drbd_send_state(peer_device, ns);

			drbd_rs_cancel_all(device);

			/* In case we want to get something to stable storage still,
			 * this may be the last chance.
			 * Following put_ldev may transition to D_DISKLESS. */
			drbd_md_sync(device);
		}
		put_ldev(device);
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (device->state.disk != D_DISKLESS)
			drbd_err(device,
				 "ASSERT FAILED: disk is %s while going diskless\n",
				 drbd_disk_str(device->state.disk));

		if (ns.conn >= C_CONNECTED)
			drbd_send_state(peer_device, ns);
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(device);
	}

	/* Notify peer that I had a local IO error, and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
		drbd_send_state(peer_device, ns);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(device);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(device);

	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(peer_device, ns);

	/* Verify finished, or reached stop sector.  Peer did not know about
	 * the stop sector, and we may even have changed the stop sector during
	 * verify to interrupt/stop early.  Send the new state. */
	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
	&& verify_can_do_stop_sector(device))
		drbd_send_state(peer_device, ns);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase.
	 */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) {
		drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
		put_ldev(device);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(device);
	}

	drbd_md_sync(device);
}
1541
1542struct after_conn_state_chg_work {
1543	struct drbd_work w;
1544	enum drbd_conns oc;
1545	union drbd_state ns_min;
1546	union drbd_state ns_max; /* new, max state, over all devices */
1547	enum chg_state_flags flags;
1548	struct drbd_connection *connection;
1549};
1550
1551static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1552{
1553	struct after_conn_state_chg_work *acscw =
1554		container_of(w, struct after_conn_state_chg_work, w);
1555	struct drbd_connection *connection = acscw->connection;
1556	enum drbd_conns oc = acscw->oc;
1557	union drbd_state ns_max = acscw->ns_max;
1558	struct drbd_peer_device *peer_device;
1559	int vnr;
1560
1561	kfree(acscw);
1562
1563	/* Upon network configuration, we need to start the receiver */
1564	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
1565		drbd_thread_start(&connection->receiver);
1566
1567	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
1568		struct net_conf *old_conf;
1569
1570		mutex_lock(&connection->resource->conf_update);
1571		old_conf = connection->net_conf;
1572		connection->my_addr_len = 0;
1573		connection->peer_addr_len = 0;
1574		RCU_INIT_POINTER(connection->net_conf, NULL);
1575		conn_free_crypto(connection);
1576		mutex_unlock(&connection->resource->conf_update);
1577
1578		synchronize_rcu();
1579		kfree(old_conf);
1580	}
1581
1582	if (ns_max.susp_fen) {
1583		/* case1: The outdate peer handler is successful: */
1584		if (ns_max.pdsk <= D_OUTDATED) {
1585			rcu_read_lock();
1586			idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1587				struct drbd_device *device = peer_device->device;
1588				if (test_bit(NEW_CUR_UUID, &device->flags)) {
1589					drbd_uuid_new_current(device);
1590					clear_bit(NEW_CUR_UUID, &device->flags);
1591				}
1592			}
1593			rcu_read_unlock();
1594			spin_lock_irq(&connection->resource->req_lock);
1595			_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
1596			_conn_request_state(connection,
1597					    (union drbd_state) { { .susp_fen = 1 } },
1598					    (union drbd_state) { { .susp_fen = 0 } },
1599					    CS_VERBOSE);
1600			spin_unlock_irq(&connection->resource->req_lock);
1601		}
1602	}
1603	kref_put(&connection->kref, drbd_destroy_connection);
1604
1605	conn_md_sync(connection);
1606
1607	return 0;
1608}
1609
1610static void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
1611{
1612	enum chg_state_flags flags = ~0;
1613	struct drbd_peer_device *peer_device;
1614	int vnr, first_vol = 1;
1615	union drbd_dev_state os, cs = {
1616		{ .role = R_SECONDARY,
1617		  .peer = R_UNKNOWN,
1618		  .conn = connection->cstate,
1619		  .disk = D_DISKLESS,
1620		  .pdsk = D_UNKNOWN,
1621		} };
1622
1623	rcu_read_lock();
1624	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1625		struct drbd_device *device = peer_device->device;
1626		os = device->state;
1627
1628		if (first_vol) {
1629			cs = os;
1630			first_vol = 0;
1631			continue;
1632		}
1633
1634		if (cs.role != os.role)
1635			flags &= ~CS_DC_ROLE;
1636
1637		if (cs.peer != os.peer)
1638			flags &= ~CS_DC_PEER;
1639
1640		if (cs.conn != os.conn)
1641			flags &= ~CS_DC_CONN;
1642
1643		if (cs.disk != os.disk)
1644			flags &= ~CS_DC_DISK;
1645
1646		if (cs.pdsk != os.pdsk)
1647			flags &= ~CS_DC_PDSK;
1648	}
1649	rcu_read_unlock();
1650
1651	*pf |= CS_DC_MASK;
1652	*pf &= flags;
1653	(*pcs).i = cs.i;
1654}
1655
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		os = drbd_read_state(device);
		ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);

		if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
			rv = is_valid_state(device, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(device, os) == rv)
					rv = is_valid_soft_transition(os, ns, connection);
			} else
				rv = is_valid_soft_transition(os, ns, connection);
		}

		if (rv < SS_SUCCESS) {
			if (flags & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

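/* Apply the state change @mask/@val to every volume of @connection and
 * report back the element-wise minimum (*pns_min) and maximum (*pns_max)
 * of the resulting per-volume states.  Also updates the connection's
 * cstate if the change touches the connection state. */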
static void
conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
	union drbd_state ns, os, ns_max = { };
	union drbd_state ns_min = {
		{ .role = R_MASK,
		  .peer = R_MASK,
		  .conn = val.conn,
		  .disk = D_MASK,
		  .pdsk = D_MASK
		} };
	struct drbd_peer_device *peer_device;
	enum drbd_state_rv rv;
	int vnr, number_of_volumes = 0;

	if (mask.conn == C_MASK) {
		/* remember last connect time so request_timer_fn() won't
		 * kill newly established sessions while we are still trying to thaw
		 * previously frozen IO */
		if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
			connection->last_reconnect_jif = jiffies;

		connection->cstate = val.conn;
	}

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		number_of_volumes++;
		os = drbd_read_state(device);
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(device, os, ns, NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		rv = __drbd_set_state(device, ns, flags, NULL);
		BUG_ON(rv < SS_SUCCESS);

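		/* Track the element-wise maximum and minimum of all volume
		 * states; these summarize the connection-wide result of the
		 * state change for the caller. */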
		ns.i = device->state.i;
		ns_max.role = max_role(ns.role, ns_max.role);
		ns_max.peer = max_role(ns.peer, ns_max.peer);
		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);

		ns_min.role = min_role(ns.role, ns_min.role);
		ns_min.peer = min_role(ns.peer, ns_min.peer);
		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
	}
	rcu_read_unlock();

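	/* A connection without any volumes still needs a well-defined
	 * summary state; fall back to the defaults. */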
	if (number_of_volumes == 0) {
		ns_min = ns_max = (union drbd_state) { {
				.role = R_SECONDARY,
				.peer = R_UNKNOWN,
				.conn = val.conn,
				.disk = D_DISKLESS,
				.pdsk = D_UNKNOWN
			} };
	}

	ns_min.susp = ns_max.susp = connection->resource->susp;
	ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
	ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;

	*pns_min = ns_min;
	*pns_max = ns_max;
}

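/* Condition function for the cluster-wide state change wait below:
 * returns SS_CW_SUCCESS or SS_CW_FAILED_BY_PEER once the peer has
 * answered, a validation error if the transition became invalid in the
 * meantime, or SS_UNKNOWN_ERROR to keep waiting. */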
static enum drbd_state_rv
_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
		rv = SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
		rv = SS_CW_FAILED_BY_PEER;

	err = conn_is_valid_transition(connection, mask, val, 0);
	if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
		return rv;

	return err;
}

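/* Request a connection-wide state change.  Must be called with the
 * resource's req_lock held; for a cluster-wide change the lock is
 * temporarily dropped while the request is sent to the peer, and
 * re-acquired before returning. */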
enum drbd_state_rv
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = connection->cstate;
	union drbd_state ns_max, ns_min, os;
	bool have_mutex = false;

	if (mask.conn) {
		rv = is_valid_conn_transition(oc, val.conn);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	rv = conn_is_valid_transition(connection, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {

		/* This will be a cluster-wide state change.
		 * Need to give up the spinlock, grab the mutex,
		 * then send the state change request, ... */
		spin_unlock_irq(&connection->resource->req_lock);
		mutex_lock(&connection->cstate_mutex);
		have_mutex = true;

		set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
		if (conn_send_state_req(connection, mask, val)) {
			/* sending failed. */
			clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
			rv = SS_CW_FAILED_BY_PEER;
			/* need to re-acquire the spin lock, though */
			goto abort_unlocked;
		}

		if (val.conn == C_DISCONNECTING)
			set_bit(DISCONNECT_SENT, &connection->flags);

		/* ... and re-acquire the spinlock.
		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
		 * conn_set_state() within the same spinlock. */
		spin_lock_irq(&connection->resource->req_lock);
		wait_event_lock_irq(connection->ping_wait,
				(rv = _conn_rq_cond(connection, mask, val)),
				connection->resource->req_lock);
		clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	conn_old_common_state(connection, &os, &flags);
	flags |= CS_DC_SUSP;
	conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
	conn_pr_state_change(connection, os, ns_max, flags);

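	/* Queue the after-state-change work; its callbacks block and must
	 * run from worker context, outside of the req_lock. */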
	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->ns_min = ns_min;
		acscw->ns_max = ns_max;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		kref_get(&connection->kref);
		acscw->connection = connection;
		drbd_queue_work(&connection->sender_work, &acscw->w);
	} else {
		drbd_err(connection, "Could not kmalloc an acscw\n");
	}

 abort:
	if (have_mutex) {
		/* mutex_unlock() "... must not be used in interrupt context.",
		 * so give up the spinlock, then re-acquire it */
		spin_unlock_irq(&connection->resource->req_lock);
 abort_unlocked:
		mutex_unlock(&connection->cstate_mutex);
		spin_lock_irq(&connection->resource->req_lock);
	}
	if (rv < SS_SUCCESS && (flags & CS_VERBOSE)) {
		drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
		drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
		drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
	}
	return rv;
}

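/* Locked wrapper around _conn_request_state().  A typical call, e.g.
 * when forcing a disconnect (assuming the NS() mask/val convenience
 * macro from drbd_int.h), looks like:
 *
 *	conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
 */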
enum drbd_state_rv
conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&connection->resource->req_lock);
	rv = _conn_request_state(connection, mask, val, flags);
	spin_unlock_irq(&connection->resource->req_lock);

	return rv;
}
