1 // SPDX-License-Identifier: MIT or LGPL-2.1-only
2
3 #include <config.h>
4
5 #include "ublksrv_priv.h"
6
7 #define CTRL_DEV "/dev/ublk-control"
8
9 #define CTRL_CMD_HAS_DATA 1
10 #define CTRL_CMD_HAS_BUF 2
11 #define CTRL_CMD_NO_TRANS 4
12
/*
 * In-memory description of one command sent to /dev/ublk-control.
 *
 * ublksrv_ctrl_init_cmd() copies these fields into the SQE's inline
 * command area according to the CTRL_CMD_* bits in @flags.
 */
struct ublksrv_ctrl_cmd_data {
	unsigned int cmd_op;	/* UBLK_CMD_* or ioctl-encoded UBLK_U_CMD_* opcode */
	unsigned short flags;	/* CTRL_CMD_HAS_DATA / _HAS_BUF / _NO_TRANS */
	unsigned short _pad;

	__u64 data[1];		/* inline payload (e.g. daemon pid, queue id) */
	__u16 dev_path_len;	/* bytes of char-dev path prefixed in the @addr buffer */
	__u16 pad;
	__u32 reserved;

	__u64 addr;		/* userspace buffer address passed to the driver */
	__u32 len;		/* length of the buffer at @addr */
};
26
/*
 * For an unprivileged device the driver authenticates the caller via the
 * char-device path ("/dev/ublkcN"), so the command buffer must carry that
 * path.  This macro declares a stack buffer 'buf' in the ENCLOSING scope
 * and, when the device is unprivileged, fills it with the path and points
 * data.addr/len at it.
 *
 * NOTE(review): 'buf' lives on the caller's stack; this is only safe
 * because __ublksrv_ctrl_cmd() submits and waits synchronously before the
 * caller returns.  The macro also injects the identifier 'buf' into the
 * caller's scope — callers must not declare their own 'buf'.
 */
#define ublk_un_privileged_prep_data(dev, data) \
	char buf[UBLKC_PATH_MAX]; \
	if (ublk_is_unprivileged(dev)) { \
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV, \
				dev->dev_info.dev_id); \
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA; \
		data.len = sizeof(buf); \
		data.dev_path_len = UBLKC_PATH_MAX; \
		data.addr = (__u64)buf; \
	}
37
/*
 * Map a legacy control opcode (plain small integer) to its ioctl-encoded
 * UBLK_U_CMD_* counterpart, indexed by the legacy value itself.
 */
static const unsigned int ctrl_cmd_op[] = {
	[UBLK_CMD_GET_QUEUE_AFFINITY] = UBLK_U_CMD_GET_QUEUE_AFFINITY,
	[UBLK_CMD_GET_DEV_INFO] = UBLK_U_CMD_GET_DEV_INFO,
	[UBLK_CMD_ADD_DEV] = UBLK_U_CMD_ADD_DEV,
	[UBLK_CMD_DEL_DEV] = UBLK_U_CMD_DEL_DEV,
	[UBLK_CMD_START_DEV] = UBLK_U_CMD_START_DEV,
	[UBLK_CMD_STOP_DEV] = UBLK_U_CMD_STOP_DEV,
	[UBLK_CMD_SET_PARAMS] = UBLK_U_CMD_SET_PARAMS,
	[UBLK_CMD_GET_PARAMS] = UBLK_U_CMD_GET_PARAMS,
	[UBLK_CMD_START_USER_RECOVERY] = UBLK_U_CMD_START_USER_RECOVERY,
	[UBLK_CMD_END_USER_RECOVERY] = UBLK_U_CMD_END_USER_RECOVERY,
	[UBLK_CMD_GET_DEV_INFO2] = UBLK_U_CMD_GET_DEV_INFO2,
};
51
/*
 * Translate a legacy opcode into its ioctl encoding via ctrl_cmd_op[].
 *
 * The asserts document the precondition that @op really is a legacy
 * (non-ioctl-encoded) opcode within the table's index range; they are
 * compiled out under NDEBUG, in which case an out-of-range @op would
 * read past the table.
 */
static unsigned int legacy_op_to_ioctl(unsigned int op)
{
	assert(_IOC_TYPE(op) == 0);
	assert(_IOC_DIR(op) == 0);
	assert(_IOC_SIZE(op) == 0);
	assert(op >= UBLK_CMD_GET_QUEUE_AFFINITY &&
			op <= UBLK_CMD_GET_DEV_INFO2);

	return ctrl_cmd_op[op];
}
62
63
64 /*******************ctrl dev operation ********************************/
/*******************ctrl dev operation ********************************/
/*
 * Fill @sqe with the control command described by @data.
 *
 * The ublksrv_ctrl_cmd payload lives in the SQE's inline command area
 * (requires 128-byte SQEs, see ublksrv_setup_ring()).  Unless the caller
 * set CTRL_CMD_NO_TRANS, a legacy opcode is translated to its ioctl
 * encoding when the device advertises UBLK_F_CMD_IOCTL_ENCODE.
 */
static inline void ublksrv_ctrl_init_cmd(struct ublksrv_ctrl_dev *dev,
		struct io_uring_sqe *sqe,
		struct ublksrv_ctrl_cmd_data *data)
{
	struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	struct ublksrv_ctrl_cmd *cmd = (struct ublksrv_ctrl_cmd *)ublksrv_get_sqe_cmd(sqe);
	unsigned int cmd_op = data->cmd_op;

	sqe->fd = dev->ctrl_fd;
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->ioprio = 0;

	if (data->flags & CTRL_CMD_HAS_BUF) {
		cmd->addr = data->addr;
		cmd->len = data->len;
	}

	if (data->flags & CTRL_CMD_HAS_DATA) {
		cmd->data[0] = data->data[0];
		cmd->dev_path_len = data->dev_path_len;
	}

	cmd->dev_id = info->dev_id;
	/* -1: the command targets the whole device, not a single queue */
	cmd->queue_id = -1;

	if (!(data->flags & CTRL_CMD_NO_TRANS) &&
			(info->flags & UBLK_F_CMD_IOCTL_ENCODE))
		cmd_op = legacy_op_to_ioctl(cmd_op);
	ublksrv_set_sqe_cmd_op(sqe, cmd_op);

	/* user_data carries the cmd pointer, only used for debug logging */
	io_uring_sqe_set_data(sqe, cmd);

	ublk_ctrl_dbg(UBLK_DBG_CTRL_CMD, "dev %d cmd_op %x/%x, user_data %p\n",
			dev->dev_info.dev_id, data->cmd_op, cmd_op, cmd);
}
100
__ublksrv_ctrl_cmd(struct ublksrv_ctrl_dev * dev,struct ublksrv_ctrl_cmd_data * data)101 static int __ublksrv_ctrl_cmd(struct ublksrv_ctrl_dev *dev,
102 struct ublksrv_ctrl_cmd_data *data)
103 {
104 struct io_uring_sqe *sqe;
105 struct io_uring_cqe *cqe;
106 int ret = -EINVAL;
107
108 sqe = io_uring_get_sqe(&dev->ring);
109 if (!sqe) {
110 fprintf(stderr, "can't get sqe ret %d\n", ret);
111 return ret;
112 }
113
114 ublksrv_ctrl_init_cmd(dev, sqe, data);
115
116 ret = io_uring_submit(&dev->ring);
117 if (ret < 0) {
118 fprintf(stderr, "uring submit ret %d\n", ret);
119 return ret;
120 }
121
122 ret = io_uring_wait_cqe(&dev->ring, &cqe);
123 if (ret < 0) {
124 fprintf(stderr, "wait cqe: %s\n", strerror(-ret));
125 return ret;
126 }
127 io_uring_cqe_seen(&dev->ring, cqe);
128
129 ublk_ctrl_dbg(UBLK_DBG_CTRL_CMD, "dev %d, ctrl cqe res %d, user_data %llx\n",
130 dev->dev_info.dev_id, cqe->res, cqe->user_data);
131 return cqe->res;
132 }
133
/*
 * Tear down a control device created by ublksrv_ctrl_init(): close the
 * control ring and control fd, release the cached per-queue cpusets and
 * the device object itself.  @dev must not be used afterwards.
 */
void ublksrv_ctrl_deinit(struct ublksrv_ctrl_dev *dev)
{
	close(dev->ring.ring_fd);
	close(dev->ctrl_fd);
	free(dev->queues_cpuset);
	free(dev);
}
141
ublksrv_ctrl_init(struct ublksrv_dev_data * data)142 struct ublksrv_ctrl_dev *ublksrv_ctrl_init(struct ublksrv_dev_data *data)
143 {
144 struct ublksrv_ctrl_dev *dev = (struct ublksrv_ctrl_dev *)calloc(1,
145 sizeof(*dev));
146 struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
147 int ret;
148
149 dev->ctrl_fd = open(CTRL_DEV, O_RDWR);
150 if (dev->ctrl_fd < 0) {
151 fprintf(stderr, "control dev %s can't be opened: %m\n", CTRL_DEV);
152 exit(dev->ctrl_fd);
153 }
154
155 /* -1 means we ask ublk driver to allocate one free to us */
156 info->dev_id = data->dev_id;
157 info->nr_hw_queues = data->nr_hw_queues;
158 info->queue_depth = data->queue_depth;
159 info->max_io_buf_bytes = data->max_io_buf_bytes;
160 info->flags = data->flags;
161 info->ublksrv_flags = data->ublksrv_flags;
162
163 dev->run_dir = data->run_dir;
164 dev->tgt_type = data->tgt_type;
165 dev->tgt_ops = data->tgt_ops;
166 dev->tgt_argc = data->tgt_argc;
167 dev->tgt_argv = data->tgt_argv;
168
169 /* 32 is enough to send ctrl commands */
170 ret = ublksrv_setup_ring(&dev->ring, 32, 32, IORING_SETUP_SQE128);
171 if (ret < 0) {
172 fprintf(stderr, "queue_init: %s\n", strerror(-ret));
173 free(dev);
174 return NULL;
175 }
176
177 return dev;
178 }
179
/* queues_cpuset is only used for setting up queue pthread daemon */
/*
 * Query the CPU affinity of every hardware queue from the driver and
 * cache the result in ctrl_dev->queues_cpuset.
 *
 * For an unprivileged device each per-queue record in @buf is prefixed
 * with the char-dev path (UBLKC_PATH_MAX bytes) the driver uses for
 * permission checking, so the record stride is (path_len + cpu_set_t).
 *
 * NOTE(review): queues_cpuset is stored as a flat cpu_set_t array; for
 * the unprivileged layout consumers presumably account for the path
 * prefix in each record — confirm against the readers of queues_cpuset.
 * NOTE(review): calling this twice overwrites the previous allocation
 * without freeing it.
 *
 * Returns 0 on success, -ENOMEM or a negative driver error on failure.
 */
int ublksrv_ctrl_get_affinity(struct ublksrv_ctrl_dev *ctrl_dev)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_GET_QUEUE_AFFINITY,
		.flags	= CTRL_CMD_HAS_DATA | CTRL_CMD_HAS_BUF,
	};
	unsigned char *buf;
	int i, ret;
	int len;
	int path_len;

	if (ublk_is_unprivileged(ctrl_dev))
		path_len = UBLKC_PATH_MAX;
	else
		path_len = 0;

	len = (sizeof(cpu_set_t) + path_len) * ctrl_dev->dev_info.nr_hw_queues;
	buf = malloc(len);

	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ctrl_dev->dev_info.nr_hw_queues; i++) {
		/* data[0] selects the queue being queried */
		data.data[0] = i;
		data.dev_path_len = path_len;
		data.len = sizeof(cpu_set_t) + path_len;
		data.addr = (__u64)&buf[i * data.len];

		if (path_len)
			snprintf((char *)data.addr, UBLKC_PATH_MAX, "%s%d",
					UBLKC_DEV, ctrl_dev->dev_info.dev_id);

		ret = __ublksrv_ctrl_cmd(ctrl_dev, &data);
		if (ret < 0) {
			free(buf);
			return ret;
		}
	}
	ctrl_dev->queues_cpuset = (cpu_set_t *)buf;

	return 0;
}
223
224 /*
225 * Start the ublksrv device:
226 *
227 * 1) fork a daemon for handling IO command from driver
228 *
229 * 2) wait for the device becoming ready: the daemon should submit
230 * sqes to /dev/ublkcN, just like usb's urb usage, each request needs
231 * one sqe. If one IO request comes to kernel driver of /dev/ublkbN,
232 * the sqe for this request is completed, and the daemon gets notified.
233 * When every io request of driver gets its own sqe queued, we think
234 * /dev/ublkbN is ready to start
235 *
 * 3) in current process context, send START_DEV command to
237 * /dev/ublk-control with device id, which will cause ublk driver to
238 * expose /dev/ublkbN
239 */
ublksrv_ctrl_start_dev(struct ublksrv_ctrl_dev * ctrl_dev,int daemon_pid)240 int ublksrv_ctrl_start_dev(struct ublksrv_ctrl_dev *ctrl_dev,
241 int daemon_pid)
242 {
243 struct ublksrv_ctrl_cmd_data data = {
244 .cmd_op = UBLK_CMD_START_DEV,
245 .flags = CTRL_CMD_HAS_DATA,
246 };
247 int ret;
248
249 ublk_un_privileged_prep_data(ctrl_dev, data);
250
251 ctrl_dev->dev_info.ublksrv_pid = data.data[0] = daemon_pid;
252
253 ret = __ublksrv_ctrl_cmd(ctrl_dev, &data);
254
255 return ret;
256 }
257
258 /*
259 * Stop the ublksrv device:
260 *
261 * 1) send STOP_DEV command to /dev/ublk-control with device id provided
262 *
263 * 2) ublk driver gets this command, freeze /dev/ublkbN, then complete all
264 * pending seq, meantime tell the daemon via cqe->res to not submit sqe
265 * any more, since we are being closed. Also delete /dev/ublkbN.
266 *
267 * 3) the ublk daemon figures out that all sqes are completed, and free,
268 * then close /dev/ublkcN and exit itself.
269 */
__ublksrv_ctrl_add_dev(struct ublksrv_ctrl_dev * dev,unsigned cmd_op)270 static int __ublksrv_ctrl_add_dev(struct ublksrv_ctrl_dev *dev, unsigned cmd_op)
271 {
272 struct ublksrv_ctrl_cmd_data data = {
273 .cmd_op = cmd_op,
274 .flags = CTRL_CMD_HAS_BUF | CTRL_CMD_NO_TRANS,
275 .addr = (__u64)&dev->dev_info,
276 .len = sizeof(struct ublksrv_ctrl_dev_info),
277 };
278
279 return __ublksrv_ctrl_cmd(dev, &data);
280 }
281
ublksrv_ctrl_add_dev(struct ublksrv_ctrl_dev * dev)282 int ublksrv_ctrl_add_dev(struct ublksrv_ctrl_dev *dev)
283 {
284 int ret = __ublksrv_ctrl_add_dev(dev, UBLK_U_CMD_ADD_DEV);
285
286 if (ret < 0)
287 return __ublksrv_ctrl_add_dev(dev, UBLK_CMD_ADD_DEV);
288
289 return ret;
290 }
291
ublksrv_ctrl_del_dev_async(struct ublksrv_ctrl_dev * dev)292 int ublksrv_ctrl_del_dev_async(struct ublksrv_ctrl_dev *dev)
293 {
294 struct ublksrv_ctrl_cmd_data data = {
295 .cmd_op = UBLK_U_CMD_DEL_DEV_ASYNC,
296 .flags = CTRL_CMD_NO_TRANS,
297 };
298
299 ublk_un_privileged_prep_data(dev, data);
300
301 return __ublksrv_ctrl_cmd(dev, &data);
302 }
303
ublksrv_ctrl_del_dev(struct ublksrv_ctrl_dev * dev)304 int ublksrv_ctrl_del_dev(struct ublksrv_ctrl_dev *dev)
305 {
306 struct ublksrv_ctrl_cmd_data data = {
307 .cmd_op = UBLK_CMD_DEL_DEV,
308 .flags = 0,
309 };
310
311 ublk_un_privileged_prep_data(dev, data);
312
313 return __ublksrv_ctrl_cmd(dev, &data);
314 }
315
/*
 * Fetch dev_info with @cmd_op sent verbatim (no opcode translation).
 *
 * For GET_DEV_INFO2 the command buffer layout is
 * [char-dev path (UBLKC_PATH_MAX)][ublksrv_ctrl_dev_info]; on success
 * the driver-filled dev_info is copied back out of that buffer.  Plain
 * GET_DEV_INFO carries no path and is rejected here for unprivileged
 * devices, which the driver only allows to use GET_DEV_INFO2.
 */
static int __ublksrv_ctrl_get_info_no_trans(struct ublksrv_ctrl_dev *dev,
		unsigned cmd_op)
{
	char buf[UBLKC_PATH_MAX + sizeof(dev->dev_info)];
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= cmd_op,
		.flags	= CTRL_CMD_HAS_BUF | CTRL_CMD_NO_TRANS,
		.addr = (__u64)&dev->dev_info,
		.len = sizeof(struct ublksrv_ctrl_dev_info),
	};
	bool has_dev_path = false;
	int ret;

	/* _IOC_NR() extracts the legacy opcode from either encoding */
	if (ublk_is_unprivileged(dev) && _IOC_NR(data.cmd_op) == UBLK_CMD_GET_DEV_INFO)
		return -EINVAL;

	if (_IOC_NR(data.cmd_op) == UBLK_CMD_GET_DEV_INFO2) {
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV,
				dev->dev_info.dev_id);
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA;
		data.len = sizeof(buf);
		data.dev_path_len = UBLKC_PATH_MAX;
		data.addr = (__u64)buf;
		has_dev_path = true;
	}

	ret = __ublksrv_ctrl_cmd(dev, &data);
	/* dev_info sits after the path prefix in the local buffer */
	if (ret >= 0 && has_dev_path)
		memcpy(&dev->dev_info, &buf[UBLKC_PATH_MAX],
				sizeof(dev->dev_info));
	return ret;
}
348
/*
 * Fetch dev_info, preferring the ioctl-encoded opcode and retrying with
 * the legacy encoding when the kernel rejects it.
 */
static int __ublksrv_ctrl_get_info(struct ublksrv_ctrl_dev *dev,
		unsigned cmd_op)
{
	int ret = __ublksrv_ctrl_get_info_no_trans(dev,
			legacy_op_to_ioctl(cmd_op));

	if (ret >= 0)
		return ret;

	/* fallback to legacy command opcode if ioctl encoding fails */
	return __ublksrv_ctrl_get_info_no_trans(dev, cmd_op);
}
364
365 /*
366 * Deal with userspace/kernel compatibility
367 *
368 * 1) if kernel is capable of handling UBLK_F_UNPRIVILEGED_DEV,
369 * - ublksrv supports UBLK_F_UNPRIVILEGED_DEV
370 * ublksrv should send UBLK_CMD_GET_DEV_INFO2, given anytime unprivileged
371 * application needs to query devices it owns, when the application has
372 * no idea if UBLK_F_UNPRIVILEGED_DEV is set given the capability info
373 * is stateless, and application always get it via control command
374 *
375 * - ublksrv doesn't support UBLK_F_UNPRIVILEGED_DEV
376 * UBLK_CMD_GET_DEV_INFO is always sent to kernel, and the feature of
377 * UBLK_F_UNPRIVILEGED_DEV isn't available for user
378 *
379 * 2) if kernel isn't capable of handling UBLK_F_UNPRIVILEGED_DEV
380 * - ublksrv supports UBLK_F_UNPRIVILEGED_DEV
381 * UBLK_CMD_GET_DEV_INFO2 is tried first, and will be failed, then
382 * UBLK_CMD_GET_DEV_INFO is retried given UBLK_F_UNPRIVILEGED_DEV
383 * can't be set
384 *
385 * - ublksrv doesn't support UBLK_F_UNPRIVILEGED_DEV
386 * UBLK_CMD_GET_DEV_INFO is always sent to kernel, and the feature of
387 * UBLK_F_UNPRIVILEGED_DEV isn't available for user
388 *
389 */
ublksrv_ctrl_get_info(struct ublksrv_ctrl_dev * dev)390 int ublksrv_ctrl_get_info(struct ublksrv_ctrl_dev *dev)
391 {
392 int ret;
393
394 unsigned cmd_op =
395 #ifdef UBLK_CMD_GET_DEV_INFO2
396 UBLK_CMD_GET_DEV_INFO2;
397 #else
398 UBLK_CMD_GET_DEV_INFO;
399 #endif
400 ret = __ublksrv_ctrl_get_info(dev, cmd_op);
401
402 if (cmd_op == UBLK_CMD_GET_DEV_INFO)
403 return ret;
404
405 if (ret < 0) {
406 /* unprivileged does support GET_DEV_INFO2 */
407 if (ublk_is_unprivileged(dev))
408 return ret;
409 /*
410 * fallback to GET_DEV_INFO since driver may not support
411 * GET_DEV_INFO2
412 */
413 ret = __ublksrv_ctrl_get_info(dev, UBLK_CMD_GET_DEV_INFO);
414 }
415
416 return ret;
417 }
418
ublksrv_ctrl_stop_dev(struct ublksrv_ctrl_dev * dev)419 int ublksrv_ctrl_stop_dev(struct ublksrv_ctrl_dev *dev)
420 {
421 struct ublksrv_ctrl_cmd_data data = {
422 .cmd_op = UBLK_CMD_STOP_DEV,
423 };
424 int ret;
425
426 ublk_un_privileged_prep_data(dev, data);
427
428 ret = __ublksrv_ctrl_cmd(dev, &data);
429 return ret;
430 }
431
ublksrv_dev_state_desc(struct ublksrv_ctrl_dev * dev)432 static const char *ublksrv_dev_state_desc(struct ublksrv_ctrl_dev *dev)
433 {
434 switch (dev->dev_info.state) {
435 case UBLK_S_DEV_DEAD:
436 return "DEAD";
437 case UBLK_S_DEV_LIVE:
438 return "LIVE";
439 case UBLK_S_DEV_QUIESCED:
440 return "QUIESCED";
441 default:
442 return "UNKNOWN";
443 };
444 }
445
ublksrv_ctrl_dump(struct ublksrv_ctrl_dev * dev,const char * jbuf)446 void ublksrv_ctrl_dump(struct ublksrv_ctrl_dev *dev, const char *jbuf)
447 {
448 struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
449 int i, ret;
450 struct ublk_params p;
451
452 ret = ublksrv_ctrl_get_params(dev, &p);
453 if (ret < 0) {
454 fprintf(stderr, "failed to get params %m\n");
455 return;
456 }
457
458 printf("dev id %d: nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n",
459 info->dev_id,
460 info->nr_hw_queues, info->queue_depth,
461 1 << p.basic.logical_bs_shift, p.basic.dev_sectors);
462 printf("\tmax rq size %d daemon pid %d flags 0x%llx state %s\n",
463 info->max_io_buf_bytes,
464 info->ublksrv_pid, info->flags,
465 ublksrv_dev_state_desc(dev));
466 printf("\tublkc: %u:%d ublkb: %u:%u owner: %u:%u\n",
467 p.devt.char_major, p.devt.char_minor,
468 p.devt.disk_major, p.devt.disk_minor,
469 info->owner_uid, info->owner_gid);
470
471 if (jbuf) {
472 char buf[512];
473
474 for(i = 0; i < info->nr_hw_queues; i++) {
475 unsigned tid;
476
477 ublksrv_json_read_queue_info(jbuf, i, &tid, buf, 512);
478 printf("\tqueue %u: tid %d affinity(%s)\n",
479 i, tid, buf);
480 }
481
482 ublksrv_json_read_target_info(jbuf, buf, 512);
483 printf("\ttarget %s\n", buf);
484 }
485 }
486
/*
 * Push @params to the driver via SET_PARAMS.
 *
 * For an unprivileged device the buffer layout must be
 * [char-dev path (UBLKC_PATH_MAX)][ublk_params], so @params is copied
 * into a local buffer after the path instead of being passed directly.
 * Returns the driver's result.
 */
int ublksrv_ctrl_set_params(struct ublksrv_ctrl_dev *dev,
		struct ublk_params *params)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_CMD_SET_PARAMS,
		.flags	= CTRL_CMD_HAS_BUF,
		.addr = (__u64)params,
		.len = sizeof(*params),
	};
	char buf[UBLKC_PATH_MAX + sizeof(*params)];

	/* driver validates params->len against its own struct size */
	params->len = sizeof(*params);

	if (ublk_is_unprivileged(dev)) {
		snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV,
				dev->dev_info.dev_id);
		memcpy(&buf[UBLKC_PATH_MAX], params, sizeof(*params));
		data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA;
		data.len = sizeof(buf);
		data.dev_path_len = UBLKC_PATH_MAX;
		data.addr = (__u64)buf;
	}

	return __ublksrv_ctrl_cmd(dev, &data);
}
512
ublksrv_ctrl_get_params(struct ublksrv_ctrl_dev * dev,struct ublk_params * params)513 int ublksrv_ctrl_get_params(struct ublksrv_ctrl_dev *dev,
514 struct ublk_params *params)
515 {
516 struct ublksrv_ctrl_cmd_data data = {
517 .cmd_op = UBLK_CMD_GET_PARAMS,
518 .flags = CTRL_CMD_HAS_BUF,
519 .addr = (__u64)params,
520 .len = sizeof(*params),
521 };
522 char buf[UBLKC_PATH_MAX + sizeof(*params)];
523 int ret;
524
525 params->len = sizeof(*params);
526
527 if (ublk_is_unprivileged(dev)) {
528 snprintf(buf, UBLKC_PATH_MAX, "%s%d", UBLKC_DEV,
529 dev->dev_info.dev_id);
530 memcpy(&buf[UBLKC_PATH_MAX], params, sizeof(*params));
531 data.flags |= CTRL_CMD_HAS_BUF | CTRL_CMD_HAS_DATA;
532 data.len = sizeof(buf);
533 data.dev_path_len = UBLKC_PATH_MAX;
534 data.addr = (__u64)buf;
535 }
536
537 ret = __ublksrv_ctrl_cmd(dev, &data);
538 if (ret >= 0 && ublk_is_unprivileged(dev))
539 memcpy(params, &buf[UBLKC_PATH_MAX], sizeof(*params));
540
541 return 0;
542 }
543
ublksrv_ctrl_start_recovery(struct ublksrv_ctrl_dev * dev)544 int ublksrv_ctrl_start_recovery(struct ublksrv_ctrl_dev *dev)
545 {
546 struct ublksrv_ctrl_cmd_data data = {
547 .cmd_op = UBLK_CMD_START_USER_RECOVERY,
548 .flags = 0,
549 };
550 int ret;
551
552 ublk_un_privileged_prep_data(dev, data);
553
554 ret = __ublksrv_ctrl_cmd(dev, &data);
555 return ret;
556 }
557
ublksrv_ctrl_end_recovery(struct ublksrv_ctrl_dev * dev,int daemon_pid)558 int ublksrv_ctrl_end_recovery(struct ublksrv_ctrl_dev *dev, int daemon_pid)
559 {
560 struct ublksrv_ctrl_cmd_data data = {
561 .cmd_op = UBLK_CMD_END_USER_RECOVERY,
562 .flags = CTRL_CMD_HAS_DATA,
563 };
564 int ret;
565
566 ublk_un_privileged_prep_data(dev, data);
567
568 dev->dev_info.ublksrv_pid = data.data[0] = daemon_pid;
569
570 ret = __ublksrv_ctrl_cmd(dev, &data);
571 return ret;
572 }
573
/*
 * Query the driver's feature bitmask into *@features.  The opcode is
 * already ioctl-encoded (GET_FEATURES only exists in that form).
 * Returns the driver's result.
 */
int ublksrv_ctrl_get_features(struct ublksrv_ctrl_dev *dev,
		__u64 *features)
{
	struct ublksrv_ctrl_cmd_data data = {
		.cmd_op	= UBLK_U_CMD_GET_FEATURES,
		.flags	= CTRL_CMD_HAS_BUF,
		.len	= sizeof(*features),
		.addr	= (__u64)features,
	};

	return __ublksrv_ctrl_cmd(dev, &data);
}
586
/* Read-only accessor for the cached device info. */
const struct ublksrv_ctrl_dev_info *ublksrv_ctrl_get_dev_info(
		const struct ublksrv_ctrl_dev *dev)
{
	return &dev->dev_info;
}
592
/* Accessor for the runtime directory configured at init time. */
const char *ublksrv_ctrl_get_run_dir(const struct ublksrv_ctrl_dev *dev)
{
	return dev->run_dir;
}
597
/*
 * Prepare @dev for user recovery: install the target type/ops the new
 * daemon will use and stash the saved JSON state in recovery_jbuf.
 * NOTE(review): tgt_argc = -1 appears to mark recovery mode for later
 * target setup — confirm against the tgt init path.
 */
void ublksrv_ctrl_prep_recovery(struct ublksrv_ctrl_dev *dev,
		const char *tgt_type, const struct ublksrv_tgt_type *tgt_ops,
		const char *recovery_jbuf)
{
	dev->tgt_type = tgt_type;
	dev->tgt_ops = tgt_ops;
	dev->tgt_argc = -1;
	dev->recovery_jbuf = recovery_jbuf;
}
607
/* Accessor for the JSON buffer stashed by ublksrv_ctrl_prep_recovery(). */
const char *ublksrv_ctrl_get_recovery_jbuf(const struct ublksrv_ctrl_dev *dev)
{
	return dev->recovery_jbuf;
}
612