libfuse
fuse_lowlevel.c
1 /*
2  FUSE: Filesystem in Userspace
3  Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4 
5  Implementation of (most of) the low-level FUSE API. The session loop
6  functions are implemented in separate files.
7 
8  This program can be distributed under the terms of the GNU LGPLv2.
9  See the file COPYING.LIB
10 */
11 
12 #define _GNU_SOURCE
13 
14 #include "config.h"
15 #include "fuse_i.h"
16 #include "fuse_kernel.h"
17 #include "fuse_opt.h"
18 #include "fuse_misc.h"
19 #include "mount_util.h"
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <sys/file.h>
30 
31 #ifndef F_LINUX_SPECIFIC_BASE
32 #define F_LINUX_SPECIFIC_BASE 1024
33 #endif
34 #ifndef F_SETPIPE_SZ
35 #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
36 #endif
37 
38 
39 #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
40 #define OFFSET_MAX 0x7fffffffffffffffLL
41 
42 #define container_of(ptr, type, member) ({ \
43  const typeof( ((type *)0)->member ) *__mptr = (ptr); \
44  (type *)( (char *)__mptr - offsetof(type,member) );})
45 
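/* Opaque handle given to the filesystem's poll() method.  It stores the
 * kernel's poll handle (kh) and the owning session so that a later
 * fuse_lowlevel_notify_poll() call can wake the kernel-side waiter. */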
46 struct fuse_pollhandle {
47  uint64_t kh;
48  struct fuse_session *se;
49 };
50 
51 static size_t pagesize;
52 
53 static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
54 {
55  pagesize = getpagesize();
56 }
57 
58 static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
59 {
60  attr->ino = stbuf->st_ino;
61  attr->mode = stbuf->st_mode;
62  attr->nlink = stbuf->st_nlink;
63  attr->uid = stbuf->st_uid;
64  attr->gid = stbuf->st_gid;
65  attr->rdev = stbuf->st_rdev;
66  attr->size = stbuf->st_size;
67  attr->blksize = stbuf->st_blksize;
68  attr->blocks = stbuf->st_blocks;
69  attr->atime = stbuf->st_atime;
70  attr->mtime = stbuf->st_mtime;
71  attr->ctime = stbuf->st_ctime;
72  attr->atimensec = ST_ATIM_NSEC(stbuf);
73  attr->mtimensec = ST_MTIM_NSEC(stbuf);
74  attr->ctimensec = ST_CTIM_NSEC(stbuf);
75 }
76 
77 static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
78 {
79  stbuf->st_mode = attr->mode;
80  stbuf->st_uid = attr->uid;
81  stbuf->st_gid = attr->gid;
82  stbuf->st_size = attr->size;
83  stbuf->st_atime = attr->atime;
84  stbuf->st_mtime = attr->mtime;
85  stbuf->st_ctime = attr->ctime;
86  ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
87  ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
88  ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
89 }
90 
91 static size_t iov_length(const struct iovec *iov, size_t count)
92 {
93  size_t seg;
94  size_t ret = 0;
95 
96  for (seg = 0; seg < count; seg++)
97  ret += iov[seg].iov_len;
98  return ret;
99 }
100 
101 static void list_init_req(struct fuse_req *req)
102 {
103  req->next = req;
104  req->prev = req;
105 }
106 
107 static void list_del_req(struct fuse_req *req)
108 {
109  struct fuse_req *prev = req->prev;
110  struct fuse_req *next = req->next;
111  prev->next = next;
112  next->prev = prev;
113 }
114 
115 static void list_add_req(struct fuse_req *req, struct fuse_req *next)
116 {
117  struct fuse_req *prev = next->prev;
118  req->next = next;
119  req->prev = prev;
120  prev->next = req;
121  next->prev = req;
122 }
123 
124 static void destroy_req(fuse_req_t req)
125 {
126  pthread_mutex_destroy(&req->lock);
127  free(req);
128 }
129 
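/* Unlink the request from the session's list and drop its channel
 * reference under se->lock; the request itself is destroyed only when its
 * reference count reaches zero (an interrupt handler may still hold it). */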
130 void fuse_free_req(fuse_req_t req)
131 {
132  int ctr;
133  struct fuse_session *se = req->se;
134 
135  pthread_mutex_lock(&se->lock);
136  req->u.ni.func = NULL;
137  req->u.ni.data = NULL;
138  list_del_req(req);
139  ctr = --req->ctr;
140  fuse_chan_put(req->ch);
141  req->ch = NULL;
142  pthread_mutex_unlock(&se->lock);
143  if (!ctr)
144  destroy_req(req);
145 }
146 
147 static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
148 {
149  struct fuse_req *req;
150 
151  req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
152  if (req == NULL) {
153  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
154  } else {
155  req->se = se;
156  req->ctr = 1;
157  list_init_req(req);
158  fuse_mutex_init(&req->lock);
159  }
160 
161  return req;
162 }
163 
164 /* Send data. If *ch* is NULL, send via session master fd */
165 static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
166  struct iovec *iov, int count)
167 {
168  struct fuse_out_header *out = iov[0].iov_base;
169 
170  out->len = iov_length(iov, count);
171  if (se->debug) {
172  if (out->unique == 0) {
173  fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
174  out->error, out->len);
175  } else if (out->error) {
176  fuse_log(FUSE_LOG_DEBUG,
177  " unique: %llu, error: %i (%s), outsize: %i\n",
178  (unsigned long long) out->unique, out->error,
179  strerror(-out->error), out->len);
180  } else {
181  fuse_log(FUSE_LOG_DEBUG,
182  " unique: %llu, success, outsize: %i\n",
183  (unsigned long long) out->unique, out->len);
184  }
185  }
186 
187  ssize_t res = writev(ch ? ch->fd : se->fd,
188  iov, count);
189  int err = errno;
190 
191  if (res == -1) {
192  assert(se != NULL);
193 
194  /* ENOENT means the operation was interrupted */
195  if (!fuse_session_exited(se) && err != ENOENT)
196  perror("fuse: writing device");
197  return -err;
198  }
199 
200  return 0;
201 }
202 
203 
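/* Fill iov[0] with the fuse_out_header and send the reply for *req*
 * without freeing the request.  The error value must be 0 or a negative
 * errno greater than -1000; anything else is logged and replaced with
 * -ERANGE. */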
204 int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
205  int count)
206 {
207  struct fuse_out_header out;
208 
209  if (error <= -1000 || error > 0) {
210  fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
211  error = -ERANGE;
212  }
213 
214  out.unique = req->unique;
215  out.error = error;
216 
217  iov[0].iov_base = &out;
218  iov[0].iov_len = sizeof(struct fuse_out_header);
219 
220  return fuse_send_msg(req->se, req->ch, iov, count);
221 }
222 
223 static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
224  int count)
225 {
226  int res;
227 
228  res = fuse_send_reply_iov_nofree(req, error, iov, count);
229  fuse_free_req(req);
230  return res;
231 }
232 
233 static int send_reply(fuse_req_t req, int error, const void *arg,
234  size_t argsize)
235 {
236  struct iovec iov[2];
237  int count = 1;
238  if (argsize) {
239  iov[1].iov_base = (void *) arg;
240  iov[1].iov_len = argsize;
241  count++;
242  }
243  return send_reply_iov(req, error, iov, count);
244 }
245 
246 int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
247 {
248  int res;
249  struct iovec *padded_iov;
250 
251  padded_iov = malloc((count + 1) * sizeof(struct iovec));
252  if (padded_iov == NULL)
253  return fuse_reply_err(req, ENOMEM);
254 
255  memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
256  count++;
257 
258  res = send_reply_iov(req, 0, padded_iov, count);
259  free(padded_iov);
260 
261  return res;
262 }
263 
264 
265 /* `buf` is allowed to be empty so that the proper size may be
266  allocated by the caller */
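/* Typical two-pass usage from a readdir() handler (a sketch only; `b` is a
 * hypothetical growable buffer helper, not part of this file):
 *
 *     size_t entsize = fuse_add_direntry(req, NULL, 0, name, &st, next_off);
 *     b->p = realloc(b->p, b->size + entsize);
 *     fuse_add_direntry(req, b->p + b->size, entsize, name, &st, next_off);
 *     b->size += entsize;
 *
 * The first call, with buf == NULL, only returns the padded entry size. */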
267 size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
268  const char *name, const struct stat *stbuf, off_t off)
269 {
270  (void)req;
271  size_t namelen;
272  size_t entlen;
273  size_t entlen_padded;
274  struct fuse_dirent *dirent;
275 
276  namelen = strlen(name);
277  entlen = FUSE_NAME_OFFSET + namelen;
278  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
279 
280  if ((buf == NULL) || (entlen_padded > bufsize))
281  return entlen_padded;
282 
283  dirent = (struct fuse_dirent*) buf;
284  dirent->ino = stbuf->st_ino;
285  dirent->off = off;
286  dirent->namelen = namelen;
287  dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
288  memcpy(dirent->name, name, namelen);
289  memset(dirent->name + namelen, 0, entlen_padded - entlen);
290 
291  return entlen_padded;
292 }
293 
294 static void convert_statfs(const struct statvfs *stbuf,
295  struct fuse_kstatfs *kstatfs)
296 {
297  kstatfs->bsize = stbuf->f_bsize;
298  kstatfs->frsize = stbuf->f_frsize;
299  kstatfs->blocks = stbuf->f_blocks;
300  kstatfs->bfree = stbuf->f_bfree;
301  kstatfs->bavail = stbuf->f_bavail;
302  kstatfs->files = stbuf->f_files;
303  kstatfs->ffree = stbuf->f_ffree;
304  kstatfs->namelen = stbuf->f_namemax;
305 }
306 
307 static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
308 {
309  return send_reply(req, 0, arg, argsize);
310 }
311 
312 int fuse_reply_err(fuse_req_t req, int err)
313 {
314  return send_reply(req, -err, NULL, 0);
315 }
316 
317 void fuse_reply_none(fuse_req_t req)
318 {
319  fuse_free_req(req);
320 }
321 
322 static unsigned long calc_timeout_sec(double t)
323 {
324  if (t > (double) ULONG_MAX)
325  return ULONG_MAX;
326  else if (t < 0.0)
327  return 0;
328  else
329  return (unsigned long) t;
330 }
331 
332 static unsigned int calc_timeout_nsec(double t)
333 {
334  double f = t - (double) calc_timeout_sec(t);
335  if (f < 0.0)
336  return 0;
337  else if (f >= 0.999999999)
338  return 999999999;
339  else
340  return (unsigned int) (f * 1.0e9);
341 }
342 
343 static void fill_entry(struct fuse_entry_out *arg,
344  const struct fuse_entry_param *e)
345 {
346  arg->nodeid = e->ino;
347  arg->generation = e->generation;
348  arg->entry_valid = calc_timeout_sec(e->entry_timeout);
349  arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
350  arg->attr_valid = calc_timeout_sec(e->attr_timeout);
351  arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
352  convert_stat(&e->attr, &arg->attr);
353 }
354 
355 /* `buf` is allowed to be empty so that the proper size may be
356  allocated by the caller */
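/* READDIRPLUS variant of fuse_add_direntry(): besides the plain dirent it
 * embeds a fuse_entry_out filled by fill_entry(), so the kernel receives
 * the attributes and timeouts along with each directory entry. */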
357 size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
358  const char *name,
359  const struct fuse_entry_param *e, off_t off)
360 {
361  (void)req;
362  size_t namelen;
363  size_t entlen;
364  size_t entlen_padded;
365 
366  namelen = strlen(name);
367  entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
368  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
369  if ((buf == NULL) || (entlen_padded > bufsize))
370  return entlen_padded;
371 
372  struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
373  memset(&dp->entry_out, 0, sizeof(dp->entry_out));
374  fill_entry(&dp->entry_out, e);
375 
376  struct fuse_dirent *dirent = &dp->dirent;
377  dirent->ino = e->attr.st_ino;
378  dirent->off = off;
379  dirent->namelen = namelen;
380  dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
381  memcpy(dirent->name, name, namelen);
382  memset(dirent->name + namelen, 0, entlen_padded - entlen);
383 
384  return entlen_padded;
385 }
386 
387 static void fill_open(struct fuse_open_out *arg,
388  const struct fuse_file_info *f)
389 {
390  arg->fh = f->fh;
391  if (f->direct_io)
392  arg->open_flags |= FOPEN_DIRECT_IO;
393  if (f->keep_cache)
394  arg->open_flags |= FOPEN_KEEP_CACHE;
395  if (f->cache_readdir)
396  arg->open_flags |= FOPEN_CACHE_DIR;
397  if (f->nonseekable)
398  arg->open_flags |= FOPEN_NONSEEKABLE;
399 }
400 
401 int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
402 {
403  struct fuse_entry_out arg;
404  size_t size = req->se->conn.proto_minor < 9 ?
405  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
406 
407  /* before ABI 7.4, e->ino == 0 was invalid; only ENOENT meant a
408  negative entry */
409  if (!e->ino && req->se->conn.proto_minor < 4)
410  return fuse_reply_err(req, ENOENT);
411 
412  memset(&arg, 0, sizeof(arg));
413  fill_entry(&arg, e);
414  return send_reply_ok(req, &arg, size);
415 }
416 
417 int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
418  const struct fuse_file_info *f)
419 {
420  char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
421  size_t entrysize = req->se->conn.proto_minor < 9 ?
422  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
423  struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
424  struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
425 
426  memset(buf, 0, sizeof(buf));
427  fill_entry(earg, e);
428  fill_open(oarg, f);
429  return send_reply_ok(req, buf,
430  entrysize + sizeof(struct fuse_open_out));
431 }
432 
433 int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
434  double attr_timeout)
435 {
436  struct fuse_attr_out arg;
437  size_t size = req->se->conn.proto_minor < 9 ?
438  FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
439 
440  memset(&arg, 0, sizeof(arg));
441  arg.attr_valid = calc_timeout_sec(attr_timeout);
442  arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
443  convert_stat(attr, &arg.attr);
444 
445  return send_reply_ok(req, &arg, size);
446 }
447 
448 int fuse_reply_readlink(fuse_req_t req, const char *linkname)
449 {
450  return send_reply_ok(req, linkname, strlen(linkname));
451 }
452 
453 int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
454 {
455  struct fuse_open_out arg;
456 
457  memset(&arg, 0, sizeof(arg));
458  fill_open(&arg, f);
459  return send_reply_ok(req, &arg, sizeof(arg));
460 }
461 
462 int fuse_reply_write(fuse_req_t req, size_t count)
463 {
464  struct fuse_write_out arg;
465 
466  memset(&arg, 0, sizeof(arg));
467  arg.size = count;
468 
469  return send_reply_ok(req, &arg, sizeof(arg));
470 }
471 
472 int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
473 {
474  return send_reply_ok(req, buf, size);
475 }
476 
477 static int fuse_send_data_iov_fallback(struct fuse_session *se,
478  struct fuse_chan *ch,
479  struct iovec *iov, int iov_count,
480  struct fuse_bufvec *buf,
481  size_t len)
482 {
483  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
484  void *mbuf;
485  int res;
486 
487  /* Optimize common case */
488  if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
489  !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
490  /* FIXME: also avoid memory copy if there are multiple buffers
491  but none of them contain an fd */
492 
493  iov[iov_count].iov_base = buf->buf[0].mem;
494  iov[iov_count].iov_len = len;
495  iov_count++;
496  return fuse_send_msg(se, ch, iov, iov_count);
497  }
498 
499  res = posix_memalign(&mbuf, pagesize, len);
500  if (res != 0)
501  return res;
502 
503  mem_buf.buf[0].mem = mbuf;
504  res = fuse_buf_copy(&mem_buf, buf, 0);
505  if (res < 0) {
506  free(mbuf);
507  return -res;
508  }
509  len = res;
510 
511  iov[iov_count].iov_base = mbuf;
512  iov[iov_count].iov_len = len;
513  iov_count++;
514  res = fuse_send_msg(se, ch, iov, iov_count);
515  free(mbuf);
516 
517  return res;
518 }
519 
520 struct fuse_ll_pipe {
521  size_t size;
522  int can_grow;
523  int pipe[2];
524 };
525 
526 static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
527 {
528  close(llp->pipe[0]);
529  close(llp->pipe[1]);
530  free(llp);
531 }
532 
533 #ifdef HAVE_SPLICE
534 #if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
535 static int fuse_pipe(int fds[2])
536 {
537  int rv = pipe(fds);
538 
539  if (rv == -1)
540  return rv;
541 
542  if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
543  fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
544  fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
545  fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
546  close(fds[0]);
547  close(fds[1]);
548  rv = -1;
549  }
550  return rv;
551 }
552 #else
553 static int fuse_pipe(int fds[2])
554 {
555  return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
556 }
557 #endif
558 
559 static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
560 {
561  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
562  if (llp == NULL) {
563  int res;
564 
565  llp = malloc(sizeof(struct fuse_ll_pipe));
566  if (llp == NULL)
567  return NULL;
568 
569  res = fuse_pipe(llp->pipe);
570  if (res == -1) {
571  free(llp);
572  return NULL;
573  }
574 
575  /*
576  * The default size is 16 pages on Linux
577  */
578  llp->size = pagesize * 16;
579  llp->can_grow = 1;
580 
581  pthread_setspecific(se->pipe_key, llp);
582  }
583 
584  return llp;
585 }
586 #endif
587 
588 static void fuse_ll_clear_pipe(struct fuse_session *se)
589 {
590  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
591  if (llp) {
592  pthread_setspecific(se->pipe_key, NULL);
593  fuse_ll_pipe_free(llp);
594  }
595 }
596 
597 #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
598 static int read_back(int fd, char *buf, size_t len)
599 {
600  int res;
601 
602  res = read(fd, buf, len);
603  if (res == -1) {
604  fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
605  return -EIO;
606  }
607  if (res != len) {
608  fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
609  return -EIO;
610  }
611  return 0;
612 }
613 
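/* Try to grow *pipefd* to the system-wide limit read from
 * /proc/sys/fs/pipe-max-size using F_SETPIPE_SZ; returns the new size on
 * success or a negative errno on failure. */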
614 static int grow_pipe_to_max(int pipefd)
615 {
616  int max;
617  int res;
618  int maxfd;
619  char buf[32];
620 
621  maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
622  if (maxfd < 0)
623  return -errno;
624 
625  res = read(maxfd, buf, sizeof(buf) - 1);
626  if (res < 0) {
627  int saved_errno;
628 
629  saved_errno = errno;
630  close(maxfd);
631  return -saved_errno;
632  }
633  close(maxfd);
634  buf[res] = '\0';
635 
636  max = atoi(buf);
637  res = fcntl(pipefd, F_SETPIPE_SZ, max);
638  if (res < 0)
639  return -errno;
640  return max;
641 }
642 
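/* Zero-copy reply path: the header iovec is vmspliced into a per-thread
 * pipe, the payload is spliced in behind it via fuse_buf_copy(), and the
 * filled pipe is then spliced to the device.  Falls back to
 * fuse_send_data_iov_fallback() when splicing is disabled, unsupported by
 * the kernel, known to be broken, or unprofitable (less than two pages of
 * fd-backed data). */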
643 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
644  struct iovec *iov, int iov_count,
645  struct fuse_bufvec *buf, unsigned int flags)
646 {
647  int res;
648  size_t len = fuse_buf_size(buf);
649  struct fuse_out_header *out = iov[0].iov_base;
650  struct fuse_ll_pipe *llp;
651  int splice_flags;
652  size_t pipesize;
653  size_t total_fd_size;
654  size_t idx;
655  size_t headerlen;
656  struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
657 
658  if (se->broken_splice_nonblock)
659  goto fallback;
660 
661  if (flags & FUSE_BUF_NO_SPLICE)
662  goto fallback;
663 
664  total_fd_size = 0;
665  for (idx = buf->idx; idx < buf->count; idx++) {
666  if (buf->buf[idx].flags & FUSE_BUF_IS_FD) {
667  total_fd_size = buf->buf[idx].size;
668  if (idx == buf->idx)
669  total_fd_size -= buf->off;
670  }
671  }
672  if (total_fd_size < 2 * pagesize)
673  goto fallback;
674 
675  if (se->conn.proto_minor < 14 ||
676  !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
677  goto fallback;
678 
679  llp = fuse_ll_get_pipe(se);
680  if (llp == NULL)
681  goto fallback;
682 
683 
684  headerlen = iov_length(iov, iov_count);
685 
686  out->len = headerlen + len;
687 
688  /*
689  * Heuristic for the required pipe size; this does not work if the
690  * source contains fragments smaller than the page size
691  */
692  pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
693 
694  if (llp->size < pipesize) {
695  if (llp->can_grow) {
696  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
697  if (res == -1) {
698  res = grow_pipe_to_max(llp->pipe[0]);
699  if (res > 0)
700  llp->size = res;
701  llp->can_grow = 0;
702  goto fallback;
703  }
704  llp->size = res;
705  }
706  if (llp->size < pipesize)
707  goto fallback;
708  }
709 
710 
711  res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
712  if (res == -1)
713  goto fallback;
714 
715  if (res != headerlen) {
716  res = -EIO;
717  fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
718  headerlen);
719  goto clear_pipe;
720  }
721 
722  pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
723  pipe_buf.buf[0].fd = llp->pipe[1];
724 
725  res = fuse_buf_copy(&pipe_buf, buf,
726  FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
727  if (res < 0) {
728  if (res == -EAGAIN || res == -EINVAL) {
729  /*
730  * Should only get EAGAIN on kernels with
731  * broken SPLICE_F_NONBLOCK support (<=
732  * 2.6.35) where this error or a short read is
733  * returned even if the pipe itself is not
734  * full
735  *
736  * EINVAL might mean that splice can't handle
737  * this combination of input and output.
738  */
739  if (res == -EAGAIN)
740  se->broken_splice_nonblock = 1;
741 
742  pthread_setspecific(se->pipe_key, NULL);
743  fuse_ll_pipe_free(llp);
744  goto fallback;
745  }
746  res = -res;
747  goto clear_pipe;
748  }
749 
750  if (res != 0 && res < len) {
751  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
752  void *mbuf;
753  size_t now_len = res;
754  /*
755  * For regular files a short count is either
756  * 1) due to EOF, or
757  * 2) because of broken SPLICE_F_NONBLOCK (see above)
758  *
759  * For other inputs it's possible that we overflowed
760  * the pipe because of small buffer fragments.
761  */
762 
763  res = posix_memalign(&mbuf, pagesize, len);
764  if (res != 0)
765  goto clear_pipe;
766 
767  mem_buf.buf[0].mem = mbuf;
768  mem_buf.off = now_len;
769  res = fuse_buf_copy(&mem_buf, buf, 0);
770  if (res > 0) {
771  char *tmpbuf;
772  size_t extra_len = res;
773  /*
774  * Trickiest case: got more data. Need to get
775  * back the data from the pipe and then fall
776  * back to regular write.
777  */
778  tmpbuf = malloc(headerlen);
779  if (tmpbuf == NULL) {
780  free(mbuf);
781  res = ENOMEM;
782  goto clear_pipe;
783  }
784  res = read_back(llp->pipe[0], tmpbuf, headerlen);
785  free(tmpbuf);
786  if (res != 0) {
787  free(mbuf);
788  goto clear_pipe;
789  }
790  res = read_back(llp->pipe[0], mbuf, now_len);
791  if (res != 0) {
792  free(mbuf);
793  goto clear_pipe;
794  }
795  len = now_len + extra_len;
796  iov[iov_count].iov_base = mbuf;
797  iov[iov_count].iov_len = len;
798  iov_count++;
799  res = fuse_send_msg(se, ch, iov, iov_count);
800  free(mbuf);
801  return res;
802  }
803  free(mbuf);
804  res = now_len;
805  }
806  len = res;
807  out->len = headerlen + len;
808 
809  if (se->debug) {
810  fuse_log(FUSE_LOG_DEBUG,
811  " unique: %llu, success, outsize: %i (splice)\n",
812  (unsigned long long) out->unique, out->len);
813  }
814 
815  splice_flags = 0;
816  if ((flags & FUSE_BUF_SPLICE_MOVE) &&
817  (se->conn.want & FUSE_CAP_SPLICE_MOVE))
818  splice_flags |= SPLICE_F_MOVE;
819 
820  res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd,
821  NULL, out->len, splice_flags);
822  if (res == -1) {
823  res = -errno;
824  perror("fuse: splice from pipe");
825  goto clear_pipe;
826  }
827  if (res != out->len) {
828  res = -EIO;
829  fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
830  res, out->len);
831  goto clear_pipe;
832  }
833  return 0;
834 
835 clear_pipe:
836  fuse_ll_clear_pipe(se);
837  return res;
838 
839 fallback:
840  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
841 }
842 #else
843 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
844  struct iovec *iov, int iov_count,
845  struct fuse_bufvec *buf, unsigned int flags)
846 {
847  size_t len = fuse_buf_size(buf);
848  (void) flags;
849 
850  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
851 }
852 #endif
853 
854 int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
855  enum fuse_buf_copy_flags flags)
856 {
857  struct iovec iov[2];
858  struct fuse_out_header out;
859  int res;
860 
861  iov[0].iov_base = &out;
862  iov[0].iov_len = sizeof(struct fuse_out_header);
863 
864  out.unique = req->unique;
865  out.error = 0;
866 
867  res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
868  if (res <= 0) {
869  fuse_free_req(req);
870  return res;
871  } else {
872  return fuse_reply_err(req, res);
873  }
874 }
875 
876 int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
877 {
878  struct fuse_statfs_out arg;
879  size_t size = req->se->conn.proto_minor < 4 ?
880  FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
881 
882  memset(&arg, 0, sizeof(arg));
883  convert_statfs(stbuf, &arg.st);
884 
885  return send_reply_ok(req, &arg, size);
886 }
887 
888 int fuse_reply_xattr(fuse_req_t req, size_t count)
889 {
890  struct fuse_getxattr_out arg;
891 
892  memset(&arg, 0, sizeof(arg));
893  arg.size = count;
894 
895  return send_reply_ok(req, &arg, sizeof(arg));
896 }
897 
898 int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
899 {
900  struct fuse_lk_out arg;
901 
902  memset(&arg, 0, sizeof(arg));
903  arg.lk.type = lock->l_type;
904  if (lock->l_type != F_UNLCK) {
905  arg.lk.start = lock->l_start;
906  if (lock->l_len == 0)
907  arg.lk.end = OFFSET_MAX;
908  else
909  arg.lk.end = lock->l_start + lock->l_len - 1;
910  }
911  arg.lk.pid = lock->l_pid;
912  return send_reply_ok(req, &arg, sizeof(arg));
913 }
914 
915 int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
916 {
917  struct fuse_bmap_out arg;
918 
919  memset(&arg, 0, sizeof(arg));
920  arg.block = idx;
921 
922  return send_reply_ok(req, &arg, sizeof(arg));
923 }
924 
925 static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
926  size_t count)
927 {
928  struct fuse_ioctl_iovec *fiov;
929  size_t i;
930 
931  fiov = malloc(sizeof(fiov[0]) * count);
932  if (!fiov)
933  return NULL;
934 
935  for (i = 0; i < count; i++) {
936  fiov[i].base = (uintptr_t) iov[i].iov_base;
937  fiov[i].len = iov[i].iov_len;
938  }
939 
940  return fiov;
941 }
942 
943 int fuse_reply_ioctl_retry(fuse_req_t req,
944  const struct iovec *in_iov, size_t in_count,
945  const struct iovec *out_iov, size_t out_count)
946 {
947  struct fuse_ioctl_out arg;
948  struct fuse_ioctl_iovec *in_fiov = NULL;
949  struct fuse_ioctl_iovec *out_fiov = NULL;
950  struct iovec iov[4];
951  size_t count = 1;
952  int res;
953 
954  memset(&arg, 0, sizeof(arg));
955  arg.flags |= FUSE_IOCTL_RETRY;
956  arg.in_iovs = in_count;
957  arg.out_iovs = out_count;
958  iov[count].iov_base = &arg;
959  iov[count].iov_len = sizeof(arg);
960  count++;
961 
962  if (req->se->conn.proto_minor < 16) {
963  if (in_count) {
964  iov[count].iov_base = (void *)in_iov;
965  iov[count].iov_len = sizeof(in_iov[0]) * in_count;
966  count++;
967  }
968 
969  if (out_count) {
970  iov[count].iov_base = (void *)out_iov;
971  iov[count].iov_len = sizeof(out_iov[0]) * out_count;
972  count++;
973  }
974  } else {
975  /* Can't handle non-compat 64bit ioctls on 32bit */
976  if (sizeof(void *) == 4 && req->ioctl_64bit) {
977  res = fuse_reply_err(req, EINVAL);
978  goto out;
979  }
980 
981  if (in_count) {
982  in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
983  if (!in_fiov)
984  goto enomem;
985 
986  iov[count].iov_base = (void *)in_fiov;
987  iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
988  count++;
989  }
990  if (out_count) {
991  out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
992  if (!out_fiov)
993  goto enomem;
994 
995  iov[count].iov_base = (void *)out_fiov;
996  iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
997  count++;
998  }
999  }
1000 
1001  res = send_reply_iov(req, 0, iov, count);
1002 out:
1003  free(in_fiov);
1004  free(out_fiov);
1005 
1006  return res;
1007 
1008 enomem:
1009  res = fuse_reply_err(req, ENOMEM);
1010  goto out;
1011 }
1012 
1013 int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1014 {
1015  struct fuse_ioctl_out arg;
1016  struct iovec iov[3];
1017  size_t count = 1;
1018 
1019  memset(&arg, 0, sizeof(arg));
1020  arg.result = result;
1021  iov[count].iov_base = &arg;
1022  iov[count].iov_len = sizeof(arg);
1023  count++;
1024 
1025  if (size) {
1026  iov[count].iov_base = (char *) buf;
1027  iov[count].iov_len = size;
1028  count++;
1029  }
1030 
1031  return send_reply_iov(req, 0, iov, count);
1032 }
1033 
1034 int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1035  int count)
1036 {
1037  struct iovec *padded_iov;
1038  struct fuse_ioctl_out arg;
1039  int res;
1040 
1041  padded_iov = malloc((count + 2) * sizeof(struct iovec));
1042  if (padded_iov == NULL)
1043  return fuse_reply_err(req, ENOMEM);
1044 
1045  memset(&arg, 0, sizeof(arg));
1046  arg.result = result;
1047  padded_iov[1].iov_base = &arg;
1048  padded_iov[1].iov_len = sizeof(arg);
1049 
1050  memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1051 
1052  res = send_reply_iov(req, 0, padded_iov, count + 2);
1053  free(padded_iov);
1054 
1055  return res;
1056 }
1057 
1058 int fuse_reply_poll(fuse_req_t req, unsigned revents)
1059 {
1060  struct fuse_poll_out arg;
1061 
1062  memset(&arg, 0, sizeof(arg));
1063  arg.revents = revents;
1064 
1065  return send_reply_ok(req, &arg, sizeof(arg));
1066 }
1067 
1068 static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1069 {
1070  char *name = (char *) inarg;
1071 
1072  if (req->se->op.lookup)
1073  req->se->op.lookup(req, nodeid, name);
1074  else
1075  fuse_reply_err(req, ENOSYS);
1076 }
1077 
1078 static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1079 {
1080  struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1081 
1082  if (req->se->op.forget)
1083  req->se->op.forget(req, nodeid, arg->nlookup);
1084  else
1085  fuse_reply_none(req);
1086 }
1087 
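/* FUSE_BATCH_FORGET carries an array of fuse_forget_one entries after the
 * header.  With ->forget_multi the whole batch (and the original request)
 * is handed to the filesystem at once; with only ->forget each entry is
 * delivered through a throw-away dummy request and the original request is
 * answered here with fuse_reply_none(). */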
1088 static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1089  const void *inarg)
1090 {
1091  struct fuse_batch_forget_in *arg = (void *) inarg;
1092  struct fuse_forget_one *param = (void *) PARAM(arg);
1093  unsigned int i;
1094 
1095  (void) nodeid;
1096 
1097  if (req->se->op.forget_multi) {
1098  req->se->op.forget_multi(req, arg->count,
1099  (struct fuse_forget_data *) param);
1100  } else if (req->se->op.forget) {
1101  for (i = 0; i < arg->count; i++) {
1102  struct fuse_forget_one *forget = &param[i];
1103  struct fuse_req *dummy_req;
1104 
1105  dummy_req = fuse_ll_alloc_req(req->se);
1106  if (dummy_req == NULL)
1107  break;
1108 
1109  dummy_req->unique = req->unique;
1110  dummy_req->ctx = req->ctx;
1111  dummy_req->ch = NULL;
1112 
1113  req->se->op.forget(dummy_req, forget->nodeid,
1114  forget->nlookup);
1115  }
1116  fuse_reply_none(req);
1117  } else {
1118  fuse_reply_none(req);
1119  }
1120 }
1121 
1122 static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1123 {
1124  struct fuse_file_info *fip = NULL;
1125  struct fuse_file_info fi;
1126 
1127  if (req->se->conn.proto_minor >= 9) {
1128  struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1129 
1130  if (arg->getattr_flags & FUSE_GETATTR_FH) {
1131  memset(&fi, 0, sizeof(fi));
1132  fi.fh = arg->fh;
1133  fip = &fi;
1134  }
1135  }
1136 
1137  if (req->se->op.getattr)
1138  req->se->op.getattr(req, nodeid, fip);
1139  else
1140  fuse_reply_err(req, ENOSYS);
1141 }
1142 
1143 static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1144 {
1145  struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1146 
1147  if (req->se->op.setattr) {
1148  struct fuse_file_info *fi = NULL;
1149  struct fuse_file_info fi_store;
1150  struct stat stbuf;
1151  memset(&stbuf, 0, sizeof(stbuf));
1152  convert_attr(arg, &stbuf);
1153  if (arg->valid & FATTR_FH) {
1154  arg->valid &= ~FATTR_FH;
1155  memset(&fi_store, 0, sizeof(fi_store));
1156  fi = &fi_store;
1157  fi->fh = arg->fh;
1158  }
1159  arg->valid &=
1160  FUSE_SET_ATTR_MODE |
1161  FUSE_SET_ATTR_UID |
1162  FUSE_SET_ATTR_GID |
1163  FUSE_SET_ATTR_SIZE |
1164  FUSE_SET_ATTR_ATIME |
1165  FUSE_SET_ATTR_MTIME |
1166  FUSE_SET_ATTR_ATIME_NOW |
1167  FUSE_SET_ATTR_MTIME_NOW |
1168  FUSE_SET_ATTR_CTIME;
1169 
1170  req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1171  } else
1172  fuse_reply_err(req, ENOSYS);
1173 }
1174 
1175 static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1176 {
1177  struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1178 
1179  if (req->se->op.access)
1180  req->se->op.access(req, nodeid, arg->mask);
1181  else
1182  fuse_reply_err(req, ENOSYS);
1183 }
1184 
1185 static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1186 {
1187  (void) inarg;
1188 
1189  if (req->se->op.readlink)
1190  req->se->op.readlink(req, nodeid);
1191  else
1192  fuse_reply_err(req, ENOSYS);
1193 }
1194 
1195 static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1196 {
1197  struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1198  char *name = PARAM(arg);
1199 
1200  if (req->se->conn.proto_minor >= 12)
1201  req->ctx.umask = arg->umask;
1202  else
1203  name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1204 
1205  if (req->se->op.mknod)
1206  req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1207  else
1208  fuse_reply_err(req, ENOSYS);
1209 }
1210 
1211 static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1212 {
1213  struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1214 
1215  if (req->se->conn.proto_minor >= 12)
1216  req->ctx.umask = arg->umask;
1217 
1218  if (req->se->op.mkdir)
1219  req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1220  else
1221  fuse_reply_err(req, ENOSYS);
1222 }
1223 
1224 static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1225 {
1226  char *name = (char *) inarg;
1227 
1228  if (req->se->op.unlink)
1229  req->se->op.unlink(req, nodeid, name);
1230  else
1231  fuse_reply_err(req, ENOSYS);
1232 }
1233 
1234 static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1235 {
1236  char *name = (char *) inarg;
1237 
1238  if (req->se->op.rmdir)
1239  req->se->op.rmdir(req, nodeid, name);
1240  else
1241  fuse_reply_err(req, ENOSYS);
1242 }
1243 
1244 static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1245 {
1246  char *name = (char *) inarg;
1247  char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1248 
1249  if (req->se->op.symlink)
1250  req->se->op.symlink(req, linkname, nodeid, name);
1251  else
1252  fuse_reply_err(req, ENOSYS);
1253 }
1254 
1255 static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1256 {
1257  struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1258  char *oldname = PARAM(arg);
1259  char *newname = oldname + strlen(oldname) + 1;
1260 
1261  if (req->se->op.rename)
1262  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1263  0);
1264  else
1265  fuse_reply_err(req, ENOSYS);
1266 }
1267 
1268 static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1269 {
1270  struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1271  char *oldname = PARAM(arg);
1272  char *newname = oldname + strlen(oldname) + 1;
1273 
1274  if (req->se->op.rename)
1275  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1276  arg->flags);
1277  else
1278  fuse_reply_err(req, ENOSYS);
1279 }
1280 
1281 static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1282 {
1283  struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1284 
1285  if (req->se->op.link)
1286  req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1287  else
1288  fuse_reply_err(req, ENOSYS);
1289 }
1290 
1291 static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1292 {
1293  struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1294 
1295  if (req->se->op.create) {
1296  struct fuse_file_info fi;
1297  char *name = PARAM(arg);
1298 
1299  memset(&fi, 0, sizeof(fi));
1300  fi.flags = arg->flags;
1301 
1302  if (req->se->conn.proto_minor >= 12)
1303  req->ctx.umask = arg->umask;
1304  else
1305  name = (char *) inarg + sizeof(struct fuse_open_in);
1306 
1307  req->se->op.create(req, nodeid, name, arg->mode, &fi);
1308  } else
1309  fuse_reply_err(req, ENOSYS);
1310 }
1311 
1312 static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1313 {
1314  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1315  struct fuse_file_info fi;
1316 
1317  memset(&fi, 0, sizeof(fi));
1318  fi.flags = arg->flags;
1319 
1320  if (req->se->op.open)
1321  req->se->op.open(req, nodeid, &fi);
1322  else
1323  fuse_reply_open(req, &fi);
1324 }
1325 
1326 static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1327 {
1328  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1329 
1330  if (req->se->op.read) {
1331  struct fuse_file_info fi;
1332 
1333  memset(&fi, 0, sizeof(fi));
1334  fi.fh = arg->fh;
1335  if (req->se->conn.proto_minor >= 9) {
1336  fi.lock_owner = arg->lock_owner;
1337  fi.flags = arg->flags;
1338  }
1339  req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1340  } else
1341  fuse_reply_err(req, ENOSYS);
1342 }
1343 
1344 static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1345 {
1346  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1347  struct fuse_file_info fi;
1348  char *param;
1349 
1350  memset(&fi, 0, sizeof(fi));
1351  fi.fh = arg->fh;
1352  fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1353 
1354  if (req->se->conn.proto_minor < 9) {
1355  param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1356  } else {
1357  fi.lock_owner = arg->lock_owner;
1358  fi.flags = arg->flags;
1359  param = PARAM(arg);
1360  }
1361 
1362  if (req->se->op.write)
1363  req->se->op.write(req, nodeid, param, arg->size,
1364  arg->offset, &fi);
1365  else
1366  fuse_reply_err(req, ENOSYS);
1367 }
1368 
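/* Write path used when the filesystem implements ->write_buf: the payload
 * is passed as a fuse_bufvec that may still be fd-backed (i.e. sitting in
 * the splice pipe) instead of being copied into a flat buffer.  The pipe is
 * cleared afterwards if the handler did not consume all of the data. */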
1369 static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1370  const struct fuse_buf *ibuf)
1371 {
1372  struct fuse_session *se = req->se;
1373  struct fuse_bufvec bufv = {
1374  .buf[0] = *ibuf,
1375  .count = 1,
1376  };
1377  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1378  struct fuse_file_info fi;
1379 
1380  memset(&fi, 0, sizeof(fi));
1381  fi.fh = arg->fh;
1382  fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1383 
1384  if (se->conn.proto_minor < 9) {
1385  bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1386  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1387  FUSE_COMPAT_WRITE_IN_SIZE;
1388  assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1389  } else {
1390  fi.lock_owner = arg->lock_owner;
1391  fi.flags = arg->flags;
1392  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1393  bufv.buf[0].mem = PARAM(arg);
1394 
1395  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1396  sizeof(struct fuse_write_in);
1397  }
1398  if (bufv.buf[0].size < arg->size) {
1399  fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1400  fuse_reply_err(req, EIO);
1401  goto out;
1402  }
1403  bufv.buf[0].size = arg->size;
1404 
1405  se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1406 
1407 out:
1408  /* Need to reset the pipe if ->write_buf() didn't consume all data */
1409  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1410  fuse_ll_clear_pipe(se);
1411 }
1412 
1413 static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1414 {
1415  struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1416  struct fuse_file_info fi;
1417 
1418  memset(&fi, 0, sizeof(fi));
1419  fi.fh = arg->fh;
1420  fi.flush = 1;
1421  if (req->se->conn.proto_minor >= 7)
1422  fi.lock_owner = arg->lock_owner;
1423 
1424  if (req->se->op.flush)
1425  req->se->op.flush(req, nodeid, &fi);
1426  else
1427  fuse_reply_err(req, ENOSYS);
1428 }
1429 
1430 static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1431 {
1432  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1433  struct fuse_file_info fi;
1434 
1435  memset(&fi, 0, sizeof(fi));
1436  fi.flags = arg->flags;
1437  fi.fh = arg->fh;
1438  if (req->se->conn.proto_minor >= 8) {
1439  fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1440  fi.lock_owner = arg->lock_owner;
1441  }
1442  if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1443  fi.flock_release = 1;
1444  fi.lock_owner = arg->lock_owner;
1445  }
1446 
1447  if (req->se->op.release)
1448  req->se->op.release(req, nodeid, &fi);
1449  else
1450  fuse_reply_err(req, 0);
1451 }
1452 
1453 static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1454 {
1455  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1456  struct fuse_file_info fi;
1457  int datasync = arg->fsync_flags & 1;
1458 
1459  memset(&fi, 0, sizeof(fi));
1460  fi.fh = arg->fh;
1461 
1462  if (req->se->op.fsync)
1463  req->se->op.fsync(req, nodeid, datasync, &fi);
1464  else
1465  fuse_reply_err(req, ENOSYS);
1466 }
1467 
1468 static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1469 {
1470  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1471  struct fuse_file_info fi;
1472 
1473  memset(&fi, 0, sizeof(fi));
1474  fi.flags = arg->flags;
1475 
1476  if (req->se->op.opendir)
1477  req->se->op.opendir(req, nodeid, &fi);
1478  else
1479  fuse_reply_open(req, &fi);
1480 }
1481 
1482 static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1483 {
1484  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1485  struct fuse_file_info fi;
1486 
1487  memset(&fi, 0, sizeof(fi));
1488  fi.fh = arg->fh;
1489 
1490  if (req->se->op.readdir)
1491  req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1492  else
1493  fuse_reply_err(req, ENOSYS);
1494 }
1495 
1496 static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1497 {
1498  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1499  struct fuse_file_info fi;
1500 
1501  memset(&fi, 0, sizeof(fi));
1502  fi.fh = arg->fh;
1503 
1504  if (req->se->op.readdirplus)
1505  req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1506  else
1507  fuse_reply_err(req, ENOSYS);
1508 }
1509 
1510 static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1511 {
1512  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1513  struct fuse_file_info fi;
1514 
1515  memset(&fi, 0, sizeof(fi));
1516  fi.flags = arg->flags;
1517  fi.fh = arg->fh;
1518 
1519  if (req->se->op.releasedir)
1520  req->se->op.releasedir(req, nodeid, &fi);
1521  else
1522  fuse_reply_err(req, 0);
1523 }
1524 
1525 static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1526 {
1527  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1528  struct fuse_file_info fi;
1529  int datasync = arg->fsync_flags & 1;
1530 
1531  memset(&fi, 0, sizeof(fi));
1532  fi.fh = arg->fh;
1533 
1534  if (req->se->op.fsyncdir)
1535  req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1536  else
1537  fuse_reply_err(req, ENOSYS);
1538 }
1539 
1540 static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1541 {
1542  (void) nodeid;
1543  (void) inarg;
1544 
1545  if (req->se->op.statfs)
1546  req->se->op.statfs(req, nodeid);
1547  else {
1548  struct statvfs buf = {
1549  .f_namemax = 255,
1550  .f_bsize = 512,
1551  };
1552  fuse_reply_statfs(req, &buf);
1553  }
1554 }
1555 
1556 static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1557 {
1558  struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1559  char *name = PARAM(arg);
1560  char *value = name + strlen(name) + 1;
1561 
1562  if (req->se->op.setxattr)
1563  req->se->op.setxattr(req, nodeid, name, value, arg->size,
1564  arg->flags);
1565  else
1566  fuse_reply_err(req, ENOSYS);
1567 }
1568 
1569 static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1570 {
1571  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1572 
1573  if (req->se->op.getxattr)
1574  req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1575  else
1576  fuse_reply_err(req, ENOSYS);
1577 }
1578 
1579 static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1580 {
1581  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1582 
1583  if (req->se->op.listxattr)
1584  req->se->op.listxattr(req, nodeid, arg->size);
1585  else
1586  fuse_reply_err(req, ENOSYS);
1587 }
1588 
1589 static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1590 {
1591  char *name = (char *) inarg;
1592 
1593  if (req->se->op.removexattr)
1594  req->se->op.removexattr(req, nodeid, name);
1595  else
1596  fuse_reply_err(req, ENOSYS);
1597 }
1598 
1599 static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1600  struct flock *flock)
1601 {
1602  memset(flock, 0, sizeof(struct flock));
1603  flock->l_type = fl->type;
1604  flock->l_whence = SEEK_SET;
1605  flock->l_start = fl->start;
1606  if (fl->end == OFFSET_MAX)
1607  flock->l_len = 0;
1608  else
1609  flock->l_len = fl->end - fl->start + 1;
1610  flock->l_pid = fl->pid;
1611 }
1612 
1613 static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1614 {
1615  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1616  struct fuse_file_info fi;
1617  struct flock flock;
1618 
1619  memset(&fi, 0, sizeof(fi));
1620  fi.fh = arg->fh;
1621  fi.lock_owner = arg->owner;
1622 
1623  convert_fuse_file_lock(&arg->lk, &flock);
1624  if (req->se->op.getlk)
1625  req->se->op.getlk(req, nodeid, &fi, &flock);
1626  else
1627  fuse_reply_err(req, ENOSYS);
1628 }
1629 
1630 static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1631  const void *inarg, int sleep)
1632 {
1633  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1634  struct fuse_file_info fi;
1635  struct flock flock;
1636 
1637  memset(&fi, 0, sizeof(fi));
1638  fi.fh = arg->fh;
1639  fi.lock_owner = arg->owner;
1640 
1641  if (arg->lk_flags & FUSE_LK_FLOCK) {
1642  int op = 0;
1643 
1644  switch (arg->lk.type) {
1645  case F_RDLCK:
1646  op = LOCK_SH;
1647  break;
1648  case F_WRLCK:
1649  op = LOCK_EX;
1650  break;
1651  case F_UNLCK:
1652  op = LOCK_UN;
1653  break;
1654  }
1655  if (!sleep)
1656  op |= LOCK_NB;
1657 
1658  if (req->se->op.flock)
1659  req->se->op.flock(req, nodeid, &fi, op);
1660  else
1661  fuse_reply_err(req, ENOSYS);
1662  } else {
1663  convert_fuse_file_lock(&arg->lk, &flock);
1664  if (req->se->op.setlk)
1665  req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1666  else
1667  fuse_reply_err(req, ENOSYS);
1668  }
1669 }
1670 
1671 static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1672 {
1673  do_setlk_common(req, nodeid, inarg, 0);
1674 }
1675 
1676 static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1677 {
1678  do_setlk_common(req, nodeid, inarg, 1);
1679 }
1680 
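/* Match a FUSE_INTERRUPT against outstanding requests.  If the target is
 * found on the active list, its registered interrupt callback is invoked
 * (with careful juggling of se->lock and the request lock) and 1 is
 * returned; 1 is also returned if an interrupt for that unique id is
 * already queued.  A return of 0 tells do_interrupt() to queue the
 * interrupt for a request that has not been seen yet. */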
1681 static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1682 {
1683  struct fuse_req *curr;
1684 
1685  for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1686  if (curr->unique == req->u.i.unique) {
1687  fuse_interrupt_func_t func;
1688  void *data;
1689 
1690  curr->ctr++;
1691  pthread_mutex_unlock(&se->lock);
1692 
1693  /* Ugh, ugly locking */
1694  pthread_mutex_lock(&curr->lock);
1695  pthread_mutex_lock(&se->lock);
1696  curr->interrupted = 1;
1697  func = curr->u.ni.func;
1698  data = curr->u.ni.data;
1699  pthread_mutex_unlock(&se->lock);
1700  if (func)
1701  func(curr, data);
1702  pthread_mutex_unlock(&curr->lock);
1703 
1704  pthread_mutex_lock(&se->lock);
1705  curr->ctr--;
1706  if (!curr->ctr)
1707  destroy_req(curr);
1708 
1709  return 1;
1710  }
1711  }
1712  for (curr = se->interrupts.next; curr != &se->interrupts;
1713  curr = curr->next) {
1714  if (curr->u.i.unique == req->u.i.unique)
1715  return 1;
1716  }
1717  return 0;
1718 }
1719 
1720 static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1721 {
1722  struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1723  struct fuse_session *se = req->se;
1724 
1725  (void) nodeid;
1726  if (se->debug)
1727  fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1728  (unsigned long long) arg->unique);
1729 
1730  req->u.i.unique = arg->unique;
1731 
1732  pthread_mutex_lock(&se->lock);
1733  if (find_interrupted(se, req))
1734  destroy_req(req);
1735  else
1736  list_add_req(req, &se->interrupts);
1737  pthread_mutex_unlock(&se->lock);
1738 }
1739 
1740 static struct fuse_req *check_interrupt(struct fuse_session *se,
1741  struct fuse_req *req)
1742 {
1743  struct fuse_req *curr;
1744 
1745  for (curr = se->interrupts.next; curr != &se->interrupts;
1746  curr = curr->next) {
1747  if (curr->u.i.unique == req->unique) {
1748  req->interrupted = 1;
1749  list_del_req(curr);
1750  free(curr);
1751  return NULL;
1752  }
1753  }
1754  curr = se->interrupts.next;
1755  if (curr != &se->interrupts) {
1756  list_del_req(curr);
1757  list_init_req(curr);
1758  return curr;
1759  } else
1760  return NULL;
1761 }
1762 
1763 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1764 {
1765  struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1766 
1767  if (req->se->op.bmap)
1768  req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1769  else
1770  fuse_reply_err(req, ENOSYS);
1771 }
1772 
1773 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1774 {
1775  struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1776  unsigned int flags = arg->flags;
1777  void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1778  struct fuse_file_info fi;
1779 
1780  if (flags & FUSE_IOCTL_DIR &&
1781  !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1782  fuse_reply_err(req, ENOTTY);
1783  return;
1784  }
1785 
1786  memset(&fi, 0, sizeof(fi));
1787  fi.fh = arg->fh;
1788 
1789  if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1790  !(flags & FUSE_IOCTL_32BIT)) {
1791  req->ioctl_64bit = 1;
1792  }
1793 
1794  if (req->se->op.ioctl)
1795  req->se->op.ioctl(req, nodeid, arg->cmd,
1796  (void *)(uintptr_t)arg->arg, &fi, flags,
1797  in_buf, arg->in_size, arg->out_size);
1798  else
1799  fuse_reply_err(req, ENOSYS);
1800 }
1801 
1802 void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1803 {
1804  free(ph);
1805 }
1806 
1807 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1808 {
1809  struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1810  struct fuse_file_info fi;
1811 
1812  memset(&fi, 0, sizeof(fi));
1813  fi.fh = arg->fh;
1814  fi.poll_events = arg->events;
1815 
1816  if (req->se->op.poll) {
1817  struct fuse_pollhandle *ph = NULL;
1818 
1819  if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1820  ph = malloc(sizeof(struct fuse_pollhandle));
1821  if (ph == NULL) {
1822  fuse_reply_err(req, ENOMEM);
1823  return;
1824  }
1825  ph->kh = arg->kh;
1826  ph->se = req->se;
1827  }
1828 
1829  req->se->op.poll(req, nodeid, &fi, ph);
1830  } else {
1831  fuse_reply_err(req, ENOSYS);
1832  }
1833 }
1834 
1835 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1836 {
1837  struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1838  struct fuse_file_info fi;
1839 
1840  memset(&fi, 0, sizeof(fi));
1841  fi.fh = arg->fh;
1842 
1843  if (req->se->op.fallocate)
1844  req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1845  else
1846  fuse_reply_err(req, ENOSYS);
1847 }
1848 
1849 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1850 {
1851  struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1852  struct fuse_file_info fi_in, fi_out;
1853 
1854  memset(&fi_in, 0, sizeof(fi_in));
1855  fi_in.fh = arg->fh_in;
1856 
1857  memset(&fi_out, 0, sizeof(fi_out));
1858  fi_out.fh = arg->fh_out;
1859 
1860 
1861  if (req->se->op.copy_file_range)
1862  req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1863  &fi_in, arg->nodeid_out,
1864  arg->off_out, &fi_out, arg->len,
1865  arg->flags);
1866  else
1867  fuse_reply_err(req, ENOSYS);
1868 }
1869 
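/* FUSE_INIT handshake: record the kernel's protocol version and advertised
 * feature flags in se->conn.capable, enable sensible defaults for the
 * operations the filesystem implements, let the filesystem adjust the
 * settings in its init() callback, sanity-check conn.want against
 * conn.capable and the max_read mount option, and finally send a
 * fuse_init_out sized for the negotiated protocol minor version. */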
1870 static void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1871 {
1872  struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
1873  struct fuse_init_out outarg;
1874  struct fuse_session *se = req->se;
1875  size_t bufsize = se->bufsize;
1876  size_t outargsize = sizeof(outarg);
1877 
1878  (void) nodeid;
1879  if (se->debug) {
1880  fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
1881  if (arg->major == 7 && arg->minor >= 6) {
1882  fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
1883  fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
1884  arg->max_readahead);
1885  }
1886  }
1887  se->conn.proto_major = arg->major;
1888  se->conn.proto_minor = arg->minor;
1889  se->conn.capable = 0;
1890  se->conn.want = 0;
1891 
1892  memset(&outarg, 0, sizeof(outarg));
1893  outarg.major = FUSE_KERNEL_VERSION;
1894  outarg.minor = FUSE_KERNEL_MINOR_VERSION;
1895 
1896  if (arg->major < 7) {
1897  fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
1898  arg->major, arg->minor);
1899  fuse_reply_err(req, EPROTO);
1900  return;
1901  }
1902 
1903  if (arg->major > 7) {
1904  /* Wait for a second INIT request with a 7.X version */
1905  send_reply_ok(req, &outarg, sizeof(outarg));
1906  return;
1907  }
1908 
1909  if (arg->minor >= 6) {
1910  if (arg->max_readahead < se->conn.max_readahead)
1911  se->conn.max_readahead = arg->max_readahead;
1912  if (arg->flags & FUSE_ASYNC_READ)
1913  se->conn.capable |= FUSE_CAP_ASYNC_READ;
1914  if (arg->flags & FUSE_POSIX_LOCKS)
1915  se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
1916  if (arg->flags & FUSE_ATOMIC_O_TRUNC)
1917  se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
1918  if (arg->flags & FUSE_EXPORT_SUPPORT)
1919  se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
1920  if (arg->flags & FUSE_DONT_MASK)
1921  se->conn.capable |= FUSE_CAP_DONT_MASK;
1922  if (arg->flags & FUSE_FLOCK_LOCKS)
1923  se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
1924  if (arg->flags & FUSE_AUTO_INVAL_DATA)
1925  se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
1926  if (arg->flags & FUSE_DO_READDIRPLUS)
1927  se->conn.capable |= FUSE_CAP_READDIRPLUS;
1928  if (arg->flags & FUSE_READDIRPLUS_AUTO)
1929  se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
1930  if (arg->flags & FUSE_ASYNC_DIO)
1931  se->conn.capable |= FUSE_CAP_ASYNC_DIO;
1932  if (arg->flags & FUSE_WRITEBACK_CACHE)
1933  se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
1934  if (arg->flags & FUSE_NO_OPEN_SUPPORT)
1935  se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
1936  if (arg->flags & FUSE_PARALLEL_DIROPS)
1937  se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
1938  if (arg->flags & FUSE_POSIX_ACL)
1939  se->conn.capable |= FUSE_CAP_POSIX_ACL;
1940  if (arg->flags & FUSE_HANDLE_KILLPRIV)
1941  se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
1942  if (arg->flags & FUSE_NO_OPENDIR_SUPPORT)
1943  se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
1944  if (!(arg->flags & FUSE_MAX_PAGES)) {
1945  size_t max_bufsize =
1946  FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
1947  + FUSE_BUFFER_HEADER_SIZE;
1948  if (bufsize > max_bufsize) {
1949  bufsize = max_bufsize;
1950  }
1951  }
1952  } else {
1953  se->conn.max_readahead = 0;
1954  }
1955 
1956  if (se->conn.proto_minor >= 14) {
1957 #ifdef HAVE_SPLICE
1958 #ifdef HAVE_VMSPLICE
1959  se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
1960 #endif
1961  se->conn.capable |= FUSE_CAP_SPLICE_READ;
1962 #endif
1963  }
1964  if (se->conn.proto_minor >= 18)
1965  se->conn.capable |= FUSE_CAP_IOCTL_DIR;
1966 
1967  /* Default settings for modern filesystems.
1968  *
1969  * Most of these capabilities were disabled by default in
1970  * libfuse2 for backwards compatibility reasons. In libfuse3,
1971  * we can finally enable them by default (as long as they're
1972  * supported by the kernel).
1973  */
1974 #define LL_SET_DEFAULT(cond, cap) \
1975  if ((cond) && (se->conn.capable & (cap))) \
1976  se->conn.want |= (cap)
1977  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
1978  LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
1979  LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
1980  LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
1981  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
1982  LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
1983  LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
1984  LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
1985  LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
1986  FUSE_CAP_POSIX_LOCKS);
1987  LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
1988  LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
1989  LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
1990  FUSE_CAP_READDIRPLUS_AUTO);
1991  se->conn.time_gran = 1;
1992 
1993  if (bufsize < FUSE_MIN_READ_BUFFER) {
1994  fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
1995  bufsize);
1996  bufsize = FUSE_MIN_READ_BUFFER;
1997  }
1998  se->bufsize = bufsize;
1999 
2000  if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2001  se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2002 
2003  se->got_init = 1;
2004  if (se->op.init)
2005  se->op.init(se->userdata, &se->conn);
2006 
2007  if (se->conn.want & (~se->conn.capable)) {
2008  fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2009  "0x%x that are not supported by kernel, aborting.\n",
2010  se->conn.want & (~se->conn.capable));
2011  fuse_reply_err(req, EPROTO);
2012  se->error = -EPROTO;
2013  fuse_session_exit(se);
2014  return;
2015  }
2016 
2017  unsigned max_read_mo = get_max_read(se->mo);
2018  if (se->conn.max_read != max_read_mo) {
2019  fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2020  "requested different maximum read size (%u vs %u)\n",
2021  se->conn.max_read, max_read_mo);
2022  fuse_reply_err(req, EPROTO);
2023  se->error = -EPROTO;
2024  fuse_session_exit(se);
2025  return;
2026  }
2027 
2028  if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2029  se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2030  }
2031  if (arg->flags & FUSE_MAX_PAGES) {
2032  outarg.flags |= FUSE_MAX_PAGES;
2033  outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2034  }
2035 
2036  /* Always enable big writes; this is superseded
2037  by the max_write option */
2038  outarg.flags |= FUSE_BIG_WRITES;
2039 
2040  if (se->conn.want & FUSE_CAP_ASYNC_READ)
2041  outarg.flags |= FUSE_ASYNC_READ;
2042  if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2043  outarg.flags |= FUSE_POSIX_LOCKS;
2044  if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2045  outarg.flags |= FUSE_ATOMIC_O_TRUNC;
2046  if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2047  outarg.flags |= FUSE_EXPORT_SUPPORT;
2048  if (se->conn.want & FUSE_CAP_DONT_MASK)
2049  outarg.flags |= FUSE_DONT_MASK;
2050  if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2051  outarg.flags |= FUSE_FLOCK_LOCKS;
2052  if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2053  outarg.flags |= FUSE_AUTO_INVAL_DATA;
2054  if (se->conn.want & FUSE_CAP_READDIRPLUS)
2055  outarg.flags |= FUSE_DO_READDIRPLUS;
2056  if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2057  outarg.flags |= FUSE_READDIRPLUS_AUTO;
2058  if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2059  outarg.flags |= FUSE_ASYNC_DIO;
2060  if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2061  outarg.flags |= FUSE_WRITEBACK_CACHE;
2062  if (se->conn.want & FUSE_CAP_POSIX_ACL)
2063  outarg.flags |= FUSE_POSIX_ACL;
2064  outarg.max_readahead = se->conn.max_readahead;
2065  outarg.max_write = se->conn.max_write;
2066  if (se->conn.proto_minor >= 13) {
2067  if (se->conn.max_background >= (1 << 16))
2068  se->conn.max_background = (1 << 16) - 1;
2069  if (se->conn.congestion_threshold > se->conn.max_background)
2070  se->conn.congestion_threshold = se->conn.max_background;
2071  if (!se->conn.congestion_threshold) {
2072  se->conn.congestion_threshold =
2073  se->conn.max_background * 3 / 4;
2074  }
2075 
2076  outarg.max_background = se->conn.max_background;
2077  outarg.congestion_threshold = se->conn.congestion_threshold;
2078  }
2079  if (se->conn.proto_minor >= 23)
2080  outarg.time_gran = se->conn.time_gran;
2081 
2082  if (se->debug) {
2083  fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2084  fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2085  fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2086  outarg.max_readahead);
2087  fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2088  fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2089  outarg.max_background);
2090  fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2091  outarg.congestion_threshold);
2092  fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2093  outarg.time_gran);
2094  }
2095  if (arg->minor < 5)
2096  outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2097  else if (arg->minor < 23)
2098  outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2099 
2100  send_reply_ok(req, &outarg, outargsize);
2101 }
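
/*
 * Example (editor's sketch, not part of upstream fuse_lowlevel.c): an
 * init() handler as invoked via se->op.init() above.  It may only
 * request capabilities that the kernel advertised in conn->capable;
 * anything else makes do_init() abort the session with EPROTO.  The
 * function name and the choices made here are illustrative only.
 */
static void example_init(void *userdata, struct fuse_conn_info *conn)
{
    (void) userdata;

    /* Opt into writeback caching only if the kernel offers it */
    if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
        conn->want |= FUSE_CAP_WRITEBACK_CACHE;

    /* Opt back out of a default that do_init() enabled automatically */
    conn->want &= ~FUSE_CAP_ASYNC_READ;

    /* This filesystem keeps nanosecond-resolution timestamps */
    conn->time_gran = 1;
}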
2102 
2103 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2104 {
2105  struct fuse_session *se = req->se;
2106 
2107  (void) nodeid;
2108  (void) inarg;
2109 
2110  se->got_destroy = 1;
2111  if (se->op.destroy)
2112  se->op.destroy(se->userdata);
2113 
2114  send_reply_ok(req, NULL, 0);
2115 }
2116 
2117 static void list_del_nreq(struct fuse_notify_req *nreq)
2118 {
2119  struct fuse_notify_req *prev = nreq->prev;
2120  struct fuse_notify_req *next = nreq->next;
2121  prev->next = next;
2122  next->prev = prev;
2123 }
2124 
2125 static void list_add_nreq(struct fuse_notify_req *nreq,
2126  struct fuse_notify_req *next)
2127 {
2128  struct fuse_notify_req *prev = next->prev;
2129  nreq->next = next;
2130  nreq->prev = prev;
2131  prev->next = nreq;
2132  next->prev = nreq;
2133 }
2134 
2135 static void list_init_nreq(struct fuse_notify_req *nreq)
2136 {
2137  nreq->next = nreq;
2138  nreq->prev = nreq;
2139 }
2140 
2141 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2142  const void *inarg, const struct fuse_buf *buf)
2143 {
2144  struct fuse_session *se = req->se;
2145  struct fuse_notify_req *nreq;
2146  struct fuse_notify_req *head;
2147 
2148  pthread_mutex_lock(&se->lock);
2149  head = &se->notify_list;
2150  for (nreq = head->next; nreq != head; nreq = nreq->next) {
2151  if (nreq->unique == req->unique) {
2152  list_del_nreq(nreq);
2153  break;
2154  }
2155  }
2156  pthread_mutex_unlock(&se->lock);
2157 
2158  if (nreq != head)
2159  nreq->reply(nreq, req, nodeid, inarg, buf);
2160 }
2161 
2162 static int send_notify_iov(struct fuse_session *se, int notify_code,
2163  struct iovec *iov, int count)
2164 {
2165  struct fuse_out_header out;
2166 
2167  if (!se->got_init)
2168  return -ENOTCONN;
2169 
2170  out.unique = 0;
2171  out.error = notify_code;
2172  iov[0].iov_base = &out;
2173  iov[0].iov_len = sizeof(struct fuse_out_header);
2174 
2175  return fuse_send_msg(se, NULL, iov, count);
2176 }
2177 
2178 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2179 {
2180  if (ph != NULL) {
2181  struct fuse_notify_poll_wakeup_out outarg;
2182  struct iovec iov[2];
2183 
2184  outarg.kh = ph->kh;
2185 
2186  iov[1].iov_base = &outarg;
2187  iov[1].iov_len = sizeof(outarg);
2188 
2189  return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2190  } else {
2191  return 0;
2192  }
2193 }
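
/*
 * Example (editor's sketch): how a filesystem typically pairs its poll()
 * handler with fuse_lowlevel_notify_poll().  The single saved_ph handle
 * and the mark_data_ready() trigger are hypothetical; a real filesystem
 * keeps one handle per polled file and serializes access to it.
 */
static struct fuse_pollhandle *saved_ph;

static void example_poll(fuse_req_t req, fuse_ino_t ino,
                         struct fuse_file_info *fi, struct fuse_pollhandle *ph)
{
    (void) ino; (void) fi;

    if (ph != NULL) {
        /* Keep the newest handle, release any previous one */
        if (saved_ph)
            fuse_pollhandle_destroy(saved_ph);
        saved_ph = ph;
    }
    /* Nothing ready yet; the kernel waits for the notification below */
    fuse_reply_poll(req, 0);
}

static void mark_data_ready(void)
{
    /* Data became available out-of-band: wake up the kernel poller */
    if (saved_ph)
        fuse_lowlevel_notify_poll(saved_ph);
}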
2194 
2195 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2196  off_t off, off_t len)
2197 {
2198  struct fuse_notify_inval_inode_out outarg;
2199  struct iovec iov[2];
2200 
2201  if (!se)
2202  return -EINVAL;
2203 
2204  if (se->conn.proto_major < 6 || se->conn.proto_minor < 12)
2205  return -ENOSYS;
2206 
2207  outarg.ino = ino;
2208  outarg.off = off;
2209  outarg.len = len;
2210 
2211  iov[1].iov_base = &outarg;
2212  iov[1].iov_len = sizeof(outarg);
2213 
2214  return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2215 }
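
/*
 * Example (editor's sketch): dropping the kernel's cached data for an
 * inode whose backing store changed behind the kernel's back.  Passing
 * len == 0 invalidates the whole file; -ENOENT only means the kernel
 * had nothing cached for this inode.
 */
static void example_invalidate_inode(struct fuse_session *se, fuse_ino_t ino)
{
    int err = fuse_lowlevel_notify_inval_inode(se, ino, 0, 0);

    if (err && err != -ENOENT)
        fuse_log(FUSE_LOG_ERR, "inval_inode: %s\n", strerror(-err));
}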
2216 
2217 int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2218  const char *name, size_t namelen)
2219 {
2220  struct fuse_notify_inval_entry_out outarg;
2221  struct iovec iov[3];
2222 
2223  if (!se)
2224  return -EINVAL;
2225 
2226  if (se->conn.proto_major < 6 || se->conn.proto_minor < 12)
2227  return -ENOSYS;
2228 
2229  outarg.parent = parent;
2230  outarg.namelen = namelen;
2231  outarg.padding = 0;
2232 
2233  iov[1].iov_base = &outarg;
2234  iov[1].iov_len = sizeof(outarg);
2235  iov[2].iov_base = (void *)name;
2236  iov[2].iov_len = namelen + 1;
2237 
2238  return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2239 }
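
/*
 * Example (editor's sketch): the matching call for directory entries,
 * used after a name was removed or renamed outside of the kernel's
 * view.  As above, -ENOENT just means the entry was not cached.
 */
static void example_invalidate_name(struct fuse_session *se, fuse_ino_t parent,
                                    const char *name)
{
    int err = fuse_lowlevel_notify_inval_entry(se, parent, name, strlen(name));

    if (err && err != -ENOENT)
        fuse_log(FUSE_LOG_ERR, "inval_entry: %s\n", strerror(-err));
}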
2240 
2241 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2242  fuse_ino_t parent, fuse_ino_t child,
2243  const char *name, size_t namelen)
2244 {
2245  struct fuse_notify_delete_out outarg;
2246  struct iovec iov[3];
2247 
2248  if (!se)
2249  return -EINVAL;
2250 
2251  if (se->conn.proto_major < 6 || se->conn.proto_minor < 18)
2252  return -ENOSYS;
2253 
2254  outarg.parent = parent;
2255  outarg.child = child;
2256  outarg.namelen = namelen;
2257  outarg.padding = 0;
2258 
2259  iov[1].iov_base = &outarg;
2260  iov[1].iov_len = sizeof(outarg);
2261  iov[2].iov_base = (void *)name;
2262  iov[2].iov_len = namelen + 1;
2263 
2264  return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2265 }
2266 
2267 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2268  off_t offset, struct fuse_bufvec *bufv,
2269  enum fuse_buf_copy_flags flags)
2270 {
2271  struct fuse_out_header out;
2272  struct fuse_notify_store_out outarg;
2273  struct iovec iov[3];
2274  size_t size = fuse_buf_size(bufv);
2275  int res;
2276 
2277  if (!se)
2278  return -EINVAL;
2279 
2280  if (se->conn.proto_major < 6 || se->conn.proto_minor < 15)
2281  return -ENOSYS;
2282 
2283  out.unique = 0;
2284  out.error = FUSE_NOTIFY_STORE;
2285 
2286  outarg.nodeid = ino;
2287  outarg.offset = offset;
2288  outarg.size = size;
2289  outarg.padding = 0;
2290 
2291  iov[0].iov_base = &out;
2292  iov[0].iov_len = sizeof(out);
2293  iov[1].iov_base = &outarg;
2294  iov[1].iov_len = sizeof(outarg);
2295 
2296  res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2297  if (res > 0)
2298  res = -res;
2299 
2300  return res;
2301 }
2302 
2303 struct fuse_retrieve_req {
2304  struct fuse_notify_req nreq;
2305  void *cookie;
2306 };
2307 
2308 static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2309  fuse_req_t req, fuse_ino_t ino,
2310  const void *inarg,
2311  const struct fuse_buf *ibuf)
2312 {
2313  struct fuse_session *se = req->se;
2314  struct fuse_retrieve_req *rreq =
2315  container_of(nreq, struct fuse_retrieve_req, nreq);
2316  const struct fuse_notify_retrieve_in *arg = inarg;
2317  struct fuse_bufvec bufv = {
2318  .buf[0] = *ibuf,
2319  .count = 1,
2320  };
2321 
2322  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2323  bufv.buf[0].mem = PARAM(arg);
2324 
2325  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2326  sizeof(struct fuse_notify_retrieve_in);
2327 
2328  if (bufv.buf[0].size < arg->size) {
2329  fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2330  fuse_reply_none(req);
2331  goto out;
2332  }
2333  bufv.buf[0].size = arg->size;
2334 
2335  if (se->op.retrieve_reply) {
2336  se->op.retrieve_reply(req, rreq->cookie, ino,
2337  arg->offset, &bufv);
2338  } else {
2339  fuse_reply_none(req);
2340  }
2341 out:
2342  free(rreq);
2343  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2344  fuse_ll_clear_pipe(se);
2345 }
2346 
2347 int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2348  size_t size, off_t offset, void *cookie)
2349 {
2350  struct fuse_notify_retrieve_out outarg;
2351  struct iovec iov[2];
2352  struct fuse_retrieve_req *rreq;
2353  int err;
2354 
2355  if (!se)
2356  return -EINVAL;
2357 
2358  if (se->conn.proto_major < 6 || se->conn.proto_minor < 15)
2359  return -ENOSYS;
2360 
2361  rreq = malloc(sizeof(*rreq));
2362  if (rreq == NULL)
2363  return -ENOMEM;
2364 
2365  pthread_mutex_lock(&se->lock);
2366  rreq->cookie = cookie;
2367  rreq->nreq.unique = se->notify_ctr++;
2368  rreq->nreq.reply = fuse_ll_retrieve_reply;
2369  list_add_nreq(&rreq->nreq, &se->notify_list);
2370  pthread_mutex_unlock(&se->lock);
2371 
2372  outarg.notify_unique = rreq->nreq.unique;
2373  outarg.nodeid = ino;
2374  outarg.offset = offset;
2375  outarg.size = size;
2376  outarg.padding = 0;
2377 
2378  iov[1].iov_base = &outarg;
2379  iov[1].iov_len = sizeof(outarg);
2380 
2381  err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2382  if (err) {
2383  pthread_mutex_lock(&se->lock);
2384  list_del_nreq(&rreq->nreq);
2385  pthread_mutex_unlock(&se->lock);
2386  free(rreq);
2387  }
2388 
2389  return err;
2390 }
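
/*
 * Example (editor's sketch): pairing fuse_lowlevel_notify_store() with
 * fuse_lowlevel_notify_retrieve().  The store pushes data into the
 * kernel's page cache; the retrieve asks for it back, and the answer is
 * delivered to the filesystem's retrieve_reply() operation together
 * with the cookie passed here.  The inode number is illustrative.
 */
static void example_store_and_retrieve(struct fuse_session *se, fuse_ino_t ino)
{
    char data[] = "cached by the kernel";
    struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(sizeof(data));

    bufv.buf[0].mem = data;
    if (fuse_lowlevel_notify_store(se, ino, 0, &bufv, 0) != 0)
        return;

    /* Ask for the first 4 KiB back; any pointer works as the cookie */
    fuse_lowlevel_notify_retrieve(se, ino, 4096, 0, NULL);
}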
2391 
2392 void *fuse_req_userdata(fuse_req_t req)
2393 {
2394  return req->se->userdata;
2395 }
2396 
2397 const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2398 {
2399  return &req->ctx;
2400 }
2401 
2402 void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2403  void *data)
2404 {
2405  pthread_mutex_lock(&req->lock);
2406  pthread_mutex_lock(&req->se->lock);
2407  req->u.ni.func = func;
2408  req->u.ni.data = data;
2409  pthread_mutex_unlock(&req->se->lock);
2410  if (req->interrupted && func)
2411  func(req, data);
2412  pthread_mutex_unlock(&req->lock);
2413 }
2414 
2415 int fuse_req_interrupted(fuse_req_t req)
2416 {
2417  int interrupted;
2418 
2419  pthread_mutex_lock(&req->se->lock);
2420  interrupted = req->interrupted;
2421  pthread_mutex_unlock(&req->se->lock);
2422 
2423  return interrupted;
2424 }
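
/*
 * Example (editor's sketch): a long-running read() handler that polls
 * fuse_req_interrupted() and bails out early; a callback-based variant
 * would register fuse_req_interrupt_func() instead.  The slow per-block
 * work is only hinted at in a comment.
 */
static void example_read(fuse_req_t req, fuse_ino_t ino, size_t size,
                         off_t off, struct fuse_file_info *fi)
{
    char *buf;
    size_t done;

    (void) ino; (void) off; (void) fi;

    buf = calloc(1, size);
    if (buf == NULL) {
        fuse_reply_err(req, ENOMEM);
        return;
    }
    for (done = 0; done < size; done += 4096) {
        if (fuse_req_interrupted(req)) {
            free(buf);
            fuse_reply_err(req, EINTR); /* customary reply for interrupted requests */
            return;
        }
        /* ... fill buf + done from slow storage here ... */
    }
    fuse_reply_buf(req, buf, size);
    free(buf);
}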
2425 
2426 static struct {
2427  void (*func)(fuse_req_t, fuse_ino_t, const void *);
2428  const char *name;
2429 } fuse_ll_ops[] = {
2430  [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2431  [FUSE_FORGET] = { do_forget, "FORGET" },
2432  [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2433  [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2434  [FUSE_READLINK] = { do_readlink, "READLINK" },
2435  [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2436  [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2437  [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2438  [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2439  [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2440  [FUSE_RENAME] = { do_rename, "RENAME" },
2441  [FUSE_LINK] = { do_link, "LINK" },
2442  [FUSE_OPEN] = { do_open, "OPEN" },
2443  [FUSE_READ] = { do_read, "READ" },
2444  [FUSE_WRITE] = { do_write, "WRITE" },
2445  [FUSE_STATFS] = { do_statfs, "STATFS" },
2446  [FUSE_RELEASE] = { do_release, "RELEASE" },
2447  [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2448  [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2449  [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2450  [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2451  [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2452  [FUSE_FLUSH] = { do_flush, "FLUSH" },
2453  [FUSE_INIT] = { do_init, "INIT" },
2454  [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2455  [FUSE_READDIR] = { do_readdir, "READDIR" },
2456  [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2457  [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2458  [FUSE_GETLK] = { do_getlk, "GETLK" },
2459  [FUSE_SETLK] = { do_setlk, "SETLK" },
2460  [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2461  [FUSE_ACCESS] = { do_access, "ACCESS" },
2462  [FUSE_CREATE] = { do_create, "CREATE" },
2463  [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2464  [FUSE_BMAP] = { do_bmap, "BMAP" },
2465  [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2466  [FUSE_POLL] = { do_poll, "POLL" },
2467  [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2468  [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2469  [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2470  [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2471  [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2472  [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2473  [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2474  [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2475 };
2476 
2477 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2478 
2479 static const char *opname(enum fuse_opcode opcode)
2480 {
2481  if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2482  return "???";
2483  else
2484  return fuse_ll_ops[opcode].name;
2485 }
2486 
2487 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2488  struct fuse_bufvec *src)
2489 {
2490  ssize_t res = fuse_buf_copy(dst, src, 0);
2491  if (res < 0) {
2492  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2493  return res;
2494  }
2495  if ((size_t)res < fuse_buf_size(dst)) {
2496  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2497  return -1;
2498  }
2499  return 0;
2500 }
2501 
2502 void fuse_session_process_buf(struct fuse_session *se,
2503  const struct fuse_buf *buf)
2504 {
2505  fuse_session_process_buf_int(se, buf, NULL);
2506 }
2507 
2508 void fuse_session_process_buf_int(struct fuse_session *se,
2509  const struct fuse_buf *buf, struct fuse_chan *ch)
2510 {
2511  const size_t write_header_size = sizeof(struct fuse_in_header) +
2512  sizeof(struct fuse_write_in);
2513  struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2514  struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2515  struct fuse_in_header *in;
2516  const void *inarg;
2517  struct fuse_req *req;
2518  void *mbuf = NULL;
2519  int err;
2520  int res;
2521 
2522  if (buf->flags & FUSE_BUF_IS_FD) {
2523  if (buf->size < tmpbuf.buf[0].size)
2524  tmpbuf.buf[0].size = buf->size;
2525 
2526  mbuf = malloc(tmpbuf.buf[0].size);
2527  if (mbuf == NULL) {
2528  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2529  goto clear_pipe;
2530  }
2531  tmpbuf.buf[0].mem = mbuf;
2532 
2533  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2534  if (res < 0)
2535  goto clear_pipe;
2536 
2537  in = mbuf;
2538  } else {
2539  in = buf->mem;
2540  }
2541 
2542  if (se->debug) {
2543  fuse_log(FUSE_LOG_DEBUG,
2544  "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2545  (unsigned long long) in->unique,
2546  opname((enum fuse_opcode) in->opcode), in->opcode,
2547  (unsigned long long) in->nodeid, buf->size, in->pid);
2548  }
2549 
2550  req = fuse_ll_alloc_req(se);
2551  if (req == NULL) {
2552  struct fuse_out_header out = {
2553  .unique = in->unique,
2554  .error = -ENOMEM,
2555  };
2556  struct iovec iov = {
2557  .iov_base = &out,
2558  .iov_len = sizeof(struct fuse_out_header),
2559  };
2560 
2561  fuse_send_msg(se, ch, &iov, 1);
2562  goto clear_pipe;
2563  }
2564 
2565  req->unique = in->unique;
2566  req->ctx.uid = in->uid;
2567  req->ctx.gid = in->gid;
2568  req->ctx.pid = in->pid;
2569  req->ch = ch ? fuse_chan_get(ch) : NULL;
2570 
2571  err = EIO;
2572  if (!se->got_init) {
2573  enum fuse_opcode expected;
2574 
2575  expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2576  if (in->opcode != expected)
2577  goto reply_err;
2578  } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2579  goto reply_err;
2580 
2581  err = EACCES;
2582  /* Implement -o allow_root */
2583  if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2584  in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2585  in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2586  in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2587  in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2588  in->opcode != FUSE_NOTIFY_REPLY &&
2589  in->opcode != FUSE_READDIRPLUS)
2590  goto reply_err;
2591 
2592  err = ENOSYS;
2593  if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2594  goto reply_err;
2595  if (in->opcode != FUSE_INTERRUPT) {
2596  struct fuse_req *intr;
2597  pthread_mutex_lock(&se->lock);
2598  intr = check_interrupt(se, req);
2599  list_add_req(req, &se->list);
2600  pthread_mutex_unlock(&se->lock);
2601  if (intr)
2602  fuse_reply_err(intr, EAGAIN);
2603  }
2604 
2605  if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2606  (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2607  in->opcode != FUSE_NOTIFY_REPLY) {
2608  void *newmbuf;
2609 
2610  err = ENOMEM;
2611  newmbuf = realloc(mbuf, buf->size);
2612  if (newmbuf == NULL)
2613  goto reply_err;
2614  mbuf = newmbuf;
2615 
2616  tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2617  tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2618 
2619  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2620  err = -res;
2621  if (res < 0)
2622  goto reply_err;
2623 
2624  in = mbuf;
2625  }
2626 
2627  inarg = (void *) &in[1];
2628  if (in->opcode == FUSE_WRITE && se->op.write_buf)
2629  do_write_buf(req, in->nodeid, inarg, buf);
2630  else if (in->opcode == FUSE_NOTIFY_REPLY)
2631  do_notify_reply(req, in->nodeid, inarg, buf);
2632  else
2633  fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2634 
2635 out_free:
2636  free(mbuf);
2637  return;
2638 
2639 reply_err:
2640  fuse_reply_err(req, err);
2641 clear_pipe:
2642  if (buf->flags & FUSE_BUF_IS_FD)
2643  fuse_ll_clear_pipe(se);
2644  goto out_free;
2645 }
2646 
2647 #define LL_OPTION(n,o,v) \
2648  { n, offsetof(struct fuse_session, o), v }
2649 
2650 static const struct fuse_opt fuse_ll_opts[] = {
2651  LL_OPTION("debug", debug, 1),
2652  LL_OPTION("-d", debug, 1),
2653  LL_OPTION("--debug", debug, 1),
2654  LL_OPTION("allow_root", deny_others, 1),
2655  FUSE_OPT_END
2656 };
2657 
2658 void fuse_lowlevel_version(void)
2659 {
2660  printf("using FUSE kernel interface version %i.%i\n",
2661  FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2662  fuse_mount_version();
2663 }
2664 
2665 void fuse_lowlevel_help(void)
2666 {
2667  /* These are not all options, but the ones that are
2668  potentially of interest to an end-user */
2669  printf(
2670 " -o allow_other allow access by all users\n"
2671 " -o allow_root allow access by root\n"
2672 " -o auto_unmount auto unmount on process termination\n");
2673 }
2674 
2675 void fuse_session_destroy(struct fuse_session *se)
2676 {
2677  struct fuse_ll_pipe *llp;
2678 
2679  if (se->got_init && !se->got_destroy) {
2680  if (se->op.destroy)
2681  se->op.destroy(se->userdata);
2682  }
2683  llp = pthread_getspecific(se->pipe_key);
2684  if (llp != NULL)
2685  fuse_ll_pipe_free(llp);
2686  pthread_key_delete(se->pipe_key);
2687  pthread_mutex_destroy(&se->lock);
2688  free(se->cuse_data);
2689  if (se->fd != -1)
2690  close(se->fd);
2691  destroy_mount_opts(se->mo);
2692  free(se);
2693 }
2694 
2695 
2696 static void fuse_ll_pipe_destructor(void *data)
2697 {
2698  struct fuse_ll_pipe *llp = data;
2699  fuse_ll_pipe_free(llp);
2700 }
2701 
2702 int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
2703 {
2704  return fuse_session_receive_buf_int(se, buf, NULL);
2705 }
2706 
2707 int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
2708  struct fuse_chan *ch)
2709 {
2710  int err;
2711  ssize_t res;
2712 #ifdef HAVE_SPLICE
2713  size_t bufsize = se->bufsize;
2714  struct fuse_ll_pipe *llp;
2715  struct fuse_buf tmpbuf;
2716 
2717  if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
2718  goto fallback;
2719 
2720  llp = fuse_ll_get_pipe(se);
2721  if (llp == NULL)
2722  goto fallback;
2723 
2724  if (llp->size < bufsize) {
2725  if (llp->can_grow) {
2726  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
2727  if (res == -1) {
2728  llp->can_grow = 0;
2729  res = grow_pipe_to_max(llp->pipe[0]);
2730  if (res > 0)
2731  llp->size = res;
2732  goto fallback;
2733  }
2734  llp->size = res;
2735  }
2736  if (llp->size < bufsize)
2737  goto fallback;
2738  }
2739 
2740  res = splice(ch ? ch->fd : se->fd,
2741  NULL, llp->pipe[1], NULL, bufsize, 0);
2742  err = errno;
2743 
2744  if (fuse_session_exited(se))
2745  return 0;
2746 
2747  if (res == -1) {
2748  if (err == ENODEV) {
2749  /* Filesystem was unmounted, or connection was aborted
2750  via /sys/fs/fuse/connections */
2751  fuse_session_exit(se);
2752  return 0;
2753  }
2754  if (err != EINTR && err != EAGAIN)
2755  perror("fuse: splice from device");
2756  return -err;
2757  }
2758 
2759  if (res < sizeof(struct fuse_in_header)) {
2760  fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
2761  return -EIO;
2762  }
2763 
2764  tmpbuf = (struct fuse_buf) {
2765  .size = res,
2766  .flags = FUSE_BUF_IS_FD,
2767  .fd = llp->pipe[0],
2768  };
2769 
2770  /*
2771  * Don't bother with zero copy for small requests.
2772  * fuse_loop_mt() needs to check for FORGET so this is more than
2773  * just an optimization.
2774  */
2775  if (res < sizeof(struct fuse_in_header) +
2776  sizeof(struct fuse_write_in) + pagesize) {
2777  struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
2778  struct fuse_bufvec dst = { .count = 1 };
2779 
2780  if (!buf->mem) {
2781  buf->mem = malloc(se->bufsize);
2782  if (!buf->mem) {
2783  fuse_log(FUSE_LOG_ERR,
2784  "fuse: failed to allocate read buffer\n");
2785  return -ENOMEM;
2786  }
2787  }
2788  buf->size = se->bufsize;
2789  buf->flags = 0;
2790  dst.buf[0] = *buf;
2791 
2792  res = fuse_buf_copy(&dst, &src, 0);
2793  if (res < 0) {
2794  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
2795  strerror(-res));
2796  fuse_ll_clear_pipe(se);
2797  return res;
2798  }
2799  if (res < tmpbuf.size) {
2800  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2801  fuse_ll_clear_pipe(se);
2802  return -EIO;
2803  }
2804  assert(res == tmpbuf.size);
2805 
2806  } else {
2807  /* Don't overwrite buf->mem, as that would cause a leak */
2808  buf->fd = tmpbuf.fd;
2809  buf->flags = tmpbuf.flags;
2810  }
2811  buf->size = tmpbuf.size;
2812 
2813  return res;
2814 
2815 fallback:
2816 #endif
2817  if (!buf->mem) {
2818  buf->mem = malloc(se->bufsize);
2819  if (!buf->mem) {
2820  fuse_log(FUSE_LOG_ERR,
2821  "fuse: failed to allocate read buffer\n");
2822  return -ENOMEM;
2823  }
2824  }
2825 
2826 restart:
2827  res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
2828  err = errno;
2829 
2830  if (fuse_session_exited(se))
2831  return 0;
2832  if (res == -1) {
2833  /* ENOENT means the operation was interrupted; it's safe
2834  to restart */
2835  if (err == ENOENT)
2836  goto restart;
2837 
2838  if (err == ENODEV) {
2839  /* Filesystem was unmounted, or connection was aborted
2840  via /sys/fs/fuse/connections */
2841  fuse_session_exit(se);
2842  return 0;
2843  }
2844  /* Errors occurring during normal operation: EINTR (read
2845  interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
2846  umounted) */
2847  if (err != EINTR && err != EAGAIN)
2848  perror("fuse: reading device");
2849  return -err;
2850  }
2851  if ((size_t) res < sizeof(struct fuse_in_header)) {
2852  fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
2853  return -EIO;
2854  }
2855 
2856  buf->size = res;
2857 
2858  return res;
2859 }
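
/*
 * Example (editor's sketch): the receive/process pattern that the
 * single-threaded session loop is built on top of the two functions
 * above.  A custom event loop can do the same and interleave its own
 * work between requests.
 */
static int example_custom_loop(struct fuse_session *se)
{
    struct fuse_buf buf = { .mem = NULL };
    int res = 0;

    while (!fuse_session_exited(se)) {
        res = fuse_session_receive_buf(se, &buf);
        if (res == -EINTR)
            continue;
        if (res <= 0)
            break;
        fuse_session_process_buf(se, &buf);
    }
    free(buf.mem);
    fuse_session_reset(se);
    return res < 0 ? res : 0;
}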
2860 
2861 struct fuse_session *fuse_session_new(struct fuse_args *args,
2862  const struct fuse_lowlevel_ops *op,
2863  size_t op_size, void *userdata)
2864 {
2865  int err;
2866  struct fuse_session *se;
2867  struct mount_opts *mo;
2868 
2869  if (sizeof(struct fuse_lowlevel_ops) < op_size) {
2870  fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
2871  op_size = sizeof(struct fuse_lowlevel_ops);
2872  }
2873 
2874  if (args->argc == 0) {
2875  fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
2876  return NULL;
2877  }
2878 
2879  se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
2880  if (se == NULL) {
2881  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
2882  goto out1;
2883  }
2884  se->fd = -1;
2885  se->conn.max_write = UINT_MAX;
2886  se->conn.max_readahead = UINT_MAX;
2887 
2888  /* Parse options */
2889  if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
2890  goto out2;
2891  if(se->deny_others) {
2892  /* Allowing access only by root is done by instructing
2893  * kernel to allow access by everyone, and then restricting
2894  * access to root and mountpoint owner in libfuse.
2895  */
2896  // We may be adding the option a second time, but
2897  // that doesn't hurt.
2898  if(fuse_opt_add_arg(args, "-oallow_other") == -1)
2899  goto out2;
2900  }
2901  mo = parse_mount_opts(args);
2902  if (mo == NULL)
2903  goto out3;
2904 
2905  if(args->argc == 1 &&
2906  args->argv[0][0] == '-') {
2907  fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
2908  "will be ignored\n");
2909  } else if (args->argc != 1) {
2910  int i;
2911  fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
2912  for(i = 1; i < args->argc-1; i++)
2913  fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
2914  fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
2915  goto out4;
2916  }
2917 
2918  if (se->debug)
2919  fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
2920 
2921  se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
2922  FUSE_BUFFER_HEADER_SIZE;
2923 
2924  list_init_req(&se->list);
2925  list_init_req(&se->interrupts);
2926  list_init_nreq(&se->notify_list);
2927  se->notify_ctr = 1;
2928  fuse_mutex_init(&se->lock);
2929 
2930  err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
2931  if (err) {
2932  fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
2933  strerror(err));
2934  goto out5;
2935  }
2936 
2937  memcpy(&se->op, op, op_size);
2938  se->owner = getuid();
2939  se->userdata = userdata;
2940 
2941  se->mo = mo;
2942  return se;
2943 
2944 out5:
2945  pthread_mutex_destroy(&se->lock);
2946 out4:
2947  fuse_opt_free_args(args);
2948 out3:
2949  free(mo);
2950 out2:
2951  free(se);
2952 out1:
2953  return NULL;
2954 }
2955 
2956 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
2957 {
2958  int fd;
2959 
2960  /*
2961  * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
2962  * would ensue.
2963  */
2964  do {
2965  fd = open("/dev/null", O_RDWR);
2966  if (fd > 2)
2967  close(fd);
2968  } while (fd >= 0 && fd <= 2);
2969 
2970  /*
2971  * To allow FUSE daemons to run without privileges, the caller may open
2972  * /dev/fuse before launching the file system and pass on the file
2973  * descriptor by specifying /dev/fd/N as the mount point. Note that the
2974  * parent process takes care of performing the mount in this case.
2975  */
2976  fd = fuse_mnt_parse_fuse_fd(mountpoint);
2977  if (fd != -1) {
2978  if (fcntl(fd, F_GETFD) == -1) {
2979  fuse_log(FUSE_LOG_ERR,
2980  "fuse: Invalid file descriptor /dev/fd/%u\n",
2981  fd);
2982  return -1;
2983  }
2984  se->fd = fd;
2985  return 0;
2986  }
2987 
2988  /* Open channel */
2989  fd = fuse_kern_mount(mountpoint, se->mo);
2990  if (fd == -1)
2991  return -1;
2992  se->fd = fd;
2993 
2994  /* Save mountpoint */
2995  se->mountpoint = strdup(mountpoint);
2996  if (se->mountpoint == NULL)
2997  goto error_out;
2998 
2999  return 0;
3000 
3001 error_out:
3002  fuse_kern_unmount(mountpoint, fd);
3003  return -1;
3004 }
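
/*
 * Example (editor's sketch): the usual lifecycle around
 * fuse_session_new() and fuse_session_mount().  It assumes the
 * mountpoint is the last command line argument and that example_ops is
 * filled in with real handlers; error paths are kept minimal.
 */
static const struct fuse_lowlevel_ops example_ops = {
    /* .lookup, .getattr, .read, ... -- provided by a real filesystem */
    .init = NULL,
};

int example_main(int argc, char *argv[])
{
    struct fuse_args args = FUSE_ARGS_INIT(argc - 1, argv);
    struct fuse_session *se;
    int ret = 1;

    se = fuse_session_new(&args, &example_ops, sizeof(example_ops), NULL);
    if (se == NULL)
        goto out;
    if (fuse_set_signal_handlers(se) != 0)
        goto out_destroy;
    if (fuse_session_mount(se, argv[argc - 1]) != 0)
        goto out_signals;

    ret = fuse_session_loop(se);

    fuse_session_unmount(se);
out_signals:
    fuse_remove_signal_handlers(se);
out_destroy:
    fuse_session_destroy(se);
out:
    fuse_opt_free_args(&args);
    return ret;
}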
3005 
3006 int fuse_session_fd(struct fuse_session *se)
3007 {
3008  return se->fd;
3009 }
3010 
3011 void fuse_session_unmount(struct fuse_session *se)
3012 {
3013  if (se->mountpoint != NULL) {
3014  fuse_kern_unmount(se->mountpoint, se->fd);
3015  free(se->mountpoint);
3016  se->mountpoint = NULL;
3017  }
3018 }
3019 
3020 #ifdef linux
3021 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3022 {
3023  char *buf;
3024  size_t bufsize = 1024;
3025  char path[128];
3026  int ret;
3027  int fd;
3028  unsigned long pid = req->ctx.pid;
3029  char *s;
3030 
3031  sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3032 
3033 retry:
3034  buf = malloc(bufsize);
3035  if (buf == NULL)
3036  return -ENOMEM;
3037 
3038  ret = -EIO;
3039  fd = open(path, O_RDONLY);
3040  if (fd == -1)
3041  goto out_free;
3042 
3043  ret = read(fd, buf, bufsize);
3044  close(fd);
3045  if (ret < 0) {
3046  ret = -EIO;
3047  goto out_free;
3048  }
3049 
3050  if ((size_t)ret == bufsize) {
3051  free(buf);
3052  bufsize *= 4;
3053  goto retry;
3054  }
3055 
3056  ret = -EIO;
3057  s = strstr(buf, "\nGroups:");
3058  if (s == NULL)
3059  goto out_free;
3060 
3061  s += 8;
3062  ret = 0;
3063  while (1) {
3064  char *end;
3065  unsigned long val = strtoul(s, &end, 0);
3066  if (end == s)
3067  break;
3068 
3069  s = end;
3070  if (ret < size)
3071  list[ret] = val;
3072  ret++;
3073  }
3074 
3075 out_free:
3076  free(buf);
3077  return ret;
3078 }
3079 #else /* linux */
3080 /*
3081  * This is currently not implemented on other than Linux...
3082  */
3083 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3084 {
3085  (void) req; (void) size; (void) list;
3086  return -ENOSYS;
3087 }
3088 #endif
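
/*
 * Example (editor's sketch): using fuse_req_getgroups() for a
 * supplementary-group check inside a request handler.  The return value
 * is the total number of groups, which may exceed the array size; on
 * non-Linux systems the call returns -ENOSYS.
 */
static int example_in_group(fuse_req_t req, gid_t gid)
{
    gid_t groups[32];
    int i, n;

    n = fuse_req_getgroups(req, 32, groups);
    if (n < 0)
        return n;       /* -ENOSYS, -ENOMEM or -EIO */
    if (n > 32)
        n = 32;         /* the list was truncated */
    for (i = 0; i < n; i++)
        if (groups[i] == gid)
            return 1;
    return 0;
}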
3089 
3090 void fuse_session_exit(struct fuse_session *se)
3091 {
3092  se->exited = 1;
3093 }
3094 
3095 void fuse_session_reset(struct fuse_session *se)
3096 {
3097  se->exited = 0;
3098  se->error = 0;
3099 }
3100 
3101 int fuse_session_exited(struct fuse_session *se)
3102 {
3103  return se->exited;
3104 }