2012-06-06 10:45:07 +01:00
|
|
|
#include "serve.h"
|
2012-06-06 11:27:52 +01:00
|
|
|
#include "client.h"
|
2012-05-17 20:14:22 +01:00
|
|
|
#include "nbdtypes.h"
|
|
|
|
#include "ioutil.h"
|
2013-02-13 13:43:52 +00:00
|
|
|
#include "sockutil.h"
|
2012-05-17 20:14:22 +01:00
|
|
|
#include "util.h"
|
2012-05-18 13:24:35 +01:00
|
|
|
#include "bitset.h"
|
2012-05-23 00:42:14 +01:00
|
|
|
#include "control.h"
|
2012-06-06 12:41:03 +01:00
|
|
|
#include "self_pipe.h"
|
2012-05-17 20:14:22 +01:00
|
|
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/mman.h>
|
2012-05-18 18:44:34 +01:00
|
|
|
#include <sys/un.h>
|
2012-05-17 20:14:22 +01:00
|
|
|
#include <fcntl.h>
|
|
|
|
|
|
|
|
#include <string.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <errno.h>
|
|
|
|
|
2012-05-31 11:33:31 +01:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <netinet/tcp.h>
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/** Allocate and initialise a struct server.
 *
 * flexnbd         - owning flexnbd instance; fatal if NULL.
 * s_ip_address    - address to bind to ("0" binds all IPs); fatal if NULL
 *                   or unparseable.
 * s_port          - port to listen on; fatal if NULL.
 * s_file          - backing file to serve; fatal if NULL.
 * default_deny    - ACL policy when no entry matches.
 * acl_entries     - number of entries in s_acl_entries.
 * s_acl_entries   - textual ACL entries; fatal on the first bad entry.
 * max_nbd_clients - size of the client slot table.
 * use_killswitch  - stored on the server for later use by clients.
 * success         - initial value of the process exit-status flag.
 *
 * Returns a newly allocated server.  Ownership passes to the caller,
 * who releases it with server_destroy().
 */
struct server *server_create(struct flexnbd *flexnbd,
			     char *s_ip_address,
			     char *s_port,
			     char *s_file,
			     int default_deny,
			     int acl_entries,
			     char **s_acl_entries,
			     int max_nbd_clients,
			     int use_killswitch, int success)
{
    NULLCHECK(flexnbd);
    struct server *out;
    out = xmalloc(sizeof(struct server));
    out->flexnbd = flexnbd;
    out->success = success;
    out->max_nbd_clients = max_nbd_clients;
    out->use_killswitch = use_killswitch;

    /* New connections are accepted until something (e.g. migration)
     * calls server_forbid_new_clients(). */
    server_allow_new_clients(out);

    /* One table slot per potential simultaneous client. */
    out->nbd_client =
	xmalloc(max_nbd_clients * sizeof(struct client_tbl_entry));
    out->tcp_backlog = 10;	/* does this need to be settable? */

    FATAL_IF_NULL(s_ip_address, "No IP address supplied");
    FATAL_IF_NULL(s_port, "No port number supplied");
    FATAL_IF_NULL(s_file, "No filename supplied");
    NULLCHECK(s_ip_address);
    FATAL_IF_ZERO(parse_ip_to_sockaddr
		  (&out->bind_to.generic, s_ip_address),
		  "Couldn't parse server address '%s' (use 0 if "
		  "you want to bind to all IPs)", s_ip_address);

    /* acl_create stops parsing at the first bad entry, so a short
     * ->len tells us which entry was rejected. */
    out->acl = acl_create(acl_entries, s_acl_entries, default_deny);
    if (out->acl && out->acl->len != acl_entries) {
	fatal("Bad ACL entry '%s'", s_acl_entries[out->acl->len]);
    }

    parse_port(s_port, &out->bind_to.v4);

    /* Not copied: the server borrows the caller's string for its
     * lifetime. */
    out->filename = s_file;

    out->l_acl = flexthread_mutex_create();
    out->l_start_mirror = flexthread_mutex_create();

    out->mirror_can_start = 1;

    /* Self-pipes used by the accept loop's select() to learn about
     * shutdown requests and ACL replacements. */
    out->close_signal = self_pipe_create();
    out->acl_updated_signal = self_pipe_create();

    NULLCHECK(out->close_signal);
    NULLCHECK(out->acl_updated_signal);

    /* Tag all subsequent log output with the served filename. */
    log_context = s_file;

    return out;
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_destroy(struct server *serve)
|
2012-06-08 18:03:41 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
self_pipe_destroy(serve->acl_updated_signal);
|
|
|
|
serve->acl_updated_signal = NULL;
|
|
|
|
self_pipe_destroy(serve->close_signal);
|
|
|
|
serve->close_signal = NULL;
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
flexthread_mutex_destroy(serve->l_start_mirror);
|
|
|
|
flexthread_mutex_destroy(serve->l_acl);
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (serve->acl) {
|
|
|
|
acl_destroy(serve->acl);
|
|
|
|
serve->acl = NULL;
|
|
|
|
}
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
free(serve->nbd_client);
|
|
|
|
free(serve);
|
2012-06-08 18:03:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_unlink(struct server *serve)
|
2012-07-23 10:22:25 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
NULLCHECK(serve->filename);
|
2012-07-23 10:22:25 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
FATAL_IF_NEGATIVE(unlink(serve->filename),
|
|
|
|
"Failed to unlink %s: %s",
|
|
|
|
serve->filename, strerror(errno));
|
2012-07-23 10:22:25 +01:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2012-06-08 11:02:40 +01:00
|
|
|
/* Lock/unlock a flexthread mutex member f of server s (e.g. l_acl),
 * NULL-checking s first and dying with msg on any mutex error.
 * Wrapped in do/while(0) so they behave as single statements. */
#define SERVER_LOCK( s, f, msg ) \
	do { NULLCHECK( s ); \
	FATAL_IF( 0 != flexthread_mutex_lock( s->f ), msg ); } while (0)
#define SERVER_UNLOCK( s, f, msg ) \
	do { NULLCHECK( s ); \
	FATAL_IF( 0 != flexthread_mutex_unlock( s->f ), msg ); } while (0)
|
2012-06-08 11:02:40 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_lock_acl(struct server *serve)
|
2012-06-08 11:02:40 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
debug("ACL locking");
|
2012-07-11 09:43:16 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
SERVER_LOCK(serve, l_acl, "Problem with ACL lock");
|
2012-06-06 13:29:13 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_unlock_acl(struct server *serve)
|
2012-06-06 13:29:13 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
debug("ACL unlocking");
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
SERVER_UNLOCK(serve, l_acl, "Problem with ACL unlock");
|
2012-06-08 11:02:40 +01:00
|
|
|
}
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2012-06-08 11:02:40 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_acl_locked(struct server *serve)
|
2012-07-11 09:43:16 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
return flexthread_mutex_held(serve->l_acl);
|
2012-07-11 09:43:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_lock_start_mirror(struct server *serve)
|
2012-10-04 14:41:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
debug("Mirror start locking");
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
SERVER_LOCK(serve, l_start_mirror, "Problem with start mirror lock");
|
2012-10-04 14:41:55 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_unlock_start_mirror(struct server *serve)
|
2012-10-04 14:41:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
debug("Mirror start unlocking");
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
SERVER_UNLOCK(serve, l_start_mirror,
|
|
|
|
"Problem with start mirror unlock");
|
2012-10-04 14:41:55 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_start_mirror_locked(struct server *serve)
|
2012-10-04 14:41:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
return flexthread_mutex_held(serve->l_start_mirror);
|
2012-10-04 14:41:55 +01:00
|
|
|
}
|
|
|
|
|
2012-06-08 18:03:41 +01:00
|
|
|
/** Return the actual port the server bound to. This is used because we
|
|
|
|
* are allowed to pass "0" on the command-line.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_port(struct server *server)
|
2012-06-08 18:03:41 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(server);
|
|
|
|
union mysockaddr addr;
|
|
|
|
socklen_t len = sizeof(addr.v4);
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (getsockname(server->server_fd, &addr.v4, &len) < 0) {
|
|
|
|
fatal("Failed to get the port number.");
|
|
|
|
}
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return be16toh(addr.v4.sin_port);
|
2012-06-06 13:29:13 +01:00
|
|
|
}
|
|
|
|
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2012-05-29 04:03:28 +01:00
|
|
|
/** Prepares a listening socket for the NBD server, binding etc.
 *
 * Creates the socket (IPv4 or IPv6 to match the configured bind
 * address), sets SO_REUSEADDR and TCP_NODELAY, binds, and listens.
 * Every step is fatal on failure.  The resulting fd is stored in
 * params->server_fd.
 */
void serve_open_server_socket(struct server *params)
{
    NULLCHECK(params);

    /* Pick the protocol family to match the parsed bind address. */
    params->server_fd =
	socket(params->bind_to.generic.sa_family ==
	       AF_INET ? PF_INET : PF_INET6, SOCK_STREAM, 0);

    FATAL_IF_NEGATIVE(params->server_fd, "Couldn't create server socket");

    /* We need SO_REUSEADDR so that when we switch from listening to
     * serving we don't have to change address if we don't want to.
     *
     * If this fails, it's not necessarily bad in principle, but at
     * this point in the code we can't tell if it's going to be a
     * problem.  It's also indicative of something odd going on, so
     * we barf.
     */
    FATAL_IF_NEGATIVE(sock_set_reuseaddr(params->server_fd, 1),
		      "Couldn't set SO_REUSEADDR");

    /* TCP_NODELAY makes everything not be slow.  If we can't set
     * this, again, there's something odd going on which we don't
     * understand.
     */
    FATAL_IF_NEGATIVE(sock_set_tcp_nodelay(params->server_fd, 1),
		      "Couldn't set TCP_NODELAY");

    /* If we can't bind, presumably that's because someone else is
     * squatting on our ip/port combo, or the ip isn't yet
     * configured.  Ideally we want to retry this. */
    FATAL_UNLESS_ZERO(sock_try_bind
		      (params->server_fd, &params->bind_to.generic),
		      SHOW_ERRNO("Failed to bind() socket")
	);

    /* tcp_backlog is fixed at creation time (currently 10). */
    FATAL_IF_NEGATIVE(listen(params->server_fd, params->tcp_backlog),
		      "Couldn't listen on server socket");
}
|
|
|
|
|
2012-06-08 18:03:41 +01:00
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/** Attempt to join a client thread and, if it has finished, tidy up
 * its table slot.
 *
 * entry    - the client table slot to examine; ignored if no thread
 *            is recorded in it.
 * joinfunc - either pthread_join (blocking) or pthread_tryjoin_np
 *            (non-blocking); the two callers wrap this distinction.
 *
 * Returns 1 if the thread had exited and the slot was freed, 0
 * otherwise.
 */
int tryjoin_client_thread(struct client_tbl_entry *entry,
			  int (*joinfunc) (pthread_t, void **))
{

    NULLCHECK(entry);
    NULLCHECK(joinfunc);

    int was_closed = 0;
    void *status = NULL;

    /* A zero thread id marks an empty slot. */
    if (entry->thread != 0) {
	char s_client_address[128];

	sockaddr_address_string(&entry->address.generic,
				&s_client_address[0], 128);

	debug("%s(%p,...)",
	      joinfunc == pthread_join ? "joining" : "tryjoining",
	      entry->thread);
	int join_errno = joinfunc(entry->thread, &status);

	/* join_errno can legitimately be ESRCH if the thread is
	 * already dead, but the client still needs tidying up. */
	if (join_errno != 0 && !entry->client->stopped) {
	    debug("join_errno was %s, stopped was %d",
		  strerror(join_errno), entry->client->stopped);
	    /* EBUSY just means "still running" for tryjoin; anything
	     * else for a live client is a hard error. */
	    FATAL_UNLESS(join_errno == EBUSY,
			 "Problem with joining thread %p: %s",
			 entry->thread, strerror(join_errno));
	} else if (join_errno == 0) {
	    debug("nbd thread %016x exited (%s) with status %ld",
		  entry->thread, s_client_address, (uintptr_t) status);
	    /* Thread is gone: destroy the client and free the slot. */
	    client_destroy(entry->client);
	    entry->client = NULL;
	    entry->thread = 0;
	    was_closed = 1;
	}
    }

    return was_closed;
}
|
|
|
|
|
|
|
|
|
2012-06-07 14:25:30 +01:00
|
|
|
/**
|
|
|
|
* Check to see if a client thread has finished, and if so, tidy up
|
|
|
|
* after it.
|
|
|
|
* Returns 1 if the thread was cleaned up and the slot freed, 0
|
|
|
|
* otherwise.
|
|
|
|
*
|
|
|
|
* It's important that client_destroy gets called in the same thread
|
|
|
|
* which signals the client threads to stop. This avoids the
|
|
|
|
* possibility of sending a stop signal via a signal which has already
|
|
|
|
* been destroyed. However, it means that stopped client threads,
|
|
|
|
* including their signal pipes, won't be cleaned up until the next new
|
|
|
|
* client connection attempt.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
int cleanup_client_thread(struct client_tbl_entry *entry)
|
2012-06-07 14:25:30 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
return tryjoin_client_thread(entry, pthread_tryjoin_np);
|
2012-06-07 14:25:30 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void cleanup_client_threads(struct client_tbl_entry *entries,
|
|
|
|
size_t entries_len)
|
2012-06-21 17:11:12 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
size_t i;
|
|
|
|
for (i = 0; i < entries_len; i++) {
|
|
|
|
cleanup_client_thread(&entries[i]);
|
|
|
|
}
|
2012-06-21 17:11:12 +01:00
|
|
|
}
|
|
|
|
|
2012-06-07 14:25:30 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Join a client thread after having sent a stop signal to it.
|
|
|
|
* This function will not return until pthread_join has returned, so
|
|
|
|
* ensures that the client thread is dead.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
int join_client_thread(struct client_tbl_entry *entry)
|
2012-06-07 14:25:30 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
return tryjoin_client_thread(entry, pthread_join);
|
2012-06-07 14:25:30 +01:00
|
|
|
}
|
|
|
|
|
2012-05-29 04:03:28 +01:00
|
|
|
/** We can only accommodate MAX_NBD_CLIENTS connections at once. This function
|
|
|
|
* goes through the current list, waits for any threads that have finished
|
|
|
|
* and returns the next slot free (or -1 if there are none).
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
int cleanup_and_find_client_slot(struct server *params)
|
2012-05-27 14:40:16 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int slot = -1, i;
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
cleanup_client_threads(params->nbd_client, params->max_nbd_clients);
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
for (i = 0; i < params->max_nbd_clients; i++) {
|
|
|
|
if (params->nbd_client[i].thread == 0 && slot == -1) {
|
|
|
|
slot = i;
|
|
|
|
break;
|
2012-05-27 14:40:16 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
}
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return slot;
|
2012-05-27 14:40:16 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_count_clients(struct server *params)
|
2013-09-23 13:37:13 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
|
|
|
int i, count = 0;
|
|
|
|
|
|
|
|
cleanup_client_threads(params->nbd_client, params->max_nbd_clients);
|
2013-09-23 13:37:13 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
for (i = 0; i < params->max_nbd_clients; i++) {
|
|
|
|
if (params->nbd_client[i].thread != 0) {
|
|
|
|
count++;
|
2013-09-23 13:37:13 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
}
|
2013-09-23 13:37:13 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return count;
|
2013-09-23 13:37:13 +01:00
|
|
|
}
|
|
|
|
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2012-06-08 11:02:40 +01:00
|
|
|
/** Check whether the address client_address is allowed or not according
|
|
|
|
* to the current acl. If params->acl is NULL, the result will be 1,
|
|
|
|
* otherwise it will be the result of acl_includes().
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_acl_accepts(struct server *params,
|
|
|
|
union mysockaddr *client_address)
|
2012-06-07 11:44:19 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
|
|
|
NULLCHECK(client_address);
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
struct acl *acl;
|
|
|
|
int accepted;
|
2012-06-08 11:02:40 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
server_lock_acl(params);
|
|
|
|
{
|
|
|
|
acl = params->acl;
|
|
|
|
accepted = acl ? acl_includes(acl, client_address) : 1;
|
|
|
|
}
|
|
|
|
server_unlock_acl(params);
|
2012-06-07 17:47:43 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return accepted;
|
2012-06-07 11:44:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_should_accept_client(struct server *params,
|
|
|
|
union mysockaddr *client_address,
|
|
|
|
char *s_client_address,
|
|
|
|
size_t s_client_address_len)
|
2012-06-07 11:44:19 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
|
|
|
NULLCHECK(client_address);
|
|
|
|
NULLCHECK(s_client_address);
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
const char *result =
|
|
|
|
sockaddr_address_string(&client_address->generic, s_client_address,
|
|
|
|
s_client_address_len);
|
2013-02-13 13:43:52 +00:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (NULL == result) {
|
|
|
|
warn("Rejecting client %s: Bad client_address", s_client_address);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!server_acl_accepts(params, client_address)) {
|
|
|
|
warn("Rejecting client %s: Access control error",
|
|
|
|
s_client_address);
|
|
|
|
debug("We %s have an acl, and default_deny is %s",
|
|
|
|
(params->acl ? "do" : "do not"),
|
|
|
|
(params->acl->default_deny ? "true" : "false"));
|
|
|
|
return 0;
|
|
|
|
}
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return 1;
|
2012-06-07 11:44:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-06-21 17:11:12 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int spawn_client_thread(struct client *client_params,
|
|
|
|
pthread_t * out_thread)
|
2012-06-21 17:11:12 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
int result =
|
|
|
|
pthread_create(out_thread, NULL, client_serve, client_params);
|
2012-06-21 17:11:12 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return result;
|
2012-06-21 17:11:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-05-29 04:03:28 +01:00
|
|
|
/** Dispatch function for accepting an NBD connection and starting a thread
 * to handle it.  Rejects the connection if there is an ACL, and the far end's
 * address doesn't match, or if there are too many clients already connected.
 *
 * Takes ownership of client_fd: on any rejection path the fd is
 * closed here; on success it passes to the new client thread.
 */
void accept_nbd_client(struct server *params,
		       int client_fd, union mysockaddr *client_address)
{
    NULLCHECK(params);
    NULLCHECK(client_address);

    struct client *client_params;
    int slot;
    char s_client_address[64] = { 0 };

    /* Enable TCP keepalive so dead peers are eventually noticed. */
    FATAL_IF_NEGATIVE(sock_set_keepalive_params
		      (client_fd, CLIENT_KEEPALIVE_TIME,
		       CLIENT_KEEPALIVE_INTVL, CLIENT_KEEPALIVE_PROBES),
		      "Error setting keepalive parameters on client socket fd %d",
		      client_fd);

    /* ACL / address check; also fills s_client_address for logging. */
    if (!server_should_accept_client
	(params, client_address, s_client_address, 64)) {
	FATAL_IF_NEGATIVE(close(client_fd),
			  "Error closing client socket fd %d", client_fd);
	debug("Closed client socket fd %d", client_fd);
	return;
    }

    /* Reap finished threads and find a free table slot. */
    slot = cleanup_and_find_client_slot(params);
    if (slot < 0) {
	warn("too many clients to accept connection");
	FATAL_IF_NEGATIVE(close(client_fd),
			  "Error closing client socket fd %d", client_fd);
	debug("Closed client socket fd %d", client_fd);
	return;
    }

    info("Client %s accepted on fd %d.", s_client_address, client_fd);
    client_params = client_create(params, client_fd);

    /* Record the client and its address in the chosen slot. */
    params->nbd_client[slot].client = client_params;
    memcpy(&params->nbd_client[slot].address, client_address,
	   sizeof(union mysockaddr));

    pthread_t *thread = &params->nbd_client[slot].thread;

    /* On thread-creation failure, undo everything and close the fd. */
    if (0 != spawn_client_thread(client_params, thread)) {
	debug("Thread creation problem.");
	client_destroy(client_params);
	FATAL_IF_NEGATIVE(close(client_fd),
			  "Error closing client socket fd %d", client_fd);
	debug("Closed client socket fd %d", client_fd);
	return;
    }

    debug("nbd thread %p started (%s)", params->nbd_client[slot].thread,
	  s_client_address);
}
|
|
|
|
|
2012-06-06 11:27:52 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_audit_clients(struct server *serve)
|
2012-06-08 18:03:41 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int i;
|
|
|
|
struct client_tbl_entry *entry;
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/* There's an apparent race here. If the acl updates while
|
|
|
|
* we're traversing the nbd_clients array, the earlier entries
|
|
|
|
* won't have been audited against the later acl. This isn't a
|
|
|
|
* problem though, because in order to update the acl
|
|
|
|
* server_replace_acl must have been called, so the
|
|
|
|
* server_accept loop will see a second acl_updated signal as
|
|
|
|
* soon as it hits select, and a second audit will be run.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < serve->max_nbd_clients; i++) {
|
|
|
|
entry = &serve->nbd_client[i];
|
|
|
|
if (0 == entry->thread) {
|
|
|
|
continue;
|
2012-06-08 18:03:41 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
if (server_acl_accepts(serve, &entry->address)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
client_signal_stop(entry->client);
|
|
|
|
}
|
2012-06-08 18:03:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_is_closed(struct server *serve)
|
2012-06-06 11:27:52 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
return fd_is_closed(serve->server_fd);
|
2012-06-06 11:27:52 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_close_clients(struct server *params)
|
2012-06-07 14:25:30 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
2012-09-20 13:37:48 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
info("closing all clients");
|
2012-06-07 14:25:30 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int i; /* , j; */
|
|
|
|
struct client_tbl_entry *entry;
|
2012-06-07 14:25:30 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
for (i = 0; i < params->max_nbd_clients; i++) {
|
|
|
|
entry = ¶ms->nbd_client[i];
|
2012-06-07 14:25:30 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (entry->thread != 0) {
|
|
|
|
debug("Stop signaling client %p", entry->client);
|
|
|
|
client_signal_stop(entry->client);
|
2012-06-07 14:25:30 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
}
|
|
|
|
/* We don't join the clients here. When we enter the final
|
|
|
|
* mirror pass, we get the IO lock, then wait for the server_fd
|
|
|
|
* to close before sending the data, to be sure that no new
|
|
|
|
* clients can be accepted which might think they've written
|
|
|
|
* to the disc. However, an existing client thread can be
|
|
|
|
* waiting for the IO lock already, so if we try to join it
|
|
|
|
* here, we deadlock.
|
|
|
|
*
|
|
|
|
* The client threads will be joined in serve_cleanup.
|
|
|
|
*
|
|
|
|
*/
|
2012-06-07 14:25:30 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-06-08 18:03:41 +01:00
|
|
|
/** Replace the current acl with a new one. The old one will be thrown
|
|
|
|
* away.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_replace_acl(struct server *serve, struct acl *new_acl)
|
2012-06-08 10:32:33 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
NULLCHECK(new_acl);
|
2012-06-08 10:32:33 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/* We need to lock around updates to the acl in case we try to
|
|
|
|
* destroy the old acl while checking against it.
|
|
|
|
*/
|
|
|
|
server_lock_acl(serve);
|
|
|
|
{
|
|
|
|
struct acl *old_acl = serve->acl;
|
|
|
|
serve->acl = new_acl;
|
|
|
|
/* We should always have an old_acl, but just in case... */
|
|
|
|
if (old_acl) {
|
|
|
|
acl_destroy(old_acl);
|
2012-06-08 11:02:40 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
}
|
|
|
|
server_unlock_acl(serve);
|
2012-06-08 10:32:33 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
self_pipe_signal(serve->acl_updated_signal);
|
2012-06-08 10:32:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_prevent_mirror_start(struct server *serve)
|
2012-10-04 14:41:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
serve->mirror_can_start = 0;
|
2012-10-04 14:41:55 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_allow_mirror_start(struct server *serve)
|
2012-10-04 14:41:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
serve->mirror_can_start = 1;
|
2012-10-04 14:41:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Only call this with the mirror start lock held */
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_mirror_can_start(struct server *serve)
|
2012-10-04 14:41:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return serve->mirror_can_start;
|
2012-10-04 14:41:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-09 17:20:39 +01:00
|
|
|
/* Queries to see if we are currently mirroring.  If we are, we need
 * to communicate that via the process exit status, because otherwise
 * the supervisor will assume the migration completed.
 *
 * Returns 1 (graceful) when no mirror is running, 0 otherwise.
 */
int serve_shutdown_is_graceful(struct server *params)
{
    int graceful = 1;

    server_lock_start_mirror(params);
    {
	if (server_is_mirroring(params)) {
	    graceful = 0;
	    warn("Stop signal received while mirroring.");
	    /* Stop a new mirror racing in while we shut down. */
	    server_prevent_mirror_start(params);
	}
    }
    server_unlock_start_mirror(params);

    return graceful;
}
|
|
|
|
|
2012-06-21 17:11:12 +01:00
|
|
|
|
2012-05-29 04:03:28 +01:00
|
|
|
/** Accept either an NBD or control socket connection, dispatch appropriately */
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_accept(struct server *params)
|
|
|
|
{
|
|
|
|
NULLCHECK(params);
|
|
|
|
debug("accept loop starting");
|
|
|
|
union mysockaddr client_address;
|
|
|
|
fd_set fds;
|
|
|
|
socklen_t socklen = sizeof(client_address);
|
|
|
|
/* We select on this fd to receive OS signals (only a few of
|
|
|
|
* which we're interested in, see flexnbd.c */
|
|
|
|
int signal_fd = flexnbd_signal_fd(params->flexnbd);
|
|
|
|
int should_continue = 1;
|
|
|
|
|
|
|
|
FD_ZERO(&fds);
|
|
|
|
FD_SET(params->server_fd, &fds);
|
|
|
|
if (0 < signal_fd) {
|
|
|
|
FD_SET(signal_fd, &fds);
|
|
|
|
}
|
|
|
|
self_pipe_fd_set(params->close_signal, &fds);
|
|
|
|
self_pipe_fd_set(params->acl_updated_signal, &fds);
|
|
|
|
|
|
|
|
FATAL_IF_NEGATIVE(sock_try_select(FD_SETSIZE, &fds, NULL, NULL, NULL),
|
|
|
|
SHOW_ERRNO("select() failed")
|
2013-02-14 16:38:45 +00:00
|
|
|
);
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (self_pipe_fd_isset(params->close_signal, &fds)) {
|
|
|
|
server_close_clients(params);
|
|
|
|
should_continue = 0;
|
|
|
|
}
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2012-10-09 17:20:39 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (0 < signal_fd && FD_ISSET(signal_fd, &fds)) {
|
|
|
|
debug("Stop signal received.");
|
|
|
|
server_close_clients(params);
|
|
|
|
params->success = params->success
|
|
|
|
&& serve_shutdown_is_graceful(params);
|
|
|
|
should_continue = 0;
|
|
|
|
}
|
2012-06-21 17:11:12 +01:00
|
|
|
|
2012-06-27 15:45:33 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (self_pipe_fd_isset(params->acl_updated_signal, &fds)) {
|
|
|
|
self_pipe_signal_clear(params->acl_updated_signal);
|
|
|
|
server_audit_clients(params);
|
|
|
|
}
|
2012-06-08 18:03:41 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (FD_ISSET(params->server_fd, &fds)) {
|
|
|
|
int client_fd =
|
|
|
|
accept(params->server_fd, &client_address.generic, &socklen);
|
2013-09-10 16:03:26 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (params->allow_new_clients) {
|
|
|
|
debug("Accepted nbd client socket fd %d", client_fd);
|
|
|
|
accept_nbd_client(params, client_fd, &client_address);
|
|
|
|
} else {
|
|
|
|
debug("New NBD client socket %d not allowed", client_fd);
|
|
|
|
sock_try_close(client_fd);
|
2012-09-20 13:37:48 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
}
|
2012-06-11 13:49:35 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return should_continue;
|
2012-06-08 18:03:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/** Run server_accept() until it reports that the loop should stop. */
void serve_accept_loop(struct server *params)
{
    NULLCHECK(params);
    while (server_accept(params)) {
	/* keep accepting */
    }
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/** Thread body: populate the server's allocation bitmap from the
 * backing file.
 *
 * serve_uncast - a struct server *, cast through void * for
 *                pthread_create.
 *
 * Opens its own read-only fd so it doesn't race the serving fd.
 * Sets allocation_map_built on success, allocation_map_not_built on
 * failure.  Always returns NULL.
 */
void *build_allocation_map_thread(void *serve_uncast)
{
    NULLCHECK(serve_uncast);

    struct server *serve = (struct server *) serve_uncast;

    NULLCHECK(serve->filename);
    NULLCHECK(serve->allocation_map);

    int fd = open(serve->filename, O_RDONLY);
    FATAL_IF_NEGATIVE(fd, "Couldn't open %s", serve->filename);

    if (build_allocation_map(serve->allocation_map, fd)) {
	serve->allocation_map_built = 1;
    } else {
	/* We can operate without it, but we can't free it without a race.
	 * All that happens if we leave it is that it gradually builds up an
	 * *incomplete* record of writes.  Nobody will use it, as
	 * allocation_map_built == 0 for the lifetime of the process.
	 *
	 * The stream functionality can still be relied on.  We don't need to
	 * worry about mirroring waiting for the allocation map to finish,
	 * because we already copy every byte at least once.  If that changes in
	 * the future, we'll need to wait for the allocation map to finish or
	 * fail before we can complete the migration.
	 */
	serve->allocation_map_not_built = 1;
	warn("Didn't build allocation map for %s", serve->filename);
    }

    close(fd);
    return NULL;
}
|
|
|
|
|
2012-05-29 04:03:28 +01:00
|
|
|
/** Initialisation function that sets up the initial allocation map, i.e. so
|
|
|
|
* we know which blocks of the file are allocated.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
void serve_init_allocation_map(struct server *params)
|
2012-05-18 13:24:35 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
|
|
|
NULLCHECK(params->filename);
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int fd = open(params->filename, O_RDONLY);
|
|
|
|
off64_t size;
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
FATAL_IF_NEGATIVE(fd, "Couldn't open %s", params->filename);
|
|
|
|
size = lseek64(fd, 0, SEEK_END);
|
2018-02-08 16:31:28 +00:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/* If discs are not in multiples of 512, then odd things happen,
|
|
|
|
* resulting in reads/writes past the ends of files.
|
|
|
|
*/
|
|
|
|
if (size != (size & ~0x1ff)) {
|
|
|
|
warn("file does not fit into 512-byte sectors; the end of the file will be ignored.");
|
|
|
|
size &= ~0x1ff;
|
|
|
|
}
|
2018-02-08 16:31:28 +00:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
params->size = size;
|
|
|
|
FATAL_IF_NEGATIVE(size, "Couldn't find size of %s", params->filename);
|
2012-10-09 17:54:00 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
params->allocation_map =
|
|
|
|
bitset_alloc(params->size, block_allocation_resolution);
|
2012-10-09 17:54:00 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int ok = pthread_create(¶ms->allocation_map_builder_thread,
|
|
|
|
NULL,
|
|
|
|
build_allocation_map_thread,
|
|
|
|
params);
|
2012-10-09 17:54:00 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
FATAL_IF_NEGATIVE(ok, "Couldn't create thread");
|
2012-05-18 13:24:35 +01:00
|
|
|
}
|
|
|
|
|
2012-06-06 12:41:03 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_forbid_new_clients(struct server *serve)
|
2013-09-10 16:03:26 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
serve->allow_new_clients = 0;
|
|
|
|
return;
|
2013-09-10 16:03:26 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_allow_new_clients(struct server *serve)
|
2013-09-10 16:03:26 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
serve->allow_new_clients = 1;
|
|
|
|
return;
|
2013-09-10 16:03:26 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_join_clients(struct server *serve)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
void *status;
|
2013-09-10 16:03:26 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
for (i = 0; i < serve->max_nbd_clients; i++) {
|
|
|
|
pthread_t thread_id = serve->nbd_client[i].thread;
|
2013-09-10 16:03:26 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (thread_id != 0) {
|
|
|
|
debug("joining thread %p", thread_id);
|
|
|
|
int err = pthread_join(thread_id, &status);
|
|
|
|
if (0 == err) {
|
|
|
|
serve->nbd_client[i].thread = 0;
|
|
|
|
} else {
|
|
|
|
warn("Error %s (%i) joining thread %p", strerror(err), err,
|
|
|
|
thread_id);
|
|
|
|
}
|
2013-09-10 16:03:26 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
}
|
2013-09-10 16:03:26 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return;
|
2013-09-10 16:03:26 +01:00
|
|
|
}
|
|
|
|
|
/* Tell the server to close all the things. */
/* Requests an asynchronous shutdown by writing to the server's close
 * self-pipe; whatever is select()ing on that pipe (presumably the
 * accept loop - confirm) performs the actual close.  This function
 * itself closes nothing. */
void serve_signal_close(struct server *serve)
{
    NULLCHECK(serve);
    info("signalling close");
    self_pipe_signal(serve->close_signal);
}
|
|
|
|
|
2012-10-08 14:54:10 +01:00
|
|
|
|
2012-06-13 13:44:21 +01:00
|
|
|
/* Block until the server closes the server_fd.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
void serve_wait_for_close(struct server *serve)
|
2012-06-13 13:44:21 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
while (!fd_is_closed(serve->server_fd)) {
|
|
|
|
usleep(10000);
|
|
|
|
}
|
2012-06-13 13:44:21 +01:00
|
|
|
}
|
|
|
|
|
2012-07-23 10:22:25 +01:00
|
|
|
/* We've just had an DISCONNECT pair, so we need to shut down
|
2012-06-21 18:01:50 +01:00
|
|
|
* and signal our listener that we can safely take over.
|
|
|
|
*/
|
2018-02-20 10:05:35 +00:00
|
|
|
void server_control_arrived(struct server *serve)
|
2012-06-21 18:01:50 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
debug("server_control_arrived");
|
|
|
|
NULLCHECK(serve);
|
2012-06-21 18:01:50 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (!serve->success) {
|
|
|
|
serve->success = 1;
|
|
|
|
serve_signal_close(serve);
|
|
|
|
}
|
2012-06-21 18:01:50 +01:00
|
|
|
}
|
|
|
|
|
2012-06-06 12:41:03 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void flexnbd_stop_control(struct flexnbd *flexnbd);
|
2012-10-09 17:20:39 +01:00
|
|
|
|
2012-05-29 04:03:28 +01:00
|
|
|
/** Closes sockets, frees memory and waits for all client threads to finish */
|
2018-02-20 10:05:35 +00:00
|
|
|
void serve_cleanup(struct server *params,
|
|
|
|
int fatal __attribute__ ((unused)))
|
2012-05-29 04:03:28 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(params);
|
|
|
|
void *status;
|
2012-09-20 13:37:48 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
info("cleaning up");
|
2012-06-07 11:44:19 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (params->server_fd) {
|
|
|
|
close(params->server_fd);
|
|
|
|
}
|
2012-06-06 12:41:03 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/* need to stop background build if we're killed very early on */
|
|
|
|
pthread_cancel(params->allocation_map_builder_thread);
|
|
|
|
pthread_join(params->allocation_map_builder_thread, &status);
|
2012-10-09 17:20:39 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int need_mirror_lock;
|
|
|
|
need_mirror_lock = !server_start_mirror_locked(params);
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (need_mirror_lock) {
|
|
|
|
server_lock_start_mirror(params);
|
|
|
|
}
|
|
|
|
{
|
|
|
|
if (server_is_mirroring(params)) {
|
|
|
|
server_abandon_mirror(params);
|
2012-06-09 02:25:12 +01:00
|
|
|
}
|
2018-02-20 10:05:35 +00:00
|
|
|
server_prevent_mirror_start(params);
|
|
|
|
}
|
|
|
|
if (need_mirror_lock) {
|
|
|
|
server_unlock_start_mirror(params);
|
|
|
|
}
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
server_join_clients(params);
|
2012-07-11 09:43:16 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (params->allocation_map) {
|
|
|
|
bitset_free(params->allocation_map);
|
|
|
|
}
|
2013-09-17 17:30:33 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (server_start_mirror_locked(params)) {
|
|
|
|
server_unlock_start_mirror(params);
|
|
|
|
}
|
2012-10-04 14:41:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
if (server_acl_locked(params)) {
|
|
|
|
server_unlock_acl(params);
|
|
|
|
}
|
2012-07-11 09:43:16 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/* if( params->flexnbd ) { */
|
|
|
|
/* if ( params->flexnbd->control ) { */
|
|
|
|
/* flexnbd_stop_control( params->flexnbd ); */
|
|
|
|
/* } */
|
|
|
|
/* flexnbd_destroy( params->flexnbd ); */
|
|
|
|
/* } */
|
2012-10-09 17:20:39 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
/* server_destroy( params ); */
|
2012-10-09 17:20:39 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
debug("Cleanup done");
|
2012-06-22 10:05:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_is_in_control(struct server *serve)
|
2012-06-22 10:05:41 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
return serve->success;
|
2012-05-29 04:03:28 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
int server_is_mirroring(struct server *serve)
|
2012-07-17 16:30:49 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
NULLCHECK(serve);
|
|
|
|
return ! !serve->mirror_super;
|
2012-07-17 16:30:49 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
uint64_t server_mirror_bytes_remaining(struct server * serve)
|
2013-09-23 13:49:01 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
if (server_is_mirroring(serve)) {
|
|
|
|
uint64_t bytes_to_xfer =
|
|
|
|
bitset_stream_queued_bytes(serve->allocation_map,
|
|
|
|
BITSET_STREAM_SET) + (serve->size -
|
|
|
|
serve->
|
|
|
|
mirror->
|
|
|
|
offset);
|
2013-09-23 13:49:01 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return bytes_to_xfer;
|
|
|
|
}
|
2013-09-23 13:49:01 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return 0;
|
2013-09-23 13:49:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Given historic bps measurements and number of bytes left to transfer, give
|
|
|
|
* an estimate of how many seconds are remaining before the migration is
|
|
|
|
* complete, assuming no new bytes are written.
|
|
|
|
*/
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
uint64_t server_mirror_eta(struct server * serve)
|
2013-09-23 13:49:01 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
if (server_is_mirroring(serve)) {
|
|
|
|
uint64_t bytes_to_xfer = server_mirror_bytes_remaining(serve);
|
|
|
|
return bytes_to_xfer / (server_mirror_bps(serve) + 1);
|
|
|
|
}
|
2013-10-24 15:11:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return 0;
|
2013-10-24 15:11:55 +01:00
|
|
|
}
|
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
uint64_t server_mirror_bps(struct server * serve)
|
2013-10-24 15:11:55 +01:00
|
|
|
{
|
2018-02-20 10:05:35 +00:00
|
|
|
if (server_is_mirroring(serve)) {
|
|
|
|
uint64_t duration_ms =
|
|
|
|
monotonic_time_ms() - serve->mirror->migration_started;
|
2013-10-24 15:11:55 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return serve->mirror->all_dirty / ((duration_ms / 1000) + 1);
|
|
|
|
}
|
2013-09-23 13:49:01 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
return 0;
|
2013-09-23 13:49:01 +01:00
|
|
|
}
|
2012-10-09 17:35:20 +01:00
|
|
|
|
2018-02-20 10:05:35 +00:00
|
|
|
void mirror_super_destroy(struct mirror_super *super);
|
2012-10-09 17:35:20 +01:00
|
|
|
|
/* This must only be called with the start_mirror lock held */
/* Tears down an in-progress migration: signals the mirror supervisor to
 * abandon, joins its thread, then frees the supervisor state and clears
 * the server's mirror pointers so server_is_mirroring() reports false.
 * No-op when no mirror supervisor exists. */
void server_abandon_mirror(struct server *serve)
{
    NULLCHECK(serve);
    if (serve->mirror_super) {
        /* FIXME: AWOOGA! RACE!
         * We can set abandon_signal after mirror_super has checked it, but
         * before the reset. However, mirror_reset doesn't clear abandon_signal
         * so it'll just terminate early on the next pass. */
        ERROR_UNLESS(self_pipe_signal(serve->mirror->abandon_signal),
                     "Failed to signal abandon to mirror");

        /* Join before destroying: the supervisor thread still uses
         * mirror_super until it exits. */
        pthread_t tid = serve->mirror_super->thread;
        pthread_join(tid, NULL);
        debug("Mirror thread %p pthread_join returned", tid);

        server_allow_mirror_start(serve);
        mirror_super_destroy(serve->mirror_super);

        serve->mirror = NULL;
        serve->mirror_super = NULL;

        debug("Mirror supervisor done.");
    }
}
|
|
|
|
|
/* Whether the server's ACL denies clients by default; simply delegates
 * to acl_default_deny() on the server's acl. */
int server_default_deny(struct server *serve)
{
    NULLCHECK(serve);
    return acl_default_deny(serve->acl);
}
|
2012-06-22 10:05:41 +01:00
|
|
|
|
/** Full lifecycle of the server.
 *
 * Registers serve_cleanup() as the error handler, opens the listening
 * socket, optionally signals 'open_signal' (only after the socket is
 * really open, so waiters can't race ahead), starts the background
 * allocation-map build, then runs the accept loop until shutdown.
 * Returns the server's success flag, captured before cleanup tears the
 * struct down.
 */
int do_serve(struct server *params, struct self_pipe *open_signal)
{
    NULLCHECK(params);

    int success;

    error_set_handler((cleanup_handler *) serve_cleanup, params);
    serve_open_server_socket(params);

    /* Only signal that we are open for business once the server
       socket is open */
    if (NULL != open_signal) {
        self_pipe_signal(open_signal);
    }

    serve_init_allocation_map(params);
    serve_accept_loop(params);
    /* Read success before cleanup, which may invalidate params' state. */
    success = params->success;
    serve_cleanup(params, 0);

    return success;
}
|