flexnbd: Avoid a SIGSEGV when the allocation map fails to build.
In the event of a fiemap ioctl failing (when the file is on a tmpfs, for instance), we would free() serve->allocation_map, but the pointer would remain non-NULL, leading to segfaults in client.c when responding to write requests. Keeping the free() behaviour is more hassle than it's worth: there are synchronization problems with setting serve->allocation_map to NULL, so we simply omit the free() to avoid the segfault. This is safe because we never consult the map until allocation_map_built is set to true, and we never set that flag when the builder thread fails.
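The safety argument boils down to a gate: client write paths may set bits in the allocation map at any time, but nothing reads the map until the builder thread has flagged it complete. The following self-contained C sketch is a hypothetical illustration of that invariant only; it uses a flat byte array and made-up names (serve_sketch, record_write, block_is_allocated) rather than flexnbd's struct bitset_mapping and real helpers.

/* Hypothetical sketch, not flexnbd's real types or API. */
#include <stdint.h>

#define MAP_BYTES 4096                         /* stand-in for the real bitset size */

struct serve_sketch {
	uint8_t allocation_map[MAP_BYTES];     /* stands in for struct bitset_mapping * */
	int     allocation_map_built;          /* 0 until the builder thread succeeds */
};

/* Writer path: always safe to record a write, even mid-build or after a
 * failed build, because the map's storage is never released. */
static void record_write(struct serve_sketch *s, uint64_t block)
{
	s->allocation_map[(block / 8) % MAP_BYTES] |= (uint8_t)(1u << (block % 8));
}

/* Reader path: only trust the map once the build has finished; until then,
 * treat every block as allocated so no write is ever skipped. */
static int block_is_allocated(const struct serve_sketch *s, uint64_t block)
{
	if (!s->allocation_map_built)
		return 1;
	return (s->allocation_map[(block / 8) % MAP_BYTES] >> (block % 8)) & 1;
}

Under this scheme, freeing the map after a failed build would leave the writer path (bitset_set_range() in the real code) holding a dangling pointer, which is exactly the SIGSEGV this commit removes by leaking the map instead.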
src/client.c | 22
@@ -6,7 +6,6 @@
 #include "nbdtypes.h"
 #include "self_pipe.h"
 
-
 #include <sys/mman.h>
 #include <errno.h>
 #include <stdlib.h>
@@ -15,10 +14,6 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 
-
-
-
-
 struct client *client_create( struct server *serve, int socket )
 {
 	NULLCHECK( serve );
@@ -60,7 +55,7 @@ void client_destroy( struct client *client )
 /**
  * So waiting on client->socket is len bytes of data, and we must write it all
  * to client->mapped. However while doing do we must consult the bitmap
- * client->block_allocation_map, which is a bitmap where one bit represents
+ * client->serve->allocation_map, which is a bitmap where one bit represents
  * block_allocation_resolution bytes. Where a bit isn't set, there are no
  * disc blocks allocated for that portion of the file, and we'd like to keep
  * it that way.
@@ -72,6 +67,8 @@ void client_destroy( struct client *client )
 void write_not_zeroes(struct client* client, uint64_t from, int len)
 {
 	NULLCHECK( client );
+	NULLCHECK( client->serve );
+	NULLCHECK( client->serve->allocation_map );
 
 	struct bitset_mapping *map = client->serve->allocation_map;
 
@@ -141,7 +138,7 @@ void write_not_zeroes(struct client* client, uint64_t from, int len)
 	 * hand-optimized something specific.
 	 */
 
-	int all_zeros = (zerobuffer[0] == 0) &&
+	int all_zeros = (zerobuffer[0] == 0) &&
 		(0 == memcmp( zerobuffer, zerobuffer+1, blockrun-1 ));
 
 	if ( !all_zeros ) {
@@ -425,15 +422,14 @@ void client_reply_to_write( struct client* client, struct nbd_request request )
 			request.from,
 			request.len
 		);
 
+		/* Ensure this updated block is written in the event of a mirror op */
+		server_dirty(client->serve, request.from, request.len);
 		/* the allocation_map is shared between client threads, and may be
-		 * being built. But AFAICT this is safe, to accurately reflect the
-		 * fact that we've caused block allocation to occur, though we will
-		 * never consult the allocation_map until the builder thread has
-		 * finished.
+		 * being built. We need to reflect the write in it, as it may be in
+		 * a position the builder has already gone over.
 		 */
-		bitset_set_range(client->serve->allocation_map,
-				request.from, request.len);
+		bitset_set_range(client->serve->allocation_map, request.from, request.len);
 	}
 
 	if (1) /* not sure whether this is necessary... */
src/ioutil.c | 14
@@ -29,7 +29,7 @@ int build_allocation_map(struct bitset_mapping* allocation_map, int fd)
 	memset(&fiemap_static, 0, sizeof(fiemap_static));
 
 	for (offset = 0; offset < allocation_map->size; ) {
-
+
 		unsigned int i;
 
 		fiemap->fm_start = offset;
@@ -45,9 +45,7 @@ int build_allocation_map(struct bitset_mapping* allocation_map, int fd)
 
 		if ( ioctl( fd, FS_IOC_FIEMAP, fiemap ) < 0 ) {
 			debug( "Couldn't get fiemap, returning no allocation_map" );
-			free(allocation_map);
-			allocation_map = NULL;
-			break;
+			return 0; /* it's up to the caller to free the map */
 		}
 		else {
 			for ( i = 0; i < fiemap->fm_mapped_extents; i++ ) {
@@ -55,9 +53,9 @@ int build_allocation_map(struct bitset_mapping* allocation_map, int fd)
 					fiemap->fm_extents[i].fe_logical,
 					fiemap->fm_extents[i].fe_length );
 			}
-
-
-			/* must move the offset on, but careful not to jump max_length
+
+
+			/* must move the offset on, but careful not to jump max_length
 			 * if we've actually hit max_offsets.
 			 */
 			if (fiemap->fm_mapped_extents > 0) {
@@ -73,7 +71,7 @@ int build_allocation_map(struct bitset_mapping* allocation_map, int fd)
 	}
 
 	debug("Successfully built allocation map");
-	return NULL != allocation_map;
+	return 1;
 }
 
 
@@ -773,6 +773,11 @@ void* build_allocation_map_thread(void* serve_uncast)
 		serve->allocation_map_built = 1;
 	}
 	else {
+		/* We can operate without it, but we can't free it without a race.
+		 * All that happens if we leave it is that it gradually builds up an
+		 * *incomplete* record of writes. Nobody will use it, as
+		 * allocation_map_built == 0 for the lifetime of the process.
+		 */
 		warn( "Didn't build allocation map for %s", serve->filename );
 	}
 
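For the builder side, here is a similarly hypothetical sketch of the contract the hunks above establish: build_allocation_map() reports failure by returning 0 and leaves the map to its caller, and the thread wrapper deliberately keeps the map allocated on failure because client threads may already be writing bits into it. All names suffixed _sketch are illustrative stand-ins, not flexnbd's real definitions.

/* Illustrative stand-ins only; flexnbd's real structs and helpers differ. */
#include <stdio.h>

struct bitset_mapping_sketch { long size; };

struct serve_params_sketch {
	const char *filename;
	int fd;
	struct bitset_mapping_sketch *allocation_map;  /* allocated before the thread starts */
	int allocation_map_built;                      /* stays 0 if the build fails */
};

/* Stand-in for build_allocation_map(): 1 on success, 0 on failure
 * (e.g. the fiemap ioctl failing on tmpfs). The caller owns the map either way. */
static int build_allocation_map_sketch(struct bitset_mapping_sketch *map, int fd)
{
	(void)map; (void)fd;
	return 0;
}

static void *build_allocation_map_thread_sketch(void *serve_uncast)
{
	struct serve_params_sketch *serve = serve_uncast;

	if (build_allocation_map_sketch(serve->allocation_map, serve->fd)) {
		serve->allocation_map_built = 1;  /* readers may now consult the map */
	} else {
		/* Deliberately do NOT free the map: writers may be touching it, and
		 * nobody reads it while allocation_map_built stays 0. */
		fprintf(stderr, "Didn't build allocation map for %s\n", serve->filename);
	}
	return NULL;
}

The design choice is simply to trade a one-off leak for the synchronization that freeing the map safely would require.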