/*
* Empire - A multi-player, client/server Internet based war game.
- * Copyright (C) 1986-2008, Dave Pare, Jeff Bailey, Thomas Ruschak,
- * Ken Stevens, Steve McClure
+ * Copyright (C) 1986-2011, Dave Pare, Jeff Bailey, Thomas Ruschak,
+ * Ken Stevens, Steve McClure, Markus Armbruster
*
- * This program is free software; you can redistribute it and/or modify
+ * Empire is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* ---
*
* ---
*
* file.c: Operations on Empire tables (`files' for historical reasons)
- *
+ *
* Known contributors to this file:
* Dave Pare, 1989
* Steve McClure, 2000
- * Markus Armbruster, 2005-2008
+ * Markus Armbruster, 2005-2011
*/
#include <config.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#ifdef _WIN32
+#include <io.h>
+#include <share.h>
+#endif
#include "file.h"
#include "match.h"
#include "misc.h"
#include "nsc.h"
#include "prototypes.h"
+static int open_locked(char *, int, mode_t);
static int ef_realloc_cache(struct empfile *, int);
static int fillcache(struct empfile *, int);
static int do_read(struct empfile *, void *, int, int);
static int do_write(struct empfile *, void *, int, int);
static unsigned get_seqno(struct empfile *, int);
static void new_seqno(struct empfile *, void *);
+static void must_be_fresh(struct empfile *, void *);
static void do_blank(struct empfile *, void *, int, int);
static int ef_check(int);
+static unsigned ef_generation;
+
/*
* Open the file-backed table TYPE (EF_SECTOR, ...).
* HOW are flags to control operation. Naturally, immutable flags are
ef_open(int type, int how, int nelt)
{
struct empfile *ep;
- struct flock lock;
- int oflags, fd, fsiz, nslots;
+ int oflags, fd, fsiz, fids, nslots;
if (ef_check(type) < 0)
return 0;
oflags = O_RDONLY;
if (how & EFF_CREATE)
oflags |= O_CREAT | O_TRUNC;
-#if defined(_WIN32)
- oflags |= O_BINARY;
-#endif
- if ((fd = open(ep->file, oflags, S_IRWUG)) < 0) {
+ fd = open_locked(ep->file, oflags, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ if (fd < 0) {
logerror("Can't open %s (%s)", ep->file, strerror(errno));
return 0;
}
- lock.l_type = how & EFF_PRIVATE ? F_RDLCK : F_WRLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = lock.l_len = 0;
- if (fcntl(fd, F_SETLK, &lock) == -1) {
- logerror("Can't lock %s (%s)", ep->file, strerror(errno));
- close(fd);
- return 0;
- }
-
/* get file size */
fsiz = fsize(fd);
if (fsiz % ep->size) {
close(fd);
return 0;
}
- ep->fids = fsiz / ep->size;
- if (nelt >= 0 && nelt != ep->fids) {
+ fids = fsiz / ep->size;
+ if (nelt >= 0 && nelt != fids) {
logerror("Can't open %s (got %d records instead of %d)",
- ep->file, ep->fids, nelt);
+ ep->file, fids, nelt);
close(fd);
return 0;
}
if (ep->flags & EFF_STATIC) {
/* ep->cache already points to space for ep->csize elements */
if (how & EFF_MEM) {
- if (ep->fids > ep->csize) {
- logerror("Can't open %s: file larger than %d bytes",
- ep->file, ep->fids * ep->size);
+ if (fids > ep->csize) {
+ logerror("Can't open %s (file larger than %d records)",
+ ep->file, ep->csize);
close(fd);
return 0;
}
if (CANT_HAPPEN(ep->cache))
free(ep->cache);
if (how & EFF_MEM)
- nslots = ep->fids;
+ nslots = fids;
else
nslots = blksize(fd) / ep->size;
if (!ef_realloc_cache(ep, nslots)) {
}
ep->baseid = 0;
ep->cids = 0;
+ ep->fids = fids;
ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
ep->fd = fd;
/* map file into cache */
- if ((how & EFF_MEM) && ep->fids) {
- if (fillcache(ep, 0) != ep->fids) {
+ if ((how & EFF_MEM) && fids) {
+ if (fillcache(ep, 0) != fids) {
ep->cids = 0; /* prevent cache flush */
- ep->flags &= EFF_IMMUTABLE; /* maintain invariant */
ef_close(type);
return 0;
}
}
+ if (ep->onresize)
+ ep->onresize(type);
return 1;
}
+/*
+ * Open file NAME with open(2) flags OFLAGS and creation mode MODE.
+ * Lock the whole file: shared (read) lock when NAME is opened
+ * read-only, exclusive (write) lock else.  On Windows, which lacks
+ * fcntl() record locks, use sharing modes instead: deny nothing for
+ * read-only, deny other writers else.
+ * Return the file descriptor on success, -1 with errno set on failure.
+ */
+static int
+open_locked(char *name, int oflags, mode_t mode)
+{
+ int rdlonly = (oflags & O_ACCMODE) == O_RDONLY;
+ int fd;
+
+#ifdef _WIN32
+ /* O_BINARY: suppress CR/LF translation for this binary table file */
+ fd = _sopen(name, oflags | O_BINARY, rdlonly ? SH_DENYNO : SH_DENYWR,
+ mode);
+ if (fd < 0)
+ return -1;
+#else /* !_WIN32 */
+ struct flock lock;
+
+ fd = open(name, oflags, mode);
+ if (fd < 0)
+ return -1;
+
+ /* advisory whole-file lock (l_start = l_len = 0 covers everything) */
+ lock.l_type = rdlonly ? F_RDLCK : F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = lock.l_len = 0;
+ /* F_SETLK fails immediately instead of blocking when already locked */
+ if (fcntl(fd, F_SETLK, &lock) == -1) {
+ close(fd);
+ return -1;
+ }
+#endif /* !_WIN32 */
+ return fd;
+}
+
/*
* Reallocate cache for table EP to hold COUNT slots.
* The table must not be allocated statically.
}
/*
- * Close the file-backed table TYPE (EF_SECTOR, ...).
+ * Close the open table TYPE (EF_SECTOR, ...).
* Return non-zero on success, zero on failure.
*/
int
return 0;
ep = &empfile[type];
- if (EF_IS_VIEW(type))
+ if (EF_IS_VIEW(type)) {
ep->cache = NULL;
- else {
+ ep->csize = 0;
+ } else {
if (!ef_flush(type))
retval = 0;
- ep->flags &= EFF_IMMUTABLE;
if (!(ep->flags & EFF_STATIC)) {
free(ep->cache);
ep->cache = NULL;
+ ep->csize = 0;
}
if (close(ep->fd) < 0) {
logerror("Error closing %s (%s)", ep->file, strerror(errno));
}
ep->fd = -1;
}
+ ep->flags &= EFF_IMMUTABLE;
ep->baseid = ep->cids = ep->fids = 0;
+ if (ep->onresize)
+ ep->onresize(type);
return retval;
}
/*
* Read element ID from table TYPE into buffer INTO.
* FIXME pass buffer size!
+ * INTO is marked fresh with ef_mark_fresh().
* Return non-zero on success, zero on failure.
*/
int
cachep = ep->cache + (id - ep->baseid) * ep->size;
}
memcpy(into, cachep, ep->size);
+ ef_mark_fresh(type, into);
if (ep->postread)
ep->postread(id, into);
* cache straight to disk.
* Cannot write beyond the end of fully cached table (flags & EFF_MEM).
* Can write at the end of partially cached table.
+ * FROM must be fresh; see ef_make_stale().
* Return non-zero on success, zero on failure.
*/
int
ep = &empfile[type];
if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
return 0;
- if (id >= ep->baseid && id < ep->baseid + ep->cids)
+ new_seqno(ep, from);
+ if (id >= ep->fids) {
+ /* beyond end of file */
+ if (CANT_HAPPEN((ep->flags & EFF_MEM) || id > ep->fids))
+ return 0; /* not implemented */
+ /* write at end of file extends it */
+ ep->fids = id + 1;
+ if (ep->onresize)
+ ep->onresize(type);
+ }
+ if (id >= ep->baseid && id < ep->baseid + ep->cids) {
cachep = ep->cache + (id - ep->baseid) * ep->size;
- else
+ if (cachep != from)
+ must_be_fresh(ep, from);
+ } else
cachep = NULL;
if (ep->prewrite)
ep->prewrite(id, cachep, from);
- if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
- return 0; /* not implemented */
- new_seqno(ep, from);
if (ep->fd >= 0) {
if (do_write(ep, from, id, 1) < 0)
return 0;
}
if (cachep && cachep != from) /* update the cache if necessary */
memcpy(cachep, from, ep->size);
- if (id >= ep->fids) {
- /* write beyond end of file extends it, take note */
- ep->fids = id + 1;
- }
return 1;
}
* Increment sequence number in BUF, which is about to be written to EP.
* Do nothing if table is not EFF_TYPED (it has no sequence number
* then).
+ * Else, BUF's sequence number must match the one in EP's cache. If
+ * it doesn't, we're about to clobber a previous write.
*/
static void
new_seqno(struct empfile *ep, void *buf)
if (!(ep->flags & EFF_TYPED))
return;
old_seqno = get_seqno(ep, elt->uid);
- if (CANT_HAPPEN(old_seqno != elt->seqno))
- old_seqno = MAX(old_seqno, elt->seqno);
+ CANT_HAPPEN(old_seqno != elt->seqno);
elt->seqno = old_seqno + 1;
}
+/*
+ * Make all copies stale.
+ * Only fresh copies may be written back to the cache.
+ * To be called by functions that may yield the processor.
+ * Writing a copy when there has been a yield since it was read is
+ * unsafe, because we could clobber another thread's write then.
+ * Robust code must assume that any function that may yield does
+ * yield.  Marking copies stale there lets us catch unsafe writes.
+ */
+void
+ef_make_stale(void)
+{
+ ef_generation++;
+}
+
+/*
+ * Mark copy of an element of table TYPE in BUF fresh.
+ * A no-op for invalid TYPE and for tables that carry no generation
+ * number (not EFF_TYPED).
+ */
+void
+ef_mark_fresh(int type, void *buf)
+{
+ struct empfile *ep;
+
+ if (ef_check(type) < 0)
+ return;
+ ep = &empfile[type];
+ if (!(ep->flags & EFF_TYPED))
+ return;
+ /* assignment truncates ef_generation to the width of the generation
+ * member — presumably a 12-bit bit-field, matching the 0xfff mask in
+ * must_be_fresh(); confirm against its declaration in file.h */
+ ((struct emptypedstr *)buf)->generation = ef_generation;
+}
+
+/*
+ * Oops when the copy of an element of table EP in BUF is stale,
+ * i.e. its generation number doesn't match the current one.
+ * Nothing to check for tables without generation numbers (not
+ * EFF_TYPED).
+ */
+static void
+must_be_fresh(struct empfile *ep, void *buf)
+{
+ struct emptypedstr *elt = buf;
+
+ if (!(ep->flags & EFF_TYPED))
+ return;
+ /* mask ef_generation to the stored field's width before comparing —
+ * presumably generation is a 12-bit bit-field; verify in file.h */
+ CANT_HAPPEN(elt->generation != (ef_generation & 0xfff));
+}
+
/*
* Extend table TYPE by COUNT elements.
* Any pointers obtained from ef_ptr() become invalid.
}
}
ep->fids = id + count;
+ if (ep->onresize)
+ ep->onresize(type);
return 1;
}
/*
- * Initialize element ID for EP in BUF.
+ * Initialize element ID for table TYPE in BUF.
* FIXME pass buffer size!
+ * BUF is marked fresh with ef_mark_fresh().
*/
void
ef_blank(int type, int id, void *buf)
elt = buf;
elt->seqno = get_seqno(ep, elt->uid);
}
+ ef_mark_fresh(type, buf);
}
/*
ep->cids = count - ep->baseid;
}
+ if (ep->onresize)
+ ep->onresize(type);
return 1;
}