2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2011, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure, Markus Armbruster
6 * Empire is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 * See files README, COPYING and CREDITS in the root of the source
22 * tree for related information and legal notices. It is expected
23 * that future projects/authors will amend these files as needed.
27 * file.c: Operations on Empire tables (`files' for historical reasons)
29 * Known contributors to this file:
32 * Markus Armbruster, 2005-2011
40 #include <sys/types.h>
50 #include "prototypes.h"
52 static int open_locked(char *, int, mode_t);
53 static int ef_realloc_cache(struct empfile *, int);
54 static int fillcache(struct empfile *, int);
55 static int do_read(struct empfile *, void *, int, int);
56 static int do_write(struct empfile *, void *, int, int);
57 static unsigned get_seqno(struct empfile *, int);
58 static void new_seqno(struct empfile *, void *);
59 static void must_be_fresh(struct empfile *, void *);
60 static void do_blank(struct empfile *, void *, int, int);
61 static int ef_check(int);
63 static unsigned ef_generation;
66 * Open the file-backed table TYPE (EF_SECTOR, ...).
67 * HOW are flags to control operation. Naturally, immutable flags are
69 * The table must not be already open.
70 * Return non-zero on success, zero on failure.
73 ef_open(int type, int how)
/* NOTE(review): this extract is elided; several statements (returns,
 * braces, assignments) between the visible lines are missing. */
76 int oflags, fd, fsiz, fids, nslots;
78 if (ef_check(type) < 0)
80 if (CANT_HAPPEN(how & EFF_IMMUTABLE))
/* callers must not pass immutable flags; strip them and continue */
81 how &= ~EFF_IMMUTABLE;
/* table must have a backing file, not be a view, and not be open yet */
85 if (CANT_HAPPEN(!ep->file || ep->base != EF_BAD || ep->fd >= 0))
88 if (how & EFF_PRIVATE)
91 oflags |= O_CREAT | O_TRUNC;
/* rw for owner and group only; open_locked() also takes the file lock */
92 fd = open_locked(ep->file, oflags, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
94 logerror("Can't open %s (%s)", ep->file, strerror(errno));
/* file size must be a whole number of records */
100 if (fsiz % ep->size) {
101 logerror("Can't open %s (file size not a multiple of record size %d)",
106 fids = fsiz / ep->size;
/* fixed-size tables must match their configured element count */
107 if (ep->nent >= 0 && ep->nent != fids && !(how & EFF_CREATE)) {
108 logerror("Can't open %s (got %d records instead of %d)",
109 ep->file, fids, ep->nent);
115 if (ep->flags & EFF_STATIC) {
116 /* ep->cache already points to space for ep->csize elements */
118 if (fids > ep->csize) {
119 CANT_HAPPEN(ep->nent >= 0); /* insufficient static cache */
120 logerror("Can't open %s (file larger than %d records)",
121 ep->file, ep->csize);
127 if (CANT_HAPPEN(ep->cache))
/* size a dynamic cache; presumably full size for EFF_MEM — elided here */
132 nslots = blksize(fd) / ep->size;
133 if (!ef_realloc_cache(ep, nslots)) {
134 logerror("Can't map %s (%s)", ep->file, strerror(errno));
/* keep immutable flags, take the rest from HOW (EFF_CREATE is transient) */
142 ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
145 /* map file into cache */
146 if ((how & EFF_MEM) && fids) {
147 if (fillcache(ep, 0) != fids) {
148 ep->cids = 0; /* prevent cache flush */
/*
 * Open file NAME with flags OFLAGS and mode MODE, and lock it.
 * Read-only opens take a shared lock, others an exclusive lock.
 */
160 open_locked(char *name, int oflags, mode_t mode)
162 int rdlonly = (oflags & O_ACCMODE) == O_RDONLY;
/* Windows branch, presumably (_sopen/SH_DENY* sharing modes) — elided #ifdef */
166 fd = _sopen(name, oflags | O_BINARY, rdlonly ? SH_DENYNO : SH_DENYWR,
173 fd = open(name, oflags, mode);
/* POSIX advisory record lock; l_len = 0 locks to end of file */
177 lock.l_type = rdlonly ? F_RDLCK : F_WRLCK;
178 lock.l_whence = SEEK_SET;
179 lock.l_start = lock.l_len = 0;
180 if (fcntl(fd, F_SETLK, &lock) == -1) {
189 * Reallocate cache for table EP to hold COUNT slots.
190 * The table must not be allocated statically.
191 * The cache may still be unmapped.
192 * If reallocation succeeds, any pointers obtained from ef_ptr()
194 * If it fails, the cache is unchanged, and errno is set.
195 * Return non-zero on success, zero on failure.
198 ef_realloc_cache(struct empfile *ep, int count)
202 if (CANT_HAPPEN(ep->flags & EFF_STATIC))
204 if (CANT_HAPPEN(count < 0))
208 * Avoid zero slots, because that can lead to null cache, which
209 * would be interpreted as unmapped cache.
/* correct realloc idiom: result into a temporary, ep->cache updated only
 * on success (elided lines presumably do that) */
213 cache = realloc(ep->cache, count * ep->size);
223 * Open the table TYPE, which is a view of a base table
224 * The table must not be already open.
225 * Return non-zero on success, zero on failure.
226 * Beware: views work only as long as the base table doesn't change size!
227 * You must close the view before closing its base table.
230 ef_open_view(int type)
235 if (ef_check(type) < 0)
239 if (ef_check(base) < 0)
/* a view requires a fully cached base of identical element size/count,
 * and must not define its own cache or callbacks */
241 if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)
243 || ep->size != empfile[base].size
244 || ep->nent != empfile[base].nent
245 || ep->cache || ep->oninit || ep->postread
246 || ep->prewrite || ep->onresize))
/* share the base table's cache and bookkeeping */
249 ep->cache = empfile[base].cache;
250 ep->csize = empfile[base].csize;
251 ep->flags |= EFF_MEM;
252 ep->baseid = empfile[base].baseid;
253 ep->cids = empfile[base].cids;
254 ep->fids = empfile[base].fids;
259 * Close the open table TYPE (EF_SECTOR, ...).
260 * Return non-zero on success, zero on failure.
/* NOTE(review): the function header line is elided in this extract;
 * presumably ef_close(int type). */
268 if (ef_check(type) < 0)
272 if (EF_IS_VIEW(type)) {
/* dynamically allocated cache is freed; static cache is kept */
278 if (!(ep->flags & EFF_STATIC)) {
283 if (close(ep->fd) < 0) {
284 logerror("Error closing %s (%s)", ep->file, strerror(errno));
/* reset to closed state: keep only immutable flags, zero bookkeeping */
289 ep->flags &= EFF_IMMUTABLE;
290 ep->baseid = ep->cids = ep->fids = 0;
297 * Flush file-backed table TYPE (EF_SECTOR, ...) to its backing file.
298 * Do nothing if the table is privately mapped.
299 * Update timestamps of written elements if table is EFF_TYPED.
300 * Return non-zero on success, zero on failure.
/* NOTE(review): function header line elided; presumably ef_flush(int type). */
307 if (ef_check(type) < 0)
310 if (ep->flags & EFF_PRIVATE)
311 return 1; /* nothing to do */
312 if (CANT_HAPPEN(ep->fd < 0))
315 * We don't know which cache entries are dirty. ef_write() writes
316 * through, but direct updates through ef_ptr() don't. They are
317 * allowed only with EFF_MEM. Assume the whole cache is dirty
320 if (ep->flags & EFF_MEM) {
321 if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
329 * Return pointer to element ID in table TYPE if it exists, else NULL.
330 * The table must be fully cached, i.e. flags & EFF_MEM.
331 * The caller is responsible for flushing changes he makes.
334 ef_ptr(int type, int id)
338 if (ef_check(type) < 0)
341 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
/* range check: valid IDs are 0 .. fids-1 */
343 if (id < 0 || id >= ep->fids)
/* direct pointer into the cache; invalidated by ef_realloc_cache() */
345 return ep->cache + ep->size * id;
349 * Read element ID from table TYPE into buffer INTO.
350 * FIXME pass buffer size!
351 * INTO is marked fresh with ef_mark_fresh().
352 * Return non-zero on success, zero on failure.
355 ef_read(int type, int id, void *into)
360 if (ef_check(type) < 0)
363 if (CANT_HAPPEN(!ep->cache))
365 if (id < 0 || id >= ep->fids)
368 if (ep->flags & EFF_MEM) {
/* fully cached: element is at its ID's slot */
369 cachep = ep->cache + id * ep->size;
/* partially cached: fault the element in if it's outside the window */
371 if (ep->baseid + ep->cids <= id || ep->baseid > id) {
372 if (fillcache(ep, id) < 1)
375 cachep = ep->cache + (id - ep->baseid) * ep->size;
377 memcpy(into, cachep, ep->size);
378 ef_mark_fresh(type, into);
/* per-table post-read hook (elided guard presumably checks ep->postread) */
381 ep->postread(id, into);
386 * Fill cache of file-backed EP with elements starting at ID.
387 * If any were read, return their number.
388 * Else return -1 and leave the cache unchanged.
391 fillcache(struct empfile *ep, int id)
395 if (CANT_HAPPEN(!ep->cache))
/* read as many elements as fit the cache, but not past end of file */
398 ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/*
 * Read COUNT elements starting at ID from file-backed EP into BUF.
 * Return the number of elements read (complete elements only), or
 * fail on error (error-path lines are elided in this extract).
 */
408 do_read(struct empfile *ep, void *buf, int id, int count)
413 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
416 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
417 logerror("Error seeking %s to elt %d (%s)",
418 ep->file, id, strerror(errno));
/* total bytes wanted; the (elided) loop retries on EINTR and short reads */
423 n = count * ep->size;
425 ret = read(ep->fd, p, n);
427 if (errno != EINTR) {
428 logerror("Error reading %s elt %d (%s)",
/* translate byte offset back into an element index for the message */
430 id + (int)((p - (char *)buf) / ep->size),
434 } else if (ret == 0) {
/* fewer bytes than expected: file shrank underneath us */
435 logerror("Unexpected EOF reading %s elt %d",
436 ep->file, id + (int)((p - (char *)buf) / ep->size));
444 return (p - (char *)buf) / ep->size;
448 * Write COUNT elements starting at ID from BUF to file-backed EP.
449 * Update the timestamp if the table is EFF_TYPED.
450 * Don't actually write if table is privately mapped.
451 * Return 0 on success, -1 on error (file may be corrupt then).
454 do_write(struct empfile *ep, void *buf, int id, int count)
458 struct emptypedstr *elt;
461 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
464 if (ep->flags & EFF_TYPED) {
/* EFF_NOTIME tables don't get timestamps */
465 now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
466 for (i = 0; i < count; i++) {
468 * TODO Oopses here could be due to bad data corruption.
469 * Fail instead of attempting to recover?
471 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
/* repair inconsistent type/uid fields rather than failing outright */
472 if (CANT_HAPPEN(elt->ef_type != ep->uid))
473 elt->ef_type = ep->uid;
474 if (CANT_HAPPEN(elt->uid != id + i))
476 if (now != (time_t)-1)
477 elt->timestamp = now;
/* privately mapped tables keep changes in memory only */
481 if (ep->flags & EFF_PRIVATE)
484 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
485 logerror("Error seeking %s to elt %d (%s)",
486 ep->file, id, strerror(errno));
/* total bytes to write; the (elided) loop retries on EINTR / short writes */
491 n = count * ep->size;
493 ret = write(ep->fd, p, n);
495 if (errno != EINTR) {
496 logerror("Error writing %s elt %d (%s)",
498 id + (int)((p - (char *)buf) / ep->size),
512 * Write element ID into table TYPE from buffer FROM.
513 * FIXME pass buffer size!
514 * Update timestamp in FROM if table is EFF_TYPED.
515 * If table is file-backed and not privately mapped, write through
516 * cache straight to disk.
517 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
518 * Can write at the end of partially cached table.
519 * FROM must be fresh; see ef_make_stale().
520 * Return non-zero on success, zero on failure.
523 ef_write(int type, int id, void *from)
528 if (ef_check(type) < 0)
/* EFF_PRIVATE without EFF_MEM makes no sense: nowhere to keep the write */
531 if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
534 if (id >= ep->fids) {
535 /* beyond end of file */
536 if (CANT_HAPPEN((ep->flags & EFF_MEM) || id > ep->fids))
537 return 0; /* not implemented */
538 /* write at end of file extends it */
/* element is in the cache window: update it in place */
543 if (id >= ep->baseid && id < ep->baseid + ep->cids) {
544 cachep = ep->cache + (id - ep->baseid) * ep->size;
546 must_be_fresh(ep, from);
/* per-table pre-write hook (elided guard presumably checks ep->prewrite) */
550 ep->prewrite(id, cachep, from);
552 if (do_write(ep, from, id, 1) < 0)
555 if (cachep && cachep != from) /* update the cache if necessary */
556 memcpy(cachep, from, ep->size);
562 * BUF is an element of table TYPE.
563 * ID is its new element ID.
564 * If table is EFF_TYPED, change id and sequence number stored in BUF.
568 ef_set_uid(int type, void *buf, int uid)
570 struct emptypedstr *elt;
573 if (ef_check(type) < 0)
/* untyped tables carry no uid/seqno; nothing to update */
576 if (!(ep->flags & EFF_TYPED))
/* take over the sequence number of the slot we're moving into */
582 elt->seqno = get_seqno(ep, uid);
586 * Return sequence number of element ID in table EP.
587 * Return zero if table is not EFF_TYPED (it has no sequence number
591 get_seqno(struct empfile *ep, int id)
593 struct emptypedstr *elt;
595 if (!(ep->flags & EFF_TYPED))
597 if (id < 0 || id >= ep->fids)
/* element in cache window: read seqno straight from the cache */
599 if (id >= ep->baseid && id < ep->baseid + ep->cids)
600 elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
602 /* need a buffer, steal last cache slot */
/* elided line presumably shrinks ep->cids to free that slot */
603 if (ep->cids == ep->csize)
605 elt = (void *)(ep->cache + ep->cids * ep->size);
606 if (do_read(ep, elt, id, 1) < 0)
607 return 0; /* deep trouble */
613 * Increment sequence number in BUF, which is about to be written to EP.
614 * Do nothing if table is not EFF_TYPED (it has no sequence number
616 * Else, BUF's sequence number must match the one in EP's cache. If
617 * it doesn't, we're about to clobber a previous write.
620 new_seqno(struct empfile *ep, void *buf)
622 struct emptypedstr *elt = buf;
625 if (!(ep->flags & EFF_TYPED))
627 old_seqno = get_seqno(ep, elt->uid);
/* mismatch means a lost-update race; oops but proceed */
628 CANT_HAPPEN(old_seqno != elt->seqno);
629 elt->seqno = old_seqno + 1;
633 * Make all copies stale.
634 * Only fresh copies may be written back to the cache.
635 * To be called by functions that may yield the processor.
636 * Writing a copy when there has been a yield since it was read is
637 * unsafe, because we could clobber another thread's write then.
638 * Robust code must assume that any function that may yield does
639 * yield. Marking copies stale there lets us catch unsafe writes.
647 /* Mark copy of an element of table TYPE in BUF fresh. */
649 ef_mark_fresh(int type, void *buf)
653 if (ef_check(type) < 0)
/* untyped elements have no generation field */
656 if (!(ep->flags & EFF_TYPED))
/* stamp the current generation; ef_make_stale() bumps ef_generation */
658 ((struct emptypedstr *)buf)->generation = ef_generation;
/* Oops when BUF's copy of an EP element is stale (written after a yield). */
662 must_be_fresh(struct empfile *ep, void *buf)
664 struct emptypedstr *elt = buf;
666 if (!(ep->flags & EFF_TYPED))
/* generation is stored truncated — presumably a 12-bit bitfield, hence
 * the 0xfff mask; TODO confirm against struct emptypedstr */
668 CANT_HAPPEN(elt->generation != (ef_generation & 0xfff));
672 * Extend table TYPE by COUNT elements.
673 * Any pointers obtained from ef_ptr() become invalid.
674 * Return non-zero on success, zero on failure.
677 ef_extend(int type, int count)
681 int need_sentinel, i, id;
683 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
686 if (CANT_HAPPEN(count < 0))
690 if (ep->flags & EFF_MEM) {
/* fully cached: grow the cache, leaving room for the sentinel if any */
691 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
692 if (id + count + need_sentinel > ep->csize) {
693 if (ep->flags & EFF_STATIC) {
/* static cache cannot grow */
694 logerror("Can't extend %s beyond %d elements",
695 ep->name, ep->csize - need_sentinel);
698 if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
699 logerror("Can't extend %s to %d elements (%s)",
700 ep->name, id + count, strerror(errno));
/* blank new elements in place, write them through if file-backed */
704 p = ep->cache + id * ep->size;
705 do_blank(ep, p, id, count);
707 if (do_write(ep, p, id, count) < 0)
/* zeroed sentinel element follows the last real one */
711 memset(ep->cache + (id + count) * ep->size, 0, ep->size);
712 ep->cids = id + count;
714 /* need a buffer, steal last cache slot */
/* elided line presumably shrinks ep->cids to free that slot */
715 if (ep->cids == ep->csize)
717 p = ep->cache + ep->cids * ep->size;
/* partially cached: blank and write one element at a time */
718 for (i = 0; i < count; i++) {
719 do_blank(ep, p, id + i, 1);
720 if (do_write(ep, p, id + i, 1) < 0)
724 ep->fids = id + count;
731 * Initialize element ID for table TYPE in BUF.
732 * FIXME pass buffer size!
733 * BUF is marked fresh with ef_mark_fresh().
736 ef_blank(int type, int id, void *buf)
739 struct emptypedstr *elt;
741 if (ef_check(type) < 0)
744 do_blank(ep, buf, id, 1);
/* typed blanks keep the slot's current sequence number */
745 if (ep->flags & EFF_TYPED) {
747 elt->seqno = get_seqno(ep, elt->uid);
749 ef_mark_fresh(type, buf);
753 * Initialize COUNT elements of EP in BUF, starting with element ID.
756 do_blank(struct empfile *ep, void *buf, int id, int count)
759 struct emptypedstr *elt;
/* zero everything first, then fill in the typed header fields */
761 memset(buf, 0, count * ep->size);
762 for (i = 0; i < count; i++) {
763 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
764 if (ep->flags & EFF_TYPED) {
765 elt->ef_type = ep->uid;
774 * Truncate table TYPE to COUNT elements.
775 * Any pointers obtained from ef_ptr() become invalid.
776 * Return non-zero on success, zero on failure.
779 ef_truncate(int type, int count)
784 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
787 if (CANT_HAPPEN(count < 0 || count > ep->fids))
/* shrink the backing file unless privately mapped */
790 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
791 if (ftruncate(ep->fd, count * ep->size) < 0) {
792 logerror("Can't truncate %s to %d elements (%s)",
793 ep->file, count, strerror(errno));
799 if (ep->flags & EFF_MEM) {
800 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
801 if (!(ep->flags & EFF_STATIC)) {
802 if (!ef_realloc_cache(ep, count + need_sentinel)) {
803 logerror("Can't shrink %s cache after truncate (%s)",
804 ep->name, strerror(errno));
805 /* continue with unshrunk cache */
/* re-zero the sentinel slot after the new last element */
809 memset(ep->cache + count * ep->size, 0, ep->size);
/* adjust the cache window so it doesn't extend past the new end */
812 if (ep->baseid >= count)
814 else if (ep->cids > count - ep->baseid)
815 ep->cids = count - ep->baseid;
/* NOTE(review): four small accessor functions follow; their header lines
 * are elided in this extract (presumably ef_cadef, ef_nelem, ef_flags,
 * ef_mtime — verify against the full source). */
826 if (ef_check(type) < 0)
828 return empfile[type].cadef;
834 if (ef_check(type) < 0)
836 return empfile[type].fids;
842 if (ef_check(type) < 0)
844 return empfile[type].flags;
850 if (ef_check(type) < 0)
/* NOTE(review): `fd <= 0` treats descriptor 0 as closed, unlike the
 * `fd < 0` convention used elsewhere in this file — confirm intent */
852 if (empfile[type].fd <= 0)
854 return fdate(empfile[type].fd);
858 * Search for a table matching NAME, return its table type.
859 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
863 ef_byname(char *name)
/* prefix match against empfile[].name via the generic string matcher */
865 return stmtch(name, empfile, offsetof(struct empfile, name),
870 * Search CHOICES[] for a table type matching NAME, return it.
871 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
873 * CHOICES[] must be terminated with a negative value.
876 ef_byname_from(char *name, int choices[])
/* linear scan; mineq() classifies each candidate (elided switch arms) */
882 for (p = choices; *p >= 0; p++) {
883 if (ef_check(*p) < 0)
885 switch (mineq(name, empfile[*p].name)) {
/* Return the name of table TYPE, or a placeholder for invalid types.
 * NOTE(review): function header elided; presumably ef_nameof(int type). */
903 if (ef_check(type) < 0)
904 return "bad ef_type";
905 return empfile[type].name;
/* ef_check() body fragment: unsigned cast rejects negatives and
 * out-of-range types in one comparison */
911 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
917 * Ensure table contains element ID.
918 * If necessary, extend it in steps of COUNT elements.
919 * Return non-zero on success, zero on failure.
922 ef_ensure_space(int type, int id, int count)
924 if (ef_check(type) < 0)
/* grow until ID fits; each ef_extend() adds COUNT elements */
928 while (id >= empfile[type].fids) {
929 if (!ef_extend(type, count))