2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2011, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure, Markus Armbruster
6 * Empire is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 * See files README, COPYING and CREDITS in the root of the source
22 * tree for related information and legal notices. It is expected
23 * that future projects/authors will amend these files as needed.
27 * file.c: Operations on Empire tables (`files' for historical reasons)
29 * Known contributors to this file:
32 * Markus Armbruster, 2005-2011
41 #include <sys/types.h>
51 #include "prototypes.h"
/* Internal helpers; contracts documented at their definitions below. */
static int open_locked(char *, int, mode_t);
static int ef_realloc_cache(struct empfile *, int);
static int fillcache(struct empfile *, int);
static int do_read(struct empfile *, void *, int, int);
static int do_write(struct empfile *, void *, int, int);
static unsigned get_seqno(struct empfile *, int);
static void new_seqno(struct empfile *, void *);
static void must_be_fresh(struct empfile *, void *);
static int do_extend(struct empfile *, int);
static void do_blank(struct empfile *, void *, int, int);
static int ef_check(int);

/*
 * Generation number for stale-copy detection: ef_mark_fresh() stores
 * it into an element copy, must_be_fresh() checks the copy against it.
 */
static unsigned ef_generation;
68 * Open the file-backed table TYPE (EF_SECTOR, ...).
69 * HOW are flags to control operation. Naturally, immutable flags are
71 * The table must not be already open.
72 * Return non-zero on success, zero on failure.
ef_open(int type, int how)
    /* open flags, descriptor, file size, record count, cache slots, failure flag */
    int oflags, fd, fsiz, fids, nslots, fail;

    if (ef_check(type) < 0)
    if (CANT_HAPPEN(how & EFF_IMMUTABLE))
	how &= ~EFF_IMMUTABLE;	/* immutable flags can't be set per-open; drop them */
    if (CANT_HAPPEN(!ep->file || ep->base != EF_BAD || ep->fd >= 0))
    if (how & EFF_PRIVATE)
	oflags |= O_CREAT | O_TRUNC;	/* creating: start from an empty file */
    /* group-writable so the whole game-owner group can run the server */
    fd = open_locked(ep->file, oflags, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	logerror("Can't open %s (%s)", ep->file, strerror(errno));

    if (how & EFF_CREATE) {
	/* new file: element count comes from the table's declared size */
	fids = ep->nent >= 0 ? ep->nent : 0;
	/* existing file: its size must be an exact multiple of the record size */
	if (fsiz % ep->size) {
	    logerror("Can't open %s (file size not a multiple of record size %d)",
	fids = fsiz / ep->size;
	/* fixed-size tables must match their declared element count exactly */
	if (ep->nent >= 0 && ep->nent != fids) {
	    logerror("Can't open %s (got %d records instead of %d)",
		     ep->file, fids, ep->nent);
    if (ep->flags & EFF_STATIC) {
	/* ep->cache already points to space for ep->csize elements */
	if (fids > ep->csize) {
	    CANT_HAPPEN(ep->nent >= 0);	/* insufficient static cache */
	    logerror("Can't open %s (file larger than %d records)",
		     ep->file, ep->csize);
	if (CANT_HAPPEN(ep->cache))
	/* partial cache: size it to one file-system block worth of elements */
	nslots = blksize(fd) / ep->size;
	if (!ef_realloc_cache(ep, nslots)) {
	    logerror("Can't map %s (%s)", ep->file, strerror(errno));
    /* keep the table's immutable flags, adopt the caller's mutable ones */
    ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);

    if (how & EFF_CREATE) {
	/* populate new file */
	fail = !do_extend(ep, fids);
    /* fully cached table: pull the whole file into the cache now */
    if ((how & EFF_MEM) && fids)
	fail = fillcache(ep, 0) != fids;
	ep->cids = 0;		/* prevent cache flush */
/*
 * Open file NAME with OFLAGS and MODE, and lock it against concurrent
 * writers.  Read-only opens share with other readers; writable opens
 * are exclusive against writers.
 */
open_locked(char *name, int oflags, mode_t mode)
    int rdlonly = (oflags & O_ACCMODE) == O_RDONLY;

    /* Windows branch: express the locking via _sopen() share modes */
    fd = _sopen(name, oflags | O_BINARY, rdlonly ? SH_DENYNO : SH_DENYWR,
    /* POSIX branch: plain open(), then an advisory fcntl() record lock */
    fd = open(name, oflags, mode);
    /* lock the whole file: start 0, length 0 means "to end of file" */
    lock.l_type = rdlonly ? F_RDLCK : F_WRLCK;
    lock.l_whence = SEEK_SET;
    lock.l_start = lock.l_len = 0;
    if (fcntl(fd, F_SETLK, &lock) == -1) {
202 * Reallocate cache for table EP to hold COUNT slots.
203 * The table must not be allocated statically.
204 * The cache may still be unmapped.
205 * If reallocation succeeds, any pointers obtained from ef_ptr()
207 * If it fails, the cache is unchanged, and errno is set.
208 * Return non-zero on success, zero on failure.
ef_realloc_cache(struct empfile *ep, int count)
    /* static caches are compiled in and must never be realloc'ed */
    if (CANT_HAPPEN(ep->flags & EFF_STATIC))
    if (CANT_HAPPEN(count < 0))
    /*
     * Avoid zero slots, because that can lead to null cache, which
     * would be interpreted as unmapped cache.
     */
    /* realloc into a local so ep->cache stays valid if realloc fails
       (presumably checked before assignment; check not visible here) */
    cache = realloc(ep->cache, count * ep->size);
236 * Open the table TYPE, which is a view of a base table
237 * The table must not be already open.
238 * Return non-zero on success, zero on failure.
239 * Beware: views work only as long as the base table doesn't change size!
240 * You must close the view before closing its base table.
ef_open_view(int type)
    if (ef_check(type) < 0)
    if (ef_check(base) < 0)
    /* a view must mirror a fully cached base table of identical layout,
       and must not define its own cache or callbacks */
    if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)
	|| ep->size != empfile[base].size
	|| ep->nent != empfile[base].nent
	|| ep->cache || ep->oninit || ep->postread
	|| ep->prewrite || ep->onresize))
    /* share the base table's cache and mirror its state */
    ep->cache = empfile[base].cache;
    ep->csize = empfile[base].csize;
    ep->flags |= EFF_MEM;
    ep->baseid = empfile[base].baseid;
    ep->cids = empfile[base].cids;
    ep->fids = empfile[base].fids;
 * Close the open table TYPE (EF_SECTOR, ...).
 * Return non-zero on success, zero on failure.
    if (ef_check(type) < 0)
    if (EF_IS_VIEW(type)) {
    /* dynamically allocated cache is owned by this table; release it */
    if (!(ep->flags & EFF_STATIC)) {
    if (close(ep->fd) < 0) {
	logerror("Error closing %s (%s)", ep->file, strerror(errno));
    /* reset to closed state: keep only the immutable flags */
    ep->flags &= EFF_IMMUTABLE;
    ep->baseid = ep->cids = ep->fids = 0;
 * Flush file-backed table TYPE (EF_SECTOR, ...) to its backing file.
 * Do nothing if the table is privately mapped.
 * Update timestamps of written elements if table is EFF_TYPED.
 * Return non-zero on success, zero on failure.
    if (ef_check(type) < 0)
    if (ep->flags & EFF_PRIVATE)
	return 1; /* nothing to do */
    if (CANT_HAPPEN(ep->fd < 0))
    /*
     * We don't know which cache entries are dirty. ef_write() writes
     * through, but direct updates through ef_ptr() don't. They are
     * allowed only with EFF_MEM. Assume the whole cache is dirty
     */
    if (ep->flags & EFF_MEM) {
	if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
 * Return pointer to element ID in table TYPE if it exists, else NULL.
 * The table must be fully cached, i.e. flags & EFF_MEM.
 * The caller is responsible for flushing changes he makes.
ef_ptr(int type, int id)
    if (ef_check(type) < 0)
    /* pointer access is only safe when the whole table is in the cache */
    if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
    if (id < 0 || id >= ep->fids)
    return ep->cache + ep->size * id;
 * Read element ID from table TYPE into buffer INTO.
 * FIXME pass buffer size!
 * INTO is marked fresh with ef_mark_fresh().
 * Return non-zero on success, zero on failure.
ef_read(int type, int id, void *into)
    if (ef_check(type) < 0)
    if (CANT_HAPPEN(!ep->cache))
    if (id < 0 || id >= ep->fids)
    if (ep->flags & EFF_MEM) {
	/* fully cached: element is at its ID's slot */
	cachep = ep->cache + id * ep->size;
	/* partially cached: refill the cache window if ID falls outside it */
	if (ep->baseid + ep->cids <= id || ep->baseid > id) {
	    if (fillcache(ep, id) < 1)
	cachep = ep->cache + (id - ep->baseid) * ep->size;
    memcpy(into, cachep, ep->size);
    ef_mark_fresh(type, into);
	/* give the table's post-read hook a chance to fix up the copy */
	ep->postread(id, into);
 * Fill cache of file-backed EP with elements starting at ID.
 * If any were read, return their number.
 * Else return -1 and leave the cache unchanged.
fillcache(struct empfile *ep, int id)
    if (CANT_HAPPEN(!ep->cache))
    /* read as many elements as fit in the cache, but not past end of table */
    ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/*
 * Read COUNT elements starting at ID from file-backed EP into BUF.
 * Return the number of elements read, or a negative value on error
 * (visible error paths log via logerror()).
 */
do_read(struct empfile *ep, void *buf, int id, int count)
    if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
    if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
	logerror("Error seeking %s to elt %d (%s)",
		 ep->file, id, strerror(errno));
    n = count * ep->size;
	/* read() may return short; loop state tracked in p and n */
	ret = read(ep->fd, p, n);
	    /* EINTR is benign: retry the read */
	    if (errno != EINTR) {
		logerror("Error reading %s elt %d (%s)",
			 id + (int)((p - (char *)buf) / ep->size),
	} else if (ret == 0) {
	    /* hitting EOF mid-table means the file is shorter than fids claims */
	    logerror("Unexpected EOF reading %s elt %d",
		     ep->file, id + (int)((p - (char *)buf) / ep->size));
    /* elements fully transferred so far */
    return (p - (char *)buf) / ep->size;
 * Write COUNT elements starting at ID from BUF to file-backed EP.
 * Update the timestamp if the table is EFF_TYPED.
 * Don't actually write if table is privately mapped.
 * Return 0 on success, -1 on error (file may be corrupt then).
do_write(struct empfile *ep, void *buf, int id, int count)
    struct emptypedstr *elt;

    if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
    if (ep->flags & EFF_TYPED) {
	/* EFF_NOTIME tables don't get their timestamps updated */
	now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
	for (i = 0; i < count; i++) {
	     * TODO Oopses here could be due to bad data corruption.
	     * Fail instead of attempting to recover?
	    elt = (struct emptypedstr *)((char *)buf + i * ep->size);
	    /* repair a wrong table type in the element header */
	    if (CANT_HAPPEN(elt->ef_type != ep->uid))
		elt->ef_type = ep->uid;
	    /* element's stored uid must match its position */
	    if (CANT_HAPPEN(elt->uid != id + i))
	    if (now != (time_t)-1)
		elt->timestamp = now;
    /* privately mapped: changes stay in memory, skip the file write */
    if (ep->flags & EFF_PRIVATE)
    if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
	logerror("Error seeking %s to elt %d (%s)",
		 ep->file, id, strerror(errno));
    n = count * ep->size;
	/* write() may be short or interrupted; retry loop state in p and n */
	ret = write(ep->fd, p, n);
	    if (errno != EINTR) {
		logerror("Error writing %s elt %d (%s)",
			 id + (int)((p - (char *)buf) / ep->size),
 * Write element ID into table TYPE from buffer FROM.
 * FIXME pass buffer size!
 * Update timestamp in FROM if table is EFF_TYPED.
 * If table is file-backed and not privately mapped, write through
 * cache straight to disk.
 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
 * Can write at the end of partially cached table.
 * FROM must be fresh; see ef_make_stale().
 * Return non-zero on success, zero on failure.
ef_write(int type, int id, void *from)
    if (ef_check(type) < 0)
    /* EFF_PRIVATE without EFF_MEM has nowhere to store the write */
    if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
    if (id >= ep->fids) {
	/* beyond end of file */
	if (CANT_HAPPEN((ep->flags & EFF_MEM) || id > ep->fids))
	    return 0;		/* not implemented */
	/* write at end of file extends it */
    /* is ID within the current cache window? */
    if (id >= ep->baseid && id < ep->baseid + ep->cids) {
	cachep = ep->cache + (id - ep->baseid) * ep->size;
	    must_be_fresh(ep, from);
	/* give the table's pre-write hook a chance to act on the copy */
	ep->prewrite(id, cachep, from);
	if (do_write(ep, from, id, 1) < 0)
    if (cachep && cachep != from)	/* update the cache if necessary */
	memcpy(cachep, from, ep->size);
 * BUF is an element of table TYPE.
 * ID is its new element ID.
 * If table is EFF_TYPED, change id and sequence number stored in BUF.
ef_set_uid(int type, void *buf, int uid)
    struct emptypedstr *elt;

    if (ef_check(type) < 0)
    /* untyped tables carry no uid/seqno header; nothing to update */
    if (!(ep->flags & EFF_TYPED))
    /* adopt the sequence number of the slot BUF is being moved to */
    elt->seqno = get_seqno(ep, uid);
 * Return sequence number of element ID in table EP.
 * Return zero if table is not EFF_TYPED (it has no sequence number
get_seqno(struct empfile *ep, int id)
    struct emptypedstr *elt;

    if (!(ep->flags & EFF_TYPED))
    if (id < 0 || id >= ep->fids)
    /* cached element: read the seqno straight out of the cache */
    if (id >= ep->baseid && id < ep->baseid + ep->cids)
	elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
    /* need a buffer, steal last cache slot */
    if (ep->cids == ep->csize)
    elt = (void *)(ep->cache + ep->cids * ep->size);
    if (do_read(ep, elt, id, 1) < 0)
	return 0;		/* deep trouble */
 * Increment sequence number in BUF, which is about to be written to EP.
 * Do nothing if table is not EFF_TYPED (it has no sequence number
 * Else, BUF's sequence number must match the one in EP's cache. If
 * it doesn't, we're about to clobber a previous write.
new_seqno(struct empfile *ep, void *buf)
    struct emptypedstr *elt = buf;

    if (!(ep->flags & EFF_TYPED))
    old_seqno = get_seqno(ep, elt->uid);
    /* mismatch means a concurrent write would be clobbered; oops but continue */
    CANT_HAPPEN(old_seqno != elt->seqno);
    elt->seqno = old_seqno + 1;
 * Make all copies stale.
 * Only fresh copies may be written back to the cache.
 * To be called by functions that may yield the processor.
 * Writing a copy when there has been a yield since it was read is
 * unsafe, because we could clobber another thread's write then.
 * Robust code must assume that any function that may yield does
 * yield. Marking copies stale there lets us catch unsafe writes.
/* Mark copy of an element of table TYPE in BUF fresh. */
ef_mark_fresh(int type, void *buf)
    if (ef_check(type) < 0)
    /* untyped elements have no generation field to stamp */
    if (!(ep->flags & EFF_TYPED))
    ((struct emptypedstr *)buf)->generation = ef_generation;
/*
 * Oops when the copy in BUF is stale, i.e. was read before the last
 * ef_make_stale().  No-op for tables that aren't EFF_TYPED.
 */
must_be_fresh(struct empfile *ep, void *buf)
    struct emptypedstr *elt = buf;

    if (!(ep->flags & EFF_TYPED))
    /* generation is a narrow bit-field; compare only the low 12 bits */
    CANT_HAPPEN(elt->generation != (ef_generation & 0xfff));
 * Extend table TYPE by COUNT elements.
 * Any pointers obtained from ef_ptr() become invalid.
 * Return non-zero on success, zero on failure.
ef_extend(int type, int count)
    if (ef_check(type) < 0)
    /* the heavy lifting is in do_extend(); this is the public wrapper */
    if (!do_extend(ep, count))
/*
 * Extend table EP by COUNT blank elements.
 * Grows the cache if needed (EFF_SENTINEL tables reserve one extra
 * zeroed slot past the end), blanks the new elements, and writes them
 * through to the backing file.
 */
do_extend(struct empfile *ep, int count)
    int need_sentinel, i, id;

    /* views share their base table's storage and must not be extended */
    if (CANT_HAPPEN(EF_IS_VIEW(ep->uid)) || count < 0)
    if (ep->flags & EFF_MEM) {
	need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
	if (id + count + need_sentinel > ep->csize) {
	    /* static caches can't grow; fail once the limit is hit */
	    if (ep->flags & EFF_STATIC) {
		logerror("Can't extend %s beyond %d elements",
			 ep->name, ep->csize - need_sentinel);
	    if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
		logerror("Can't extend %s to %d elements (%s)",
			 ep->name, id + count, strerror(errno));
	/* blank the new elements in place, then write them through */
	p = ep->cache + id * ep->size;
	do_blank(ep, p, id, count);
	    if (do_write(ep, p, id, count) < 0)
	    /* zero the sentinel slot just past the new end */
	    memset(ep->cache + (id + count) * ep->size, 0, ep->size);
	ep->cids = id + count;
	/* need a buffer, steal last cache slot */
	if (ep->cids == ep->csize)
	p = ep->cache + ep->cids * ep->size;
	/* not fully cached: blank and write one element at a time */
	for (i = 0; i < count; i++) {
	    do_blank(ep, p, id + i, 1);
	    if (do_write(ep, p, id + i, 1) < 0)
    ep->fids = id + count;
 * Initialize element ID for table TYPE in BUF.
 * FIXME pass buffer size!
 * BUF is marked fresh with ef_mark_fresh().
ef_blank(int type, int id, void *buf)
    struct emptypedstr *elt;

    if (ef_check(type) < 0)
    do_blank(ep, buf, id, 1);
    if (ep->flags & EFF_TYPED) {
	/* carry over the slot's current sequence number */
	elt->seqno = get_seqno(ep, elt->uid);
    ef_mark_fresh(type, buf);
 * Initialize COUNT elements of EP in BUF, starting with element ID.
do_blank(struct empfile *ep, void *buf, int id, int count)
    struct emptypedstr *elt;

    /* zero everything first, then fill in the typed header fields */
    memset(buf, 0, count * ep->size);
    for (i = 0; i < count; i++) {
	elt = (struct emptypedstr *)((char *)buf + i * ep->size);
	if (ep->flags & EFF_TYPED) {
	    elt->ef_type = ep->uid;
 * Truncate table TYPE to COUNT elements.
 * Any pointers obtained from ef_ptr() become invalid.
 * Return non-zero on success, zero on failure.
ef_truncate(int type, int count)
    if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
    /* can only shrink; growing is ef_extend()'s job */
    if (CANT_HAPPEN(count < 0 || count > ep->fids))
    /* shrink the backing file unless privately mapped */
    if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
	if (ftruncate(ep->fd, count * ep->size) < 0) {
	    logerror("Can't truncate %s to %d elements (%s)",
		     ep->file, count, strerror(errno));
    if (ep->flags & EFF_MEM) {
	need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
	if (!(ep->flags & EFF_STATIC)) {
	    if (!ef_realloc_cache(ep, count + need_sentinel)) {
		logerror("Can't shrink %s cache after truncate (%s)",
			 ep->name, strerror(errno));
		/* continue with unshrunk cache */
	    /* re-zero the sentinel slot at the new end */
	    memset(ep->cache + count * ep->size, 0, ep->size);
    /* clip the cache window to the new table size */
    if (ep->baseid >= count)
    else if (ep->cids > count - ep->baseid)
	ep->cids = count - ep->baseid;
    /* accessor: return the table's selector (ca) definitions */
    if (ef_check(type) < 0)
    return empfile[type].cadef;

    /* accessor: return the table's element count */
    if (ef_check(type) < 0)
    return empfile[type].fids;

    /* accessor: return the table's flags */
    if (ef_check(type) < 0)
    return empfile[type].flags;

    /* accessor: return the backing file's modification time */
    if (ef_check(type) < 0)
    /* NOTE(review): other code here tests fd < 0 for "not open"; the
       <= 0 below also rejects fd 0 — confirm whether that's intended */
    if (empfile[type].fd <= 0)
    return fdate(empfile[type].fd);
 * Search for a table matching NAME, return its table type.
 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
ef_byname(char *name)
    /* prefix-match NAME against the name member of each empfile[] entry */
    return stmtch(name, empfile, offsetof(struct empfile, name),
 * Search CHOICES[] for a table type matching NAME, return it.
 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
 * CHOICES[] must be terminated with a negative value.
ef_byname_from(char *name, int choices[])
    /* scan until the negative terminator */
    for (p = choices; *p >= 0; p++) {
	if (ef_check(*p) < 0)
	/* mineq() classifies the match: none, prefix, or exact */
	switch (mineq(name, empfile[*p].name)) {
    /* return the table's name, or a placeholder for an invalid type */
    if (ef_check(type) < 0)
	return "bad ef_type";
    return empfile[type].name;

    /* validate a table type: oops and fail unless 0 <= type < EF_MAX */
    if (CANT_HAPPEN((unsigned)type >= EF_MAX))
 * Ensure table contains element ID.
 * If necessary, extend it in steps of COUNT elements.
 * Return non-zero on success, zero on failure.
ef_ensure_space(int type, int id, int count)
    if (ef_check(type) < 0)
    /* grow in COUNT-sized chunks until ID is inside the table */
    while (id >= empfile[type].fids) {
	if (!ef_extend(type, count))
958 * Return maximum ID acceptable for table TYPE.
959 * Assuming infinite memory and disk space.
962 ef_id_limit(int type)
966 if (ef_check(type) < 0)
971 if (ep->flags & EFF_MEM) {
972 if (ep->flags & EFF_STATIC)
973 return ep->csize - 1 - ((ep->flags & EFF_SENTINEL) != 0);