2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2020, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure, Markus Armbruster
6 * Empire is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 * See files README, COPYING and CREDITS in the root of the source
22 * tree for related information and legal notices. It is expected
23 * that future projects/authors will amend these files as needed.
27 * file.c: Operations on Empire tables (`files' for historical reasons)
29 * Known contributors to this file:
32 * Markus Armbruster, 2005-2014
41 #include <sys/types.h>
50 #include "prototypes.h"
/* Forward declarations of this module's static helpers (defined below). */
52 static int open_locked(char *, int, mode_t);
53 static int ef_realloc_cache(struct empfile *, int);
54 static int fillcache(struct empfile *, int);
55 static int do_read(struct empfile *, void *, int, int);
56 static int do_write(struct empfile *, void *, int, int);
57 static unsigned get_seqno(struct empfile *, int);
58 static void new_seqno(struct empfile *, void *);
59 static void must_be_fresh(struct empfile *, void *);
60 static int do_extend(struct empfile *, int);
61 static void do_blank(struct empfile *, void *, int, int);
62 static int ef_check(int);
/*
 * Generation counter for stale-copy detection: element copies are
 * stamped with it in ef_mark_fresh() and checked in must_be_fresh().
 * Presumably incremented by ef_make_stale() (body not visible in this
 * extract) — TODO confirm.
 */
64 static unsigned ef_generation;
67 * Open the file-backed table @type (EF_SECTOR, ...).
68 * @how are flags to control operation. Naturally, immutable flags are
 * rejected (EFF_IMMUTABLE bits must not be passed in; enforced below).
70 * The table must not be already open.
71 * Return non-zero on success, zero on failure.
74 ef_open(int type, int how)
77 int oflags, fd, fsiz, fids, nslots, fail;
79 if (ef_check(type) < 0)
81 if (CANT_HAPPEN(how & EFF_IMMUTABLE))
82 how &= ~EFF_IMMUTABLE;
/* Table must be file-backed, not a view, and not already open */
86 if (CANT_HAPPEN(!ep->file || ep->base != EF_BAD || ep->fd >= 0))
88 if (CANT_HAPPEN(ep->prewrite && !(how & EFF_MEM)))
89 return 0; /* not implemented */
91 if (how & EFF_PRIVATE)
94 oflags |= O_CREAT | O_TRUNC;
95 fd = open_locked(ep->file, oflags, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
97 logerror("Can't open %s (%s)", ep->file, strerror(errno));
/* Work out the number of records on file, @fids */
102 if (how & EFF_CREATE) {
103 fids = ep->nent >= 0 ? ep->nent : 0;
/* Existing file: size must be an exact multiple of the record size */
106 if (fsiz % ep->size) {
107 logerror("Can't open %s (file size not a multiple of record size %d)",
112 fids = fsiz / ep->size;
/* A fixed-size table (nent >= 0) must match the on-disk record count */
113 if (ep->nent >= 0 && ep->nent != fids) {
114 logerror("Can't open %s (got %d records instead of %d)",
115 ep->file, fids, ep->nent);
/* Set up the cache */
122 if (ep->flags & EFF_STATIC) {
123 /* ep->cache already points to space for ep->csize elements */
125 if (fids > ep->csize) {
126 CANT_HAPPEN(ep->nent >= 0); /* insufficient static cache */
127 logerror("Can't open %s (file larger than %d records)",
128 ep->file, ep->csize);
134 if (CANT_HAPPEN(ep->cache))
/* size the cache in elements per file-system block — assumes blksize()
 * returns the preferred I/O block size of @fd; TODO confirm */
139 nslots = blksize(fd) / ep->size;
140 if (!ef_realloc_cache(ep, nslots)) {
141 logerror("Can't map %s (%s)", ep->file, strerror(errno));
/* keep immutable flags, take the rest from @how (minus EFF_CREATE) */
148 ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
151 if (how & EFF_CREATE) {
152 /* populate new file */
154 fail = !do_extend(ep, fids);
157 if ((how & EFF_MEM) && fids)
158 fail = fillcache(ep, 0) != fids;
163 ep->cids = 0; /* prevent cache flush */
/*
 * Open file @name with flags @oflags and (for O_CREAT) mode @mode,
 * and acquire an advisory lock: shared when opened read-only,
 * exclusive otherwise.  Two paths are visible: _sopen() with
 * share-deny flags (presumably the Windows build — TODO confirm the
 * surrounding #ifdef) and POSIX open() plus fcntl() record locking.
 */
174 open_locked(char *name, int oflags, mode_t mode)
176 int rdlonly = (oflags & O_ACCMODE) == O_RDONLY;
/* deny other writers unless we only read ourselves */
180 fd = _sopen(name, oflags | O_BINARY, rdlonly ? SH_DENYNO : SH_DENYWR,
187 fd = open(name, oflags, mode);
/* lock the whole file: read lock if read-only, else write lock */
191 lock.l_type = rdlonly ? F_RDLCK : F_WRLCK;
192 lock.l_whence = SEEK_SET;
193 lock.l_start = lock.l_len = 0;
194 if (fcntl(fd, F_SETLK, &lock) == -1) {
203 * Reallocate cache for table @ep to hold @count slots.
204 * The table must not be allocated statically.
205 * The cache may still be unmapped.
206 * If reallocation succeeds, any pointers obtained from ef_ptr()
 * become invalid (the cache memory may move).
208 * If it fails, the cache is unchanged, and errno is set.
209 * Return non-zero on success, zero on failure.
212 ef_realloc_cache(struct empfile *ep, int count)
216 if (CANT_HAPPEN(ep->flags & EFF_STATIC))
218 if (CANT_HAPPEN(count < 0))
222 * Avoid zero slots, because that can lead to null cache, which
223 * would be interpreted as unmapped cache.
227 cache = realloc(ep->cache, count * ep->size);
237 * Open the table @type, which is a view of a base table
238 * The table must not be already open.
239 * Return non-zero on success, zero on failure.
240 * Beware: views work only as long as the base table doesn't change size!
241 * You must close the view before closing its base table.
244 ef_open_view(int type)
249 if (ef_check(type) < 0)
253 if (ef_check(base) < 0)
/* base must be fully cached, same layout, and the view must be bare:
 * no cache of its own and no callbacks */
255 if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)
257 || ep->size != empfile[base].size
258 || ep->nent != empfile[base].nent
259 || ep->cache || ep->oninit || ep->postread
260 || ep->prewrite || ep->onresize))
/* the view shares the base table's cache and bookkeeping */
263 ep->cache = empfile[base].cache;
264 ep->csize = empfile[base].csize;
265 ep->flags |= EFF_MEM;
266 ep->baseid = empfile[base].baseid;
267 ep->cids = empfile[base].cids;
268 ep->fids = empfile[base].fids;
273 * Close the open table @type (EF_SECTOR, ...).
274 * Return non-zero on success, zero on failure.
276 if (ef_check(type) < 0)

wait
311 * Flush file-backed table @type (EF_SECTOR, ...) to its backing file.
312 * Do nothing if the table is privately mapped.
313 * Update timestamps of written elements if table is EFF_TYPED.
314 * Return non-zero on success, zero on failure.
321 if (ef_check(type) < 0)
324 if (ep->flags & EFF_PRIVATE)
325 return 1; /* nothing to do */
326 if (CANT_HAPPEN(ep->fd < 0))
329 * We don't know which cache entries are dirty. ef_write() writes
330 * through, but direct updates through ef_ptr() don't. They are
331 * allowed only with EFF_MEM. Assume the whole cache is dirty
334 if (ep->flags & EFF_MEM) {
335 if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
343 * Return pointer to element @id in table @type if it exists, else NULL.
344 * The table must be fully cached, i.e. flags & EFF_MEM.
345 * The caller is responsible for flushing changes he makes.
 * Beware: the pointer is invalidated by ef_extend() and
 * ef_realloc_cache(), which may move the cache.
348 ef_ptr(int type, int id)
352 if (ef_check(type) < 0)
355 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
/* out-of-range @id yields NULL, not an oops */
357 if (id < 0 || id >= ep->fids)
359 return ep->cache + ep->size * id;
363 * Read element @id from table @type into buffer @into.
364 * FIXME pass buffer size!
365 * @into is marked fresh with ef_mark_fresh().
366 * Return non-zero on success, zero on failure.
369 ef_read(int type, int id, void *into)
374 if (ef_check(type) < 0)
377 if (CANT_HAPPEN(!ep->cache))
379 if (id < 0 || id >= ep->fids)
382 if (ep->flags & EFF_MEM) {
/* fully cached: element is at its id directly */
383 cachep = ep->cache + id * ep->size;
/* partially cached: fetch from file on cache miss */
385 if (ep->baseid + ep->cids <= id || ep->baseid > id) {
386 if (fillcache(ep, id) < 1)
389 cachep = ep->cache + (id - ep->baseid) * ep->size;
391 memcpy(into, cachep, ep->size);
392 ef_mark_fresh(type, into);
/* give the table's post-read callback a chance to fix up @into */
395 ep->postread(id, into);
400 * Fill cache of file-backed @ep with elements starting at @id.
401 * If any were read, return their number.
402 * Else return -1 and leave the cache unchanged.
405 fillcache(struct empfile *ep, int id)
409 if (CANT_HAPPEN(!ep->cache))
/* read as many elements as fit the cache, but not past end of file */
412 ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/*
 * Read @count elements from @ep's backing file into @buf, starting
 * with element @id.  Short reads are resumed at the next unread byte;
 * read() is retried on EINTR.  Returns the number of elements read
 * (final return below); the value returned on seek/read error or
 * unexpected EOF is not visible in this extract — presumably -1,
 * TODO confirm.
 */
422 do_read(struct empfile *ep, void *buf, int id, int count)
427 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
430 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
431 logerror("Error seeking %s to elt %d (%s)",
432 ep->file, id, strerror(errno));
437 n = count * ep->size;
439 ret = read(ep->fd, p, n);
441 if (errno != EINTR) {
/* report the element where the failure happened, derived from
 * the progress pointer @p */
442 logerror("Error reading %s elt %d (%s)",
444 id + (int)((p - (char *)buf) / ep->size),
448 } else if (ret == 0) {
449 logerror("Unexpected EOF reading %s elt %d",
450 ep->file, id + (int)((p - (char *)buf) / ep->size));
458 return (p - (char *)buf) / ep->size;
462 * Write @count elements starting at @id from @buf to file-backed @ep.
463 * Update the timestamp if the table is EFF_TYPED.
464 * Don't actually write if table is privately mapped.
465 * Return 0 on success, -1 on error (file may be corrupt then).
468 do_write(struct empfile *ep, void *buf, int id, int count)
472 struct ef_typedstr *elt;
475 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
/* fix up the typed header of each element before it hits disk */
478 if (ep->flags & EFF_TYPED) {
/* EFF_NOTIME suppresses timestamp updates */
479 now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
480 for (i = 0; i < count; i++) {
482 * TODO Oopses here could be due to bad data corruption.
483 * Fail instead of attempting to recover?
485 elt = (struct ef_typedstr *)((char *)buf + i * ep->size);
486 if (CANT_HAPPEN(elt->ef_type != ep->uid))
487 elt->ef_type = ep->uid;
488 if (CANT_HAPPEN(elt->uid != id + i))
490 if (now != (time_t)-1)
491 elt->timestamp = now;
/* privately mapped: pretend success without touching the file */
495 if (ep->flags & EFF_PRIVATE)
498 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
499 logerror("Error seeking %s to elt %d (%s)",
500 ep->file, id, strerror(errno));
505 n = count * ep->size;
/* short writes are resumed; see the matching loop in do_read() */
507 ret = write(ep->fd, p, n);
509 if (errno != EINTR) {
510 logerror("Error writing %s elt %d (%s)",
512 id + (int)((p - (char *)buf) / ep->size),
526 * Write element @id into table @type from buffer @from.
527 * FIXME pass buffer size!
528 * Update timestamp in @from if table is EFF_TYPED.
529 * If table is file-backed and not privately mapped, write through
530 * cache straight to disk.
531 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
532 * Can write at the end of partially cached table.
533 * @from must be fresh; see ef_make_stale().
534 * Return non-zero on success, zero on failure.
537 ef_write(int type, int id, void *from)
542 if (ef_check(type) < 0)
/* EFF_PRIVATE without EFF_MEM is not supported */
545 if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
547 if (CANT_HAPPEN(id < 0))
549 if (CANT_HAPPEN(ep->nent >= 0 && id >= ep->nent))
550 return 0; /* beyond fixed size */
552 if (id >= ep->fids) {
553 /* beyond end of file */
554 if (CANT_HAPPEN((ep->flags & EFF_MEM) || id > ep->fids))
555 return 0; /* not implemented */
556 /* write at end of file extends it */
/* is @id currently cached? */
561 if (id >= ep->baseid && id < ep->baseid + ep->cids) {
562 cachep = ep->cache + (id - ep->baseid) * ep->size;
564 must_be_fresh(ep, from);
/* let the table's pre-write callback inspect/fix up the element */
568 ep->prewrite(id, cachep, from);
570 if (do_write(ep, from, id, 1) < 0)
573 if (cachep && cachep != from) /* update the cache if necessary */
574 memcpy(cachep, from, ep->size);
 * Change the element ID stored in @buf to @uid.
580 * @buf is an element of table @type.
581 * @id is its new element ID.
582 * If table is EFF_TYPED, change ID and sequence number stored in @buf.
586 ef_set_uid(int type, void *buf, int uid)
588 struct ef_typedstr *elt;
591 if (ef_check(type) < 0)
/* tables without the typed header carry no uid/seqno to update */
594 if (!(ep->flags & EFF_TYPED))
/* adopt the sequence number of the slot @uid so a later write passes
 * new_seqno()'s clobber check */
600 elt->seqno = get_seqno(ep, uid);
604 * Are *@a and *@b equal, except for timestamps and such?
607 ef_typedstr_eq(struct ef_typedstr *a, struct ef_typedstr *b)
609 return a->ef_type == b->ef_type
610 && a->seqno == b->seqno
/* compare only the payload behind the common typed header, so the
 * header's timestamp does not affect equality */
612 && !memcmp((char *)a + sizeof(*a), (char *)b + sizeof(*a),
613 empfile[a->ef_type].size - sizeof(*a));
617 * Return sequence number of element @id in table @ep.
618 * Return zero if table is not EFF_TYPED (it has no sequence number
 * then), and on read failure.
622 get_seqno(struct empfile *ep, int id)
624 struct ef_typedstr *elt;
626 if (!(ep->flags & EFF_TYPED))
628 if (id < 0 || id >= ep->fids)
/* cached element: read the seqno straight from the cache */
630 if (id >= ep->baseid && id < ep->baseid + ep->cids)
631 elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
633 /* need a buffer, steal last cache slot */
634 if (ep->cids == ep->csize)
636 elt = (void *)(ep->cache + ep->cids * ep->size);
637 if (do_read(ep, elt, id, 1) < 0)
638 return 0; /* deep trouble */
644 * Increment sequence number in @buf, which is about to be written to @ep.
645 * Do nothing if table is not EFF_TYPED (it has no sequence number
 * then).
647 * Else, @buf's sequence number must match the one in @ep's cache. If
648 * it doesn't, we're about to clobber a previous write.
651 new_seqno(struct empfile *ep, void *buf)
653 struct ef_typedstr *elt = buf;
656 if (!(ep->flags & EFF_TYPED))
/* mismatch means a concurrent write got in between — oops loudly,
 * but proceed with the incremented seqno anyway */
658 old_seqno = get_seqno(ep, elt->uid);
659 CANT_HAPPEN(old_seqno != elt->seqno);
660 elt->seqno = old_seqno + 1;
664 * Make all copies stale.
665 * Only fresh copies may be written back to the cache.
666 * To be called by functions that may yield the processor.
667 * Writing a copy when there has been a yield since it was read is
668 * unsafe, because we could clobber another thread's write then.
669 * Robust code must assume that any function that may yield does
670 * yield. Marking copies stale there lets us catch unsafe writes.
678 /* Mark copy of an element of table @type in @buf fresh. */
680 ef_mark_fresh(int type, void *buf)
684 if (ef_check(type) < 0)
/* only EFF_TYPED elements carry a generation field */
687 if (!(ep->flags & EFF_TYPED))
/* stamp the current generation; must_be_fresh() compares against it */
689 ((struct ef_typedstr *)buf)->generation = ef_generation;
/*
 * Complain (oops) when @buf is not a fresh copy of its @ep element,
 * i.e. its generation stamp differs from the current ef_generation.
 * The 0xfff mask suggests the generation field holds only the low 12
 * bits (presumably a bitfield in struct ef_typedstr — TODO confirm).
 */
693 must_be_fresh(struct empfile *ep, void *buf)
695 struct ef_typedstr *elt = buf;
697 if (!(ep->flags & EFF_TYPED))
699 CANT_HAPPEN(elt->generation != (ef_generation & 0xfff));
703 * Extend table @type by @count elements.
704 * Any pointers obtained from ef_ptr() become invalid.
705 * Return non-zero on success, zero on failure.
708 ef_extend(int type, int count)
712 if (ef_check(type) < 0)
/* fixed-size tables (nent >= 0) cannot grow */
716 logerror("Can't extend %s, its size is fixed", ep->name);
719 if (!do_extend(ep, count))
/*
 * Extend @ep by @count blanked elements (work horse of ef_extend()).
 * Fully cached tables grow the cache (plus an optional sentinel
 * element); file-backed tables additionally write the new elements
 * through to disk.
 */
727 do_extend(struct empfile *ep, int count)
730 int need_sentinel, i, id;
732 if (CANT_HAPPEN(EF_IS_VIEW(ep->uid)) || count < 0)
736 if (ep->flags & EFF_MEM) {
737 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
/* grow the cache when the new elements (and sentinel) don't fit */
738 if (id + count + need_sentinel > ep->csize) {
739 if (ep->flags & EFF_STATIC) {
740 logerror("Can't extend %s beyond %d elements",
741 ep->name, ep->csize - need_sentinel);
744 if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
745 logerror("Can't extend %s to %d elements (%s)",
746 ep->name, id + count, strerror(errno));
750 p = ep->cache + id * ep->size;
751 do_blank(ep, p, id, count);
753 if (do_write(ep, p, id, count) < 0)
/* the sentinel is an all-zero element past the last real one */
757 memset(ep->cache + (id + count) * ep->size, 0, ep->size);
758 ep->cids = id + count;
760 /* need a buffer, steal last cache slot */
761 if (ep->cids == ep->csize)
763 p = ep->cache + ep->cids * ep->size;
/* not fully cached: blank and write out one element at a time */
764 for (i = 0; i < count; i++) {
765 do_blank(ep, p, id + i, 1);
766 if (do_write(ep, p, id + i, 1) < 0)
770 ep->fids = id + count;
775 * Initialize element @id for table @type in @buf.
776 * FIXME pass buffer size!
777 * @buf is marked fresh with ef_mark_fresh().
780 ef_blank(int type, int id, void *buf)
783 struct ef_typedstr *elt;
785 if (ef_check(type) < 0)
788 do_blank(ep, buf, id, 1);
789 if (ep->flags & EFF_TYPED) {
/* keep the on-disk sequence number so a later ef_write() passes
 * new_seqno()'s clobber check */
791 elt->seqno = get_seqno(ep, elt->uid);
793 ef_mark_fresh(type, buf);
797 * Initialize @count elements of @ep in @buf, starting with element @id.
 * Elements are zero-filled; EFF_TYPED elements additionally get their
 * type header initialized.
800 do_blank(struct empfile *ep, void *buf, int id, int count)
803 struct ef_typedstr *elt;
805 memset(buf, 0, count * ep->size);
806 for (i = 0; i < count; i++) {
807 elt = (struct ef_typedstr *)((char *)buf + i * ep->size);
808 if (ep->flags & EFF_TYPED) {
809 elt->ef_type = ep->uid;
818 * Truncate table @type to @count elements.
819 * Any pointers obtained from ef_ptr() become invalid.
820 * Return non-zero on success, zero on failure.
823 ef_truncate(int type, int count)
828 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
/* fixed-size tables (nent >= 0) cannot shrink */
832 logerror("Can't truncate %s, its size is fixed", ep->name);
835 if (CANT_HAPPEN(count < 0 || count > ep->fids))
/* shrink the backing file first, unless privately mapped */
838 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
839 if (ftruncate(ep->fd, count * ep->size) < 0) {
840 logerror("Can't truncate %s to %d elements (%s)",
841 ep->file, count, strerror(errno));
847 if (ep->flags & EFF_MEM) {
848 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
849 if (!(ep->flags & EFF_STATIC)) {
850 if (!ef_realloc_cache(ep, count + need_sentinel)) {
851 logerror("Can't shrink %s cache after truncate (%s)",
852 ep->name, strerror(errno));
853 /* continue with unshrunk cache */
/* re-blank the sentinel element past the new end */
857 memset(ep->cache + count * ep->size, 0, ep->size);
/* fix up the partial-cache window [baseid, baseid+cids) */
860 if (ep->baseid >= count)
862 else if (ep->cids > count - ep->baseid)
863 ep->cids = count - ep->baseid;
/* Body of an accessor returning the selector array (cadef) of @type;
 * presumably ef_cadef() — its signature is elided in this extract. */
874 if (ef_check(type) < 0)
876 return empfile[type].cadef;
/* Accessor returning the number of elements on file (fids) of @type;
 * presumably ef_nelem(). */
882 if (ef_check(type) < 0)
884 return empfile[type].fids;
/* Accessor returning the flags of @type; presumably ef_flags(). */
890 if (ef_check(type) < 0)
892 return empfile[type].flags;
/* Accessor returning the modification time of @type's backing file;
 * presumably ef_mtime().  fd <= 0 is treated as "no usable file". */
898 if (ef_check(type) < 0)
900 if (empfile[type].fd <= 0)
902 return fdate(empfile[type].fd);
906 * Search for a table matching @name, return its table type.
907 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
 * several.
911 ef_byname(char *name)
/* delegate the prefix match over all table names to stmtch() */
913 return stmtch(name, empfile, offsetof(struct empfile, name),
918 * Search @choices[] for a table type matching @name, return it.
919 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
 * several.
921 * @choices[] must be terminated with a negative value.
924 ef_byname_from(char *name, int choices[])
930 for (p = choices; *p >= 0; p++) {
931 if (ef_check(*p) < 0)
/* mineq() classifies @name against each candidate table name */
933 switch (mineq(name, empfile[*p].name)) {
949 * Return name of table @type. Always a single, short word.
954 if (ef_check(type) < 0)
956 return empfile[type].name;
960 * Return "pretty" name of table @type.
963 ef_nameof_pretty(int type)
965 if (ef_check(type) < 0)
967 return empfile[type].pretty_name;
/* ef_check() body: oops on a table type outside [0, EF_MAX) */
973 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
979 * Ensure table @type contains element @id.
980 * If necessary, extend it in steps of @count elements.
981 * Return non-zero on success, zero on failure.
984 ef_ensure_space(int type, int id, int count)
986 if (ef_check(type) < 0 || CANT_HAPPEN(id < 0))
/* grow in chunks of @count until @id fits */
989 while (id >= empfile[type].fids) {
990 if (!ef_extend(type, count))
997 * Return maximum ID acceptable for table @type.
998 * Assuming infinite memory and disk space.
1001 ef_id_limit(int type)
1005 if (ef_check(type) < 0)
1007 ep = &empfile[type];
1009 return ep->nent - 1;
1010 if (ep->flags & EFF_MEM) {
1011 if (ep->flags & EFF_STATIC)
1012 return ep->csize - 1 - ((ep->flags & EFF_SENTINEL) != 0);