2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2009, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * See files README, COPYING and CREDITS in the root of the source
23 * tree for related information and legal notices. It is expected
24 * that future projects/authors will amend these files as needed.
28 * file.c: Operations on Empire tables (`files' for historical reasons)
30 * Known contributors to this file:
33 * Markus Armbruster, 2005-2009
41 #include <sys/types.h>
51 #include "prototypes.h"
/* Forward declarations for the file-local helpers defined below. */
53 static int open_locked(char *, int, mode_t);
54 static int ef_realloc_cache(struct empfile *, int);
55 static int fillcache(struct empfile *, int);
56 static int do_read(struct empfile *, void *, int, int);
57 static int do_write(struct empfile *, void *, int, int);
58 static unsigned get_seqno(struct empfile *, int);
59 static void new_seqno(struct empfile *, void *);
60 static void must_be_fresh(struct empfile *, void *);
61 static void do_blank(struct empfile *, void *, int, int);
62 static int ef_check(int);
/*
 * Freshness generation counter: stamped into elements by
 * ef_mark_fresh() and checked against by must_be_fresh().
 */
64 static unsigned ef_generation;
67 * Open the file-backed table TYPE (EF_SECTOR, ...).
68 * HOW are flags to control operation. Naturally, immutable flags are
70 * If NELT is non-negative, the table must have that many elements.
71 * Return non-zero on success, zero on failure.
72 * You must call ef_close() before the next ef_open().
75 ef_open(int type, int how, int nelt)
78 int oflags, fd, fsiz, nslots;
80 if (ef_check(type) < 0)
82 if (CANT_HAPPEN(how & EFF_IMMUTABLE))
/* callers may not change immutable flags; strip them and go on */
83 how &= ~EFF_IMMUTABLE;
/* the table must not already be open (fd < 0 when closed) */
87 if (CANT_HAPPEN(ep->fd >= 0))
/* EFF_PRIVATE tables are never written back to their file */
90 if (how & EFF_PRIVATE)
93 oflags |= O_CREAT | O_TRUNC;
94 fd = open_locked(ep->file, oflags, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
96 logerror("Can't open %s (%s)", ep->file, strerror(errno));
/* file size must be an exact multiple of the record size */
102 if (fsiz % ep->size) {
103 logerror("Can't open %s (file size not a multiple of record size %d)",
108 ep->fids = fsiz / ep->size;
109 if (nelt >= 0 && nelt != ep->fids) {
110 logerror("Can't open %s (got %d records instead of %d)",
111 ep->file, ep->fids, nelt);
117 if (ep->flags & EFF_STATIC) {
118 /* ep->cache already points to space for ep->csize elements */
120 if (ep->fids > ep->csize) {
121 logerror("Can't open %s: file larger than %d bytes",
122 ep->file, ep->fids * ep->size);
128 if (CANT_HAPPEN(ep->cache))
/* partial cache: one file-system block's worth of records */
133 nslots = blksize(fd) / ep->size;
134 if (!ef_realloc_cache(ep, nslots)) {
135 logerror("Can't map %s (%s)", ep->file, strerror(errno));
/* keep immutable flags, take the rest from HOW (EFF_CREATE is transient) */
142 ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
145 /* map file into cache */
146 if ((how & EFF_MEM) && ep->fids) {
147 if (fillcache(ep, 0) != ep->fids) {
148 ep->cids = 0; /* prevent cache flush */
149 ep->flags &= EFF_IMMUTABLE; /* maintain invariant */
/* notify interested parties that the table changed size */
155 if (ep->onresize && ep->onresize(type) < 0)
/*
 * Open file NAME with open flags OFLAGS and creation mode MODE,
 * locking the whole file: shared lock when opened read-only,
 * exclusive lock otherwise.  On Windows this uses _sopen() share
 * modes; on POSIX it uses fcntl() record locks covering the file.
 */
161 open_locked(char *name, int oflags, mode_t mode)
163 int rdlonly = (oflags & O_ACCMODE) == O_RDONLY;
/* Windows CRT path: deny other writers unless we're read-only */
167 fd = _sopen(name, oflags | O_BINARY, rdlonly ? SH_DENYNO : SH_DENYWR,
/* POSIX path */
174 fd = open(name, oflags, mode);
178 lock.l_type = rdlonly ? F_RDLCK : F_WRLCK;
179 lock.l_whence = SEEK_SET;
/* l_len == 0 means "lock to end of file", i.e. the whole file */
180 lock.l_start = lock.l_len = 0;
181 if (fcntl(fd, F_SETLK, &lock) == -1) {
190 * Reallocate cache for table EP to hold COUNT slots.
191 * The table must not be allocated statically.
192 * The cache may still be unmapped.
193 * If reallocation succeeds, any pointers obtained from ef_ptr()
195 * If it fails, the cache is unchanged, and errno is set.
196 * Return non-zero on success, zero on failure.
199 ef_realloc_cache(struct empfile *ep, int count)
203 if (CANT_HAPPEN(ep->flags & EFF_STATIC))
205 if (CANT_HAPPEN(count < 0))
209 * Avoid zero slots, because that can lead to null cache, which
210 * would be interpreted as unmapped cache.
/* realloc preserves contents; on failure ep->cache stays valid */
214 cache = realloc(ep->cache, count * ep->size);
224 * Open the table TYPE as view of table BASE.
225 * Return non-zero on success, zero on failure.
226 * Beware: views work only as long as BASE doesn't change size!
227 * You must call ef_close(TYPE) before closing BASE.
230 ef_open_view(int type, int base)
234 if (CANT_HAPPEN(!EF_IS_VIEW(type)))
/* a view can only sit on top of a fully cached (EFF_MEM) base */
237 if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)))
/* share the base table's cache and mirror its id bookkeeping */
240 ep->cache = empfile[base].cache;
241 ep->csize = empfile[base].csize;
242 ep->flags |= EFF_MEM;
243 ep->baseid = empfile[base].baseid;
244 ep->cids = empfile[base].cids;
245 ep->fids = empfile[base].fids;
250 * Close the file-backed table TYPE (EF_SECTOR, ...).
251 * Return non-zero on success, zero on failure.
259 if (ef_check(type) < 0)
/* views share their base's cache; nothing of their own to tear down */
263 if (EF_IS_VIEW(type))
/* drop the mutable flags, keeping only the immutable ones */
268 ep->flags &= EFF_IMMUTABLE;
/* non-static cache was allocated by ef_realloc_cache() */
269 if (!(ep->flags & EFF_STATIC)) {
273 if (close(ep->fd) < 0) {
274 logerror("Error closing %s (%s)", ep->file, strerror(errno));
275 ep->baseid = ep->cids = ep->fids = 0;
280 if (ep->onresize && ep->onresize(type) < 0)
286 * Flush file-backed table TYPE (EF_SECTOR, ...) to its backing file.
287 * Do nothing if the table is privately mapped.
288 * Update timestamps of written elements if table is EFF_TYPED.
289 * Return non-zero on success, zero on failure.
296 if (ef_check(type) < 0)
299 if (ep->flags & EFF_PRIVATE)
300 return 1; /* nothing to do */
301 if (CANT_HAPPEN(ep->fd < 0))
304 * We don't know which cache entries are dirty. ef_write() writes
305 * through, but direct updates through ef_ptr() don't. They are
306 * allowed only with EFF_MEM. Assume the whole cache is dirty
309 if (ep->flags & EFF_MEM) {
/* write back the entire cached window in one go */
310 if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
318 * Return pointer to element ID in table TYPE if it exists, else NULL.
319 * The table must be fully cached, i.e. flags & EFF_MEM.
320 * The caller is responsible for flushing changes he makes.
323 ef_ptr(int type, int id)
327 if (ef_check(type) < 0)
330 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
332 if (id < 0 || id >= ep->fids)
/* cache is addressed in bytes: element ID lives at id * size */
334 return ep->cache + ep->size * id;
338 * Read element ID from table TYPE into buffer INTO.
339 * FIXME pass buffer size!
340 * Return non-zero on success, zero on failure.
343 ef_read(int type, int id, void *into)
348 if (ef_check(type) < 0)
351 if (CANT_HAPPEN(!ep->cache))
353 if (id < 0 || id >= ep->fids)
/* fully cached: the element is at a fixed offset */
356 if (ep->flags & EFF_MEM) {
357 cachep = ep->cache + id * ep->size;
/* partially cached: refill if ID lies outside [baseid, baseid+cids) */
359 if (ep->baseid + ep->cids <= id || ep->baseid > id) {
360 if (fillcache(ep, id) < 1)
363 cachep = ep->cache + (id - ep->baseid) * ep->size;
365 memcpy(into, cachep, ep->size);
/* stamp the copy with the current generation */
366 ef_mark_fresh(type, into);
/* give the table's post-read hook a chance to fix up the element */
369 ep->postread(id, into);
374 * Fill cache of file-backed EP with elements starting at ID.
375 * If any were read, return their number.
376 * Else return -1 and leave the cache unchanged.
379 fillcache(struct empfile *ep, int id)
383 if (CANT_HAPPEN(!ep->cache))
/* read at most a full cache's worth, clipped at the end of the table */
386 ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/*
 * Read COUNT elements starting with element ID from file-backed EP
 * into BUF.  Retries reads interrupted by EINTR; a short read or
 * read error is logged.  Returns the number of elements actually
 * read (error-path returns are elided here; presumably -1 on
 * failure — TODO confirm against the full source).
 */
396 do_read(struct empfile *ep, void *buf, int id, int count)
401 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
404 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
405 logerror("Error seeking %s to elt %d (%s)",
406 ep->file, id, strerror(errno));
/* total number of bytes left to read */
411 n = count * ep->size;
413 ret = read(ep->fd, p, n);
/* EINTR is benign: just retry the read */
415 if (errno != EINTR) {
416 logerror("Error reading %s elt %d (%s)",
418 id + (int)((p - (char *)buf) / ep->size),
422 } else if (ret == 0) {
423 logerror("Unexpected EOF reading %s elt %d",
424 ep->file, id + (int)((p - (char *)buf) / ep->size));
/* elements fully transferred so far */
432 return (p - (char *)buf) / ep->size;
436 * Write COUNT elements starting at ID from BUF to file-backed EP.
437 * Update the timestamp if the table is EFF_TYPED.
438 * Don't actually write if table is privately mapped.
439 * Return 0 on success, -1 on error (file may be corrupt then).
442 do_write(struct empfile *ep, void *buf, int id, int count)
446 struct emptypedstr *elt;
449 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
/* typed tables: fix up ef_type/uid and stamp timestamps first */
452 if (ep->flags & EFF_TYPED) {
453 now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
454 for (i = 0; i < count; i++) {
456 * TODO Oopses here could be due to bad data corruption.
457 * Fail instead of attempting to recover?
459 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
460 if (CANT_HAPPEN(elt->ef_type != ep->uid))
461 elt->ef_type = ep->uid;
462 if (CANT_HAPPEN(elt->uid != id + i))
464 if (now != (time_t)-1)
465 elt->timestamp = now;
/* private mapping: skip the actual file write */
469 if (ep->flags & EFF_PRIVATE)
472 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
473 logerror("Error seeking %s to elt %d (%s)",
474 ep->file, id, strerror(errno));
/* total number of bytes to write */
479 n = count * ep->size;
481 ret = write(ep->fd, p, n);
/* EINTR is benign: just retry the write */
483 if (errno != EINTR) {
484 logerror("Error writing %s elt %d (%s)",
486 id + (int)((p - (char *)buf) / ep->size),
500 * Write element ID into table TYPE from buffer FROM.
501 * FIXME pass buffer size!
502 * Update timestamp in FROM if table is EFF_TYPED.
503 * If table is file-backed and not privately mapped, write through
504 * cache straight to disk.
505 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
506 * Can write at the end of partially cached table.
507 * Return non-zero on success, zero on failure.
510 ef_write(int type, int id, void *from)
515 if (ef_check(type) < 0)
/* EFF_PRIVATE without EFF_MEM has no cache to write into */
518 if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
/* partially cached tables may append exactly at the end (id == fids) */
520 if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
521 return 0; /* not implemented */
523 if (id >= ep->fids) {
524 /* write beyond end of file extends it, take note */
526 if (ep->onresize && ep->onresize(type) < 0)
/* element lies in the cached window: write through its cache slot */
529 if (id >= ep->baseid && id < ep->baseid + ep->cids) {
530 cachep = ep->cache + (id - ep->baseid) * ep->size;
/* guard against writing back a stale copy */
532 must_be_fresh(ep, from);
536 ep->prewrite(id, cachep, from);
538 if (do_write(ep, from, id, 1) < 0)
541 if (cachep && cachep != from) /* update the cache if necessary */
542 memcpy(cachep, from, ep->size);
548 * BUF is an element of table TYPE.
549 * ID is its new element ID.
550 * If table is EFF_TYPED, change id and sequence number stored in BUF.
554 ef_set_uid(int type, void *buf, int uid)
556 struct emptypedstr *elt;
559 if (ef_check(type) < 0)
/* untyped tables carry no uid/seqno to update */
562 if (!(ep->flags & EFF_TYPED))
/* adopt the sequence number currently stored for the new slot */
568 elt->seqno = get_seqno(ep, uid);
572 * Return sequence number of element ID in table EP.
573 * Return zero if table is not EFF_TYPED (it has no sequence number
577 get_seqno(struct empfile *ep, int id)
579 struct emptypedstr *elt;
581 if (!(ep->flags & EFF_TYPED))
583 if (id < 0 || id >= ep->fids)
/* element is cached: read the seqno straight out of the cache */
585 if (id >= ep->baseid && id < ep->baseid + ep->cids)
586 elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
588 /* need a buffer, steal last cache slot */
589 if (ep->cids == ep->csize)
591 elt = (void *)(ep->cache + ep->cids * ep->size);
592 if (do_read(ep, elt, id, 1) < 0)
593 return 0; /* deep trouble */
599 * Increment sequence number in BUF, which is about to be written to EP.
600 * Do nothing if table is not EFF_TYPED (it has no sequence number
604 new_seqno(struct empfile *ep, void *buf)
606 struct emptypedstr *elt = buf;
609 if (!(ep->flags & EFF_TYPED))
611 old_seqno = get_seqno(ep, elt->uid);
/* mismatch suggests BUF went stale since it was read */
612 CANT_HAPPEN(old_seqno != elt->seqno);
613 elt->seqno = old_seqno + 1;
/*
 * Mark element BUF of table TYPE as fresh by stamping it with the
 * current generation counter.  No-op for untyped tables.
 */
623 ef_mark_fresh(int type, void *buf)
627 if (ef_check(type) < 0)
630 if (!(ep->flags & EFF_TYPED))
632 ((struct emptypedstr *)buf)->generation = ef_generation;
/*
 * Oops if element BUF of table EP is stale, i.e. its generation
 * stamp no longer matches ef_generation.  The 0xfff mask suggests
 * the generation member is a 12-bit field — confirm in the header.
 */
636 must_be_fresh(struct empfile *ep, void *buf)
638 struct emptypedstr *elt = buf;
640 if (!(ep->flags & EFF_TYPED))
642 CANT_HAPPEN(elt->generation != (ef_generation & 0xfff));
646 * Extend table TYPE by COUNT elements.
647 * Any pointers obtained from ef_ptr() become invalid.
648 * Return non-zero on success, zero on failure.
651 ef_extend(int type, int count)
655 int need_sentinel, i, id;
657 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
660 if (CANT_HAPPEN(count < 0))
/* fully cached: blank the new elements in place and write them out */
664 if (ep->flags & EFF_MEM) {
665 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
/* grow the cache if the new elements (plus sentinel) don't fit */
666 if (id + count + need_sentinel > ep->csize) {
667 if (ep->flags & EFF_STATIC) {
668 logerror("Can't extend %s beyond %d elements",
669 ep->name, ep->csize - need_sentinel);
672 if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
673 logerror("Can't extend %s to %d elements (%s)",
674 ep->name, id + count, strerror(errno));
678 p = ep->cache + id * ep->size;
679 do_blank(ep, p, id, count);
681 if (do_write(ep, p, id, count) < 0)
/* zero the sentinel element just past the new end */
685 memset(ep->cache + (id + count) * ep->size, 0, ep->size);
686 ep->cids = id + count;
688 /* need a buffer, steal last cache slot */
689 if (ep->cids == ep->csize)
691 p = ep->cache + ep->cids * ep->size;
/* partially cached: blank and write one element at a time */
692 for (i = 0; i < count; i++) {
693 do_blank(ep, p, id + i, 1);
694 if (do_write(ep, p, id + i, 1) < 0)
698 ep->fids = id + count;
699 if (ep->onresize && ep->onresize(type) < 0)
705 * Initialize element ID for EP in BUF.
706 * FIXME pass buffer size!
709 ef_blank(int type, int id, void *buf)
712 struct emptypedstr *elt;
714 if (ef_check(type) < 0)
717 do_blank(ep, buf, id, 1);
718 if (ep->flags & EFF_TYPED) {
/* keep the sequence number currently stored for this slot */
720 elt->seqno = get_seqno(ep, elt->uid);
722 ef_mark_fresh(type, buf);
726 * Initialize COUNT elements of EP in BUF, starting with element ID.
729 do_blank(struct empfile *ep, void *buf, int id, int count)
732 struct emptypedstr *elt;
/* zero everything, then restore the typed header fields */
734 memset(buf, 0, count * ep->size);
735 for (i = 0; i < count; i++) {
736 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
737 if (ep->flags & EFF_TYPED) {
738 elt->ef_type = ep->uid;
747 * Truncate table TYPE to COUNT elements.
748 * Any pointers obtained from ef_ptr() become invalid.
749 * Return non-zero on success, zero on failure.
752 ef_truncate(int type, int count)
757 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
760 if (CANT_HAPPEN(count < 0 || count > ep->fids))
/* shrink the backing file unless privately mapped */
763 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
764 if (ftruncate(ep->fd, count * ep->size) < 0) {
765 logerror("Can't truncate %s to %d elements (%s)",
766 ep->file, count, strerror(errno));
772 if (ep->flags & EFF_MEM) {
773 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
774 if (!(ep->flags & EFF_STATIC)) {
775 if (!ef_realloc_cache(ep, count + need_sentinel)) {
776 logerror("Can't shrink %s cache after truncate (%s)",
777 ep->name, strerror(errno));
778 /* continue with unshrunk cache */
/* zero the sentinel element at the new end */
782 memset(ep->cache + count * ep->size, 0, ep->size);
/* clip the cached window [baseid, baseid+cids) to the new size */
785 if (ep->baseid >= count)
787 else if (ep->cids > count - ep->baseid)
788 ep->cids = count - ep->baseid;
791 if (ep->onresize && ep->onresize(type) < 0)
/* Accessor: selector descriptors (cadef) of table TYPE. */
799 if (ef_check(type) < 0)
801 return empfile[type].cadef;
/* Accessor: number of elements (fids) of table TYPE. */
807 if (ef_check(type) < 0)
809 return empfile[type].fids;
/* Accessor: flags of table TYPE. */
815 if (ef_check(type) < 0)
817 return empfile[type].flags;
/* Accessor: modification time of table TYPE's backing file. */
823 if (ef_check(type) < 0)
/* NOTE(review): fd <= 0 treats fd 0 as "no file" — confirm intended */
825 if (empfile[type].fd <= 0)
827 return fdate(empfile[type].fd);
831 * Search for a table matching NAME, return its table type.
832 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
836 ef_byname(char *name)
/* delegate the name match to the generic string-table matcher */
838 return stmtch(name, empfile, offsetof(struct empfile, name),
843 * Search CHOICES[] for a table type matching NAME, return it.
844 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
846 * CHOICES[] must be terminated with a negative value.
849 ef_byname_from(char *name, int choices[])
855 for (p = choices; *p >= 0; p++) {
856 if (ef_check(*p) < 0)
/* mineq() classifies the match (exact / prefix / none) */
858 switch (mineq(name, empfile[*p].name)) {
/* Map table TYPE to its name; "bad ef_type" for invalid types. */
876 if (ef_check(type) < 0)
877 return "bad ef_type";
878 return empfile[type].name;
/* ef_check: oops unless 0 <= type < EF_MAX (unsigned cast covers < 0). */
884 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
890 * Ensure table contains element ID.
891 * If necessary, extend it in steps of COUNT elements.
892 * Return non-zero on success, zero on failure.
895 ef_ensure_space(int type, int id, int count)
897 if (ef_check(type) < 0)
901 while (id >= empfile[type].fids) {
902 if (!ef_extend(type, count))