2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2008, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * See files README, COPYING and CREDITS in the root of the source
23 * tree for related information and legal notices. It is expected
24 * that future projects/authors will amend these files as needed.
28 * file.c: Operations on Empire tables (`files' for historical reasons)
30 * Known contributors to this file:
33 * Markus Armbruster, 2005-2008
41 #include <sys/types.h>
47 #include "prototypes.h"
/* Forward declarations for the file-local helpers defined below. */
49 static int ef_realloc_cache(struct empfile *, int);
50 static int fillcache(struct empfile *, int);
51 static int do_read(struct empfile *, void *, int, int);
52 static int do_write(struct empfile *, void *, int, int);
53 static unsigned get_seqno(struct empfile *, int);
54 static void new_seqno(struct empfile *, void *);
55 static void do_blank(struct empfile *, void *, int, int);
56 static int ef_check(int);
59 * Open the file-backed table TYPE (EF_SECTOR, ...).
60 * HOW are flags to control operation.  Naturally, immutable flags are
62 * If NELT is non-negative, the table must have that many elements.
63 * Return non-zero on success, zero on failure.
64 * You must call ef_close() before the next ef_open().
67 ef_open(int type, int how, int nelt)
71     int oflags, fd, fsiz, nslots;
73     if (ef_check(type) < 0)
/* Immutable flags are fixed per table; callers must not try to set them. */
75     if (CANT_HAPPEN(how & EFF_IMMUTABLE))
76 	how &= ~EFF_IMMUTABLE;
/* Table must not already be open. */
80     if (CANT_HAPPEN(ep->fd >= 0))
83     if (how & EFF_PRIVATE)
86 	oflags |= O_CREAT | O_TRUNC;
90     if ((fd = open(ep->file, oflags, S_IRWUG)) < 0) {
91 	logerror("Can't open %s (%s)", ep->file, strerror(errno));
/* Advisory whole-file lock: shared for a private mapping, else exclusive. */
95     lock.l_type = how & EFF_PRIVATE ? F_RDLCK : F_WRLCK;
96     lock.l_whence = SEEK_SET;
97     lock.l_start = lock.l_len = 0;
98     if (fcntl(fd, F_SETLK, &lock) == -1) {
99 	logerror("Can't lock %s (%s)", ep->file, strerror(errno));
/* File size must be a whole number of records. */
106     if (fsiz % ep->size) {
107 	logerror("Can't open %s (file size not a multiple of record size %d)",
112     ep->fids = fsiz / ep->size;
113     if (nelt >= 0 && nelt != ep->fids) {
114 	logerror("Can't open %s (got %d records instead of %d)",
115 		 ep->file, ep->fids, nelt);
121     if (ep->flags & EFF_STATIC) {
122 	/* ep->cache already points to space for ep->csize elements */
124 	if (ep->fids > ep->csize) {
125 	    logerror("Can't open %s: file larger than %d bytes",
126 		     ep->file, ep->fids * ep->size);
132 	if (CANT_HAPPEN(ep->cache))
/* Size the dynamic cache in whole filesystem blocks. */
137 	    nslots = blksize(fd) / ep->size;
138 	if (!ef_realloc_cache(ep, nslots)) {
139 	    logerror("Can't map %s (%s)", ep->file, strerror(errno));
/* Keep the immutable flags; take the rest from HOW (EFF_CREATE is transient). */
146     ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
149     /* map file into cache */
150     if ((how & EFF_MEM) && ep->fids) {
151 	if (fillcache(ep, 0) != ep->fids) {
152 	    ep->cids = 0;	/* prevent cache flush */
153 	    ep->flags &= EFF_IMMUTABLE;	/* maintain invariant */
159     if (ep->onresize && ep->onresize(type) < 0)
165  * Reallocate cache for table EP to hold COUNT slots.
166  * The table must not be allocated statically.
167  * The cache may still be unmapped.
168  * If reallocation succeeds, any pointers obtained from ef_ptr()
170  * If it fails, the cache is unchanged, and errno is set.
171  * Return non-zero on success, zero on failure.
174 ef_realloc_cache(struct empfile *ep, int count)
178     if (CANT_HAPPEN(ep->flags & EFF_STATIC))
180     if (CANT_HAPPEN(count < 0))
184      * Avoid zero slots, because that can lead to null cache, which
185      * would be interpreted as unmapped cache.
/* NOTE(review): realloc() failure handling is in lines not visible here;
 * per the contract above, ep->cache must remain valid on failure. */
189     cache = realloc(ep->cache, count * ep->size);
199  * Open the table TYPE as view of table BASE.
200  * Return non-zero on success, zero on failure.
201  * Beware: views work only as long as BASE doesn't change size!
202  * You must call ef_close(TYPE) before closing BASE.
205 ef_open_view(int type, int base)
209     if (CANT_HAPPEN(!EF_IS_VIEW(type)))
/* A view requires its base to be fully cached. */
212     if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)))
/* Share the base table's cache and mirror its bookkeeping. */
215     ep->cache = empfile[base].cache;
216     ep->csize = empfile[base].csize;
217     ep->flags |= EFF_MEM;
218     ep->baseid = empfile[base].baseid;
219     ep->cids = empfile[base].cids;
220     ep->fids = empfile[base].fids;
225  * Close the file-backed table TYPE (EF_SECTOR, ...).
226  * Return non-zero on success, zero on failure.
234     if (ef_check(type) < 0)
/* Views share their base's cache; they don't own resources to release. */
238     if (EF_IS_VIEW(type))
/* Drop all mutable flags, keep only the immutable ones. */
243     ep->flags &= EFF_IMMUTABLE;
244     if (!(ep->flags & EFF_STATIC)) {
248     if (close(ep->fd) < 0) {
249 	logerror("Error closing %s (%s)", ep->file, strerror(errno));
/* Reset bookkeeping and tell the resize callback the table is gone. */
254     ep->baseid = ep->cids = ep->fids = 0;
255     if (ep->onresize && ep->onresize(type) < 0)
261  * Flush file-backed table TYPE (EF_SECTOR, ...) to its backing file.
262  * Do nothing if the table is privately mapped.
263  * Update timestamps of written elements if table is EFF_TYPED.
264  * Return non-zero on success, zero on failure.
271     if (ef_check(type) < 0)
274     if (ep->flags & EFF_PRIVATE)
275 	return 1;		/* nothing to do */
276     if (CANT_HAPPEN(ep->fd < 0))
279      * We don't know which cache entries are dirty.  ef_write() writes
280      * through, but direct updates through ef_ptr() don't.  They are
281      * allowed only with EFF_MEM.  Assume the whole cache is dirty
284     if (ep->flags & EFF_MEM) {
285 	if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
293  * Return pointer to element ID in table TYPE if it exists, else NULL.
294  * The table must be fully cached, i.e. flags & EFF_MEM.
295  * The caller is responsible for flushing changes he makes.
298 ef_ptr(int type, int id)
302     if (ef_check(type) < 0)
305     if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
/* Out-of-range IDs yield NULL, not an oops. */
307     if (id < 0 || id >= ep->fids)
309     return ep->cache + ep->size * id;
313  * Read element ID from table TYPE into buffer INTO.
314  * FIXME pass buffer size!
315  * Return non-zero on success, zero on failure.
318 ef_read(int type, int id, void *into)
323     if (ef_check(type) < 0)
326     if (CANT_HAPPEN(!ep->cache))
328     if (id < 0 || id >= ep->fids)
/* Fully cached: element is at a fixed offset in the cache. */
331     if (ep->flags & EFF_MEM) {
332 	cachep = ep->cache + id * ep->size;
/* Partially cached: refill the cache window if ID lies outside it. */
334 	if (ep->baseid + ep->cids <= id || ep->baseid > id) {
335 	    if (fillcache(ep, id) < 1)
338 	cachep = ep->cache + (id - ep->baseid) * ep->size;
340     memcpy(into, cachep, ep->size);
/* Give the per-table post-read hook a chance to fix up the element. */
343 	ep->postread(id, into);
348  * Fill cache of file-backed EP with elements starting at ID.
349  * If any were read, return their number.
350  * Else return -1 and leave the cache unchanged.
353 fillcache(struct empfile *ep, int id)
357     if (CANT_HAPPEN(!ep->cache))
/* Read as many elements as fit in the cache, clipped to the file end. */
360     ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/* Read COUNT elements starting at ID from file-backed EP into BUF.
 * Retries partial reads; returns the number of elements read. */
370 do_read(struct empfile *ep, void *buf, int id, int count)
375     if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
378     if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
379 	logerror("Error seeking %s to elt %d (%s)",
380 		 ep->file, id, strerror(errno));
385     n = count * ep->size;
387 	ret = read(ep->fd, p, n);
/* Retry on EINTR; any other error aborts the read loop. */
389 	    if (errno != EINTR) {
390 		logerror("Error reading %s elt %d (%s)",
392 			  id + (int)((p - (char *)buf) / ep->size),
396 	} else if (ret == 0) {
397 	    logerror("Unexpected EOF reading %s elt %d",
398 		     ep->file, id + (int)((p - (char *)buf) / ep->size));
/* Elements actually read = bytes consumed / element size. */
406     return (p - (char *)buf) / ep->size;
410  * Write COUNT elements starting at ID from BUF to file-backed EP.
411  * Update the timestamp if the table is EFF_TYPED.
412  * Don't actually write if table is privately mapped.
413  * Return 0 on success, -1 on error (file may be corrupt then).
416 do_write(struct empfile *ep, void *buf, int id, int count)
420     struct emptypedstr *elt;
423     if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
/* Typed tables: repair ef_type/uid and stamp the time before writing. */
426     if (ep->flags & EFF_TYPED) {
427 	now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
428 	for (i = 0; i < count; i++) {
430 	     * TODO Oopses here could be due to bad data corruption.
431 	     * Fail instead of attempting to recover?
433 	    elt = (struct emptypedstr *)((char *)buf + i * ep->size);
434 	    if (CANT_HAPPEN(elt->ef_type != ep->uid))
435 		elt->ef_type = ep->uid;
436 	    if (CANT_HAPPEN(elt->uid != id + i))
438 	    if (now != (time_t)-1)
439 		elt->timestamp = now;
/* Private mapping: timestamps updated above, but nothing hits the disk. */
443     if (ep->flags & EFF_PRIVATE)
446     if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
447 	logerror("Error seeking %s to elt %d (%s)",
448 		 ep->file, id, strerror(errno));
453     n = count * ep->size;
455 	ret = write(ep->fd, p, n);
/* Retry on EINTR; any other error is fatal for this write. */
457 	    if (errno != EINTR) {
458 		logerror("Error writing %s elt %d (%s)",
460 			  id + (int)((p - (char *)buf) / ep->size),
474  * Write element ID into table TYPE from buffer FROM.
475  * FIXME pass buffer size!
476  * Update timestamp in FROM if table is EFF_TYPED.
477  * If table is file-backed and not privately mapped, write through
478  * cache straight to disk.
479  * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
480  * Can write at the end of partially cached table.
484 ef_write(int type, int id, void *from)
489     if (ef_check(type) < 0)
/* Private mapping without full caching is an unsupported combination. */
492     if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
/* Fully cached tables may not grow here; partially cached may append. */
494     if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
495 	return 0;		/* not implemented */
497     if (id >= ep->fids) {
498 	/* write beyond end of file extends it, take note */
500 	if (ep->onresize && ep->onresize(type) < 0)
/* Point cachep at the cached copy if ID is inside the cache window. */
503     if (id >= ep->baseid && id < ep->baseid + ep->cids)
504 	cachep = ep->cache + (id - ep->baseid) * ep->size;
508 	ep->prewrite(id, cachep, from);
510 	if (do_write(ep, from, id, 1) < 0)
513     if (cachep && cachep != from)	/* update the cache if necessary */
514 	memcpy(cachep, from, ep->size);
520  * BUF is an element of table TYPE.
521  * ID is its new element ID.
522  * If table is EFF_TYPED, change id and sequence number stored in BUF.
526 ef_set_uid(int type, void *buf, int uid)
528     struct emptypedstr *elt;
531     if (ef_check(type) < 0)
/* Untyped tables carry no uid/seqno; nothing to update. */
534     if (!(ep->flags & EFF_TYPED))
/* Adopt the sequence number already on file for the new slot. */
540     elt->seqno = get_seqno(ep, uid);
544  * Return sequence number of element ID in table EP.
545  * Return zero if table is not EFF_TYPED (it has no sequence number
549 get_seqno(struct empfile *ep, int id)
551     struct emptypedstr *elt;
553     if (!(ep->flags & EFF_TYPED))
555     if (id < 0 || id >= ep->fids)
/* In cache: read the sequence number straight from the cached element. */
557     if (id >= ep->baseid && id < ep->baseid + ep->cids)
558 	elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
560     /* need a buffer, steal last cache slot */
561     if (ep->cids == ep->csize)
563     elt = (void *)(ep->cache + ep->cids * ep->size);
564     if (do_read(ep, elt, id, 1) < 0)
565 	return 0;		/* deep trouble */
571  * Increment sequence number in BUF, which is about to be written to EP.
572  * Do nothing if table is not EFF_TYPED (it has no sequence number
576 new_seqno(struct empfile *ep, void *buf)
578     struct emptypedstr *elt = buf;
581     if (!(ep->flags & EFF_TYPED))
/* BUF's seqno should match what's on file; recover by taking the larger. */
583     old_seqno = get_seqno(ep, elt->uid);
584     if (CANT_HAPPEN(old_seqno != elt->seqno))
585 	old_seqno = MAX(old_seqno, elt->seqno);
586     elt->seqno = old_seqno + 1;
590  * Extend table TYPE by COUNT elements.
591  * Any pointers obtained from ef_ptr() become invalid.
592  * Return non-zero on success, zero on failure.
595 ef_extend(int type, int count)
599     int need_sentinel, i, id;
601     if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
604     if (CANT_HAPPEN(count < 0))
608     if (ep->flags & EFF_MEM) {
/* Fully cached: grow the cache to fit (plus a zeroed sentinel slot). */
609 	need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
610 	if (id + count + need_sentinel > ep->csize) {
611 	    if (ep->flags & EFF_STATIC) {
612 		logerror("Can't extend %s beyond %d elements",
613 			 ep->name, ep->csize - need_sentinel);
616 	    if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
617 		logerror("Can't extend %s to %d elements (%s)",
618 			 ep->name, id + count, strerror(errno));
/* Blank the new elements in cache, then write them through. */
622 	p = ep->cache + id * ep->size;
623 	do_blank(ep, p, id, count);
625 	    if (do_write(ep, p, id, count) < 0)
/* Zero the sentinel slot past the new end. */
629 	    memset(ep->cache + (id + count) * ep->size, 0, ep->size);
630 	ep->cids = id + count;
632 	/* need a buffer, steal last cache slot */
633 	if (ep->cids == ep->csize)
635 	p = ep->cache + ep->cids * ep->size;
/* Partially cached: blank and write one element at a time. */
636 	for (i = 0; i < count; i++) {
637 	    do_blank(ep, p, id + i, 1);
638 	    if (do_write(ep, p, id + i, 1) < 0)
642     ep->fids = id + count;
643     if (ep->onresize && ep->onresize(type) < 0)
649  * Initialize element ID for EP in BUF.
650  * FIXME pass buffer size!
653 ef_blank(int type, int id, void *buf)
656     struct emptypedstr *elt;
658     if (ef_check(type) < 0)
661     do_blank(ep, buf, id, 1);
/* Typed tables: carry over the sequence number already on file. */
662     if (ep->flags & EFF_TYPED) {
664 	elt->seqno = get_seqno(ep, elt->uid);
669  * Initialize COUNT elements of EP in BUF, starting with element ID.
672 do_blank(struct empfile *ep, void *buf, int id, int count)
675     struct emptypedstr *elt;
/* Zero everything, then fill in the typed header fields per element. */
677     memset(buf, 0, count * ep->size);
678     for (i = 0; i < count; i++) {
679 	elt = (struct emptypedstr *)((char *)buf + i * ep->size);
680 	if (ep->flags & EFF_TYPED) {
681 	    elt->ef_type = ep->uid;
690  * Truncate table TYPE to COUNT elements.
691  * Any pointers obtained from ef_ptr() become invalid.
692  * Return non-zero on success, zero on failure.
695 ef_truncate(int type, int count)
700     if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
703     if (CANT_HAPPEN(count < 0 || count > ep->fids))
/* Shrink the backing file first, unless privately mapped. */
706     if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
707 	if (ftruncate(ep->fd, count * ep->size) < 0) {
708 	    logerror("Can't truncate %s to %d elements (%s)",
709 		     ep->file, count, strerror(errno));
715     if (ep->flags & EFF_MEM) {
716 	need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
717 	if (!(ep->flags & EFF_STATIC)) {
/* Shrinking the cache is best-effort: failure only wastes memory. */
718 	    if (!ef_realloc_cache(ep, count + need_sentinel)) {
719 		logerror("Can't shrink %s cache after truncate (%s)",
720 			 ep->name, strerror(errno));
721 		/* continue with unshrunk cache */
/* Re-zero the sentinel slot at the new end. */
725 	    memset(ep->cache + count * ep->size, 0, ep->size);
/* Clip the cache window to the new table size. */
728 	if (ep->baseid >= count)
730 	else if (ep->cids > count - ep->baseid)
731 	    ep->cids = count - ep->baseid;
734     if (ep->onresize && ep->onresize(type) < 0)
/* Accessor body: returns the table's ca_def selector array.
 * NOTE(review): the function signatures are not visible in this view. */
742     if (ef_check(type) < 0)
744     return empfile[type].cadef;
/* Accessor body: returns the table's element count (fids). */
750     if (ef_check(type) < 0)
752     return empfile[type].fids;
/* Accessor body: returns the table's flags. */
758     if (ef_check(type) < 0)
760     return empfile[type].flags;
/* Accessor body: returns the backing file's date via fdate(). */
766     if (ef_check(type) < 0)
768     if (empfile[type].fd <= 0)
770     return fdate(empfile[type].fd);
774  * Search for a table matching NAME, return its table type.
775  * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
779 ef_byname(char *name)
/* Prefix-match NAME against the name member of each empfile[] entry. */
781     return stmtch(name, empfile, offsetof(struct empfile, name),
786  * Search CHOICES[] for a table type matching NAME, return it.
787  * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
789  * CHOICES[] must be terminated with a negative value.
792 ef_byname_from(char *name, int choices[])
/* Scan the candidate list, matching NAME against each table's name. */
798     for (p = choices; *p >= 0; p++) {
799 	if (ef_check(*p) < 0)
801 	switch (mineq(name, empfile[*p].name)) {
/* Accessor body: returns the table's name, or a diagnostic for bad types.
 * NOTE(review): the function signature is not visible in this view. */
819     if (ef_check(type) < 0)
820 	return "bad ef_type";
821     return empfile[type].name;
/* ef_check body: reject table types outside [0, EF_MAX). */
827     if (CANT_HAPPEN((unsigned)type >= EF_MAX))
833  * Ensure table contains element ID.
834  * If necessary, extend it in steps of COUNT elements.
835  * Return non-zero on success, zero on failure.
838 ef_ensure_space(int type, int id, int count)
840     if (ef_check(type) < 0)
/* Grow in COUNT-sized chunks until ID fits. */
844     while (id >= empfile[type].fids) {
845 	if (!ef_extend(type, count))