2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2009, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * See files README, COPYING and CREDITS in the root of the source
23 * tree for related information and legal notices. It is expected
24 * that future projects/authors will amend these files as needed.
28 * file.c: Operations on Empire tables (`files' for historical reasons)
30 * Known contributors to this file:
33 * Markus Armbruster, 2005-2008
41 #include <sys/types.h>
47 #include "prototypes.h"
/* Internal helpers: cache management and typed-element bookkeeping */
static int ef_realloc_cache(struct empfile *, int);
static int fillcache(struct empfile *, int);
static int do_read(struct empfile *, void *, int, int);
static int do_write(struct empfile *, void *, int, int);
static unsigned get_seqno(struct empfile *, int);
static void new_seqno(struct empfile *, void *);
static void must_be_fresh(struct empfile *, void *);
static void do_blank(struct empfile *, void *, int, int);
static int ef_check(int);

/*
 * Current generation number, used for element freshness tracking:
 * elements whose generation field equals this are "fresh".
 * NOTE(review): the code that advances it is not visible in this
 * chunk -- confirm against the full file.
 */
static unsigned ef_generation;
/*
 * Open the file-backed table TYPE (EF_SECTOR, ...).
 * HOW are flags to control operation.  Naturally, immutable flags are
 * not permitted here.
 * If NELT is non-negative, the table must have that many elements.
 * Return non-zero on success, zero on failure.
 * You must call ef_close() before the next ef_open().
 *
 * NOTE(review): this extraction dropped the return type, braces and
 * many statements; [gap] comments below mark where lines are missing.
 */
ef_open(int type, int how, int nelt)
    /* [gap: locals such as struct empfile *ep, struct flock lock] */
    int oflags, fd, fsiz, nslots;

    /* reject invalid table types */
    if (ef_check(type) < 0)
    /* [gap: return 0] */
    /* callers must not pass immutable flags; strip them if they do */
    if (CANT_HAPPEN(how & EFF_IMMUTABLE))
	how &= ~EFF_IMMUTABLE;
    /* [gap: ep = &empfile[type]; open-flag setup] */
    /* table must not already be open */
    if (CANT_HAPPEN(ep->fd >= 0))
    /* [gap] */
    if (how & EFF_PRIVATE)
    /* [gap: flag-dependent oflags adjustment; EFF_CREATE path: */
	oflags |= O_CREAT | O_TRUNC;
    /* [gap] */
    if ((fd = open(ep->file, oflags, S_IRWUG)) < 0) {
	logerror("Can't open %s (%s)", ep->file, strerror(errno));
    /* [gap: return 0] */

    /* lock the whole file: shared for private mappings, else exclusive */
    lock.l_type = how & EFF_PRIVATE ? F_RDLCK : F_WRLCK;
    lock.l_whence = SEEK_SET;
    lock.l_start = lock.l_len = 0;
    if (fcntl(fd, F_SETLK, &lock) == -1) {
	logerror("Can't lock %s (%s)", ep->file, strerror(errno));
    /* [gap: close, return 0; fsiz = file size] */
    /* the file must hold a whole number of records */
    if (fsiz % ep->size) {
	logerror("Can't open %s (file size not a multiple of record size %d)",
    /* [gap: remaining args; close, return 0] */
    ep->fids = fsiz / ep->size;
    /* enforce the caller's expected element count, if given */
    if (nelt >= 0 && nelt != ep->fids) {
	logerror("Can't open %s (got %d records instead of %d)",
		 ep->file, ep->fids, nelt);
    /* [gap: close, return 0; cache allocation follows] */
    if (ep->flags & EFF_STATIC) {
	/* ep->cache already points to space for ep->csize elements */
	if (ep->fids > ep->csize) {
	    logerror("Can't open %s: file larger than %d bytes",
		     ep->file, ep->fids * ep->size);
    /* [gap: close, return 0; else branch: dynamic cache */
	if (CANT_HAPPEN(ep->cache))
    /* [gap] */
	/* size the cache to a whole number of file-system blocks */
	nslots = blksize(fd) / ep->size;
	if (!ef_realloc_cache(ep, nslots)) {
	    logerror("Can't map %s (%s)", ep->file, strerror(errno));
    /* [gap: close, return 0] */
    /* keep immutable flags, take the rest from HOW (EFF_CREATE is transient) */
    ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);

    /* map file into cache */
    if ((how & EFF_MEM) && ep->fids) {
	if (fillcache(ep, 0) != ep->fids) {
	    ep->cids = 0;	/* prevent cache flush */
	    ep->flags &= EFF_IMMUTABLE;	/* maintain invariant */
    /* [gap: ef_close(type), return 0] */
    /* notify the resize callback, if any */
    if (ep->onresize && ep->onresize(type) < 0)
    /* [gap: return 0; return 1] */
/*
 * Reallocate cache for table EP to hold COUNT slots.
 * The table must not be allocated statically.
 * The cache may still be unmapped.
 * If reallocation succeeds, any pointers obtained from ef_ptr()
 * become invalid.
 * If it fails, the cache is unchanged, and errno is set.
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): incomplete extraction; [gap] comments mark missing lines.
 */
ef_realloc_cache(struct empfile *ep, int count)
    /* [gap: opening brace, local "cache" declaration] */
    if (CANT_HAPPEN(ep->flags & EFF_STATIC))
    /* [gap: return 0] */
    if (CANT_HAPPEN(count < 0))
    /* [gap] */

    /*
     * Avoid zero slots, because that can lead to null cache, which
     * would be interpreted as unmapped cache.
     */
    /* [gap: bump count to at least 1] */
    cache = realloc(ep->cache, count * ep->size);
    /* [gap: on failure return 0 leaving ep->cache valid (realloc
     * contract); else store cache/csize and return non-zero] */
202 * Open the table TYPE as view of table BASE.
203 * Return non-zero on success, zero on failure.
204 * Beware: views work only as long as BASE doesn't change size!
205 * You must call ef_close(TYPE) before closing BASE.
208 ef_open_view(int type, int base)
212 if (CANT_HAPPEN(!EF_IS_VIEW(type)))
215 if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)))
218 ep->cache = empfile[base].cache;
219 ep->csize = empfile[base].csize;
220 ep->flags |= EFF_MEM;
221 ep->baseid = empfile[base].baseid;
222 ep->cids = empfile[base].cids;
223 ep->fids = empfile[base].fids;
/*
 * Close the file-backed table TYPE (EF_SECTOR, ...).
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): the signature, flush/free logic and returns are
 * missing from this extraction; [gap] comments mark the holes.
 */
/* [gap: "int ef_close(int type) {" and locals] */
    if (ef_check(type) < 0)
    /* [gap: return 0; ep = &empfile[type]; flush the cache] */
    /* views merely detach from the shared cache */
    if (EF_IS_VIEW(type))
    /* [gap: detach branch vs. real close branch] */
    /* drop mutable flags, keeping the invariant flags only */
    ep->flags &= EFF_IMMUTABLE;
    if (!(ep->flags & EFF_STATIC)) {
    /* [gap: free(ep->cache), reset cache pointers] */
    if (close(ep->fd) < 0) {
	logerror("Error closing %s (%s)", ep->file, strerror(errno));
    /* [gap: record failure; ep->fd = -1] */
    ep->baseid = ep->cids = ep->fids = 0;
    /* notify the resize callback, if any */
    if (ep->onresize && ep->onresize(type) < 0)
    /* [gap: record failure; return result] */
/*
 * Flush file-backed table TYPE (EF_SECTOR, ...) to its backing file.
 * Do nothing if the table is privately mapped.
 * Update timestamps of written elements if table is EFF_TYPED.
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): the signature and returns are missing from this
 * extraction; [gap] comments mark the holes.
 */
    if (ef_check(type) < 0)
    /* [gap: return 0; ep = &empfile[type]] */
    if (ep->flags & EFF_PRIVATE)
	return 1;		/* nothing to do */
    if (CANT_HAPPEN(ep->fd < 0))
    /* [gap: return 0] */
    /*
     * We don't know which cache entries are dirty.  ef_write() writes
     * through, but direct updates through ef_ptr() don't.  They are
     * allowed only with EFF_MEM.  Assume the whole cache is dirty
     * then.
     */
    if (ep->flags & EFF_MEM) {
	if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
    /* [gap: return 0; return 1] */
296 * Return pointer to element ID in table TYPE if it exists, else NULL.
297 * The table must be fully cached, i.e. flags & EFF_MEM.
298 * The caller is responsible for flushing changes he makes.
301 ef_ptr(int type, int id)
305 if (ef_check(type) < 0)
308 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
310 if (id < 0 || id >= ep->fids)
312 return ep->cache + ep->size * id;
/*
 * Read element ID from table TYPE into buffer INTO.
 * FIXME pass buffer size!
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): incomplete extraction; [gap] comments mark missing lines.
 */
ef_read(int type, int id, void *into)
    /* [gap: locals such as struct empfile *ep, char *cachep] */
    if (ef_check(type) < 0)
    /* [gap: return 0; ep = &empfile[type]] */
    if (CANT_HAPPEN(!ep->cache))
    /* [gap: return 0] */
    if (id < 0 || id >= ep->fids)
    /* [gap: return 0] */

    if (ep->flags & EFF_MEM) {
	/* fully cached: the element sits at its ID's slot */
	cachep = ep->cache + id * ep->size;
    /* [gap: } else { -- partially cached */
	/* fault the cache window in if it doesn't cover ID */
	if (ep->baseid + ep->cids <= id || ep->baseid > id) {
	    if (fillcache(ep, id) < 1)
	/* [gap: return 0] */
	cachep = ep->cache + (id - ep->baseid) * ep->size;
    /* [gap] */
    memcpy(into, cachep, ep->size);
    /* stamp the copy with the current generation */
    ef_mark_fresh(type, into);
    /* [gap: if (ep->postread) -- per-table read hook */
	ep->postread(id, into);
    /* [gap: return 1] */
352 * Fill cache of file-backed EP with elements starting at ID.
353 * If any were read, return their number.
354 * Else return -1 and leave the cache unchanged.
357 fillcache(struct empfile *ep, int id)
361 if (CANT_HAPPEN(!ep->cache))
364 ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/*
 * Read COUNT elements starting at ID from file-backed EP into BUF.
 * NOTE(review): contract inferred from callers -- returns the number
 * of elements read, -1 on error; confirm against the full file.
 * Incomplete extraction; [gap] comments mark missing lines.
 */
do_read(struct empfile *ep, void *buf, int id, int count)
    /* [gap: locals: byte pointer p, counters n and ret] */
    if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
    /* [gap: return -1] */

    if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
	logerror("Error seeking %s to elt %d (%s)",
		 ep->file, id, strerror(errno));
    /* [gap: return -1] */

    n = count * ep->size;	/* total bytes to read */
    /* [gap: read loop head; p walks buf as bytes arrive] */
    ret = read(ep->fd, p, n);
    /* [gap: if (ret < 0) -- error path */
	/* retry on EINTR, fail on anything else */
	if (errno != EINTR) {
	    logerror("Error reading %s elt %d (%s)",
		     id + (int)((p - (char *)buf) / ep->size),
    /* [gap: remaining args; return -1] */
	} else if (ret == 0) {
	    /* hitting EOF mid-read means the file is short */
	    logerror("Unexpected EOF reading %s elt %d",
		     ep->file, id + (int)((p - (char *)buf) / ep->size));
    /* [gap: return -1; else advance p += ret, n -= ret] */
    /* number of whole elements actually read */
    return (p - (char *)buf) / ep->size;
/*
 * Write COUNT elements starting at ID from BUF to file-backed EP.
 * Update the timestamp if the table is EFF_TYPED.
 * Don't actually write if table is privately mapped.
 * Return 0 on success, -1 on error (file may be corrupt then).
 *
 * NOTE(review): incomplete extraction; [gap] comments mark missing lines.
 */
do_write(struct empfile *ep, void *buf, int id, int count)
    /* [gap: locals: loop index i, time_t now, byte pointer p, n, ret] */
    struct emptypedstr *elt;

    if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
    /* [gap: return -1] */

    if (ep->flags & EFF_TYPED) {
	/* EFF_NOTIME suppresses timestamping */
	now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
	for (i = 0; i < count; i++) {
	    /*
	     * TODO Oopses here could be due to bad data corruption.
	     * Fail instead of attempting to recover?
	     */
	    elt = (struct emptypedstr *)((char *)buf + i * ep->size);
	    if (CANT_HAPPEN(elt->ef_type != ep->uid))
		elt->ef_type = ep->uid;
	    if (CANT_HAPPEN(elt->uid != id + i))
	    /* [gap: repair elt->uid] */
	    if (now != (time_t)-1)
		elt->timestamp = now;
    /* [gap] */

    /* privately mapped tables are never written back */
    if (ep->flags & EFF_PRIVATE)
    /* [gap: return 0] */

    if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
	logerror("Error seeking %s to elt %d (%s)",
		 ep->file, id, strerror(errno));
    /* [gap: return -1] */

    n = count * ep->size;	/* total bytes to write */
    /* [gap: write loop head; retry on EINTR] */
    ret = write(ep->fd, p, n);
    /* [gap: if (ret < 0) */
	if (errno != EINTR) {
	    logerror("Error writing %s elt %d (%s)",
		     id + (int)((p - (char *)buf) / ep->size),
    /* [gap: remaining args; return -1; advance p, n; return 0] */
/*
 * Write element ID into table TYPE from buffer FROM.
 * FIXME pass buffer size!
 * Update timestamp in FROM if table is EFF_TYPED.
 * If table is file-backed and not privately mapped, write through
 * cache straight to disk.
 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
 * Can write at the end of partially cached table.
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): incomplete extraction; [gap] comments mark missing lines.
 */
ef_write(int type, int id, void *from)
    /* [gap: locals: struct empfile *ep, char *cachep] */
    if (ef_check(type) < 0)
    /* [gap: return 0; ep = &empfile[type]] */
    /* private mapping without full cache cannot be written coherently */
    if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
    /* [gap: return 0] */
    if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
	return 0;		/* not implemented */
    /* [gap: new_seqno(ep, from) -- bump the sequence number] */
    if (id >= ep->fids) {
	/* write beyond end of file extends it, take note */
	/* [gap: ep->fids = id + 1] */
	if (ep->onresize && ep->onresize(type) < 0)
    /* [gap: return 0] */
    if (id >= ep->baseid && id < ep->baseid + ep->cids) {
	cachep = ep->cache + (id - ep->baseid) * ep->size;
	/* caller must have read the element in this generation */
	must_be_fresh(ep, from);
    /* [gap: } else cachep = NULL; per-table write hook: */
	ep->prewrite(id, cachep, from);
    /* [gap] */
    if (do_write(ep, from, id, 1) < 0)
    /* [gap: return 0] */
    if (cachep && cachep != from)	/* update the cache if necessary */
	memcpy(cachep, from, ep->size);
    /* [gap: return 1] */
526 * BUF is an element of table TYPE.
527 * ID is its new element ID.
528 * If table is EFF_TYPED, change id and sequence number stored in BUF.
532 ef_set_uid(int type, void *buf, int uid)
534 struct emptypedstr *elt;
537 if (ef_check(type) < 0)
540 if (!(ep->flags & EFF_TYPED))
546 elt->seqno = get_seqno(ep, uid);
550 * Return sequence number of element ID in table EP.
551 * Return zero if table is not EFF_TYPED (it has no sequence number
555 get_seqno(struct empfile *ep, int id)
557 struct emptypedstr *elt;
559 if (!(ep->flags & EFF_TYPED))
561 if (id < 0 || id >= ep->fids)
563 if (id >= ep->baseid && id < ep->baseid + ep->cids)
564 elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
566 /* need a buffer, steal last cache slot */
567 if (ep->cids == ep->csize)
569 elt = (void *)(ep->cache + ep->cids * ep->size);
570 if (do_read(ep, elt, id, 1) < 0)
571 return 0; /* deep trouble */
577 * Increment sequence number in BUF, which is about to be written to EP.
578 * Do nothing if table is not EFF_TYPED (it has no sequence number
582 new_seqno(struct empfile *ep, void *buf)
584 struct emptypedstr *elt = buf;
587 if (!(ep->flags & EFF_TYPED))
589 old_seqno = get_seqno(ep, elt->uid);
590 if (CANT_HAPPEN(old_seqno != elt->seqno))
591 old_seqno = MAX(old_seqno, elt->seqno);
592 elt->seqno = old_seqno + 1;
602 ef_mark_fresh(int type, void *buf)
606 if (ef_check(type) < 0)
609 if (!(ep->flags & EFF_TYPED))
611 ((struct emptypedstr *)buf)->generation = ef_generation;
615 must_be_fresh(struct empfile *ep, void *buf)
617 struct emptypedstr *elt = buf;
619 if (!(ep->flags & EFF_TYPED))
621 CANT_HAPPEN(elt->generation != ef_generation);
/*
 * Extend table TYPE by COUNT elements.
 * Any pointers obtained from ef_ptr() become invalid.
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): incomplete extraction; [gap] comments mark missing lines.
 */
ef_extend(int type, int count)
    /* [gap: locals such as struct empfile *ep, char *p] */
    int need_sentinel, i, id;

    /* views must never be resized directly */
    if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
    /* [gap: return 0; ep = &empfile[type]] */
    if (CANT_HAPPEN(count < 0))
    /* [gap: return 0; id = ep->fids -- first new element] */

    if (ep->flags & EFF_MEM) {
	need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
	/* grow the cache if the new elements (plus sentinel) don't fit */
	if (id + count + need_sentinel > ep->csize) {
	    if (ep->flags & EFF_STATIC) {
		logerror("Can't extend %s beyond %d elements",
			 ep->name, ep->csize - need_sentinel);
	    /* [gap: return 0] */
	    if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
		logerror("Can't extend %s to %d elements (%s)",
			 ep->name, id + count, strerror(errno));
	    /* [gap: return 0] */
	p = ep->cache + id * ep->size;
	do_blank(ep, p, id, count);	/* initialize the new elements */
	/* [gap: if file-backed, write them through: */
	    if (do_write(ep, p, id, count) < 0)
	/* [gap: return 0; zero the sentinel slot, if any: */
	    memset(ep->cache + (id + count) * ep->size, 0, ep->size);
	ep->cids = id + count;
    /* [gap: } else { -- not fully cached */
	/* need a buffer, steal last cache slot */
	if (ep->cids == ep->csize)
	/* [gap: ep->cids--] */
	p = ep->cache + ep->cids * ep->size;
	for (i = 0; i < count; i++) {
	    do_blank(ep, p, id + i, 1);
	    if (do_write(ep, p, id + i, 1) < 0)
	/* [gap: return 0] */
    ep->fids = id + count;
    /* notify the resize callback, if any */
    if (ep->onresize && ep->onresize(type) < 0)
    /* [gap: return 0; return 1] */
684 * Initialize element ID for EP in BUF.
685 * FIXME pass buffer size!
688 ef_blank(int type, int id, void *buf)
691 struct emptypedstr *elt;
693 if (ef_check(type) < 0)
696 do_blank(ep, buf, id, 1);
697 if (ep->flags & EFF_TYPED) {
699 elt->seqno = get_seqno(ep, elt->uid);
701 ef_mark_fresh(type, buf);
705 * Initialize COUNT elements of EP in BUF, starting with element ID.
708 do_blank(struct empfile *ep, void *buf, int id, int count)
711 struct emptypedstr *elt;
713 memset(buf, 0, count * ep->size);
714 for (i = 0; i < count; i++) {
715 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
716 if (ep->flags & EFF_TYPED) {
717 elt->ef_type = ep->uid;
/*
 * Truncate table TYPE to COUNT elements.
 * Any pointers obtained from ef_ptr() become invalid.
 * Return non-zero on success, zero on failure.
 *
 * NOTE(review): incomplete extraction; [gap] comments mark missing lines.
 */
ef_truncate(int type, int count)
    /* [gap: locals such as struct empfile *ep, int need_sentinel] */
    if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
    /* [gap: return 0; ep = &empfile[type]] */
    if (CANT_HAPPEN(count < 0 || count > ep->fids))
    /* [gap: return 0] */

    /* shrink the backing file, unless privately mapped */
    if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
	if (ftruncate(ep->fd, count * ep->size) < 0) {
	    logerror("Can't truncate %s to %d elements (%s)",
		     ep->file, count, strerror(errno));
    /* [gap: return 0] */

    if (ep->flags & EFF_MEM) {
	need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
	if (!(ep->flags & EFF_STATIC)) {
	    if (!ef_realloc_cache(ep, count + need_sentinel)) {
		logerror("Can't shrink %s cache after truncate (%s)",
			 ep->name, strerror(errno));
		/* continue with unshrunk cache */
    /* [gap: re-zero the sentinel slot, if any: */
	    memset(ep->cache + count * ep->size, 0, ep->size);
    /* [gap: } else { -- partially cached: clip the cache window */
	if (ep->baseid >= count)
	/* [gap: window entirely past new end; drop it] */
	else if (ep->cids > count - ep->baseid)
	    ep->cids = count - ep->baseid;
    /* [gap: ep->fids = count] */
    /* notify the resize callback, if any */
    if (ep->onresize && ep->onresize(type) < 0)
    /* [gap: return 0; return 1] */
778 if (ef_check(type) < 0)
780 return empfile[type].cadef;
786 if (ef_check(type) < 0)
788 return empfile[type].fids;
794 if (ef_check(type) < 0)
796 return empfile[type].flags;
802 if (ef_check(type) < 0)
804 if (empfile[type].fd <= 0)
806 return fdate(empfile[type].fd);
810 * Search for a table matching NAME, return its table type.
811 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
815 ef_byname(char *name)
817 return stmtch(name, empfile, offsetof(struct empfile, name),
/*
 * Search CHOICES[] for a table type matching NAME, return it.
 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
 * several.
 * CHOICES[] must be terminated with a negative value.
 *
 * NOTE(review): incomplete extraction -- the switch cases and the
 * result bookkeeping are missing; [gap] comments mark the holes.
 */
ef_byname_from(char *name, int choices[])
    /* [gap: locals; result initialized to M_NOTFOUND] */
    for (p = choices; *p >= 0; p++) {
	/* skip invalid entries rather than failing the whole search */
	if (ef_check(*p) < 0)
    /* [gap: continue] */
	switch (mineq(name, empfile[*p].name)) {
    /* [gap: mismatch/partial/exact cases; partial match twice =>
     * M_NOTUNIQUE, exact match returns *p immediately; final return] */
855 if (ef_check(type) < 0)
856 return "bad ef_type";
857 return empfile[type].name;
863 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
869 * Ensure table contains element ID.
870 * If necessary, extend it in steps of COUNT elements.
871 * Return non-zero on success, zero on failure.
874 ef_ensure_space(int type, int id, int count)
876 if (ef_check(type) < 0)
880 while (id >= empfile[type].fids) {
881 if (!ef_extend(type, count))