2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2009, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * See files README, COPYING and CREDITS in the root of the source
23 * tree for related information and legal notices. It is expected
24 * that future projects/authors will amend these files as needed.
28 * file.c: Operations on Empire tables (`files' for historical reasons)
30 * Known contributors to this file:
33 * Markus Armbruster, 2005-2008
41 #include <sys/types.h>
47 #include "prototypes.h"
/* Forward declarations for the file-scope helpers defined below. */
49 static int ef_realloc_cache(struct empfile *, int);
50 static int fillcache(struct empfile *, int);
51 static int do_read(struct empfile *, void *, int, int);
52 static int do_write(struct empfile *, void *, int, int);
53 static unsigned get_seqno(struct empfile *, int);
54 static void new_seqno(struct empfile *, void *);
55 static void do_blank(struct empfile *, void *, int, int);
56 static int ef_check(int);
59 * Open the file-backed table TYPE (EF_SECTOR, ...).
60 * HOW are flags to control operation. Naturally, immutable flags are
62 * If NELT is non-negative, the table must have that many elements.
63 * Return non-zero on success, zero on failure.
64 * You must call ef_close() before the next ef_open().
67 ef_open(int type, int how, int nelt)
71 int oflags, fd, fsiz, nslots;
/* Callers must not pass immutable flags; strip them if they do */
73 if (ef_check(type) < 0)
75 if (CANT_HAPPEN(how & EFF_IMMUTABLE))
76 how &= ~EFF_IMMUTABLE;
/* Table must not be open already */
80 if (CANT_HAPPEN(ep->fd >= 0))
83 if (how & EFF_PRIVATE)
86 oflags |= O_CREAT | O_TRUNC;
90 fd = open(ep->file, oflags, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
92 logerror("Can't open %s (%s)", ep->file, strerror(errno));
/*
 * Lock the whole file (l_start = l_len = 0): read lock for a
 * private mapping, write lock otherwise.
 */
96 lock.l_type = how & EFF_PRIVATE ? F_RDLCK : F_WRLCK;
97 lock.l_whence = SEEK_SET;
98 lock.l_start = lock.l_len = 0;
99 if (fcntl(fd, F_SETLK, &lock) == -1) {
100 logerror("Can't lock %s (%s)", ep->file, strerror(errno));
/* Sanity-check file size: must be a whole number of records */
107 if (fsiz % ep->size) {
108 logerror("Can't open %s (file size not a multiple of record size %d)",
113 ep->fids = fsiz / ep->size;
114 if (nelt >= 0 && nelt != ep->fids) {
115 logerror("Can't open %s (got %d records instead of %d)",
116 ep->file, ep->fids, nelt);
/* Set up the in-memory cache */
122 if (ep->flags & EFF_STATIC) {
123 /* ep->cache already points to space for ep->csize elements */
125 if (ep->fids > ep->csize) {
126 logerror("Can't open %s: file larger than %d bytes",
127 ep->file, ep->fids * ep->size);
133 if (CANT_HAPPEN(ep->cache))
/* Dynamic cache: size it by the file system block size */
138 nslots = blksize(fd) / ep->size;
139 if (!ef_realloc_cache(ep, nslots)) {
140 logerror("Can't map %s (%s)", ep->file, strerror(errno));
/* Keep immutable flags, merge in the caller's mutable ones */
147 ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
150 /* map file into cache */
151 if ((how & EFF_MEM) && ep->fids) {
152 if (fillcache(ep, 0) != ep->fids) {
153 ep->cids = 0; /* prevent cache flush */
154 ep->flags &= EFF_IMMUTABLE; /* maintain invariant */
/* Tell interested parties the table was (re)sized */
160 if (ep->onresize && ep->onresize(type) < 0)
166 * Reallocate cache for table EP to hold COUNT slots.
167 * The table must not be allocated statically.
168 * The cache may still be unmapped.
169 * If reallocation succeeds, any pointers obtained from ef_ptr()
171 * If it fails, the cache is unchanged, and errno is set.
172 * Return non-zero on success, zero on failure.
175 ef_realloc_cache(struct empfile *ep, int count)
/* Static tables own fixed storage; they must never be realloc'ed */
179 if (CANT_HAPPEN(ep->flags & EFF_STATIC))
181 if (CANT_HAPPEN(count < 0))
185 * Avoid zero slots, because that can lead to null cache, which
186 * would be interpreted as unmapped cache.
/* NOTE(review): realloc result goes to a temporary, not ep->cache,
   so the old cache survives failure — confirm in elided lines */
190 cache = realloc(ep->cache, count * ep->size);
200 * Open the table TYPE as view of table BASE.
201 * Return non-zero on success, zero on failure.
202 * Beware: views work only as long as BASE doesn't change size!
203 * You must call ef_close(TYPE) before closing BASE.
206 ef_open_view(int type, int base)
210 if (CANT_HAPPEN(!EF_IS_VIEW(type)))
/* A view can only share a fully cached (EFF_MEM) base table */
213 if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)))
/* Share the base table's cache and bookkeeping wholesale */
216 ep->cache = empfile[base].cache;
217 ep->csize = empfile[base].csize;
218 ep->flags |= EFF_MEM;
219 ep->baseid = empfile[base].baseid;
220 ep->cids = empfile[base].cids;
221 ep->fids = empfile[base].fids;
226 * Close the file-backed table TYPE (EF_SECTOR, ...).
227 * Return non-zero on success, zero on failure.
235 if (ef_check(type) < 0)
/* Views share their base's cache; nothing of their own to release */
239 if (EF_IS_VIEW(type))
/* Drop all mutable flags; free the cache unless it is static storage */
244 ep->flags &= EFF_IMMUTABLE;
245 if (!(ep->flags & EFF_STATIC)) {
249 if (close(ep->fd) < 0) {
250 logerror("Error closing %s (%s)", ep->file, strerror(errno));
/* Reset bookkeeping so a later ef_open() starts from a clean slate */
255 ep->baseid = ep->cids = ep->fids = 0;
256 if (ep->onresize && ep->onresize(type) < 0)
262 * Flush file-backed table TYPE (EF_SECTOR, ...) to its backing file.
263 * Do nothing if the table is privately mapped.
264 * Update timestamps of written elements if table is EFF_TYPED.
265 * Return non-zero on success, zero on failure.
272 if (ef_check(type) < 0)
275 if (ep->flags & EFF_PRIVATE)
276 return 1; /* nothing to do */
277 if (CANT_HAPPEN(ep->fd < 0))
280 * We don't know which cache entries are dirty. ef_write() writes
281 * through, but direct updates through ef_ptr() don't. They are
282 * allowed only with EFF_MEM. Assume the whole cache is dirty
285 if (ep->flags & EFF_MEM) {
286 if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
294 * Return pointer to element ID in table TYPE if it exists, else NULL.
295 * The table must be fully cached, i.e. flags & EFF_MEM.
296 * The caller is responsible for flushing changes he makes.
299 ef_ptr(int type, int id)
303 if (ef_check(type) < 0)
306 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
/* Reject out-of-range element IDs */
308 if (id < 0 || id >= ep->fids)
/* Fully cached, so the element lives at slot ID of the cache */
310 return ep->cache + ep->size * id;
314 * Read element ID from table TYPE into buffer INTO.
315 * FIXME pass buffer size!
316 * Return non-zero on success, zero on failure.
319 ef_read(int type, int id, void *into)
324 if (ef_check(type) < 0)
327 if (CANT_HAPPEN(!ep->cache))
329 if (id < 0 || id >= ep->fids)
332 if (ep->flags & EFF_MEM) {
/* Fully cached: element sits at slot ID */
333 cachep = ep->cache + id * ep->size;
/* Partially cached: fault the element in if it is outside the
   current cache window [baseid, baseid + cids) */
335 if (ep->baseid + ep->cids <= id || ep->baseid > id) {
336 if (fillcache(ep, id) < 1)
339 cachep = ep->cache + (id - ep->baseid) * ep->size;
341 memcpy(into, cachep, ep->size);
/* Run the table's post-read hook on the copy */
344 ep->postread(id, into);
349 * Fill cache of file-backed EP with elements starting at ID.
350 * If any were read, return their number.
351 * Else return -1 and leave the cache unchanged.
354 fillcache(struct empfile *ep, int id)
358 if (CANT_HAPPEN(!ep->cache))
/* Read as many elements as fit in the cache, but not past EOF */
361 ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
/*
 * Read COUNT elements starting at element ID from EP's file into BUF.
 * Returns the number of whole elements transferred.
 */
371 do_read(struct empfile *ep, void *buf, int id, int count)
376 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
379 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
380 logerror("Error seeking %s to elt %d (%s)",
381 ep->file, id, strerror(errno));
/* Total bytes to transfer */
386 n = count * ep->size;
/* read() may transfer less than requested; retry on EINTR */
388 ret = read(ep->fd, p, n);
390 if (errno != EINTR) {
391 logerror("Error reading %s elt %d (%s)",
/* Translate the byte offset back to an element number for the log */
393 id + (int)((p - (char *)buf) / ep->size),
397 } else if (ret == 0) {
/* Hitting EOF here means the file shrank under us */
398 logerror("Unexpected EOF reading %s elt %d",
399 ep->file, id + (int)((p - (char *)buf) / ep->size));
/* Number of whole elements actually transferred */
407 return (p - (char *)buf) / ep->size;
411 * Write COUNT elements starting at ID from BUF to file-backed EP.
412 * Update the timestamp if the table is EFF_TYPED.
413 * Don't actually write if table is privately mapped.
414 * Return 0 on success, -1 on error (file may be corrupt then).
417 do_write(struct empfile *ep, void *buf, int id, int count)
421 struct emptypedstr *elt;
424 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
/* Typed tables: fix up each element's header fields before writing */
427 if (ep->flags & EFF_TYPED) {
/* EFF_NOTIME suppresses timestamping (sentinel (time_t)-1) */
428 now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
429 for (i = 0; i < count; i++) {
431 * TODO Oopses here could be due to bad data corruption.
432 * Fail instead of attempting to recover?
434 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
435 if (CANT_HAPPEN(elt->ef_type != ep->uid))
436 elt->ef_type = ep->uid;
437 if (CANT_HAPPEN(elt->uid != id + i))
439 if (now != (time_t)-1)
440 elt->timestamp = now;
/* Privately mapped tables are never written back to disk */
444 if (ep->flags & EFF_PRIVATE)
447 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
448 logerror("Error seeking %s to elt %d (%s)",
449 ep->file, id, strerror(errno));
454 n = count * ep->size;
/* write() may transfer less than requested; retry on EINTR */
456 ret = write(ep->fd, p, n);
458 if (errno != EINTR) {
459 logerror("Error writing %s elt %d (%s)",
461 id + (int)((p - (char *)buf) / ep->size),
475 * Write element ID into table TYPE from buffer FROM.
476 * FIXME pass buffer size!
477 * Update timestamp in FROM if table is EFF_TYPED.
478 * If table is file-backed and not privately mapped, write through
479 * cache straight to disk.
480 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
481 * Can write at the end of partially cached table.
482 * Return non-zero on success, zero on failure.
485 ef_write(int type, int id, void *from)
490 if (ef_check(type) < 0)
/* EFF_PRIVATE without EFF_MEM has nowhere to store the element */
493 if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
/* Fully cached: no appends; partially cached: append at fids only */
495 if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
496 return 0; /* not implemented */
498 if (id >= ep->fids) {
499 /* write beyond end of file extends it, take note */
501 if (ep->onresize && ep->onresize(type) < 0)
/* Locate the element's cache slot, if it is currently cached */
504 if (id >= ep->baseid && id < ep->baseid + ep->cids)
505 cachep = ep->cache + (id - ep->baseid) * ep->size;
509 ep->prewrite(id, cachep, from);
511 if (do_write(ep, from, id, 1) < 0)
514 if (cachep && cachep != from) /* update the cache if necessary */
515 memcpy(cachep, from, ep->size);
521 * BUF is an element of table TYPE.
522 * ID is its new element ID.
523 * If table is EFF_TYPED, change id and sequence number stored in BUF.
527 ef_set_uid(int type, void *buf, int uid)
529 struct emptypedstr *elt;
532 if (ef_check(type) < 0)
/* Untyped tables carry no uid/seqno in their elements; nothing to do */
535 if (!(ep->flags & EFF_TYPED))
/* Adopt the sequence number of the element currently stored at UID */
541 elt->seqno = get_seqno(ep, uid);
545 * Return sequence number of element ID in table EP.
546 * Return zero if table is not EFF_TYPED (it has no sequence number
550 get_seqno(struct empfile *ep, int id)
552 struct emptypedstr *elt;
554 if (!(ep->flags & EFF_TYPED))
556 if (id < 0 || id >= ep->fids)
/* Use the cached copy when ID is inside the cache window */
558 if (id >= ep->baseid && id < ep->baseid + ep->cids)
559 elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
561 /* need a buffer, steal last cache slot */
562 if (ep->cids == ep->csize)
564 elt = (void *)(ep->cache + ep->cids * ep->size);
/* Not cached: read the single element from disk into the stolen slot */
565 if (do_read(ep, elt, id, 1) < 0)
566 return 0; /* deep trouble */
572 * Increment sequence number in BUF, which is about to be written to EP.
573 * Do nothing if table is not EFF_TYPED (it has no sequence number
577 new_seqno(struct empfile *ep, void *buf)
579 struct emptypedstr *elt = buf;
582 if (!(ep->flags & EFF_TYPED))
584 old_seqno = get_seqno(ep, elt->uid);
/* A seqno mismatch should not happen; recover by taking the larger
   of the stored and the buffered sequence number */
585 if (CANT_HAPPEN(old_seqno != elt->seqno))
586 old_seqno = MAX(old_seqno, elt->seqno);
587 elt->seqno = old_seqno + 1;
591 * Extend table TYPE by COUNT elements.
592 * Any pointers obtained from ef_ptr() become invalid.
593 * Return non-zero on success, zero on failure.
596 ef_extend(int type, int count)
600 int need_sentinel, i, id;
602 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
605 if (CANT_HAPPEN(count < 0))
609 if (ep->flags & EFF_MEM) {
/* Fully cached: grow the cache to fit new elements plus sentinel */
610 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
611 if (id + count + need_sentinel > ep->csize) {
612 if (ep->flags & EFF_STATIC) {
613 logerror("Can't extend %s beyond %d elements",
614 ep->name, ep->csize - need_sentinel);
617 if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
618 logerror("Can't extend %s to %d elements (%s)",
619 ep->name, id + count, strerror(errno));
/* Blank the new elements in place, then write them through */
623 p = ep->cache + id * ep->size;
624 do_blank(ep, p, id, count);
626 if (do_write(ep, p, id, count) < 0)
/* Zero one element past the new end (sentinel slot) */
630 memset(ep->cache + (id + count) * ep->size, 0, ep->size);
631 ep->cids = id + count;
633 /* need a buffer, steal last cache slot */
634 if (ep->cids == ep->csize)
636 p = ep->cache + ep->cids * ep->size;
/* Not fully cached: blank and write the new elements one at a time */
637 for (i = 0; i < count; i++) {
638 do_blank(ep, p, id + i, 1);
639 if (do_write(ep, p, id + i, 1) < 0)
643 ep->fids = id + count;
644 if (ep->onresize && ep->onresize(type) < 0)
650 * Initialize element ID for EP in BUF.
651 * FIXME pass buffer size!
654 ef_blank(int type, int id, void *buf)
657 struct emptypedstr *elt;
659 if (ef_check(type) < 0)
662 do_blank(ep, buf, id, 1);
/* Typed tables: carry over the sequence number stored for this ID */
663 if (ep->flags & EFF_TYPED) {
665 elt->seqno = get_seqno(ep, elt->uid);
670 * Initialize COUNT elements of EP in BUF, starting with element ID.
673 do_blank(struct empfile *ep, void *buf, int id, int count)
676 struct emptypedstr *elt;
/* Zero everything first, then fill in the typed header fields */
678 memset(buf, 0, count * ep->size);
679 for (i = 0; i < count; i++) {
680 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
681 if (ep->flags & EFF_TYPED) {
682 elt->ef_type = ep->uid;
691 * Truncate table TYPE to COUNT elements.
692 * Any pointers obtained from ef_ptr() become invalid.
693 * Return non-zero on success, zero on failure.
696 ef_truncate(int type, int count)
701 if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
704 if (CANT_HAPPEN(count < 0 || count > ep->fids))
/* Shrink the backing file, unless it is privately mapped */
707 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
708 if (ftruncate(ep->fd, count * ep->size) < 0) {
709 logerror("Can't truncate %s to %d elements (%s)",
710 ep->file, count, strerror(errno));
716 if (ep->flags & EFF_MEM) {
717 need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
/* Shrink the dynamic cache to the new size (plus sentinel) */
718 if (!(ep->flags & EFF_STATIC)) {
719 if (!ef_realloc_cache(ep, count + need_sentinel)) {
720 logerror("Can't shrink %s cache after truncate (%s)",
721 ep->name, strerror(errno));
722 /* continue with unshrunk cache */
/* Zero one element past the new end (sentinel slot) */
726 memset(ep->cache + count * ep->size, 0, ep->size);
/* Clip the cache window to the new table size */
729 if (ep->baseid >= count)
731 else if (ep->cids > count - ep->baseid)
732 ep->cids = count - ep->baseid;
735 if (ep->onresize && ep->onresize(type) < 0)
/* Bodies of small accessor functions (signatures elided in this view):
   each validates TYPE, then returns one field of empfile[type]. */
743 if (ef_check(type) < 0)
745 return empfile[type].cadef;
751 if (ef_check(type) < 0)
753 return empfile[type].fids;
759 if (ef_check(type) < 0)
761 return empfile[type].flags;
767 if (ef_check(type) < 0)
/* NOTE(review): fd 0 is a valid descriptor; `<= 0` treats it as
   closed — confirm this is intentional (should likely be `< 0`) */
769 if (empfile[type].fd <= 0)
771 return fdate(empfile[type].fd);
775 * Search for a table matching NAME, return its table type.
776 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
780 ef_byname(char *name)
/* Delegate to the generic string-table matcher over empfile[].name */
782 return stmtch(name, empfile, offsetof(struct empfile, name),
787 * Search CHOICES[] for a table type matching NAME, return it.
788 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
790 * CHOICES[] must be terminated with a negative value.
793 ef_byname_from(char *name, int choices[])
/* Walk the candidate list up to the negative terminator */
799 for (p = choices; *p >= 0; p++) {
800 if (ef_check(*p) < 0)
/* Classify NAME against each candidate's table name */
802 switch (mineq(name, empfile[*p].name)) {
/* Body of the type-to-name accessor (signature elided): returns the
   table's name, or a fixed string for an invalid type. */
820 if (ef_check(type) < 0)
821 return "bad ef_type";
822 return empfile[type].name;
/* Validate a table type: anything outside [0, EF_MAX) is flagged via
   CANT_HAPPEN (the unsigned cast also catches negative values). */
828 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
834 * Ensure table contains element ID.
835 * If necessary, extend it in steps of COUNT elements.
836 * Return non-zero on success, zero on failure.
839 ef_ensure_space(int type, int id, int count)
841 if (ef_check(type) < 0)
845 while (id >= empfile[type].fids) {
846 if (!ef_extend(type, count))