2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2008, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * See files README, COPYING and CREDITS in the root of the source
23 * tree for related information and legal notices. It is expected
24 * that future projects/authors will amend these files as needed.
28 * file.c: Operations on Empire tables (`files' for historical reasons)
30 * Known contributors to this file:
33 * Markus Armbruster, 2005-2008
41 #include <sys/types.h>
47 #include "prototypes.h"
/* Internal helpers; see definitions below */
static int ef_realloc_cache(struct empfile *, int);
static int fillcache(struct empfile *, int);
static int do_read(struct empfile *, void *, int, int);
static int do_write(struct empfile *, void *, int, int, time_t);
static int get_seqno(struct empfile *, int);
static void new_seqno(struct empfile *, void *);
static void do_blank(struct empfile *, void *, int, int);
58 * Open the file-backed table TYPE (EF_SECTOR, ...).
59 * HOW are flags to control operation. Naturally, immutable flags are
61 * Return non-zero on success, zero on failure.
62 * You must call ef_close() before the next ef_open().
65 ef_open(int type, int how)
69 int oflags, fd, fsiz, nslots;
71 if (ef_check(type) < 0)
73 if (CANT_HAPPEN(how & EFF_IMMUTABLE))
74 how &= ~EFF_IMMUTABLE;
78 if (CANT_HAPPEN(ep->fd >= 0))
81 if (how & EFF_PRIVATE)
84 oflags |= O_CREAT | O_TRUNC;
88 if ((fd = open(ep->file, oflags, S_IRWUG)) < 0) {
89 logerror("Can't open %s (%s)", ep->file, strerror(errno));
93 lock.l_type = how & EFF_PRIVATE ? F_RDLCK : F_WRLCK;
94 lock.l_whence = SEEK_SET;
95 lock.l_start = lock.l_len = 0;
96 if (fcntl(fd, F_SETLK, &lock) == -1) {
97 logerror("Can't lock %s (%s)", ep->file, strerror(errno));
104 if (fsiz % ep->size) {
105 logerror("Can't open %s (file size not a multiple of record size %d)",
110 ep->fids = fsiz / ep->size;
113 if (ep->flags & EFF_STATIC) {
114 /* ep->cache already points to space for ep->csize elements */
116 if (ep->fids > ep->csize) {
117 logerror("Can't open %s: file larger than %d bytes",
118 ep->file, ep->fids * ep->size);
124 if (CANT_HAPPEN(ep->cache))
129 nslots = blksize(fd) / ep->size;
130 if (!ef_realloc_cache(ep, nslots)) {
131 logerror("Can't map %s (%s)", ep->file, strerror(errno));
138 ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
141 /* map file into cache */
142 if ((how & EFF_MEM) && ep->fids) {
143 if (fillcache(ep, 0) != ep->fids) {
144 ep->cids = 0; /* prevent cache flush */
145 ep->flags &= EFF_IMMUTABLE; /* maintain invariant */
155 * Reallocate cache for table EP to hold COUNT slots.
156 * The table must not be allocated statically.
157 * The cache may still be unmapped.
158 * If reallocation succeeds, any pointers obtained from ef_ptr()
160 * If it fails, the cache is unchanged, and errno is set.
161 * Return non-zero on success, zero on failure.
164 ef_realloc_cache(struct empfile *ep, int count)
168 if (CANT_HAPPEN(ep->flags & EFF_STATIC))
170 if (CANT_HAPPEN(count < 0))
174 * Avoid zero slots, because that can lead to null cache, which
175 * would be interpreted as unmapped cache.
179 cache = realloc(ep->cache, count * ep->size);
189 * Close the file-backed table TYPE (EF_SECTOR, ...).
190 * Return non-zero on success, zero on failure.
198 retval = ef_flush(type);
200 ep->flags &= EFF_IMMUTABLE;
201 if (!(ep->flags & EFF_STATIC)) {
205 if (close(ep->fd) < 0) {
206 logerror("Error closing %s (%s)", ep->name, strerror(errno));
214 * Flush table TYPE (EF_SECTOR, ...) to disk.
215 * Does nothing if the table is privately mapped.
216 * Return non-zero on success, zero on failure.
223 if (ef_check(type) < 0)
226 if (ep->flags & EFF_PRIVATE)
227 return 1; /* nothing to do */
228 if (CANT_HAPPEN(ep->fd < 0))
231 * We don't know which cache entries are dirty. ef_write() writes
232 * through, but direct updates through ef_ptr() don't. They are
233 * allowed only with EFF_MEM. Assume the whole cash is dirty
236 if (ep->flags & EFF_MEM) {
237 if (do_write(ep, ep->cache, ep->baseid, ep->cids, time(NULL)) < 0)
245 * Return pointer to element ID in table TYPE if it exists, else NULL.
246 * The table must be fully cached, i.e. flags & EFF_MEM.
247 * The caller is responsible for flushing changes he makes.
250 ef_ptr(int type, int id)
254 if (ef_check(type) < 0)
257 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
259 if (id < 0 || id >= ep->fids)
261 return ep->cache + ep->size * id;
265 * Read element ID from table TYPE into buffer INTO.
266 * FIXME pass buffer size!
267 * Return non-zero on success, zero on failure.
270 ef_read(int type, int id, void *into)
275 if (ef_check(type) < 0)
278 if (CANT_HAPPEN(!ep->cache))
280 if (id < 0 || id >= ep->fids)
283 if (ep->flags & EFF_MEM) {
284 from = ep->cache + id * ep->size;
286 if (ep->baseid + ep->cids <= id || ep->baseid > id) {
287 if (fillcache(ep, id) < 1)
290 from = ep->cache + (id - ep->baseid) * ep->size;
292 memcpy(into, from, ep->size);
295 ep->postread(id, into);
300 * Fill cache of file-backed EP with elements starting at ID.
301 * If any were read, return their number.
302 * Else return -1 and leave the cache unchanged.
305 fillcache(struct empfile *ep, int id)
309 if (CANT_HAPPEN(!ep->cache))
312 ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
322 do_read(struct empfile *ep, void *buf, int id, int count)
327 if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
330 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
331 logerror("Error seeking %s to elt %d (%s)",
332 ep->file, id, strerror(errno));
337 n = count * ep->size;
339 ret = read(ep->fd, p, n);
341 if (errno != EINTR) {
342 logerror("Error reading %s elt %d (%s)",
344 id + (int)((p - (char *)buf) / ep->size),
348 } else if (ret == 0) {
349 logerror("Unexpected EOF reading %s elt %d",
350 ep->file, id + (int)((p - (char *)buf) / ep->size));
358 return (p - (char *)buf) / ep->size;
362 * Write COUNT elements starting at ID from BUF to file-backed EP.
363 * Set the timestamp to NOW if the table has those.
364 * Return 0 on success, -1 on error (file may be corrupt then).
367 do_write(struct empfile *ep, void *buf, int id, int count, time_t now)
371 struct emptypedstr *elt;
373 if (CANT_HAPPEN(ep->fd < 0 || (ep->flags & EFF_PRIVATE)
374 || id < 0 || count < 0))
377 if (ep->flags & EFF_TYPED) {
378 for (i = 0; i < count; i++) {
380 * TODO Oopses here could be due to bad data corruption.
381 * Fail instead of attempting to recover?
383 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
384 if (CANT_HAPPEN(elt->ef_type != ep->uid))
385 elt->ef_type = ep->uid;
386 if (CANT_HAPPEN(elt->uid != id + i))
388 elt->timestamp = now;
392 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
393 logerror("Error seeking %s to elt %d (%s)",
394 ep->file, id, strerror(errno));
399 n = count * ep->size;
401 ret = write(ep->fd, p, n);
403 if (errno != EINTR) {
404 logerror("Error writing %s elt %d (%s)",
406 id + (int)((p - (char *)buf) / ep->size),
420 * Write element ID into table TYPE from buffer FROM.
421 * FIXME pass buffer size!
422 * If table is file-backed and not privately mapped, write through
423 * cache straight to disk.
424 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
425 * Can write at the end of partially cached table.
426 * Return non-zero on success, zero on failure.
429 ef_write(int type, int id, void *from)
434 if (ef_check(type) < 0)
437 if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
440 ep->prewrite(id, from);
441 if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
442 return 0; /* not implemented */
444 if (!(ep->flags & EFF_PRIVATE)) {
445 if (do_write(ep, from, id, 1, time(NULL)) < 0)
448 if (id >= ep->baseid && id < ep->baseid + ep->cids) {
449 /* update the cache if necessary */
450 to = ep->cache + (id - ep->baseid) * ep->size;
452 memcpy(to, from, ep->size);
454 if (id >= ep->fids) {
455 /* write beyond end of file extends it, take note */
462 ef_set_uid(int type, void *buf, int uid)
464 struct emptypedstr *elt;
467 if (ef_check(type) < 0)
470 if (!(ep->flags & EFF_TYPED))
476 elt->seqno = get_seqno(ep, uid);
480 get_seqno(struct empfile *ep, int id)
482 struct emptypedstr *elt;
484 if (!(ep->flags & EFF_TYPED))
486 if (id < 0 || id >= ep->fids)
488 if (id >= ep->baseid && id < ep->baseid + ep->cids)
489 elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
491 /* need a buffer, steal last cache slot */
492 if (ep->cids == ep->csize)
494 elt = (void *)(ep->cache + ep->cids * ep->size);
495 if (do_read(ep, elt, id, 1) < 0)
496 return 0; /* deep trouble */
502 new_seqno(struct empfile *ep, void *buf)
504 struct emptypedstr *elt = buf;
507 if (!(ep->flags & EFF_TYPED))
509 old_seqno = get_seqno(ep, elt->uid);
511 if (CANT_HAPPEN(old_seqno != elt->seqno))
512 old_seqno = MAX(old_seqno, elt->seqno);
514 if (old_seqno != elt->seqno) {
515 logerror("seqno mismatch ef_type=%d uid=%d: %d!=%d",
516 ep->uid, elt->uid, old_seqno, elt->seqno);
517 old_seqno = MAX(old_seqno, elt->seqno);
520 elt->seqno = old_seqno + 1;
524 * Extend table TYPE by COUNT elements.
525 * Any pointers obtained from ef_ptr() become invalid.
526 * Return non-zero on success, zero on failure.
529 ef_extend(int type, int count)
534 time_t now = time(NULL);
536 if (ef_check(type) < 0)
539 if (CANT_HAPPEN(count < 0))
543 if (ep->flags & EFF_MEM) {
544 if (id + count > ep->csize) {
545 if (ep->flags & EFF_STATIC) {
546 logerror("Can't extend %s beyond %d elements",
547 ep->file, ep->csize);
550 if (!ef_realloc_cache(ep, id + count)) {
551 logerror("Can't extend %s to %d elements (%s)",
552 ep->file, id + count, strerror(errno));
556 p = ep->cache + id * ep->size;
557 do_blank(ep, p, id, count);
558 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
559 if (do_write(ep, p, id, count, now) < 0)
564 /* need a buffer, steal last cache slot */
565 if (ep->cids == ep->csize)
567 p = ep->cache + ep->cids * ep->size;
568 for (i = 0; i < count; i++) {
569 do_blank(ep, p, id + i, 1);
570 if (do_write(ep, p, id + i, 1, now) < 0)
579 * Initialize element ID for EP in BUF.
580 * FIXME pass buffer size!
583 ef_blank(int type, int id, void *buf)
586 struct emptypedstr *elt;
588 if (ef_check(type) < 0)
591 do_blank(ep, buf, id, 1);
592 if (ep->flags & EFF_TYPED) {
594 elt->seqno = get_seqno(ep, elt->uid);
599 * Initialize COUNT elements of EP in BUF, starting with element ID.
602 do_blank(struct empfile *ep, void *buf, int id, int count)
605 struct emptypedstr *elt;
607 memset(buf, 0, count * ep->size);
608 if (ep->flags & EFF_TYPED) {
609 for (i = 0; i < count; i++) {
610 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
611 elt->ef_type = ep->uid;
618 * Truncate table TYPE to COUNT elements.
619 * Any pointers obtained from ef_ptr() become invalid.
620 * Return non-zero on success, zero on failure.
623 ef_truncate(int type, int count)
627 if (ef_check(type) < 0)
630 if (CANT_HAPPEN(count < 0 || count > ep->fids))
633 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
634 if (ftruncate(ep->fd, count * ep->size) < 0) {
635 logerror("Can't truncate %s to %d elements (%s)",
636 ep->file, count, strerror(errno));
642 if (ep->flags & EFF_MEM) {
643 if (!(ep->flags & EFF_STATIC)) {
644 if (!ef_realloc_cache(ep, count)) {
645 logerror("Can't shrink cache after truncate");
646 /* continue with unshrunk cache */
651 if (ep->baseid >= count)
653 else if (ep->cids > count - ep->baseid)
654 ep->cids = count - ep->baseid;
663 return empfile[type].cadef;
669 return empfile[type].fids;
675 return empfile[type].flags;
681 if (empfile[type].fd <= 0)
683 return fdate(empfile[type].fd);
687 * Search for a table matching NAME, return its table type.
688 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
692 ef_byname(char *name)
694 return stmtch(name, empfile, offsetof(struct empfile, name),
699 * Search CHOICES[] for a table type matching NAME, return it.
700 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
702 * CHOICES[] must be terminated with a negative value.
705 ef_byname_from(char *name, int choices[])
711 for (p = choices; *p >= 0; p++) {
712 if (ef_check(*p) < 0)
714 switch (mineq(name, empfile[*p].name)) {
732 if (ef_check(type) < 0)
733 return "bad ef_type";
734 return empfile[type].name;
740 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
746 * Ensure file-backed table contains ID.
747 * If necessary, extend it in steps of COUNT elements.
748 * Return non-zero on success, zero on failure.
751 ef_ensure_space(int type, int id, int count)
753 if (ef_check(type) < 0)
757 while (id >= empfile[type].fids) {
758 if (!ef_extend(type, count))