2 * Empire - A multi-player, client/server Internet based war game.
3 * Copyright (C) 1986-2008, Dave Pare, Jeff Bailey, Thomas Ruschak,
4 * Ken Stevens, Steve McClure
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * See files README, COPYING and CREDITS in the root of the source
23 * tree for related information and legal notices. It is expected
24 * that future projects/authors will amend these files as needed.
28 * file.c: Operations on Empire tables (`files' for historical reasons)
30 * Known contributors to this file:
33 * Markus Armbruster, 2005-2006
41 #include <sys/types.h>
47 #include "prototypes.h"
/*
 * Internal helpers, defined further down in this file:
 * ef_realloc_cache -- resize a table's dynamic cache (not for EFF_STATIC)
 * fillcache        -- load elements from the backing file into the cache
 * do_write         -- write elements from a buffer to the backing file
 * do_blank         -- zero-initialize elements in a buffer
 */
49 static int ef_realloc_cache(struct empfile *, int);
50 static int fillcache(struct empfile *, int);
51 static int do_write(struct empfile *, void *, int, int);
52 static void do_blank(struct empfile *, void *, int, int);
55 * Open the file-backed table TYPE (EF_SECTOR, ...).
56 * HOW are flags to control operation. Naturally, immutable flags are
58 * Return non-zero on success, zero on failure.
59 * You must call ef_close() before the next ef_open().
62 ef_open(int type, int how)
66 int oflags, fd, fsiz, nslots;
/* validate TYPE and strip immutable flag bits callers may not pass */
68 if (ef_check(type) < 0)
70 if (CANT_HAPPEN(how & EFF_IMMUTABLE))
71 how &= ~EFF_IMMUTABLE;
/* table must not already be open (fd >= 0 means open -- presumably -1 when closed; confirm in empfile init) */
75 if (CANT_HAPPEN(ep->fd >= 0))
78 if (how & EFF_PRIVATE)
/* EFF_CREATE (re)creates the backing file empty */
81 oflags |= O_CREAT | O_TRUNC;
85 if ((fd = open(ep->file, oflags, S_IRWUG)) < 0) {
86 logerror("Can't open %s (%s)", ep->file, strerror(errno));
/* advisory lock on the whole file: shared for private maps, else exclusive */
90 lock.l_type = how & EFF_PRIVATE ? F_RDLCK : F_WRLCK;
91 lock.l_whence = SEEK_SET;
92 lock.l_start = lock.l_len = 0;
93 if (fcntl(fd, F_SETLK, &lock) == -1) {
94 logerror("Can't lock %s (%s)", ep->file, strerror(errno));
/* file size must be a whole multiple of the per-record size */
101 if (fsiz % ep->size) {
102 logerror("Can't open %s (file size not a multiple of record size %d)",
107 ep->fids = fsiz / ep->size;
/* statically allocated cache: only check that the file fits */
110 if (ep->flags & EFF_STATIC) {
111 /* ep->cache already points to space for ep->csize elements */
113 if (ep->fids > ep->csize) {
114 logerror("Can't open %s: file larger than %d bytes",
115 ep->file, ep->fids * ep->size);
121 if (CANT_HAPPEN(ep->cache))
/* dynamic cache: size it to one file-system block worth of records */
126 nslots = blksize(fd) / ep->size;
127 if (!ef_realloc_cache(ep, nslots)) {
128 logerror("Can't map %s (%s)", ep->file, strerror(errno));
/* keep the immutable flag bits, take the rest from HOW (minus EFF_CREATE) */
135 ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
138 /* map file into cache */
139 if ((how & EFF_MEM) && ep->fids) {
/* fully cached mode must load every element; partial load is failure */
140 if (fillcache(ep, 0) != ep->fids) {
141 ep->cids = 0; /* prevent cache flush */
142 ep->flags &= EFF_IMMUTABLE; /* maintain invariant */
152 * Reallocate cache for table EP to hold COUNT slots.
153 * The table must not be allocated statically.
154 * The cache may still be unmapped.
155 * If reallocation succeeds, any pointers obtained from ef_ptr()
157 * If it fails, the cache is unchanged, and errno is set.
158 * Return non-zero on success, zero on failure.
161 ef_realloc_cache(struct empfile *ep, int count)
165 if (CANT_HAPPEN(ep->flags & EFF_STATIC))
167 if (CANT_HAPPEN(count < 0))
171 * Avoid zero slots, because that can lead to null cache, which
172 * would be interpreted as unmapped cache.
/* NOTE(review): count * ep->size is int arithmetic and could overflow for very large tables -- confirm callers bound COUNT */
176 cache = realloc(ep->cache, count * ep->size);
186 * Close the file-backed table TYPE (EF_SECTOR, ...).
187 * Return non-zero on success, zero on failure.
/* flush pending cached changes before tearing the table down */
195 retval = ef_flush(type);
/* reset mutable flag bits; only immutable ones survive a close */
197 ep->flags &= EFF_IMMUTABLE;
/* dynamically allocated caches are released here; static ones persist */
198 if (!(ep->flags & EFF_STATIC)) {
202 if (close(ep->fd) < 0) {
203 logerror("Error closing %s (%s)", ep->name, strerror(errno));
211 * Flush table TYPE (EF_SECTOR, ...) to disk.
212 * Does nothing if the table is privately mapped.
213 * Return non-zero on success, zero on failure.
220 if (ef_check(type) < 0)
/* private mappings never write back to the file */
223 if (ep->flags & EFF_PRIVATE)
224 return 1; /* nothing to do */
225 if (CANT_HAPPEN(ep->fd < 0))
228 * We don't know which cache entries are dirty. ef_write() writes
229 * through, but direct updates through ef_ptr() don't. They are
230 * allowed only with EFF_MEM. Assume the whole cache is dirty
/* so only fully cached tables need (and get) a bulk write-back */
233 if (ep->flags & EFF_MEM)
234 return do_write(ep, ep->cache, ep->baseid, ep->cids) >= 0;
240 * Return pointer to element ID in table TYPE if it exists, else NULL.
241 * The table must be fully cached, i.e. flags & EFF_MEM.
242 * The caller is responsible for flushing changes he makes.
245 ef_ptr(int type, int id)
249 if (ef_check(type) < 0)
/* oops if the table is not fully cached or has no cache mapped */
252 if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
/* out-of-range IDs yield NULL, not an oops */
254 if (id < 0 || id >= ep->fids)
256 return ep->cache + ep->size * id;
260 * Read element ID from table TYPE into buffer INTO.
261 * FIXME pass buffer size!
262 * Return non-zero on success, zero on failure.
265 ef_read(int type, int id, void *into)
270 if (ef_check(type) < 0)
273 if (CANT_HAPPEN(!ep->cache))
275 if (id < 0 || id >= ep->fids)
/* fully cached: element is at a fixed offset from the cache base */
278 if (ep->flags & EFF_MEM) {
279 from = ep->cache + id * ep->size;
/* partially cached: refill the cache when ID falls outside [baseid, baseid+cids) */
281 if (ep->baseid + ep->cids <= id || ep->baseid > id) {
282 if (fillcache(ep, id) < 1)
285 from = ep->cache + (id - ep->baseid) * ep->size;
287 memcpy(into, from, ep->size);
/* per-table hook run after the element is fetched -- presumably for fix-ups; confirm where postread is set */
290 ep->postread(id, into);
295 * Fill cache of EP with elements starting at ID.
296 * If any were read, return their number.
297 * Else return -1 and leave the cache unchanged.
300 fillcache(struct empfile *ep, int start)
305 if (CANT_HAPPEN(ep->fd < 0 || !ep->cache))
/* position the file at the first element to read */
308 if (lseek(ep->fd, start * ep->size, SEEK_SET) == (off_t)-1) {
309 logerror("Error seeking %s (%s)", ep->file, strerror(errno));
/* read up to a full cache's worth of bytes */
314 n = ep->csize * ep->size;
316 ret = read(ep->fd, p, n);
/* retry on EAGAIN; any other error aborts the fill */
318 if (errno != EAGAIN) {
319 logerror("Error reading %s (%s)", ep->file, strerror(errno));
322 } else if (ret == 0) {
331 return -1; /* nothing read, old cache still ok */
/* number of whole elements now resident in the cache */
334 ep->cids = (p - ep->cache) / ep->size;
339 * Write COUNT elements from BUF to EP, starting at ID.
340 * Return 0 on success, -1 on error.
343 do_write(struct empfile *ep, void *buf, int id, int count)
347 struct emptypedstr *elt;
/* refuse a closed fd, a private mapping, or negative ID/COUNT */
349 if (CANT_HAPPEN(ep->fd < 0 || (ep->flags & EFF_PRIVATE)
350 || id < 0 || count < 0))
/* typed tables: repair wrong ef_type/uid header fields before writing */
353 if (ep->flags & EFF_TYPED) {
354 for (i = 0; i < count; i++) {
356 * TODO Oopses here could be due to bad data corruption.
357 * Fail instead of attempting to recover?
359 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
/* ep->uid is the table's own EF type code -- presumably; confirm in struct empfile */
360 if (CANT_HAPPEN(elt->ef_type != ep->uid))
361 elt->ef_type = ep->uid;
362 if (CANT_HAPPEN(elt->uid != id + i))
367 if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
368 logerror("Error seeking %s (%s)", ep->file, strerror(errno));
373 n = count * ep->size;
375 ret = write(ep->fd, p, n);
/* retry on EAGAIN; any other error aborts the write */
377 if (errno != EAGAIN) {
378 logerror("Error writing %s (%s)", ep->file, strerror(errno));
379 /* FIXME if this extended file, truncate back to old size */
392 * Write element ID into table TYPE from buffer FROM.
393 * FIXME pass buffer size!
394 * If table is file-backed and not privately mapped, write through
395 * cache straight to disk.
396 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
397 * Can write at the end of partially cached table.
398 * Return non-zero on success, zero on failure.
401 ef_write(int type, int id, void *from)
406 if (ef_check(type) < 0)
/* a private mapping without EFF_MEM has nowhere safe to write */
409 if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
/* per-table hook run before the element is stored */
412 ep->prewrite(id, from);
/* fully cached: ID must exist; partially cached: may append exactly at the end */
413 if (CANT_HAPPEN((ep->flags & EFF_MEM) ? id >= ep->fids : id > ep->fids))
414 return 0; /* not implemented */
/* write through to the file unless privately mapped */
415 if (!(ep->flags & EFF_PRIVATE)) {
416 if (do_write(ep, from, id, 1) < 0)
419 if (id >= ep->baseid && id < ep->baseid + ep->cids) {
420 /* update the cache if necessary */
421 to = ep->cache + (id - ep->baseid) * ep->size;
423 memcpy(to, from, ep->size);
425 if (id >= ep->fids) {
426 /* write beyond end of file extends it, take note */
433 * Extend table TYPE by COUNT elements.
434 * Any pointers obtained from ef_ptr() become invalid.
435 * Return non-zero on success, zero on failure.
438 ef_extend(int type, int count)
444 if (ef_check(type) < 0)
447 if (CANT_HAPPEN(count < 0))
/* fully cached: grow the cache first if the new elements don't fit */
451 if (ep->flags & EFF_MEM) {
452 if (id + count > ep->csize) {
/* a static cache cannot grow */
453 if (ep->flags & EFF_STATIC) {
454 logerror("Can't extend %s beyond %d elements",
455 ep->file, ep->csize);
458 if (!ef_realloc_cache(ep, id + count)) {
459 logerror("Can't extend %s to %d elements (%s)",
460 ep->file, id + count, strerror(errno));
/* blank the new elements in place, then flush them to the file */
464 p = ep->cache + id * ep->size;
465 do_blank(ep, p, id, count);
466 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
467 if (do_write(ep, p, id, count) < 0)
/* partially cached: blank and write one element at a time */
472 /* need a buffer, steal last cache slot */
473 if (ep->cids == ep->csize)
475 p = ep->cache + ep->cids * ep->size;
476 for (i = 0; i < count; i++) {
477 do_blank(ep, p, id + i, 1);
478 if (do_write(ep, p, id + i, 1) < 0)
487 * Initialize element ID for EP in BUF.
488 * FIXME pass buffer size!
491 ef_blank(int type, int id, void *buf)
493 if (ef_check(type) < 0)
/* delegate to do_blank() for a single element */
495 do_blank(&empfile[type], buf, id, 1);
499 * Initialize COUNT elements of EP in BUF, starting with element ID.
502 do_blank(struct empfile *ep, void *buf, int id, int count)
505 struct emptypedstr *elt;
/* zero everything, then fill in the typed header fields where applicable */
507 memset(buf, 0, count * ep->size);
508 if (ep->flags & EFF_TYPED) {
509 for (i = 0; i < count; i++) {
510 elt = (struct emptypedstr *)((char *)buf + i * ep->size);
511 elt->ef_type = ep->uid;
518 * Truncate table TYPE to COUNT elements.
519 * Any pointers obtained from ef_ptr() become invalid.
520 * Return non-zero on success, zero on failure.
523 ef_truncate(int type, int count)
527 if (ef_check(type) < 0)
/* can only shrink, never grow */
530 if (CANT_HAPPEN(count < 0 || count > ep->fids))
533 if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
534 if (ftruncate(ep->fd, count * ep->size) < 0) {
535 logerror("Can't truncate %s to %d elements (%s)",
536 ep->file, count, strerror(errno));
542 if (ep->flags & EFF_MEM) {
543 if (!(ep->flags & EFF_STATIC)) {
544 if (!ef_realloc_cache(ep, count)) {
545 logerror("Can't shrink cache after truncate");
546 /* continue with unshrunk cache */
/* drop cached elements past the new end of the table */
551 if (ep->baseid >= count)
553 else if (ep->cids > count - ep->baseid)
554 ep->cids = count - ep->baseid;
/*
 * Accessor fragments: each line below is the body of a small ef_*
 * getter returning one member of empfile[TYPE].
 */
563 return empfile[type].cadef;
569 return empfile[type].fids;
575 return empfile[type].flags;
/* modification time of the backing file; 0 when there is no usable fd */
581 if (empfile[type].fd <= 0)
583 return fdate(empfile[type].fd);
587 * Search for a table matching NAME, return its table type.
588 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
592 ef_byname(char *name)
/* stmtch() matches NAME against empfile[]'s name member -- presumably prefix matching; confirm stmtch() contract */
594 return stmtch(name, empfile, offsetof(struct empfile, name),
599 * Search CHOICES[] for a table type matching NAME, return it.
600 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
602 * CHOICES[] must be terminated with a negative value.
605 ef_byname_from(char *name, int choices[])
/* scan candidates until the negative terminator */
611 for (p = choices; *p >= 0; p++) {
612 if (ef_check(*p) < 0)
/* mineq() classifies NAME against each candidate table name -- presumably exact/prefix/no match; confirm mineq() contract */
614 switch (mineq(name, empfile[*p].name)) {
/* Fragment: map table TYPE to its name string; bad types get a fixed marker string. */
632 if (ef_check(type) < 0)
633 return "bad ef_type";
634 return empfile[type].name;
/* Fragment: reject TYPE outside [0, EF_MAX); the unsigned cast folds negatives into the out-of-range case. */
640 if (CANT_HAPPEN((unsigned)type >= EF_MAX))
646 * Ensure file-backed table contains ID.
647 * If necessary, extend it in steps of COUNT elements.
648 * Return non-zero on success, zero on failure.
651 ef_ensure_space(int type, int id, int count)
653 if (ef_check(type) < 0)
/* grow in COUNT-element chunks until ID is inside the table */
657 while (id >= empfile[type].fids) {
658 if (!ef_extend(type, count))