Generation numbers to catch write back of stale copies

Oops when a stale copy is written back, i.e. the processor was yielded
since the copy was made.  Such bugs are difficult to spot.  Sequence
numbers catch them when they do actual harm (they also catch different
bugs).  Generation numbers catch them even when they don't.
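
For illustration, a minimal sketch of the bug pattern this is meant to
catch (not code from this commit; frob_sector() and prompt_player()
are made-up stand-ins for a command and for a call that may block):

    static int
    frob_sector(coord x, coord y)
    {
        struct sctstr sect;

        getsect(x, y, &sect);       /* private copy of the sector */
        if (prompt_player() < 0)    /* may yield the processor; the copy
                                       can now be out of date */
            return RET_FAIL;
        sect.sct_effic = 100;       /* update based on possibly stale data */
        putsect(&sect);             /* write back of a stale copy: this is
                                       what should oops */
        return RET_OK;
    }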

New ef_generation to count generations.  Call new ef_make_stale() to
step it whenever the processor may be yielded.
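
The counter itself is just an unsigned variable stepped by
ef_make_stale() (see the diff below).  A hedged sketch of how a yield
point would use it; yield_processor() is made up, the commit itself
hooks the existing yield paths:

    /* hypothetical wrapper around a yield point */
    static void
    yield_processor(void)
    {
        ef_make_stale();    /* every copy made so far is now suspect */
        empth_yield();      /* other threads may run and update the tables */
    }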

New struct emptypedstr member generation.  Make sure all members of
union empobj_storage share it.  It is only used in copies; its value on
disk and in the cache is meaningless.  Set it to ef_generation by
calling new ef_mark_fresh() when making copies in ef_read() and
ef_blank().  Do the same in obj_changed() to make check_sect_ok() &
friends freshen their argument when it is unchanged.  Copies with
generation other than ef_generation are stale.
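
That keeps the established re-check pattern working across a yield:
when the copy turns out to be unchanged, check_sect_ok() (via
obj_changed()) now also freshens it, so writing it back afterwards does
not oops.  A rough sketch of such a caller, fragment only, with x, y
coming from surrounding command code:

    struct sctstr sect;

    getsect(x, y, &sect);
    /* ... code that may yield the processor ... */
    if (!check_sect_ok(&sect))  /* sector changed under us?  bail out */
        return RET_FAIL;
    /* unchanged: the copy was freshened again and is safe to write back */
    sect.sct_effic = 100;
    putsect(&sect);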

Oops in ef_write() when a stale copy is written back.
Author: Markus Armbruster
Date:   2009-03-01 18:56:41 +01:00
Commit: 20b31defe7
Parent: f8be963202
19 changed files with 82 additions and 3 deletions


@@ -52,9 +52,12 @@ static int do_read(struct empfile *, void *, int, int);
 static int do_write(struct empfile *, void *, int, int);
 static unsigned get_seqno(struct empfile *, int);
 static void new_seqno(struct empfile *, void *);
+static void must_be_fresh(struct empfile *, void *);
 static void do_blank(struct empfile *, void *, int, int);
 static int ef_check(int);
 
+static unsigned ef_generation;
+
 /*
  * Open the file-backed table TYPE (EF_SECTOR, ...).
  * HOW are flags to control operation.  Naturally, immutable flags are
@@ -338,6 +341,7 @@ ef_read(int type, int id, void *into)
         cachep = ep->cache + (id - ep->baseid) * ep->size;
     }
     memcpy(into, cachep, ep->size);
+    ef_mark_fresh(type, into);
 
     if (ep->postread)
         ep->postread(id, into);
@@ -500,9 +504,11 @@ ef_write(int type, int id, void *from)
         if (ep->onresize && ep->onresize(type) < 0)
             return 0;
     }
-    if (id >= ep->baseid && id < ep->baseid + ep->cids)
+    if (id >= ep->baseid && id < ep->baseid + ep->cids) {
         cachep = ep->cache + (id - ep->baseid) * ep->size;
-    else
+        if (cachep != from)
+            must_be_fresh(ep, from);
+    } else
         cachep = NULL;
     if (ep->prewrite)
         ep->prewrite(id, cachep, from);
@@ -586,6 +592,35 @@ new_seqno(struct empfile *ep, void *buf)
     elt->seqno = old_seqno + 1;
 }
 
+void
+ef_make_stale(void)
+{
+    ef_generation++;
+}
+
+void
+ef_mark_fresh(int type, void *buf)
+{
+    struct empfile *ep;
+
+    if (ef_check(type) < 0)
+        return;
+    ep = &empfile[type];
+    if (!(ep->flags & EFF_TYPED))
+        return;
+    ((struct emptypedstr *)buf)->generation = ef_generation;
+}
+
+static void
+must_be_fresh(struct empfile *ep, void *buf)
+{
+    struct emptypedstr *elt = buf;
+
+    if (!(ep->flags & EFF_TYPED))
+        return;
+    CANT_HAPPEN(elt->generation != ef_generation);
+}
+
 /*
  * Extend table TYPE by COUNT elements.
  * Any pointers obtained from ef_ptr() become invalid.
@@ -663,6 +698,7 @@ ef_blank(int type, int id, void *buf)
         elt = buf;
         elt->seqno = get_seqno(ep, elt->uid);
     }
+    ef_mark_fresh(type, buf);
 }
 
 /*