Skip to content

Commit

Permalink
cheri/revoke.h: style(9) pass
Browse files Browse the repository at this point in the history
  • Loading branch information
jrtc27 committed Jul 24, 2023
1 parent 7be0096 commit 561e951
Showing 1 changed file with 132 additions and 131 deletions.
263 changes: 132 additions & 131 deletions sys/cheri/revoke.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,13 @@
#include <cheri/cherireg.h> /* For CHERI_OTYPE_BITS */
#endif

/* Revocation epoch clock value; compared with serial number arithmetic. */
typedef uint64_t cheri_revoke_epoch_t;
/* Number of significant bits in the epoch counter. */
#define CHERI_REVOKE_ST_EPOCH_WIDTH	61

/*
* Epoch greater than orderings: a > b, a >= b.
*
* We use RFC1982 serial number arithmetic to deal with wrap-around.  We
* assume that there will never be any reason to ask about epochs so far
* apart that this is problematic.
*
Expand All @@ -59,20 +59,25 @@ typedef uint64_t cheri_revoke_epoch_t;
* XXX this almost surely belongs somewhere else.
*/

/*
 * Epoch strictly-greater-than: a > b.
 *
 * Uses RFC1982 serial number arithmetic so the ordering stays correct across
 * wrap-around of the CHERI_REVOKE_ST_EPOCH_WIDTH-bit epoch counter; assumes
 * the two epochs are never more than half the epoch space apart.
 */
static inline int
cheri_revoke_epoch_gt(cheri_revoke_epoch_t a, cheri_revoke_epoch_t b)
{
	/* Half of the epoch space; a larger distance indicates wrap-around. */
	static const cheri_revoke_epoch_t top =
	    1ULL << (CHERI_REVOKE_ST_EPOCH_WIDTH - 1);

	return ((a < b && b - a > top) || (a > b && a - b < top));
}
/*
 * Epoch greater-than-or-equal: a >= b, with the same serial number
 * arithmetic caveats as cheri_revoke_epoch_gt().
 */
static inline int
cheri_revoke_epoch_ge(cheri_revoke_epoch_t a, cheri_revoke_epoch_t b)
{
	return (a == b || cheri_revoke_epoch_gt(a, b));
}

/*
 * Does epoch `now` suffice to clear capabilities staged at epoch `then`?
 *
 * Arithmetically: round `then` up to the next odd value and require the
 * clock to have advanced at least two further ticks beyond that.
 * NOTE(review): presumably odd epochs denote in-progress revocation passes,
 * so this waits for the epoch open at (or after) `then` to fully close —
 * confirm against the revoker's epoch parity convention.
 */
static inline int
cheri_revoke_epoch_clears(cheri_revoke_epoch_t now, cheri_revoke_epoch_t then)
{
	return (cheri_revoke_epoch_ge(now, then + (then & 1) + 2));
}

#if __has_feature(capabilities)
Expand All @@ -96,16 +101,16 @@ cheri_revoke_is_revoked(const void * __capability cap)
*
* In particular, we used to define the shadow as the (padded) concatenation of
* four structures: the fine-grained memory shadow, the coarse-grained memory
* shadow, the otype shadow, and the info page.  Unpleasantly, we did so
* identically for all supporting architectures.  Moreover, while this worked
* fine so long as each architecture had a fixed size for each of those, it
* broke as soon as RISC-V supported both RV39 and RV47, because now we needed
* to know the dynamic-at-boot size of the address space to offset into the
* amalgamated structure.
*
* So instead, we propose a more uniform, more-MI layout that "grows" in two
* directions and we pass a pointer to the middle of the structure.  This allows
* the MD tests to use constant offsets even as the AS size grows.  Concretely,
* the shadow is now the concatenation of
*
* - the coarse-grained memory shadow bitmap (1 bit per page), with the bit
Expand All @@ -128,7 +133,7 @@ cheri_revoke_is_revoked(const void * __capability cap)
* XXX Note that all of this is somewhat conjecture: we don't actually use the
* otype or coarse shadows at the moment, the coarse shadow seems like it might
* go away in favor of some other mechanism, and some architectures additionally
* have other namespaces, like CIDs, that we might want to track.  There's also
* the intense desire to have CHERI+MTE simplify this dance enormously, assuming
* we get there.
*/
Expand All @@ -142,88 +147,85 @@ static const size_t VM_CHERI_REVOKE_BSZ_OTYPE =

/*************************** REVOKER CONTROL FLAGS ***************************/

/*
 * Finish the current revocation epoch this pass.  If there is no current
 * revocation epoch, start one and then finish it.
 *
 * If this flag is not given, then either start an epoch by doing the first
 * (full) pass or continue an epoch by doing an incremental pass.  (For the
 * load side algorithm, incremental passes except the opening one are
 * essentially no-ops.)
 */
#define CHERI_REVOKE_LAST_PASS		0x0001

/*
 * If this bit is set, the kernel is free to return without making progress.
 */
#define CHERI_REVOKE_NO_WAIT_OK		0x0002

/*
 * Ignore the given epoch argument and always attempt to advance the epoch
 * clock relative to its value "at the time of the call".
 */
#define CHERI_REVOKE_IGNORE_START	0x0004

/*
 * Do a pass only if an epoch is open after synchronization.
 *
 * XXX This has probably lost any utility it may ever have had.
 */
#define CHERI_REVOKE_ONLY_IF_OPEN	0x0008

/*
 * Ordinarily, cheri_revoke with CHERI_REVOKE_LAST_PASS attempts to minimize
 * the amount of work it does with the world held in single-threaded state.  It
 * will do up to two passes:
 *
 *   * an opening/incremental pass with the world running
 *
 *   * a pass with the world stopped, which visits kernel hoarders and
 *     recently-dirty pages (since the above pass)
 *
 * The first may be disabled by passing CHERI_REVOKE_LAST_NO_EARLY, causing
 * more work to be pushed into the world-stopped phase.
 *
 * Setting CHERI_REVOKE_LAST_NO_EARLY when not setting CHERI_REVOKE_LAST_PASS
 * will cause no passes to be performed.
 *
 * XXX This has probably lost any utility it may ever have had.
 */
#define CHERI_REVOKE_LAST_NO_EARLY	0x0010

/*
 * Force a synchronization with the PMAP before doing a non-LAST pass
 * (including the EARLY part of a LAST call).  This should let us measure the
 * impact of lazily synchronizing with the PMAP capdirty bits.
 *
 * This may also be useful if one were to do intermediate (i.e., neither
 * opening nor closing) passes, but at present we do not.
 *
 * Meaningless if CHERI_REVOKE_LAST_NO_EARLY also set.
 *
 * XXX This has probably lost any utility it may ever have had.
 */
#define CHERI_REVOKE_EARLY_SYNC		0x0020

/*
 * If opening a new revocation epoch, ignore the default mode and run this one
 * using the load-side algorithm.
 */
#define CHERI_REVOKE_FORCE_LOAD_SIDE	0x0040

/*
 * If opening a new revocation epoch, ignore the default mode and run this one
 * using the store-side algorithm.
 */
#define CHERI_REVOKE_FORCE_STORE_SIDE	0x0080

/*
 * Reset the stats counters to zero "after" reporting.
 */
#define CHERI_REVOKE_TAKE_STATS		0x1000

/*
* Information conveyed to userland about a given cheri_revoke scan.
Expand Down Expand Up @@ -274,7 +276,7 @@ struct cheri_revoke_stats {

/*
* Calls from the VM iterator to the VM fault handlers, in each of RO
* and RW states.  Exclusive of CLG fault handler invocations.
*/
uint32_t pages_faulted_ro;
uint32_t pages_faulted_rw;
Expand All @@ -286,7 +288,7 @@ struct cheri_revoke_stats {
uint32_t fault_visits;

/*
* Various fast-out paths of the VM iterator.  _fast is synthesized from
* the size of spans skipped, while _nofill and pages_skip itself are
* incremented by bailing attempts to find each vm_page_t structure.
*/
Expand All @@ -309,76 +311,75 @@ struct cheri_revoke_stats {

/*
 * XXX The use of two counters is mostly a bug, but it's never been worth
 * fixing.  It originated in response to some error handling paths where it
 * seemed like we might have to "rewind" the epoch counter to back out of a
 * failed revocation pass, but probably a different design would have been
 * better.  Apologies.  Eventually, there will be only one.
 */
struct cheri_revoke_epochs {
	cheri_revoke_epoch_t enqueue;	/* Label on entry to quarantine */
	cheri_revoke_epoch_t dequeue;	/* Gates removal from quarantine */
};

struct cheri_revoke_info {
	/* Base addresses of the revocation shadow spaces (see layout above). */
	const ptraddr_t base_mem_nomap;
	const ptraddr_t base_otype;

	/* Current revocation epoch clocks. */
	struct cheri_revoke_epochs epochs;
};

/* Out-parameter bundle for the cheri_revoke() system call. */
struct cheri_revoke_syscall_info {
	struct cheri_revoke_epochs epochs;	/* Epoch clocks after the call */
	struct cheri_revoke_stats stats;	/* Per-pass scan statistics */
};

#define CHERI_REVOKE_SHADOW_NOVMMAP	0x00	/* The ordinary shadow space */
#define CHERI_REVOKE_SHADOW_OTYPE	0x01	/* The otype shadow space */
/*
 * It is not possible to ask for the _MEM_MAP bitmask, as we intend that one
 * to be used by the kernel internally for munmap().  Maybe that's wrong?
 *
 * XXX Do we want a madvise mode to allow userspace to request revocation
 * of vm objects that we aren't deleting?  They *can* use the NOVMMAP
 * bitmask, but it's 256 times as many bits to flip.
 */

#define CHERI_REVOKE_SHADOW_INFO_STRUCT	0x03	/* R/O access to shared state */

/*
 * XXX This should go away as soon as we have allocators w/ per-arena shadows
 * or come to depend on CHERI+MTE, whichever happens first.  However, the
 * minimal-bookkeeping version of libmrs uses this, and that's very convenient.
 */
#define CHERI_REVOKE_SHADOW_NOVMMAP_ENTIRE 0x07	/* The entire shadow region */

#define CHERI_REVOKE_SHADOW_SPACE_MASK	0x07	/* Flag bits for shadow index */

#ifndef _KERNEL
/*
 * Drive the revocation state machine.
 *
 * If the current epoch clock is sufficient to cheri_revoke_epoch_clears
 * start_epoch, this call returns immediately, populating
 * statout->epoch_{init,fini} with the current clock's value.
 *
 * XXX if cheri_revoke_epoch becomes more complex than a scalar type, this
 * prototype will need to change or we'll need to be more explicit about it
 * being a hint or something.
 */
int cheri_revoke(int flags, cheri_revoke_epoch_t start_epoch,
    struct cheri_revoke_syscall_info *crsi);

/*
 * Request a capability to the shadow bitmap state for the given arena.  Flags
 * determine which space is requested; the arena cap must have appropriate
 * privileges.
 *
 * This call must fail if the resulting capability would not be representable
 * due to alignment constraints.
 */
int cheri_revoke_get_shadow(int flags, void * __capability arena,
    void * __capability * shadow);
#endif

#endif /* !__SYS_CHERI_REVOKE_H__ */

0 comments on commit 561e951

Please sign in to comment.