Skip to content

Commit

Permalink
Rename AppendVecId to AccountsFileId
Browse files Browse the repository at this point in the history
  • Loading branch information
yhchiang-sol committed Mar 22, 2024
1 parent cbd0369 commit 28ede6b
Show file tree
Hide file tree
Showing 13 changed files with 104 additions and 104 deletions.
10 changes: 5 additions & 5 deletions accounts-db/src/account_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
//! Note that AccountInfo is saved to disk buckets during runtime, but disk buckets are recreated at startup.
use {
crate::{
accounts_db::AppendVecId,
accounts_db::AccountsFileId,
accounts_file::ALIGN_BOUNDARY_OFFSET,
accounts_index::{IsCached, ZeroLamport},
},
Expand All @@ -21,7 +21,7 @@ pub type StoredSize = u32;
/// specify where account data is located
#[derive(Debug, PartialEq, Eq)]
pub enum StorageLocation {
AppendVec(AppendVecId, Offset),
AppendVec(AccountsFileId, Offset),
Cached,
}

Expand Down Expand Up @@ -85,7 +85,7 @@ pub struct PackedOffsetAndFlags {
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub struct AccountInfo {
/// index identifying the append storage
store_id: AppendVecId,
store_id: AccountsFileId,

account_offset_and_flags: AccountOffsetAndFlags,
}
Expand Down Expand Up @@ -121,7 +121,7 @@ impl IsCached for StorageLocation {
}

/// We have to have SOME value for store_id when we are cached
const CACHE_VIRTUAL_STORAGE_ID: AppendVecId = AppendVecId::MAX;
const CACHE_VIRTUAL_STORAGE_ID: AccountsFileId = AccountsFileId::MAX;

impl AccountInfo {
pub fn new(storage_location: StorageLocation, lamports: u64) -> Self {
Expand Down Expand Up @@ -160,7 +160,7 @@ impl AccountInfo {
(offset / ALIGN_BOUNDARY_OFFSET) as OffsetReduced
}

pub fn store_id(&self) -> AppendVecId {
pub fn store_id(&self) -> AccountsFileId {
// if the account is in a cached store, the store_id is meaningless
assert!(!self.is_cached());
self.store_id
Expand Down
8 changes: 4 additions & 4 deletions accounts-db/src/account_storage.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
//! Manage the map of slot -> append vec

use {
crate::accounts_db::{AccountStorageEntry, AppendVecId},
crate::accounts_db::{AccountStorageEntry, AccountsFileId},
dashmap::DashMap,
solana_sdk::clock::Slot,
std::sync::Arc,
Expand All @@ -15,7 +15,7 @@ pub struct AccountStorageReference {
pub storage: Arc<AccountStorageEntry>,
/// id can be read from 'storage', but it is an atomic read.
/// id will never change while a storage is held, so we store it separately here for faster runtime lookup in 'get_account_storage_entry'
pub id: AppendVecId,
pub id: AccountsFileId,
}

pub type AccountStorageMap = DashMap<Slot, AccountStorageReference>;
Expand Down Expand Up @@ -50,7 +50,7 @@ impl AccountStorage {
pub(crate) fn get_account_storage_entry(
&self,
slot: Slot,
store_id: AppendVecId,
store_id: AccountsFileId,
) -> Option<Arc<AccountStorageEntry>> {
let lookup_in_map = || {
self.map
Expand Down Expand Up @@ -343,7 +343,7 @@ pub(crate) mod tests {
}

impl AccountStorage {
fn get_test_storage_with_id(&self, id: AppendVecId) -> Arc<AccountStorageEntry> {
fn get_test_storage_with_id(&self, id: AccountsFileId) -> Arc<AccountStorageEntry> {
let slot = 0;
// add a map store
let common_store_path = Path::new("");
Expand Down
44 changes: 22 additions & 22 deletions accounts-db/src/accounts_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -637,7 +637,7 @@ struct StorageSizeAndCount {
/// number of accounts in the storage including both alive and dead accounts
pub count: usize,
}
type StorageSizeAndCountMap = DashMap<AppendVecId, StorageSizeAndCount>;
type StorageSizeAndCountMap = DashMap<AccountsFileId, StorageSizeAndCount>;

impl GenerateIndexTimings {
pub fn report(&self, startup_stats: &StartupStats) {
Expand Down Expand Up @@ -764,8 +764,8 @@ impl<'a> MultiThreadProgress<'a> {
}

/// An offset into the AccountsDb::storage vector
pub type AtomicAppendVecId = AtomicU32;
pub type AppendVecId = u32;
pub type AtomicAccountsFileId = AtomicU32;
pub type AccountsFileId = u32;

type AccountSlots = HashMap<Pubkey, HashSet<Slot>>;
type SlotOffsets = HashMap<Slot, HashSet<usize>>;
Expand Down Expand Up @@ -1005,7 +1005,7 @@ struct CleanKeyTimings {
/// Persistent storage structure holding the accounts
#[derive(Debug)]
pub struct AccountStorageEntry {
pub(crate) id: AppendVecId,
pub(crate) id: AccountsFileId,

pub(crate) slot: Slot,

Expand All @@ -1031,7 +1031,7 @@ pub struct AccountStorageEntry {
}

impl AccountStorageEntry {
pub fn new(path: &Path, slot: Slot, id: AppendVecId, file_size: u64) -> Self {
pub fn new(path: &Path, slot: Slot, id: AccountsFileId, file_size: u64) -> Self {
let tail = AccountsFile::file_name(slot, id);
let path = Path::new(path).join(tail);
let accounts = AccountsFile::AppendVec(AppendVec::new(&path, true, file_size as usize));
Expand All @@ -1048,7 +1048,7 @@ impl AccountStorageEntry {

pub fn new_existing(
slot: Slot,
id: AppendVecId,
id: AccountsFileId,
accounts: AccountsFile,
num_accounts: usize,
) -> Self {
Expand Down Expand Up @@ -1115,7 +1115,7 @@ impl AccountStorageEntry {
self.slot
}

pub fn append_vec_id(&self) -> AppendVecId {
pub fn append_vec_id(&self) -> AccountsFileId {
self.id
}

Expand Down Expand Up @@ -1297,7 +1297,7 @@ pub struct AccountsDb {
read_only_accounts_cache: ReadOnlyAccountsCache,

/// distribute the accounts across storage lists
pub next_id: AtomicAppendVecId,
pub next_id: AtomicAccountsFileId,

/// Set of shrinkable stores organized by map of slot to append_vec_id
pub shrink_candidate_slots: Mutex<ShrinkCandidates>,
Expand Down Expand Up @@ -2336,7 +2336,7 @@ impl AccountsDb {
READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
),
uncleaned_pubkeys: DashMap::new(),
next_id: AtomicAppendVecId::new(0),
next_id: AtomicAccountsFileId::new(0),
shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()),
write_cache_limit_bytes: None,
write_version: AtomicU64::new(0),
Expand Down Expand Up @@ -2504,9 +2504,9 @@ impl AccountsDb {
self.base_working_path.clone()
}

fn next_id(&self) -> AppendVecId {
fn next_id(&self) -> AccountsFileId {
let next_id = self.next_id.fetch_add(1, Ordering::AcqRel);
assert!(next_id != AppendVecId::MAX, "We've run out of storage ids!");
assert!(next_id != AccountsFileId::MAX, "We've run out of storage ids!");
next_id
}

Expand Down Expand Up @@ -6322,9 +6322,9 @@ impl AccountsDb {
/// This runs prior to the storages being put in AccountsDb.storage
pub fn combine_multiple_slots_into_one_at_startup(
path: &Path,
id: AppendVecId,
id: AccountsFileId,
slot: Slot,
slot_stores: &HashMap<AppendVecId, Arc<AccountStorageEntry>>,
slot_stores: &HashMap<AccountsFileId, Arc<AccountStorageEntry>>,
) -> Arc<AccountStorageEntry> {
let size = slot_stores.values().map(|storage| storage.capacity()).sum();
let storage = AccountStorageEntry::new(path, slot, id, size);
Expand Down Expand Up @@ -8641,7 +8641,7 @@ impl AccountsDb {
&self,
storage: &Arc<AccountStorageEntry>,
slot: Slot,
store_id: AppendVecId,
store_id: AccountsFileId,
rent_collector: &RentCollector,
storage_info: &StorageSizeAndCountMap,
) -> SlotIndexGenerationInfo {
Expand Down Expand Up @@ -9609,7 +9609,7 @@ pub mod tests {

impl CurrentAncientAppendVec {
/// note this requires that 'slot_and_append_vec' is Some
fn append_vec_id(&self) -> AppendVecId {
fn append_vec_id(&self) -> AccountsFileId {
self.append_vec().append_vec_id()
}
}
Expand Down Expand Up @@ -10782,7 +10782,7 @@ pub mod tests {
write_version: StoredMetaWriteVersion,
slot: Slot,
pubkey: &Pubkey,
id: AppendVecId,
id: AccountsFileId,
mark_alive: bool,
account_data_size: Option<u64>,
fill_percentage: u64,
Expand All @@ -10808,7 +10808,7 @@ pub mod tests {
write_version: StoredMetaWriteVersion,
slot: Slot,
pubkey: &Pubkey,
id: AppendVecId,
id: AccountsFileId,
mark_alive: bool,
account_data_size: Option<u64>,
) -> Arc<AccountStorageEntry> {
Expand Down Expand Up @@ -13313,7 +13313,7 @@ pub mod tests {
AccountSharedData::new(0, 0, AccountSharedData::default().owner());

// set 'next' id to the max possible value
db.next_id.store(AppendVecId::MAX, Ordering::Release);
db.next_id.store(AccountsFileId::MAX, Ordering::Release);
let slots = 3;
let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
// write unique keys to successive slots
Expand All @@ -13340,7 +13340,7 @@ pub mod tests {
AccountSharedData::new(0, 0, AccountSharedData::default().owner());

// set 'next' id to the max possible value
db.next_id.store(AppendVecId::MAX, Ordering::Release);
db.next_id.store(AccountsFileId::MAX, Ordering::Release);
let slots = 3;
let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
// write unique keys to successive slots
Expand All @@ -13350,7 +13350,7 @@ pub mod tests {
db.calculate_accounts_delta_hash(slot);
db.add_root_and_flush_write_cache(slot);
// reset next_id to what it was previously to cause us to re-use the same id
db.next_id.store(AppendVecId::MAX, Ordering::Release);
db.next_id.store(AccountsFileId::MAX, Ordering::Release);
});
let ancestors = Ancestors::default();
keys.iter().for_each(|key| {
Expand Down Expand Up @@ -17236,7 +17236,7 @@ pub mod tests {
.max()
.unwrap_or(999);
for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) {
let id = starting_id + (i as AppendVecId);
let id = starting_id + (i as AccountsFileId);
let pubkey1 = solana_sdk::pubkey::new_rand();
let storage = sample_storage_with_entries_id_fill_percentage(
tf,
Expand Down Expand Up @@ -17285,7 +17285,7 @@ pub mod tests {
.max()
.unwrap_or(999);
for i in 0..num_slots {
let id = starting_id + (i as AppendVecId);
let id = starting_id + (i as AccountsFileId);
let pubkey1 = solana_sdk::pubkey::new_rand();
let storage = sample_storage_with_entries_id(
tf,
Expand Down
4 changes: 2 additions & 2 deletions accounts-db/src/accounts_file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use {
account_storage::meta::{
StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta,
},
accounts_db::AppendVecId,
accounts_db::AccountsFileId,
accounts_hash::AccountHash,
append_vec::{AppendVec, AppendVecError},
storable_accounts::StorableAccounts,
Expand Down Expand Up @@ -104,7 +104,7 @@ impl AccountsFile {
}
}

pub fn file_name(slot: Slot, id: AppendVecId) -> String {
pub fn file_name(slot: Slot, id: AccountsFileId) -> String {
format!("{slot}.{id}")
}

Expand Down
6 changes: 3 additions & 3 deletions accounts-db/src/sorted_storages.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ mod tests {
use {
super::*,
crate::{
accounts_db::{AccountStorageEntry, AppendVecId},
accounts_db::{AccountStorageEntry, AccountsFileId},
accounts_file::AccountsFile,
append_vec::AppendVec,
},
Expand Down Expand Up @@ -297,7 +297,7 @@ mod tests {
assert!(
(slot != 2 && slot != 4)
^ storage
.map(|storage| storage.append_vec_id() == (slot as AppendVecId))
.map(|storage| storage.append_vec_id() == (slot as AccountsFileId))
.unwrap_or(false),
"slot: {slot}, storage: {storage:?}"
);
Expand Down Expand Up @@ -440,7 +440,7 @@ mod tests {
);
}

fn create_sample_store(id: AppendVecId) -> Arc<AccountStorageEntry> {
fn create_sample_store(id: AccountsFileId) -> Arc<AccountStorageEntry> {
let tf = crate::append_vec::test_utils::get_append_vec_path("create_sample_store");
let (_temp_dirs, paths) = crate::accounts_db::get_temp_accounts_paths(1).unwrap();
let size: usize = 123;
Expand Down
6 changes: 3 additions & 3 deletions docs/src/implemented-proposals/persistent-account-storage.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,11 @@ The underlying memory for an AppendVec is a memory-mapped file. Memory-mapped fi
The account index is designed to support a single index for all the currently forked Accounts.

```text
type AppendVecId = usize;
type AccountsFileId = usize;
type Fork = u64;
struct AccountMap(Hashmap<Fork, (AppendVecId, u64)>);
struct AccountMap(HashMap<Fork, (AccountsFileId, u64)>);
type AccountIndex = HashMap<Pubkey, AccountMap>;
```
Expand All @@ -39,7 +39,7 @@ The index is a map of account Pubkeys to a map of Forks and the location of the
pub fn load_slow(&self, id: Fork, pubkey: &Pubkey) -> Option<&Account>
```

The read is satisfied by pointing to a memory-mapped location in the `AppendVecId` at the stored offset. A reference can be returned without a copy.
The read is satisfied by pointing into the memory-mapped accounts file identified by the `AccountsFileId`, at the stored offset. A reference can be returned without a copy.

### Root Forks

Expand Down
10 changes: 5 additions & 5 deletions runtime/src/bank/serde_snapshot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ mod tests {
snapshot_bank_utils,
snapshot_utils::{
self, create_tmp_accounts_dir_for_tests, get_storages_to_serialize, ArchiveFormat,
StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION,
StorageAndNextAccountsFileId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION,
},
status_cache::StatusCache,
},
Expand All @@ -23,7 +23,7 @@ mod tests {
account_storage::{AccountStorageMap, AccountStorageReference},
accounts_db::{
get_temp_accounts_paths, AccountShrinkThreshold, AccountStorageEntry, AccountsDb,
AtomicAppendVecId,
AtomicAccountsFileId,
},
accounts_file::{AccountsFile, AccountsFileError},
accounts_hash::{AccountsDeltaHash, AccountsHash},
Expand Down Expand Up @@ -53,7 +53,7 @@ mod tests {
fn copy_append_vecs<P: AsRef<Path>>(
accounts_db: &AccountsDb,
output_dir: P,
) -> Result<StorageAndNextAppendVecId, AccountsFileError> {
) -> Result<StorageAndNextAccountsFileId, AccountsFileError> {
let storage_entries = accounts_db.get_snapshot_storages(RangeFull).0;
let storage: AccountStorageMap = AccountStorageMap::with_capacity(storage_entries.len());
let mut next_append_vec_id = 0;
Expand Down Expand Up @@ -84,9 +84,9 @@ mod tests {
);
}

Ok(StorageAndNextAppendVecId {
Ok(StorageAndNextAccountsFileId {
storage,
next_append_vec_id: AtomicAppendVecId::new(next_append_vec_id + 1),
next_append_vec_id: AtomicAccountsFileId::new(next_append_vec_id + 1),
})
}

Expand Down
Loading

0 comments on commit 28ede6b

Please sign in to comment.