Backup where I am
parent bd4cda4502
commit b6e55b40fb
1 changed file with 117 additions and 30 deletions
src/store.rs (147 changed lines)
@@ -1,89 +1,176 @@
-use std::{collections::{HashMap, HashSet}, path::Path, sync::Arc};
+use std::{
+    collections::{BTreeMap, HashMap, HashSet},
+    path::Path,
+    sync::Arc,
+};
 
 use anyhow::Result;
 use uuid::Uuid;
 
 use crate::models::{Document, Project};
 
-pub trait Filter = Fn(&Item) -> bool;
-pub trait Map<T> = Fn(&Item) -> T;
+pub trait Filter = Fn(&Row) -> bool;
+pub trait Map<T> = Fn(&Row) -> T;
 
-pub enum Item {
+pub enum Row {
     Project(Box<Project>),
     Document(Box<Document>),
+    //User(Box<User>),
 }
 
 #[derive(Clone, Debug, Eq, Hash, PartialEq)]
-pub struct ItemId(Uuid);
+pub struct RowId(Uuid);
 
+// supports comparison operators
 #[derive(Clone, Debug, Eq, Hash, PartialEq)]
 pub struct IndexId(String);
 
-pub type Index = HashSet<ItemId>;
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub struct IndexValue(String);
+
+pub enum Index {
+    BTree(BTreeMap<IndexValue, HashSet<RowId>>),
+    Unique(HashMap<IndexValue, RowId>),
+}
 
 pub struct Store {
-    items: HashMap<ItemId, Arc<Item>>,
+    rows: HashMap<RowId, Arc<Row>>,
     indexes: HashMap<IndexId, Index>,
 
     keyspace: fjall::Keyspace,
-    persistent_items: fjall::PartitionHandle,
+    data_partition: fjall::PartitionHandle,
+    permissions_partition: fjall::PartitionHandle,
 }
 
 impl Store {
     pub fn new<P: AsRef<Path>>(path: P) -> Result<Store> {
-        let items = HashMap::new();
+        let rows = HashMap::new();
         let indexes = HashMap::new();
 
         let kv_config = fjall::Config::new(path)
             .flush_workers(4)
             .compaction_workers(4);
        let keyspace = fjall::Keyspace::open(kv_config)?;
-        let persistent_items =
-            keyspace.open_partition("items", fjall::PartitionCreateOptions::default())?;
+        let data_partition =
+            keyspace.open_partition("data", fjall::PartitionCreateOptions::default())?;
+        let permissions_partition =
+            keyspace.open_partition("permissions", fjall::PartitionCreateOptions::default())?;
 
         Ok(Store {
-            items,
+            rows,
             indexes,
             keyspace,
-            persistent_items,
+            data_partition,
+            permissions_partition,
         })
     }
 
-    pub fn set(&mut self, _item: &Item) -> Result<()> {
+    pub fn set(&mut self, _item: &Row) -> Result<()> {
         todo!()
     }
 
     /// Retrieves an item from the store by id. This is always an in-memory
     /// operation and cannot fail.
-    pub fn get(&self, id: ItemId) -> Option<Arc<Item>> {
-        self.items.get(&id).cloned()
+    pub fn get(&self, id: RowId) -> Option<Arc<Row>> {
+        self.rows.get(&id).cloned()
     }
+}
 
-    pub fn filter<F: Filter>(&self, _filter: F) -> Result<Vec<ItemId>> {
-        todo!()
-    }
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub struct Comparison {
+    index: IndexId,
+    value: IndexValue,
+    operator: ComparisonOperator,
+}
 
-    pub fn map<T, M: Map<T>>(&self, _transform: M) -> Result<Vec<T>> {
-        todo!()
-    }
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub enum ComparisonOperator {
+    Eq,
+    Gt,
+    Gte,
+    Lt,
+    Lte,
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub enum QueryOperation {
+    All,
+    Where(Comparison),
+    Limit(usize),
+    Offset(usize),
+    OrderBy(IndexId),
+}
+
+/// Describes a set of operations to query for data in the database. To read the
+/// data, this gets transformed into a QuerySet, which contains the results.
+pub struct Query {
+    operations: Vec<QueryOperation>,
+}
+
+impl Query {
+    pub fn new() -> Query {
+        Query {
+            operations: Vec::new(),
+        }
+    }
+
+    pub fn all(&mut self) -> &mut Self {
+        self.operations.push(QueryOperation::All);
+        self
+    }
+
+    /// Filters down the set of rows via comparison operators
+    pub fn restrict(
+        &mut self,
+        index: IndexId,
+        value: IndexValue,
+        operator: ComparisonOperator,
+    ) -> &mut Self {
+        self.operations.push(QueryOperation::Where(Comparison {
+            index,
+            value,
+            operator,
+        }));
+        self
+    }
+
+    pub fn limit(&mut self, limit: usize) -> &mut Self {
+        self.operations.push(QueryOperation::Limit(limit));
+        self
+    }
+
+    pub fn offset(&mut self, offset: usize) -> &mut Self {
+        self.operations.push(QueryOperation::Offset(offset));
+        self
+    }
+
+    pub fn order_by(&mut self, index: IndexId) -> &mut Self {
+        self.operations.push(QueryOperation::OrderBy(index));
+        self
+    }
+
+    pub fn execute(&self) -> QuerySet {
+        todo!()
+    }
 }
 
-// TODO: can filter and map be done in terms of an iterator?
-// like, the real Rust iterator trait?
-
-pub struct QuerySet {
-    indexes: Vec<Index>, // TODO: but use a ref, once this is figured out
-
-}
-
-impl QuerySet {
-}
+/// The results of a Query, this will contain the concrete Rows which have been
+/// retrieved.
+pub struct QuerySet {}
+
+impl QuerySet {}
 
 impl Iterator for QuerySet {
-    type Item = Item;
+    type Item = Row;
 
     fn next(&mut self) -> Option<Self::Item> {
         todo!()
     }
 }
+
+// How I'd like to do queries:
+//
+// store.query().via(OnlyProjects).
+//
+//
+//
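The trailing comment hints at the intended call style. Below is a rough usage sketch of the new Query builder, not part of the commit: it assumes the types from src/store.rs are in scope, that IndexId and IndexValue eventually grow public constructors (the IndexId::new and IndexValue::new calls are hypothetical), and that execute() gets implemented; today it is still todo!() and would panic.

// Sketch only; IndexId::new and IndexValue::new are assumed constructors
// that do not exist in this commit.
fn recent_documents() -> Vec<Row> {
    let mut query = Query::new();
    query
        .restrict(
            IndexId::new("kind"),
            IndexValue::new("document"),
            ComparisonOperator::Eq,
        )
        .order_by(IndexId::new("updated_at"))
        .limit(20);

    // QuerySet implements Iterator<Item = Row>, so the results can be
    // collected directly once execute() is filled in.
    query.execute().collect()
}

Recording each call as a QueryOperation instead of evaluating it immediately presumably leaves execute() free to plan the whole query against the available indexes before producing the QuerySet.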
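The new Index enum splits into a BTree variant and a Unique variant, with the "supports comparison operators" note sitting next to the index types. One way a Where(Comparison) could eventually be resolved against such an index is sketched below; nothing like this exists in the commit, and it additionally assumes IndexValue derives Ord and PartialOrd (the commit only derives Clone, Debug, Eq, Hash, PartialEq, while BTreeMap range scans require ordering).

use std::collections::HashSet;
use std::ops::Bound;

// Illustration only: resolve one Comparison against one in-memory Index.
// Assumes IndexValue: Ord, which this commit does not yet derive.
fn row_ids_for(
    index: &Index,
    value: &IndexValue,
    operator: &ComparisonOperator,
) -> HashSet<RowId> {
    match (index, operator) {
        // A unique index maps each value to at most one row, so it can only
        // answer equality.
        (Index::Unique(map), ComparisonOperator::Eq) => {
            map.get(value).cloned().into_iter().collect()
        }
        (Index::Unique(_), _) => HashSet::new(),
        // A BTree index keeps values ordered, so Gt/Gte/Lt/Lte become range scans.
        (Index::BTree(map), ComparisonOperator::Eq) => {
            map.get(value).cloned().unwrap_or_default()
        }
        (Index::BTree(map), op) => {
            let bounds: (Bound<&IndexValue>, Bound<&IndexValue>) = match op {
                ComparisonOperator::Gt => (Bound::Excluded(value), Bound::Unbounded),
                ComparisonOperator::Gte => (Bound::Included(value), Bound::Unbounded),
                ComparisonOperator::Lt => (Bound::Unbounded, Bound::Excluded(value)),
                ComparisonOperator::Lte => (Bound::Unbounded, Bound::Included(value)),
                ComparisonOperator::Eq => unreachable!("handled above"),
            };
            map.range(bounds)
                .flat_map(|(_, ids)| ids.iter().cloned())
                .collect()
        }
    }
}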