Skip to content
This repository was archived by the owner on Sep 12, 2018. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ chrono = "0.4"
failure = "0.1.1"
lazy_static = "0.2"
time = "0.1"
log = "0.4"
uuid = { version = "0.5", features = ["v4", "serde"] }

[dependencies.rusqlite]
Expand Down
102 changes: 98 additions & 4 deletions db/src/timelines.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,11 @@ fn move_transactions_to(conn: &rusqlite::Connection, tx_ids: &[Entid], new_timel
Ok(())
}

/// Delete every row in the `datoms` table whose entity (`e`) column is
/// `tx_id`. Used after a timeline rewind to scrub the phantom
/// `txInstant` datom that `TransactorAction::Materialize` leaves behind
/// (see the caller in `move_from_main_timeline`).
fn remove_tx_from_datoms(conn: &rusqlite::Connection, tx_id: Entid) -> Result<()> {
    conn.execute("DELETE FROM datoms WHERE e = ?", &[&tx_id])
        // The number of deleted rows is irrelevant; only failure matters.
        .map(|_deleted_rows| ())
        .map_err(|e| e.into())
}

fn is_timeline_empty(conn: &rusqlite::Connection, timeline: Entid) -> Result<bool> {
let mut stmt = conn.prepare("SELECT timeline FROM timelined_transactions WHERE timeline = ? GROUP BY timeline")?;
let rows = stmt.query_and_then(&[&timeline], |row| -> Result<i64> {
Expand Down Expand Up @@ -152,11 +157,22 @@ pub fn move_from_main_timeline(conn: &rusqlite::Connection, schema: &Schema,
let reversed_terms = reversed_terms_for(conn, *tx_id)?;

// Rewind schema and datoms.
let (_, _, new_schema, _) = transact_terms_with_action(
let (report, _, new_schema, _) = transact_terms_with_action(
conn, partition_map.clone(), schema, schema, NullWatcher(),
reversed_terms.into_iter().map(|t| t.rewrap()),
InternSet::new(), TransactorAction::Materialize
)?;

// Rewind operation generated a 'tx' and a 'txInstant' assertion, which got
// inserted into the 'datoms' table (due to TransactorAction::Materialize).
// This is problematic. If we transact a few more times, the transactor will
// generate the same 'tx', but with a different 'txInstant'.
// The end result will be a transaction which has a phantom
// retraction of a txInstant, since transactor operates against the state of
// 'datoms', and not against the 'transactions' table.
// A quick workaround is to just remove the bad txInstant datom.
// See test_clashing_tx_instants test case.
remove_tx_from_datoms(conn, report.tx_id)?;
last_schema = new_schema;
}

Expand Down Expand Up @@ -191,7 +207,7 @@ mod tests {
};
conn.partition_map = pmap.clone();
}

#[test]
fn test_pop_simple() {
let mut conn = TestConn::default();
Expand Down Expand Up @@ -284,7 +300,85 @@ mod tests {
"#);
}


// Regression test for the timeline-move cleanup in
// `move_from_main_timeline`: rewinding a transaction must not leave a
// phantom 'txInstant' datom behind in the 'datoms' table, otherwise a
// later transaction that reuses the same tx entid (but a different
// txInstant) will appear to retract the stale instant.
#[test]
fn test_clashing_tx_instants() {
let mut conn = TestConn::default();
conn.sanitized_partition_map();

// Transact a basic schema.
assert_transact!(conn, r#"
[{:db/ident :person/name :db/valueType :db.type/string :db/cardinality :db.cardinality/one :db/unique :db.unique/identity :db/index true}]
"#);

// Make an assertion against our schema.
assert_transact!(conn, r#"[{:person/name "Vanya"}]"#);

// Move that assertion away from the main timeline.
// This is the rewind that used to leave a bad txInstant datom behind.
let (new_schema, new_partition_map) = move_from_main_timeline(
&conn.sqlite, &conn.schema, conn.partition_map.clone(),
conn.last_tx_id().., 1
).expect("moved single tx");
update_conn(&mut conn, &new_schema, &new_partition_map);

// Assert that our datoms are now just the schema.
assert_matches!(conn.datoms(), "
[[?e :db/ident :person/name]
[?e :db/valueType :db.type/string]
[?e :db/cardinality :db.cardinality/one]
[?e :db/unique :db.unique/identity]
[?e :db/index true]]");
// Same for transactions.
assert_matches!(conn.transactions(), "
[[[?e :db/ident :person/name ?tx true]
[?e :db/valueType :db.type/string ?tx true]
[?e :db/cardinality :db.cardinality/one ?tx true]
[?e :db/unique :db.unique/identity ?tx true]
[?e :db/index true ?tx true]
[?tx :db/txInstant ?ms ?tx true]]]");

// Re-assert our initial fact against our schema.
// This transaction reuses the tx entid freed up by the move above.
assert_transact!(conn, r#"
[[:db/add "tempid" :person/name "Vanya"]]"#);

// Now, change that fact. This is the "clashing" transaction, if we're
// performing a timeline move using the transactor.
assert_transact!(conn, r#"
[[:db/add (lookup-ref :person/name "Vanya") :person/name "Ivan"]]"#);

// Assert that our datoms are now the schema and the final assertion.
assert_matches!(conn.datoms(), r#"
[[?e1 :db/ident :person/name]
[?e1 :db/valueType :db.type/string]
[?e1 :db/cardinality :db.cardinality/one]
[?e1 :db/unique :db.unique/identity]
[?e1 :db/index true]
[?e2 :person/name "Ivan"]]
"#);

// Assert that we have three correct looking transactions.
// This will fail if we're not cleaning up the 'datoms' table
// after the timeline move.
assert_matches!(conn.transactions(), r#"
[[
[?e1 :db/ident :person/name ?tx1 true]
[?e1 :db/valueType :db.type/string ?tx1 true]
[?e1 :db/cardinality :db.cardinality/one ?tx1 true]
[?e1 :db/unique :db.unique/identity ?tx1 true]
[?e1 :db/index true ?tx1 true]
[?tx1 :db/txInstant ?ms1 ?tx1 true]
]
[
[?e2 :person/name "Vanya" ?tx2 true]
[?tx2 :db/txInstant ?ms2 ?tx2 true]
]
[
[?e2 :person/name "Ivan" ?tx3 true]
[?e2 :person/name "Vanya" ?tx3 false]
[?tx3 :db/txInstant ?ms3 ?tx3 true]
]]
"#);
}

#[test]
fn test_pop_schema() {
let mut conn = TestConn::default();
Expand Down Expand Up @@ -432,7 +526,7 @@ mod tests {
assert_matches!(conn.datoms(), "[]");
assert_matches!(conn.transactions(), "[]");
assert_eq!(conn.partition_map, partition_map0);

// Assert all of schema's components individually, for some guidance in case of failures:
assert_eq!(conn.schema.entid_map, schema0.entid_map);
assert_eq!(conn.schema.ident_map, schema0.ident_map);
Expand Down
1 change: 0 additions & 1 deletion ffi/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,6 @@ pub use mentat::{
QueryResults,
RelResult,
Store,
Syncable,
TypedValue,
TxObserver,
TxReport,
Expand Down
11 changes: 10 additions & 1 deletion public-traits/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,12 @@ path = "lib.rs"
[features]
default = ["syncable"]
sqlcipher = ["rusqlite/sqlcipher"]
syncable = ["tolstoy_traits"]
syncable = ["tolstoy_traits", "hyper", "serde_json"]

[dependencies]
failure = "0.1.1"
failure_derive = "0.1.1"
uuid = "0.5"

[dependencies.rusqlite]
version = "0.13"
Expand Down Expand Up @@ -44,3 +45,11 @@ path = "../sql-traits"
[dependencies.tolstoy_traits]
path = "../tolstoy-traits"
optional = true

[dependencies.hyper]
version = "0.11"
optional = true

[dependencies.serde_json]
version = "1.0"
optional = true
60 changes: 57 additions & 3 deletions public-traits/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,10 @@
use std; // To refer to std::result::Result.

use std::collections::BTreeSet;
use std::error::Error;

use rusqlite;
use uuid;

use edn;

Expand Down Expand Up @@ -44,6 +46,12 @@ use tolstoy_traits::errors::{
TolstoyError,
};

#[cfg(feature = "syncable")]
use hyper;

#[cfg(feature = "syncable")]
use serde_json;

pub type Result<T> = std::result::Result<T, MentatError>;

#[derive(Debug, Fail)]
Expand Down Expand Up @@ -97,8 +105,8 @@ pub enum MentatError {

// It would be better to capture the underlying `rusqlite::Error`, but that type doesn't
// implement many useful traits, including `Clone`, `Eq`, and `PartialEq`.
#[fail(display = "SQL error: {}", _0)]
RusqliteError(String),
#[fail(display = "SQL error: {}, cause: {}", _0, _1)]
RusqliteError(String, String),

#[fail(display = "{}", _0)]
EdnParseError(#[cause] edn::ParseError),
Expand All @@ -118,9 +126,24 @@ pub enum MentatError {
#[fail(display = "{}", _0)]
SQLError(#[cause] SQLError),

#[fail(display = "{}", _0)]
UuidError(#[cause] uuid::ParseError),

#[cfg(feature = "syncable")]
#[fail(display = "{}", _0)]
TolstoyError(#[cause] TolstoyError),

#[cfg(feature = "syncable")]
#[fail(display = "{}", _0)]
NetworkError(#[cause] hyper::Error),

#[cfg(feature = "syncable")]
#[fail(display = "{}", _0)]
UriError(#[cause] hyper::error::UriError),

#[cfg(feature = "syncable")]
#[fail(display = "{}", _0)]
SerializationError(#[cause] serde_json::Error),
}

impl From<std::io::Error> for MentatError {
Expand All @@ -131,7 +154,17 @@ impl From<std::io::Error> for MentatError {

/// Convert a `rusqlite::Error` into `MentatError::RusqliteError`,
/// capturing both the error's message and, when present, the message of
/// its underlying cause. The `rusqlite::Error` itself is not stored
/// because it doesn't implement `Clone`, `Eq`, or `PartialEq`.
impl From<rusqlite::Error> for MentatError {
    fn from(error: rusqlite::Error) -> MentatError {
        // `cause()` may be absent; fall back to an empty string so the
        // two-part display format stays stable.
        let cause = error.cause().map(|e| e.to_string()).unwrap_or_default();
        MentatError::RusqliteError(error.to_string(), cause)
    }
}

/// Allow `?` on UUID-parsing results by wrapping the error in
/// `MentatError::UuidError`.
impl From<uuid::ParseError> for MentatError {
    fn from(err: uuid::ParseError) -> MentatError {
        MentatError::UuidError(err)
    }
}

Expand Down Expand Up @@ -177,3 +210,24 @@ impl From<TolstoyError> for MentatError {
MentatError::TolstoyError(error)
}
}

/// Allow `?` on JSON (de)serialization results produced by the sync
/// feature, wrapping failures in `MentatError::SerializationError`.
#[cfg(feature = "syncable")]
impl From<serde_json::Error> for MentatError {
    fn from(err: serde_json::Error) -> MentatError {
        MentatError::SerializationError(err)
    }
}

/// Allow `?` on hyper network operations used by the sync feature,
/// wrapping failures in `MentatError::NetworkError`.
#[cfg(feature = "syncable")]
impl From<hyper::Error> for MentatError {
    fn from(err: hyper::Error) -> MentatError {
        MentatError::NetworkError(err)
    }
}

/// Allow `?` on URI parsing done by the sync feature, wrapping failures
/// in `MentatError::UriError`.
#[cfg(feature = "syncable")]
impl From<hyper::error::UriError> for MentatError {
    fn from(err: hyper::error::UriError) -> MentatError {
        MentatError::UriError(err)
    }
}
11 changes: 10 additions & 1 deletion public-traits/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,16 @@ extern crate db_traits;
extern crate query_pull_traits;
extern crate query_projector_traits;
extern crate query_algebrizer_traits;
extern crate tolstoy_traits;
extern crate sql_traits;
extern crate uuid;

#[cfg(feature = "syncable")]
extern crate tolstoy_traits;

#[cfg(feature = "syncable")]
extern crate hyper;

#[cfg(feature = "syncable")]
extern crate serde_json;

pub mod errors;
15 changes: 0 additions & 15 deletions src/conn.rs
Original file line number Diff line number Diff line change
Expand Up @@ -118,10 +118,6 @@ pub struct Conn {
pub(crate) tx_observer_service: Mutex<TxObservationService>,
}

pub trait Syncable {
fn sync(&mut self, server_uri: &String, user_uuid: &String) -> Result<()>;
}

impl Conn {
// Intentionally not public.
fn new(partition_map: PartitionMap, schema: Schema) -> Conn {
Expand All @@ -131,17 +127,6 @@ impl Conn {
}
}

/// Prepare the provided SQLite handle for use as a Mentat store. Creates tables but
/// _does not_ write the bootstrap schema. This constructor should only be used by
/// consumers that expect to populate raw transaction data themselves.

pub(crate) fn empty(sqlite: &mut rusqlite::Connection) -> Result<Conn> {
let (tx, db) = db::create_empty_current_version(sqlite)?;
tx.commit()?;
Ok(Conn::new(db.partition_map, db.schema))
}


pub fn connect(sqlite: &mut rusqlite::Connection) -> Result<Conn> {
let db = db::ensure_current_version(sqlite)?;
Ok(Conn::new(db.partition_map, db.schema))
Expand Down
14 changes: 13 additions & 1 deletion src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -178,13 +178,25 @@ pub mod query_builder;
pub mod store;
pub mod vocabulary;

#[cfg(feature = "syncable")]
mod sync;

#[cfg(feature = "syncable")]
pub use sync::{
Syncable,
};

#[cfg(feature = "syncable")]
pub use mentat_tolstoy::{
SyncReport,
};

pub use query_builder::{
QueryBuilder,
};

pub use conn::{
Conn,
Syncable,
};

pub use mentat_transaction::{
Expand Down
Loading