Dataset Preview
The full dataset viewer is not available. Only showing a preview of the rows.
The dataset generation failed because of a cast error
Error code: DatasetGenerationCastError
Exception: DatasetGenerationCastError
Message: An error occurred while generating the dataset
All the data files must have the same columns, but at some point there are 1 new columns ({'sha'}) and 3 missing columns ({'masked_commit_message', 'hash', 'is_merge'}).
This happened while the csv dataset builder was generating data using
hf://datasets/rsh-raj/influxdb-commits/validation.csv (at revision 3fc932f142f2ad735c47eb2727c2dae162f9b2bc)
Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
Traceback: Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/builder.py", line 1831, in _prepare_split_single
writer.write_table(table)
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/arrow_writer.py", line 714, in write_table
pa_table = table_cast(pa_table, self._schema)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/table.py", line 2272, in table_cast
return cast_table_to_schema(table, schema)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/table.py", line 2218, in cast_table_to_schema
raise CastError(
datasets.table.CastError: Couldn't cast
sha: string
author: string
date: string
commit_message: string
git_diff: string
type: string
-- schema metadata --
pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, "' + 939
to
{'hash': Value('string'), 'date': Value('string'), 'author': Value('string'), 'commit_message': Value('string'), 'is_merge': Value('bool'), 'masked_commit_message': Value('string'), 'type': Value('string'), 'git_diff': Value('string')}
because column names don't match
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1455, in compute_config_parquet_and_info_response
parquet_operations = convert_to_parquet(builder)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1054, in convert_to_parquet
builder.download_and_prepare(
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/builder.py", line 894, in download_and_prepare
self._download_and_prepare(
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/builder.py", line 970, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/builder.py", line 1702, in _prepare_split
for job_id, done, content in self._prepare_split_single(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/src/services/worker/.venv/lib/python3.12/site-packages/datasets/builder.py", line 1833, in _prepare_split_single
raise DatasetGenerationCastError.from_cast_error(
datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset
All the data files must have the same columns, but at some point there are 1 new columns ({'sha'}) and 3 missing columns ({'masked_commit_message', 'hash', 'is_merge'}).
This happened while the csv dataset builder was generating data using
hf://datasets/rsh-raj/influxdb-commits/validation.csv (at revision 3fc932f142f2ad735c47eb2727c2dae162f9b2bc)
Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
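As a sketch of the first option, the validation split could be rewritten so its columns match the other splits. The snippet below uses pandas; renaming sha to hash follows directly from the error, while the derivations of is_merge and masked_commit_message are assumptions about how the other splits were produced, not the dataset author's actual preprocessing.

# Sketch only: align validation.csv with the schema expected by the other splits
# (hash, date, author, commit_message, is_merge, masked_commit_message, type, git_diff).
# The is_merge / masked_commit_message derivations are assumptions, not the
# dataset author's actual preprocessing.
import pandas as pd

df = pd.read_csv("validation.csv")

# The error reports 'sha' as the extra column; the other splits call it 'hash'.
df = df.rename(columns={"sha": "hash"})

# Assumption: merge commits can be recognized by their message prefix.
df["is_merge"] = df["commit_message"].fillna("").str.startswith("Merge")

# Assumption: the masked message strips the conventional-commit prefix,
# e.g. "refactor: Clean up the Catalog API" -> "Clean up the Catalog API".
df["masked_commit_message"] = df["commit_message"].str.replace(
    r"^\w+(?:\([^)]*\))?!?:\s*", "", regex=True
)

# Write back with the column order used by the other splits.
columns = ["hash", "date", "author", "commit_message",
           "is_merge", "masked_commit_message", "type", "git_diff"]
df[columns].to_csv("validation.csv", index=False)

Alternatively, following the linked docs, the splits with differing schemas can be declared as separate configurations in the dataset's README so the viewer builds each one independently.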
Columns: hash (string), date (string), author (string), commit_message (string), is_merge (bool), masked_commit_message (string), type (string), git_diff (string)
hash: bfa54033bdc2a1d147f5d4b236867d5c0998518f
date: 2022-01-22 02:31:13
author: Paul Dix
commit_message: refactor: Clean up the Catalog API
is_merge: false
masked_commit_message: Clean up the Catalog API
type: refactor
git_diff:
diff --git a/ingester/src/data.rs b/ingester/src/data.rs
index b838faca54d..35eba458183 100644
--- a/ingester/src/data.rs
+++ b/ingester/src/data.rs
@@ -7,8 +7,8 @@ use uuid::Uuid;
use crate::server::IngesterServer;
use iox_catalog::interface::{
- KafkaPartition, KafkaTopicId, NamespaceId, PartitionId, RepoCollection, SequenceNumber,
- SequencerId, TableId, Tombstone,
+ Catalog, KafkaPartition, KafkaTopicId, NamespaceId, PartitionId, SequenceNumber, SequencerId,
+ TableId, Tombstone,
};
use mutable_batch::MutableBatch;
use parking_lot::RwLock;
@@ -54,11 +54,9 @@ pub struct Sequencers {
impl Sequencers {
/// One time initialize Sequencers of this Ingester
- pub async fn initialize<T: RepoCollection + Send + Sync>(
- ingester: &IngesterServer<'_, T>,
- ) -> Result<Self> {
+ pub async fn initialize<T: Catalog>(ingester: &IngesterServer<'_, T>) -> Result<Self> {
// Get sequencer ids from the catalog
- let sequencer_repro = ingester.iox_catalog.sequencer();
+ let sequencer_repro = ingester.iox_catalog.sequencers();
let mut sequencers = BTreeMap::default();
let topic = ingester.get_topic();
for shard in ingester.get_kafka_partitions() {
diff --git a/ingester/src/server.rs b/ingester/src/server.rs
index 11ce6dc553d..7c48be5389d 100644
--- a/ingester/src/server.rs
+++ b/ingester/src/server.rs
@@ -3,13 +3,13 @@
use std::sync::Arc;
-use iox_catalog::interface::{KafkaPartition, KafkaTopic, KafkaTopicId, RepoCollection};
+use iox_catalog::interface::{Catalog, KafkaPartition, KafkaTopic, KafkaTopicId};
/// The [`IngesterServer`] manages the lifecycle and contains all state for
/// an `ingester` server instance.
pub struct IngesterServer<'a, T>
where
- T: RepoCollection + Send + Sync,
+ T: Catalog,
{
/// Kafka Topic assigned to this ingester
kafka_topic: KafkaTopic,
@@ -21,7 +21,7 @@ where
impl<'a, T> IngesterServer<'a, T>
where
- T: RepoCollection + Send + Sync,
+ T: Catalog,
{
/// Initialize the Ingester
pub fn new(topic: KafkaTopic, shard_ids: Vec<KafkaPartition>, catalog: &'a Arc<T>) -> Self {
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index d72e91a4eee..01431ae9693 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -6,7 +6,6 @@ use snafu::{OptionExt, Snafu};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fmt::Formatter;
-use std::sync::Arc;
use uuid::Uuid;
#[derive(Debug, Snafu)]
@@ -247,32 +246,32 @@ impl std::fmt::Display for ParquetFileId {
}
}
-/// Container that can return repos for each of the catalog data types.
+/// Trait that contains methods for working with the catalog
#[async_trait]
-pub trait RepoCollection {
+pub trait Catalog: Send + Sync {
/// repo for kafka topics
- fn kafka_topic(&self) -> Arc<dyn KafkaTopicRepo + Sync + Send>;
+ fn kafka_topics(&self) -> &dyn KafkaTopicRepo;
/// repo fo rquery pools
- fn query_pool(&self) -> Arc<dyn QueryPoolRepo + Sync + Send>;
+ fn query_pools(&self) -> &dyn QueryPoolRepo;
/// repo for namespaces
- fn namespace(&self) -> Arc<dyn NamespaceRepo + Sync + Send>;
+ fn namespaces(&self) -> &dyn NamespaceRepo;
/// repo for tables
- fn table(&self) -> Arc<dyn TableRepo + Sync + Send>;
+ fn tables(&self) -> &dyn TableRepo;
/// repo for columns
- fn column(&self) -> Arc<dyn ColumnRepo + Sync + Send>;
+ fn columns(&self) -> &dyn ColumnRepo;
/// repo for sequencers
- fn sequencer(&self) -> Arc<dyn SequencerRepo + Sync + Send>;
+ fn sequencers(&self) -> &dyn SequencerRepo;
/// repo for partitions
- fn partition(&self) -> Arc<dyn PartitionRepo + Sync + Send>;
+ fn partitions(&self) -> &dyn PartitionRepo;
/// repo for tombstones
- fn tombstone(&self) -> Arc<dyn TombstoneRepo + Sync + Send>;
+ fn tombstones(&self) -> &dyn TombstoneRepo;
/// repo for parquet_files
- fn parquet_file(&self) -> Arc<dyn ParquetFileRepo + Sync + Send>;
+ fn parquet_files(&self) -> &dyn ParquetFileRepo;
}
/// Functions for working with Kafka topics in the catalog.
#[async_trait]
-pub trait KafkaTopicRepo {
+pub trait KafkaTopicRepo: Send + Sync {
/// Creates the kafka topic in the catalog or gets the existing record by name.
async fn create_or_get(&self, name: &str) -> Result<KafkaTopic>;
@@ -282,14 +281,14 @@ pub trait KafkaTopicRepo {
/// Functions for working with query pools in the catalog.
#[async_trait]
-pub trait QueryPoolRepo {
+pub trait QueryPoolRepo: Send + Sync {
/// Creates the query pool in the catalog or gets the existing record by name.
async fn create_or_get(&self, name: &str) -> Result<QueryPool>;
}
/// Functions for working with namespaces in the catalog
#[async_trait]
-pub trait NamespaceRepo {
+pub trait NamespaceRepo: Send + Sync {
/// Creates the namespace in the catalog. If one by the same name already exists, an
/// error is returned.
async fn create(
@@ -306,7 +305,7 @@ pub trait NamespaceRepo {
/// Functions for working with tables in the catalog
#[async_trait]
-pub trait TableRepo {
+pub trait TableRepo: Send + Sync {
/// Creates the table in the catalog or get the existing record by name.
async fn create_or_get(&self, name: &str, namespace_id: NamespaceId) -> Result<Table>;
@@ -316,7 +315,7 @@ pub trait TableRepo {
/// Functions for working with columns in the catalog
#[async_trait]
-pub trait ColumnRepo {
+pub trait ColumnRepo: Send + Sync {
/// Creates the column in the catalog or returns the existing column. Will return a
/// `Error::ColumnTypeMismatch` if the existing column type doesn't match the type
/// the caller is attempting to create.
@@ -333,7 +332,7 @@ pub trait ColumnRepo {
/// Functions for working with sequencers in the catalog
#[async_trait]
-pub trait SequencerRepo {
+pub trait SequencerRepo: Send + Sync {
/// create a sequencer record for the kafka topic and partition or return the existing record
async fn create_or_get(
&self,
@@ -358,7 +357,7 @@ pub trait SequencerRepo {
/// Functions for working with IOx partitions in the catalog. Note that these are how
/// IOx splits up data within a database, which is differenet than Kafka partitions.
#[async_trait]
-pub trait PartitionRepo {
+pub trait PartitionRepo: Send + Sync {
/// create or get a partition record for the given partition key, sequencer and table
async fn create_or_get(
&self,
@@ -373,7 +372,7 @@ pub trait PartitionRepo {
/// Functions for working with tombstones in the catalog
#[async_trait]
-pub trait TombstoneRepo {
+pub trait TombstoneRepo: Send + Sync {
/// create or get a tombstone
async fn create_or_get(
&self,
@@ -397,7 +396,7 @@ pub trait TombstoneRepo {
/// Functions for working with parquet file pointers in the catalog
#[async_trait]
-pub trait ParquetFileRepo {
+pub trait ParquetFileRepo: Send + Sync {
/// create the parquet file
#[allow(clippy::too_many_arguments)]
async fn create(
@@ -519,22 +518,19 @@ impl NamespaceSchema {
}
/// Gets the namespace schema including all tables and columns.
-pub async fn get_schema_by_name<T: RepoCollection + Send + Sync>(
+pub async fn get_schema_by_name(
name: &str,
- repo: &T,
+ catalog: &dyn Catalog,
) -> Result<Option<NamespaceSchema>> {
- let namespace_repo = repo.namespace();
- let table_repo = repo.table();
- let column_repo = repo.column();
-
- let namespace = namespace_repo
+ let namespace = catalog
+ .namespaces()
.get_by_name(name)
.await?
.context(NamespaceNotFoundSnafu { name })?;
// get the columns first just in case someone else is creating schema while we're doing this.
- let columns = column_repo.list_by_namespace_id(namespace.id).await?;
- let tables = table_repo.list_by_namespace_id(namespace.id).await?;
+ let columns = catalog.columns().list_by_namespace_id(namespace.id).await?;
+ let tables = catalog.tables().list_by_namespace_id(namespace.id).await?;
let mut namespace = NamespaceSchema::new(
namespace.id,
@@ -813,25 +809,22 @@ pub struct ParquetFile {
pub(crate) mod test_helpers {
use super::*;
use futures::{stream::FuturesOrdered, StreamExt};
-
- pub(crate) async fn test_repo<T, F>(new_repo: F)
- where
- T: RepoCollection + Send + Sync,
- F: Fn() -> T + Send + Sync,
- {
- test_kafka_topic(&new_repo()).await;
- test_query_pool(&new_repo()).await;
- test_namespace(&new_repo()).await;
- test_table(&new_repo()).await;
- test_column(&new_repo()).await;
- test_sequencer(&new_repo()).await;
- test_partition(&new_repo()).await;
- test_tombstone(&new_repo()).await;
- test_parquet_file(&new_repo()).await;
+ use std::sync::Arc;
+
+ pub(crate) async fn test_catalog(catalog: Arc<dyn Catalog>) {
+ test_kafka_topic(Arc::clone(&catalog)).await;
+ test_query_pool(Arc::clone(&catalog)).await;
+ test_namespace(Arc::clone(&catalog)).await;
+ test_table(Arc::clone(&catalog)).await;
+ test_column(Arc::clone(&catalog)).await;
+ test_sequencer(Arc::clone(&catalog)).await;
+ test_partition(Arc::clone(&catalog)).await;
+ test_tombstone(Arc::clone(&catalog)).await;
+ test_parquet_file(Arc::clone(&catalog)).await;
}
- async fn test_kafka_topic<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka_repo = repo.kafka_topic();
+ async fn test_kafka_topic(catalog: Arc<dyn Catalog>) {
+ let kafka_repo = catalog.kafka_topics();
let k = kafka_repo.create_or_get("foo").await.unwrap();
assert!(k.id > KafkaTopicId::new(0));
assert_eq!(k.name, "foo");
@@ -843,8 +836,8 @@ pub(crate) mod test_helpers {
assert!(k3.is_none());
}
- async fn test_query_pool<T: RepoCollection + Send + Sync>(repo: &T) {
- let query_repo = repo.query_pool();
+ async fn test_query_pool(catalog: Arc<dyn Catalog>) {
+ let query_repo = catalog.query_pools();
let q = query_repo.create_or_get("foo").await.unwrap();
assert!(q.id > QueryPoolId::new(0));
assert_eq!(q.name, "foo");
@@ -852,10 +845,10 @@ pub(crate) mod test_helpers {
assert_eq!(q, q2);
}
- async fn test_namespace<T: RepoCollection + Send + Sync>(repo: &T) {
- let namespace_repo = repo.namespace();
- let kafka = repo.kafka_topic().create_or_get("foo").await.unwrap();
- let pool = repo.query_pool().create_or_get("foo").await.unwrap();
+ async fn test_namespace(catalog: Arc<dyn Catalog>) {
+ let namespace_repo = catalog.namespaces();
+ let kafka = catalog.kafka_topics().create_or_get("foo").await.unwrap();
+ let pool = catalog.query_pools().create_or_get("foo").await.unwrap();
let namespace_name = "test_namespace";
let namespace = namespace_repo
@@ -881,53 +874,75 @@ pub(crate) mod test_helpers {
assert_eq!(namespace, found);
}
- async fn test_table<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka = repo.kafka_topic().create_or_get("foo").await.unwrap();
- let pool = repo.query_pool().create_or_get("foo").await.unwrap();
- let namespace = repo
- .namespace()
+ async fn test_table(catalog: Arc<dyn Catalog>) {
+ let kafka = catalog.kafka_topics().create_or_get("foo").await.unwrap();
+ let pool = catalog.query_pools().create_or_get("foo").await.unwrap();
+ let namespace = catalog
+ .namespaces()
.create("namespace_table_test", "inf", kafka.id, pool.id)
.await
.unwrap();
// test we can create or get a table
- let table_repo = repo.table();
- let t = table_repo
+ let t = catalog
+ .tables()
.create_or_get("test_table", namespace.id)
.await
.unwrap();
- let tt = table_repo
+ let tt = catalog
+ .tables()
.create_or_get("test_table", namespace.id)
.await
.unwrap();
assert!(t.id > TableId::new(0));
assert_eq!(t, tt);
- let tables = table_repo.list_by_namespace_id(namespace.id).await.unwrap();
+ let tables = catalog
+ .tables()
+ .list_by_namespace_id(namespace.id)
+ .await
+ .unwrap();
assert_eq!(vec![t], tables);
+
+ // test we can create a table of the same name in a different namespace
+ let namespace2 = catalog
+ .namespaces()
+ .create("two", "inf", kafka.id, pool.id)
+ .await
+ .unwrap();
+ assert_ne!(namespace, namespace2);
+ let test_table = catalog
+ .tables()
+ .create_or_get("test_table", namespace2.id)
+ .await
+ .unwrap();
+ assert_ne!(tt, test_table);
+ assert_eq!(test_table.namespace_id, namespace2.id)
}
- async fn test_column<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka = repo.kafka_topic().create_or_get("foo").await.unwrap();
- let pool = repo.query_pool().create_or_get("foo").await.unwrap();
- let namespace = repo
- .namespace()
+ async fn test_column(catalog: Arc<dyn Catalog>) {
+ let kafka = catalog.kafka_topics().create_or_get("foo").await.unwrap();
+ let pool = catalog.query_pools().create_or_get("foo").await.unwrap();
+ let namespace = catalog
+ .namespaces()
.create("namespace_column_test", "inf", kafka.id, pool.id)
.await
.unwrap();
- let table = repo
- .table()
+ let table = catalog
+ .tables()
.create_or_get("test_table", namespace.id)
.await
.unwrap();
+ assert_eq!(table.namespace_id, namespace.id);
// test we can create or get a column
- let column_repo = repo.column();
- let c = column_repo
+ let c = catalog
+ .columns()
.create_or_get("column_test", table.id, ColumnType::Tag)
.await
.unwrap();
- let cc = column_repo
+ let cc = catalog
+ .columns()
.create_or_get("column_test", table.id, ColumnType::Tag)
.await
.unwrap();
@@ -935,7 +950,8 @@ pub(crate) mod test_helpers {
assert_eq!(c, cc);
// test that attempting to create an already defined column of a different type returns error
- let err = column_repo
+ let err = catalog
+ .columns()
.create_or_get("column_test", table.id, ColumnType::U64)
.await
.expect_err("should error with wrong column type");
@@ -949,35 +965,40 @@ pub(crate) mod test_helpers {
));
// test that we can create a column of the same name under a different table
- let table2 = repo
- .table()
+ let table2 = catalog
+ .tables()
.create_or_get("test_table_2", namespace.id)
.await
.unwrap();
- let ccc = column_repo
+ let ccc = catalog
+ .columns()
.create_or_get("column_test", table2.id, ColumnType::U64)
.await
.unwrap();
assert_ne!(c, ccc);
- let columns = column_repo
+ let columns = catalog
+ .columns()
.list_by_namespace_id(namespace.id)
.await
.unwrap();
assert_eq!(vec![c, ccc], columns);
}
- async fn test_sequencer<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka = repo
- .kafka_topic()
+ async fn test_sequencer(catalog: Arc<dyn Catalog>) {
+ let kafka = catalog
+ .kafka_topics()
.create_or_get("sequencer_test")
.await
.unwrap();
- let sequencer_repo = repo.sequencer();
// Create 10 sequencers
let created = (1..=10)
- .map(|partition| sequencer_repo.create_or_get(&kafka, KafkaPartition::new(partition)))
+ .map(|partition| {
+ catalog
+ .sequencers()
+ .create_or_get(&kafka, KafkaPartition::new(partition))
+ })
.collect::<FuturesOrdered<_>>()
.map(|v| {
let v = v.expect("failed to create sequencer");
@@ -987,7 +1008,8 @@ pub(crate) mod test_helpers {
.await;
// List them and assert they match
- let listed = sequencer_repo
+ let listed = catalog
+ .sequencers()
.list_by_kafka_topic(&kafka)
.await
.expect("failed to list sequencers")
@@ -999,7 +1021,8 @@ pub(crate) mod test_helpers {
// get by the sequencer id and partition
let kafka_partition = KafkaPartition::new(1);
- let sequencer = sequencer_repo
+ let sequencer = catalog
+ .sequencers()
.get_by_topic_id_and_partition(kafka.id, kafka_partition)
.await
.unwrap()
@@ -1007,42 +1030,45 @@ pub(crate) mod test_helpers {
assert_eq!(kafka.id, sequencer.kafka_topic_id);
assert_eq!(kafka_partition, sequencer.kafka_partition);
- let sequencer = sequencer_repo
+ let sequencer = catalog
+ .sequencers()
.get_by_topic_id_and_partition(kafka.id, KafkaPartition::new(523))
.await
.unwrap();
assert!(sequencer.is_none());
}
- async fn test_partition<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka = repo.kafka_topic().create_or_get("foo").await.unwrap();
- let pool = repo.query_pool().create_or_get("foo").await.unwrap();
- let namespace = repo
- .namespace()
+ async fn test_partition(catalog: Arc<dyn Catalog>) {
+ let kafka = catalog.kafka_topics().create_or_get("foo").await.unwrap();
+ let pool = catalog.query_pools().create_or_get("foo").await.unwrap();
+ let namespace = catalog
+ .namespaces()
.create("namespace_partition_test", "inf", kafka.id, pool.id)
.await
.unwrap();
- let table = repo
- .table()
+ let table = catalog
+ .tables()
.create_or_get("test_table", namespace.id)
.await
.unwrap();
- let sequencer = repo
- .sequencer()
+ let sequencer = catalog
+ .sequencers()
.create_or_get(&kafka, KafkaPartition::new(1))
.await
.unwrap();
- let other_sequencer = repo
- .sequencer()
+ let other_sequencer = catalog
+ .sequencers()
.create_or_get(&kafka, KafkaPartition::new(2))
.await
.unwrap();
- let partition_repo = repo.partition();
-
let created = ["foo", "bar"]
.iter()
- .map(|key| partition_repo.create_or_get(key, sequencer.id, table.id))
+ .map(|key| {
+ catalog
+ .partitions()
+ .create_or_get(key, sequencer.id, table.id)
+ })
.collect::<FuturesOrdered<_>>()
.map(|v| {
let v = v.expect("failed to create partition");
@@ -1050,13 +1076,15 @@ pub(crate) mod test_helpers {
})
.collect::<BTreeMap<_, _>>()
.await;
- let _ = partition_repo
+ let _ = catalog
+ .partitions()
.create_or_get("asdf", other_sequencer.id, table.id)
.await
.unwrap();
// List them and assert they match
- let listed = partition_repo
+ let listed = catalog
+ .partitions()
.list_by_sequencer(sequencer.id)
.await
.expect("failed to list partitions")
@@ -1067,34 +1095,34 @@ pub(crate) mod test_helpers {
assert_eq!(created, listed);
}
- async fn test_tombstone<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka = repo.kafka_topic().create_or_get("foo").await.unwrap();
- let pool = repo.query_pool().create_or_get("foo").await.unwrap();
- let namespace = repo
- .namespace()
+ async fn test_tombstone(catalog: Arc<dyn Catalog>) {
+ let kafka = catalog.kafka_topics().create_or_get("foo").await.unwrap();
+ let pool = catalog.query_pools().create_or_get("foo").await.unwrap();
+ let namespace = catalog
+ .namespaces()
.create("namespace_tombstone_test", "inf", kafka.id, pool.id)
.await
.unwrap();
- let table = repo
- .table()
+ let table = catalog
+ .tables()
.create_or_get("test_table", namespace.id)
.await
.unwrap();
- let other_table = repo
- .table()
+ let other_table = catalog
+ .tables()
.create_or_get("other", namespace.id)
.await
.unwrap();
- let sequencer = repo
- .sequencer()
+ let sequencer = catalog
+ .sequencers()
.create_or_get(&kafka, KafkaPartition::new(1))
.await
.unwrap();
- let tombstone_repo = repo.tombstone();
let min_time = Timestamp::new(1);
let max_time = Timestamp::new(10);
- let t1 = tombstone_repo
+ let t1 = catalog
+ .tombstones()
.create_or_get(
table.id,
sequencer.id,
@@ -1111,7 +1139,8 @@ pub(crate) mod test_helpers {
assert_eq!(t1.min_time, min_time);
assert_eq!(t1.max_time, max_time);
assert_eq!(t1.serialized_predicate, "whatevs");
- let t2 = tombstone_repo
+ let t2 = catalog
+ .tombstones()
.create_or_get(
other_table.id,
sequencer.id,
@@ -1122,7 +1151,8 @@ pub(crate) mod test_helpers {
)
.await
.unwrap();
- let t3 = tombstone_repo
+ let t3 = catalog
+ .tombstones()
.create_or_get(
table.id,
sequencer.id,
@@ -1134,43 +1164,44 @@ pub(crate) mod test_helpers {
.await
.unwrap();
- let listed = tombstone_repo
+ let listed = catalog
+ .tombstones()
.list_tombstones_by_sequencer_greater_than(sequencer.id, SequenceNumber::new(1))
.await
.unwrap();
assert_eq!(vec![t2, t3], listed);
}
- async fn test_parquet_file<T: RepoCollection + Send + Sync>(repo: &T) {
- let kafka = repo.kafka_topic().create_or_get("foo").await.unwrap();
- let pool = repo.query_pool().create_or_get("foo").await.unwrap();
- let namespace = repo
- .namespace()
+ async fn test_parquet_file(catalog: Arc<dyn Catalog>) {
+ let kafka = catalog.kafka_topics().create_or_get("foo").await.unwrap();
+ let pool = catalog.query_pools().create_or_get("foo").await.unwrap();
+ let namespace = catalog
+ .namespaces()
.create("namespace_parquet_file_test", "inf", kafka.id, pool.id)
.await
.unwrap();
- let table = repo
- .table()
+ let table = catalog
+ .tables()
.create_or_get("test_table", namespace.id)
.await
.unwrap();
- let other_table = repo
- .table()
+ let other_table = catalog
+ .tables()
.create_or_get("other", namespace.id)
.await
.unwrap();
- let sequencer = repo
- .sequencer()
+ let sequencer = catalog
+ .sequencers()
.create_or_get(&kafka, KafkaPartition::new(1))
.await
.unwrap();
- let partition = repo
- .partition()
+ let partition = catalog
+ .partitions()
.create_or_get("one", sequencer.id, table.id)
.await
.unwrap();
- let other_partition = repo
- .partition()
+ let other_partition = catalog
+ .partitions()
.create_or_get("one", sequencer.id, other_table.id)
.await
.unwrap();
@@ -1178,7 +1209,7 @@ pub(crate) mod test_helpers {
let min_time = Timestamp::new(1);
let max_time = Timestamp::new(10);
- let parquet_repo = repo.parquet_file();
+ let parquet_repo = catalog.parquet_files();
let parquet_file = parquet_repo
.create(
sequencer.id,
diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs
index a23de38af4f..3c5c5f23566 100644
--- a/iox_catalog/src/lib.rs
+++ b/iox_catalog/src/lib.rs
@@ -12,8 +12,8 @@
)]
use crate::interface::{
- column_type_from_field, ColumnSchema, ColumnType, Error, KafkaPartition, KafkaTopic,
- NamespaceSchema, QueryPool, RepoCollection, Result, Sequencer, SequencerId, TableId,
+ column_type_from_field, Catalog, ColumnSchema, ColumnType, Error, KafkaPartition, KafkaTopic,
+ NamespaceSchema, QueryPool, Result, Sequencer, SequencerId, TableId,
};
use futures::{stream::FuturesOrdered, StreamExt};
use influxdb_line_protocol::ParsedLine;
@@ -36,10 +36,10 @@ pub mod postgres;
/// If another writer attempts to create a column of the same name with a different
/// type at the same time and beats this caller to it, an error will be returned. If another
/// writer adds the same schema before this one, then this will load that schema here.
-pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
+pub async fn validate_or_insert_schema(
lines: Vec<ParsedLine<'_>>,
schema: &NamespaceSchema,
- repo: &T,
+ catalog: &dyn Catalog,
) -> Result<Option<NamespaceSchema>> {
// table name to table_id
let mut new_tables: BTreeMap<String, TableId> = BTreeMap::new();
@@ -66,8 +66,8 @@ pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
None => {
let entry = new_columns.entry(table.id).or_default();
if entry.get(key.as_str()).is_none() {
- let column_repo = repo.column();
- let column = column_repo
+ let column = catalog
+ .columns()
.create_or_get(key.as_str(), table.id, ColumnType::Tag)
.await?;
entry.insert(
@@ -97,8 +97,8 @@ pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
let entry = new_columns.entry(table.id).or_default();
if entry.get(key.as_str()).is_none() {
let data_type = column_type_from_field(value);
- let column_repo = repo.column();
- let column = column_repo
+ let column = catalog
+ .columns()
.create_or_get(key.as_str(), table.id, data_type)
.await?;
entry.insert(
@@ -113,15 +113,16 @@ pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
}
}
None => {
- let table_repo = repo.table();
- let new_table = table_repo.create_or_get(table_name, schema.id).await?;
+ let new_table = catalog
+ .tables()
+ .create_or_get(table_name, schema.id)
+ .await?;
let new_table_columns = new_columns.entry(new_table.id).or_default();
- let column_repo = repo.column();
-
if let Some(tagset) = &line.series.tag_set {
for (key, _) in tagset {
- let new_column = column_repo
+ let new_column = catalog
+ .columns()
.create_or_get(key.as_str(), new_table.id, ColumnType::Tag)
.await?;
new_table_columns.insert(
@@ -135,7 +136,8 @@ pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
}
for (key, value) in &line.field_set {
let data_type = column_type_from_field(value);
- let new_column = column_repo
+ let new_column = catalog
+ .columns()
.create_or_get(key.as_str(), new_table.id, data_type)
.await?;
new_table_columns.insert(
@@ -146,7 +148,8 @@ pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
},
);
}
- let time_column = column_repo
+ let time_column = catalog
+ .columns()
.create_or_get(TIME_COLUMN, new_table.id, ColumnType::Time)
.await?;
new_table_columns.insert(
@@ -173,19 +176,25 @@ pub async fn validate_or_insert_schema<T: RepoCollection + Sync + Send>(
/// Creates or gets records in the catalog for the shared kafka topic, query pool, and sequencers for
/// each of the partitions.
-pub async fn create_or_get_default_records<T: RepoCollection + Sync + Send>(
+pub async fn create_or_get_default_records(
kafka_partition_count: i32,
- repo: &T,
+ catalog: &dyn Catalog,
) -> Result<(KafkaTopic, QueryPool, BTreeMap<SequencerId, Sequencer>)> {
- let kafka_repo = repo.kafka_topic();
- let query_repo = repo.query_pool();
- let sequencer_repo = repo.sequencer();
-
- let kafka_topic = kafka_repo.create_or_get(SHARED_KAFKA_TOPIC).await?;
- let query_pool = query_repo.create_or_get(SHARED_QUERY_POOL).await?;
+ let kafka_topic = catalog
+ .kafka_topics()
+ .create_or_get(SHARED_KAFKA_TOPIC)
+ .await?;
+ let query_pool = catalog
+ .query_pools()
+ .create_or_get(SHARED_QUERY_POOL)
+ .await?;
let sequencers = (1..=kafka_partition_count)
- .map(|partition| sequencer_repo.create_or_get(&kafka_topic, KafkaPartition::new(partition)))
+ .map(|partition| {
+ catalog
+ .sequencers()
+ .create_or_get(&kafka_topic, KafkaPartition::new(partition))
+ })
.collect::<FuturesOrdered<_>>()
.map(|v| {
let v = v.expect("failed to create sequencer");
@@ -207,13 +216,13 @@ mod tests {
#[tokio::test]
async fn test_validate_or_insert_schema() {
- let repo = Arc::new(MemCatalog::new());
+ let repo = MemCatalog::new();
let (kafka_topic, query_pool, _) = create_or_get_default_records(2, &repo).await.unwrap();
let namespace_name = "validate_schema";
// now test with a new namespace
let namespace = repo
- .namespace()
+ .namespaces()
.create(namespace_name, "inf", kafka_topic.id, query_pool.id)
.await
.unwrap();
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index c4cf0333b13..b5e634ea815 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -2,16 +2,16 @@
//! used for testing or for an IOx designed to run without catalog persistence.
use crate::interface::{
- Column, ColumnId, ColumnRepo, ColumnType, Error, KafkaPartition, KafkaTopic, KafkaTopicId,
- KafkaTopicRepo, Namespace, NamespaceId, NamespaceRepo, ParquetFile, ParquetFileId,
- ParquetFileRepo, Partition, PartitionId, PartitionRepo, QueryPool, QueryPoolId, QueryPoolRepo,
- RepoCollection, Result, SequenceNumber, Sequencer, SequencerId, SequencerRepo, Table, TableId,
+ Catalog, Column, ColumnId, ColumnRepo, ColumnType, Error, KafkaPartition, KafkaTopic,
+ KafkaTopicId, KafkaTopicRepo, Namespace, NamespaceId, NamespaceRepo, ParquetFile,
+ ParquetFileId, ParquetFileRepo, Partition, PartitionId, PartitionRepo, QueryPool, QueryPoolId,
+ QueryPoolRepo, Result, SequenceNumber, Sequencer, SequencerId, SequencerRepo, Table, TableId,
TableRepo, Timestamp, Tombstone, TombstoneId, TombstoneRepo,
};
use async_trait::async_trait;
use std::convert::TryFrom;
use std::fmt::Formatter;
-use std::sync::{Arc, Mutex};
+use std::sync::Mutex;
use uuid::Uuid;
/// In-memory catalog that implements the `RepoCollection` and individual repo traits from
@@ -48,41 +48,41 @@ struct MemCollections {
parquet_files: Vec<ParquetFile>,
}
-impl RepoCollection for Arc<MemCatalog> {
- fn kafka_topic(&self) -> Arc<dyn KafkaTopicRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn KafkaTopicRepo + Sync + Send>
+impl Catalog for MemCatalog {
+ fn kafka_topics(&self) -> &dyn KafkaTopicRepo {
+ self
}
- fn query_pool(&self) -> Arc<dyn QueryPoolRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn QueryPoolRepo + Sync + Send>
+ fn query_pools(&self) -> &dyn QueryPoolRepo {
+ self
}
- fn namespace(&self) -> Arc<dyn NamespaceRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn NamespaceRepo + Sync + Send>
+ fn namespaces(&self) -> &dyn NamespaceRepo {
+ self
}
- fn table(&self) -> Arc<dyn TableRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn TableRepo + Sync + Send>
+ fn tables(&self) -> &dyn TableRepo {
+ self
}
- fn column(&self) -> Arc<dyn ColumnRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn ColumnRepo + Sync + Send>
+ fn columns(&self) -> &dyn ColumnRepo {
+ self
}
- fn sequencer(&self) -> Arc<dyn SequencerRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn SequencerRepo + Sync + Send>
+ fn sequencers(&self) -> &dyn SequencerRepo {
+ self
}
- fn partition(&self) -> Arc<dyn PartitionRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn PartitionRepo + Sync + Send>
+ fn partitions(&self) -> &dyn PartitionRepo {
+ self
}
- fn tombstone(&self) -> Arc<dyn TombstoneRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn TombstoneRepo + Sync + Send>
+ fn tombstones(&self) -> &dyn TombstoneRepo {
+ self
}
- fn parquet_file(&self) -> Arc<dyn ParquetFileRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn ParquetFileRepo + Sync + Send>
+ fn parquet_files(&self) -> &dyn ParquetFileRepo {
+ self
}
}
@@ -180,7 +180,11 @@ impl TableRepo for MemCatalog {
async fn create_or_get(&self, name: &str, namespace_id: NamespaceId) -> Result<Table> {
let mut collections = self.collections.lock().expect("mutex poisoned");
- let table = match collections.tables.iter().find(|t| t.name == name) {
+ let table = match collections
+ .tables
+ .iter()
+ .find(|t| t.name == name && t.namespace_id == namespace_id)
+ {
Some(t) => t,
None => {
let table = Table {
@@ -250,18 +254,22 @@ impl ColumnRepo for MemCatalog {
}
async fn list_by_namespace_id(&self, namespace_id: NamespaceId) -> Result<Vec<Column>> {
- let mut columns = vec![];
-
let collections = self.collections.lock().expect("mutex poisoned");
- for t in collections
+
+ let table_ids: Vec<_> = collections
.tables
.iter()
.filter(|t| t.namespace_id == namespace_id)
- {
- for c in collections.columns.iter().filter(|c| c.table_id == t.id) {
- columns.push(c.clone());
- }
- }
+ .map(|t| t.id)
+ .collect();
+ println!("tables: {:?}", collections.tables);
+ println!("table_ids: {:?}", table_ids);
+ let columns: Vec<_> = collections
+ .columns
+ .iter()
+ .filter(|c| table_ids.contains(&c.table_id))
+ .cloned()
+ .collect();
Ok(columns)
}
@@ -488,11 +496,10 @@ impl ParquetFileRepo for MemCatalog {
#[cfg(test)]
mod tests {
use super::*;
+ use std::sync::Arc;
#[tokio::test]
- async fn test_mem_repo() {
- let f = || Arc::new(MemCatalog::new());
-
- crate::interface::test_helpers::test_repo(f).await;
+ async fn test_catalog() {
+ crate::interface::test_helpers::test_catalog(Arc::new(MemCatalog::new())).await;
}
}
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 2b052a9738e..4fc600ea56d 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -1,16 +1,15 @@
//! A Postgres backed implementation of the Catalog
use crate::interface::{
- Column, ColumnRepo, ColumnType, Error, KafkaPartition, KafkaTopic, KafkaTopicId,
+ Catalog, Column, ColumnRepo, ColumnType, Error, KafkaPartition, KafkaTopic, KafkaTopicId,
KafkaTopicRepo, Namespace, NamespaceId, NamespaceRepo, ParquetFile, ParquetFileId,
ParquetFileRepo, Partition, PartitionId, PartitionRepo, QueryPool, QueryPoolId, QueryPoolRepo,
- RepoCollection, Result, SequenceNumber, Sequencer, SequencerId, SequencerRepo, Table, TableId,
- TableRepo, Timestamp, Tombstone, TombstoneRepo,
+ Result, SequenceNumber, Sequencer, SequencerId, SequencerRepo, Table, TableId, TableRepo,
+ Timestamp, Tombstone, TombstoneRepo,
};
use async_trait::async_trait;
use observability_deps::tracing::info;
use sqlx::{postgres::PgPoolOptions, Executor, Pool, Postgres};
-use std::sync::Arc;
use std::time::Duration;
use uuid::Uuid;
@@ -62,41 +61,41 @@ impl PostgresCatalog {
}
}
-impl RepoCollection for Arc<PostgresCatalog> {
- fn kafka_topic(&self) -> Arc<dyn KafkaTopicRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn KafkaTopicRepo + Sync + Send>
+impl Catalog for PostgresCatalog {
+ fn kafka_topics(&self) -> &dyn KafkaTopicRepo {
+ self
}
- fn query_pool(&self) -> Arc<dyn QueryPoolRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn QueryPoolRepo + Sync + Send>
+ fn query_pools(&self) -> &dyn QueryPoolRepo {
+ self
}
- fn namespace(&self) -> Arc<dyn NamespaceRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn NamespaceRepo + Sync + Send>
+ fn namespaces(&self) -> &dyn NamespaceRepo {
+ self
}
- fn table(&self) -> Arc<dyn TableRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn TableRepo + Sync + Send>
+ fn tables(&self) -> &dyn TableRepo {
+ self
}
- fn column(&self) -> Arc<dyn ColumnRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn ColumnRepo + Sync + Send>
+ fn columns(&self) -> &dyn ColumnRepo {
+ self
}
- fn sequencer(&self) -> Arc<dyn SequencerRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn SequencerRepo + Sync + Send>
+ fn sequencers(&self) -> &dyn SequencerRepo {
+ self
}
- fn partition(&self) -> Arc<dyn PartitionRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn PartitionRepo + Sync + Send>
+ fn partitions(&self) -> &dyn PartitionRepo {
+ self
}
- fn tombstone(&self) -> Arc<dyn TombstoneRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn TombstoneRepo + Sync + Send>
+ fn tombstones(&self) -> &dyn TombstoneRepo {
+ self
}
- fn parquet_file(&self) -> Arc<dyn ParquetFileRepo + Sync + Send> {
- Self::clone(self) as Arc<dyn ParquetFileRepo + Sync + Send>
+ fn parquet_files(&self) -> &dyn ParquetFileRepo {
+ self
}
}
@@ -586,6 +585,7 @@ fn is_fk_violation(e: &sqlx::Error) -> bool {
mod tests {
use super::*;
use std::env;
+ use std::sync::Arc;
// Helper macro to skip tests if TEST_INTEGRATION and the AWS environment variables are not set.
macro_rules! maybe_skip_integration {
@@ -624,17 +624,15 @@ mod tests {
}};
}
- async fn setup_db() -> Arc<PostgresCatalog> {
+ async fn setup_db() -> PostgresCatalog {
let dsn = std::env::var("DATABASE_URL").unwrap();
- Arc::new(
- PostgresCatalog::connect("test", SCHEMA_NAME, &dsn)
- .await
- .unwrap(),
- )
+ PostgresCatalog::connect("test", SCHEMA_NAME, &dsn)
+ .await
+ .unwrap()
}
#[tokio::test]
- async fn test_repo() {
+ async fn test_catalog() {
// If running an integration test on your laptop, this requires that you have Postgres
// running and that you've done the sqlx migrations. See the README in this crate for
// info to set it up.
@@ -642,10 +640,9 @@ mod tests {
let postgres = setup_db().await;
clear_schema(&postgres.pool).await;
+ let postgres: Arc<dyn Catalog> = Arc::new(postgres);
- let f = || Arc::clone(&postgres);
-
- crate::interface::test_helpers::test_repo(f).await;
+ crate::interface::test_helpers::test_catalog(postgres).await;
}
async fn clear_schema(pool: &Pool<Postgres>) {

hash: d9ce92dad19cae62350db736b0c83aa5463b85a2
date: 2023-06-27 20:05:04
author: Marco Neumann
commit_message: fix: do not override all rustflags in circleci (#8061)
is_merge: false
masked_commit_message: do not override all rustflags in circleci (#8061)
type: fix
git_diff:
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 93d6d22856a..5a2a6c71ef5 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -86,7 +86,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
steps:
@@ -103,7 +103,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
steps:
@@ -126,7 +126,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
steps:
@@ -147,7 +147,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
# Turn warnings into errors
@@ -217,7 +217,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
RUST_BACKTRACE: "1"
@@ -257,7 +257,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
RUST_BACKTRACE: "1"
@@ -279,7 +279,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
RUST_BACKTRACE: "1"
@@ -303,7 +303,7 @@ jobs:
CARGO_INCREMENTAL: "0"
# Disable full debug symbol generation to speed up CI build
# "1" means line tables only, which is useful for panic tracebacks.
- RUSTFLAGS: "-C debuginfo=1"
+ CARGO_PROFILE_DEV_DEBUG: "1"
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
# The `2xlarge` resource class that we use has 32GB RAM but also 16 CPUs. This means we have 2GB RAM per core on

hash: d3dd440c5fb0edc8259f6e6c265968a470c170cd
date: 2020-11-03 23:07:50
author: Dan Moran
commit_message: test: fix expected RP in existing test.
is_merge: false
masked_commit_message: fix expected RP in existing test.
type: test
git_diff:
diff --git a/tenant/service_onboarding_test.go b/tenant/service_onboarding_test.go
index d28b59c075d..cfbdb1b7562 100644
--- a/tenant/service_onboarding_test.go
+++ b/tenant/service_onboarding_test.go
@@ -182,4 +182,4 @@ func TestOnboardService_RetentionPolicy(t *testing.T) {
}
assert.Equal(t, onboard.Bucket.RetentionPeriod, retention, "Retention policy should pass through")
-}
\ No newline at end of file
+}
diff --git a/testing/onboarding.go b/testing/onboarding.go
index d60800dcc0c..214745a5d33 100644
--- a/testing/onboarding.go
+++ b/testing/onboarding.go
@@ -158,7 +158,7 @@ func OnboardInitialUser(
Org: "org1",
Bucket: "bucket1",
Password: "password1",
- RetentionPeriod: 24 * 7, // 1 week
+ RetentionPeriod: time.Hour * 24 * 7, // 1 week
},
},
wants: wants{

hash: 042a6a66d5b82c08c8ba6820c503356cd9f6ad54
date: 2023-05-18 16:21:24
author: Andrew Lamb
commit_message: refactor: remove old tsm import code (#7804)
is_merge: false
masked_commit_message: remove old tsm import code (#7804)
type: refactor
git_diff:
diff --git a/import_export/src/lib.rs b/import_export/src/lib.rs
index 7adc1382b8e..8b137891791 100644
--- a/import_export/src/lib.rs
+++ b/import_export/src/lib.rs
@@ -1 +1 @@
-pub mod tsm;
+
diff --git a/import_export/src/tsm/mod.rs b/import_export/src/tsm/mod.rs
deleted file mode 100644
index c27e39ca64f..00000000000
--- a/import_export/src/tsm/mod.rs
+++ /dev/null
@@ -1,382 +0,0 @@
-use chrono::{offset::FixedOffset, DateTime};
-use schema::InfluxFieldType;
-use serde::de::{Deserialize, Deserializer};
-use serde::ser::{Serialize, Serializer};
-use serde::*;
-use std::collections::{HashMap, HashSet};
-
-mod tsm_schema;
-
-// Public API
-pub use tsm_schema::{
- fetch::{fetch_schema, FetchError},
- merge::{SchemaMergeError, SchemaMerger},
- update_catalog::{update_iox_catalog, UpdateCatalogError},
- validate::{validate_schema, ValidationError},
-};
-
-/// This struct is used to build up schemas from TSM snapshots that we are going to use to bulk
-/// ingest. They will be merged, then validated to check for anomalies that will complicate bulk
-/// ingest such as tags/fields with the same name, or fields with different types across the whole
-/// dataset. It is not the same as an IOx schema, although it is similar and some of the merge code
-/// is similar. It's a transient data structure.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct AggregateTSMSchema {
- pub org_id: String,
- pub bucket_id: String,
- pub measurements: HashMap<String, AggregateTSMMeasurement>,
-}
-
-impl AggregateTSMSchema {
- pub fn types_are_valid(&self) -> bool {
- self.measurements.values().all(|m| {
- m.fields.values().all(|f| {
- f.types.len() == 1
- && InfluxFieldType::try_from(f.types.iter().next().unwrap()).is_ok()
- })
- })
- }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct AggregateTSMMeasurement {
- // Map of tag name -> tag; note that the schema we get from the TSM tool has these as arrays.
- // Using HashMaps internally to detect duplicates, so we have to do some custom serialisation
- // for tags and fields here.
- #[serde(
- serialize_with = "serialize_map_values",
- deserialize_with = "deserialize_tags"
- )]
- pub tags: HashMap<String, AggregateTSMTag>,
- #[serde(
- serialize_with = "serialize_map_values",
- deserialize_with = "deserialize_fields"
- )]
- pub fields: HashMap<String, AggregateTSMField>,
- pub earliest_time: DateTime<FixedOffset>,
- pub latest_time: DateTime<FixedOffset>,
-}
-
-fn serialize_map_values<S, K, V>(value: &HashMap<K, V>, serializer: S) -> Result<S::Ok, S::Error>
-where
- S: Serializer,
- V: Serialize,
-{
- serializer.collect_seq(value.values())
-}
-
-fn deserialize_tags<'de, D>(deserializer: D) -> Result<HashMap<String, AggregateTSMTag>, D::Error>
-where
- D: Deserializer<'de>,
-{
- let v: Vec<AggregateTSMTag> = Deserialize::deserialize(deserializer)?;
- Ok(v.into_iter().map(|t| (t.name.clone(), t)).collect())
-}
-
-fn deserialize_fields<'de, D>(
- deserializer: D,
-) -> Result<HashMap<String, AggregateTSMField>, D::Error>
-where
- D: Deserializer<'de>,
-{
- let v: Vec<AggregateTSMField> = Deserialize::deserialize(deserializer)?;
- Ok(v.into_iter().map(|f| (f.name.clone(), f)).collect())
-}
-
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
-pub struct AggregateTSMTag {
- pub name: String,
- pub values: HashSet<String>,
-}
-
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
-pub struct AggregateTSMField {
- pub name: String,
- pub types: HashSet<String>,
-}
-
-impl TryFrom<Vec<u8>> for AggregateTSMSchema {
- type Error = serde_json::Error;
-
- fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
- serde_json::from_slice(&data)
- }
-}
-
-impl TryFrom<&str> for AggregateTSMSchema {
- type Error = serde_json::Error;
-
- fn try_from(data: &str) -> Result<Self, Self::Error> {
- serde_json::from_str(data)
- }
-}
-
-/// A variation on AggregateTSMSchema with the following differences:
-/// - no org and bucket
-/// - no earliest/latest time
-/// - no tags (that may change once we decide what to do about tags/fields with the same name- for
-/// now they'll fail validation and you can't fix it via the override)
-/// - fields have only one type
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct AggregateTSMSchemaOverride {
- pub measurements: HashMap<String, AggregateTSMSchemaOverrideMeasurement>,
-}
-
-/// Field type override; note there is only one type
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct AggregateTSMSchemaOverrideField {
- pub name: String,
- pub r#type: String,
-}
-
-/// Override for a measurement, not there are no tags as they can't be overridden
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct AggregateTSMSchemaOverrideMeasurement {
- // Map of field name -> field; note that the schema of the config file has these as arrays.
- // Using HashMaps internally to avoid duplicates, so we have to do some custom serialisation
- // for fields here.
- #[serde(
- serialize_with = "serialize_map_values",
- deserialize_with = "deserialize_override_fields",
- default
- )]
- pub fields: HashMap<String, AggregateTSMSchemaOverrideField>,
-}
-
-fn deserialize_override_fields<'de, D>(
- deserializer: D,
-) -> Result<HashMap<String, AggregateTSMSchemaOverrideField>, D::Error>
-where
- D: Deserializer<'de>,
-{
- let v: Vec<AggregateTSMSchemaOverrideField> = Deserialize::deserialize(deserializer)?;
- Ok(v.into_iter().map(|f| (f.name.clone(), f)).collect())
-}
-
-impl TryFrom<Vec<u8>> for AggregateTSMSchemaOverride {
- type Error = serde_json::Error;
-
- fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
- serde_json::from_slice(&data)
- }
-}
-
-impl TryFrom<&str> for AggregateTSMSchemaOverride {
- type Error = serde_json::Error;
-
- fn try_from(data: &str) -> Result<Self, Self::Error> {
- serde_json::from_str(data)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use assert_matches::assert_matches;
-
- #[tokio::test]
- async fn parses() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- assert_eq!(schema.org_id, "1234");
- assert_eq!(schema.bucket_id, "5678");
- assert_eq!(schema.measurements.len(), 1);
- assert!(schema.measurements.contains_key("cpu"));
- let measurement = schema.measurements.get("cpu").unwrap();
- assert_eq!(measurement.tags.len(), 1);
- let tag = &measurement.tags.values().next().unwrap();
- assert_eq!(tag.name, "host");
- assert_eq!(
- tag.values,
- HashSet::from(["server".to_string(), "desktop".to_string()])
- );
- let field = &measurement.fields.values().next().unwrap();
- assert_eq!(field.name, "usage");
- assert_eq!(field.types, HashSet::from(["Float".to_string()]));
- // exercise the Vec<u8> tryfrom impl too
- assert_eq!(schema, json.as_bytes().to_vec().try_into().unwrap());
- // now exercise the serialise code too
- let schema = AggregateTSMSchema {
- org_id: "1234".to_string(),
- bucket_id: "5678".to_string(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00.00Z").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00.00Z").unwrap(),
- },
- )]),
- };
- let _json = serde_json::to_string(&schema).unwrap();
- // ^ not asserting on the value because vector ordering changes so it would be flakey. it's
- // enough that it serialises without error
- }
-
- #[tokio::test]
- async fn type_validation_happy() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- assert!(schema.types_are_valid());
- }
-
- #[tokio::test]
- async fn type_validation_invalid_type() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop"] }
- ],
- "fields": [
- { "name": "usage", "types": ["FloatyMcFloatFace"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- assert!(!schema.types_are_valid());
- }
-
- #[tokio::test]
- async fn type_validation_multiple_types() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float", "Integer"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- assert!(!schema.types_are_valid());
- }
-
- #[tokio::test]
- async fn override_parses() {
- let json = r#"
- {
- "measurements": {
- "cpu": {
- "fields": [
- { "name": "usage", "type": "Float" }
- ]
- }
- }
- }
- "#;
- let override_schema: AggregateTSMSchemaOverride = json.try_into().unwrap();
- assert_eq!(override_schema.measurements.len(), 1);
- assert!(override_schema.measurements.contains_key("cpu"));
- let measurement = override_schema.measurements.get("cpu").unwrap();
- let field = &measurement.fields.values().next().unwrap();
- assert_eq!(field.name, "usage");
- assert_eq!(field.r#type, "Float");
- // exercise the Vec<u8> tryfrom impl too
- assert_eq!(
- override_schema,
- json.as_bytes().to_vec().try_into().unwrap()
- );
- // now exercise the serialise code too, although this is only used in tests
- let schema = AggregateTSMSchemaOverride {
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMSchemaOverrideMeasurement {
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMSchemaOverrideField {
- name: "usage".to_string(),
- r#type: "Float".to_string(),
- },
- )]),
- },
- )]),
- };
- let _json = serde_json::to_string(&schema).unwrap();
- // ^ not asserting on the value because vector ordering changes so it would be flakey. it's
- // enough that it serialises without error
- }
-
- #[tokio::test]
- async fn override_fails_to_parse_multiple_types() {
- // this clearly breaks the schema but someone could conceivably try this by copy-paste
- // accident so let's be sure that it fails
- let json = r#"
- {
- "measurements": {
- "cpu": {
- "fields": [
- { "name": "usage", "types": ["Float", "Integer"] }
- ]
- }
- }
- }
- "#;
- let result: Result<AggregateTSMSchemaOverride, serde_json::Error> = json.try_into();
- assert_matches!(result, Err(serde_json::Error { .. }));
- }
-}
diff --git a/import_export/src/tsm/tsm_schema/fetch.rs b/import_export/src/tsm/tsm_schema/fetch.rs
deleted file mode 100644
index 7a98226bae1..00000000000
--- a/import_export/src/tsm/tsm_schema/fetch.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-use std::sync::Arc;
-
-use futures::prelude::*;
-use object_store::{path::Path, DynObjectStore, GetResult};
-use thiserror::Error;
-use tokio::select;
-
-use crate::tsm::AggregateTSMSchema;
-
-// Possible errors from schema commands
-#[derive(Debug, Error)]
-pub enum FetchError {
- #[error("Error fetching schemas from object storage: {0}")]
- Fetching(#[from] object_store::Error),
-
- #[error("Error parsing schema from object storage: {0}")]
- Parsing(#[from] serde_json::Error),
-}
-
-pub async fn fetch_schema(
- object_store: Arc<DynObjectStore>,
- prefix: Option<&Path>,
- suffix: &str,
-) -> Result<Vec<AggregateTSMSchema>, FetchError> {
- let mut schemas: Vec<AggregateTSMSchema> = vec![];
- let mut results = object_store
- .list(prefix)
- .await
- .map_err(FetchError::Fetching)?;
- // TODO: refactor to do these concurrently using `buffered`
- loop {
- select! {
- item = results.next() => {
- match item {
- Some(item) => {
- let item = item.map_err(FetchError::Fetching)?;
- if !item.location.as_ref().ends_with(suffix) {
- continue;
- }
- let read_stream = object_store.get(&item.location).await?;
- if let GetResult::Stream(read_stream) = read_stream {
- let chunks: Vec<_> = read_stream.try_collect().await?;
- let mut buf = Vec::with_capacity(chunks.iter().map(|c| c.len()).sum::<usize>());
- for c in chunks {
- buf.extend(c);
- }
- let schema: AggregateTSMSchema = buf.try_into().map_err(FetchError::Parsing)?;
- schemas.push(schema);
- }
- }
- None => {
- break;
- }
- }
- }
- }
- }
- Ok(schemas)
-}
diff --git a/import_export/src/tsm/tsm_schema/merge.rs b/import_export/src/tsm/tsm_schema/merge.rs
deleted file mode 100644
index 91301bde14d..00000000000
--- a/import_export/src/tsm/tsm_schema/merge.rs
+++ /dev/null
@@ -1,1231 +0,0 @@
-use crate::tsm::{
- AggregateTSMField, AggregateTSMMeasurement, AggregateTSMSchema, AggregateTSMSchemaOverride,
- AggregateTSMTag,
-};
-
-use thiserror::Error;
-
-#[derive(Debug, Error)]
-pub enum SchemaMergeError {
- /// A schema was found that didn't have the right org and bucket
- #[error("Found org/bucket {0}/{1}; expected {2}/{3}")]
- OrgBucketMismatch(String, String, String, String),
-
- /// The merge operation found no schemas to merge
- #[error("No schemas found to merge when performing merge operation")]
- NothingToMerge,
-}
-
-pub struct SchemaMerger {
- org_id: String,
- bucket_id: String,
- schemas: Vec<AggregateTSMSchema>,
- schema_override: Option<AggregateTSMSchemaOverride>,
-}
-
-impl SchemaMerger {
- pub fn new(org_id: String, bucket_id: String, schemas: Vec<AggregateTSMSchema>) -> Self {
- Self {
- org_id,
- bucket_id,
- schemas,
- schema_override: None,
- }
- }
-
- pub fn with_schema_override(mut self, schema_override: AggregateTSMSchemaOverride) -> Self {
- self.schema_override = Some(schema_override);
- self
- }
-
- /// Run the merge operation on the list of schemas
- pub fn merge(&self) -> Result<AggregateTSMSchema, SchemaMergeError> {
- // ensure all schemas are for the same org/bucket
- if let Some(s) = self
- .schemas
- .iter()
- .find(|s| s.org_id != self.org_id || s.bucket_id != self.bucket_id)
- {
- return Err(SchemaMergeError::OrgBucketMismatch(
- s.org_id.clone(),
- s.bucket_id.clone(),
- self.org_id.clone(),
- self.bucket_id.clone(),
- ));
- }
- let mut merged_schema = self
- .schemas
- .iter()
- .cloned()
- .reduce(|merged, s| do_merge_schema(&self.org_id, &self.bucket_id, &merged, &s))
- .ok_or(SchemaMergeError::NothingToMerge)?;
- if let Some(schema_override) = &self.schema_override {
- // we have been given config to override parts of the merged schema. usually this comes
- // from a discussion with the customer after attempts to bulk ingest highlighted schema
- // conflicts. using this config file we can A) coerce the data (later, with another
- // tool) and B) modify the schema here before updating it into the IOx catalog, so the
- // coerced data arriving later will match.
- do_schema_override(&mut merged_schema, schema_override);
- }
- Ok(merged_schema)
- }
-}
-
-fn do_schema_override(
- merged_schema: &mut AggregateTSMSchema,
- override_schema: &AggregateTSMSchemaOverride,
-) {
- for (measurement_name, override_measurement) in &override_schema.measurements {
- // if the override refers to a measurement not in the schema it will be ignored
- if let Some(merged_measurement) = merged_schema.measurements.get_mut(measurement_name) {
- // we only support overrides for field types at this point. later we may support
- // resolving tags/fields with the same name somehow
- for (field_name, override_field) in &override_measurement.fields {
- // if the override refers to a field not in the schema it will be ignored
- if let Some(field) = merged_measurement.fields.get_mut(field_name) {
-                    // whatever types were in there, we don't care; replace with the override
- field.types.clear();
- field.types.insert(override_field.r#type.clone());
- }
- }
- }
- }
-}
-
-// NOTE: assumes org and bucket are the same for both (checked before calling this fn).
-//
-// this schema merging code is similar to what is used in the IOx router but I decided not to use
-// that because:
-// - I'm building schemas that, when merged, are potentially bad (e.g. multiple types), in order
-//   to identify that very thing.
-// - I don't need the underlying parquet metadata struct for this type; it's just an interchange
-//   struct to detect schema anomalies. that may change in the future but for now this simple code
-//   will suffice.
-fn do_merge_schema(
- org_id: &str,
- bucket_id: &str,
- s1: &AggregateTSMSchema,
- s2: &AggregateTSMSchema,
-) -> AggregateTSMSchema {
- // start with everything in s1. for-each in s2, either merge or insert
- let mut merged_measurements = s1.measurements.clone();
- s2.measurements.iter().for_each(|s| {
- if let Some(m) = merged_measurements.get_mut(s.0) {
- do_merge_measurement(m, s.1);
- } else {
- // add it
- merged_measurements.insert(s.0.clone(), s.1.clone());
- }
- });
- AggregateTSMSchema {
- org_id: org_id.to_string(),
- bucket_id: bucket_id.to_string(),
- measurements: merged_measurements,
- }
-}
-
-fn do_merge_measurement(
- into_measurement: &mut AggregateTSMMeasurement,
- from_measurement: &AggregateTSMMeasurement,
-) {
- // merge tags
- from_measurement.tags.values().for_each(|from_tag| {
- if let Some(into_tag) = into_measurement.tags.get(&from_tag.name) {
- let mut new_tag = AggregateTSMTag {
- name: from_tag.name.clone(),
- values: into_tag.values.clone(),
- };
- new_tag.values.extend(from_tag.values.clone().into_iter());
- into_measurement.tags.insert(from_tag.name.clone(), new_tag);
- } else {
- into_measurement
- .tags
- .insert(from_tag.name.clone(), from_tag.clone());
- }
- });
- // merge fields
- from_measurement.fields.values().for_each(|from_field| {
- if let Some(into_field) = into_measurement.fields.get(&from_field.name) {
- let mut new_field = AggregateTSMField {
- name: from_field.name.clone(),
- types: into_field.types.clone(),
- };
- new_field.types.extend(from_field.types.clone().into_iter());
- into_measurement
- .fields
- .insert(from_field.name.clone(), new_field);
- } else {
- into_measurement
- .fields
- .insert(from_field.name.clone(), from_field.clone());
- }
- });
- // ensure sane time ranges have been given
- assert!(
- from_measurement
- .earliest_time
- .le(&from_measurement.latest_time)
- && into_measurement
- .earliest_time
- .le(&into_measurement.latest_time)
- );
- // merge earliest/latest times
- if from_measurement
- .earliest_time
- .lt(&into_measurement.earliest_time)
- {
- into_measurement.earliest_time = from_measurement.earliest_time;
- }
- if from_measurement
- .latest_time
- .gt(&into_measurement.latest_time)
- {
- into_measurement.latest_time = from_measurement.latest_time;
- }
-}
-
-#[cfg(test)]
-mod tests {
- use chrono::DateTime;
- use std::collections::{HashMap, HashSet};
-
- use super::*;
-
- #[tokio::test]
- async fn merge_measurements_adds_if_missing() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "sensor".to_string(),
- AggregateTSMTag {
- name: "sensor".to_string(),
- values: HashSet::from(["top".to_string(), "bottom".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- assert_eq!(m1.tags.len(), 2);
- assert_eq!(m1.fields.len(), 2);
- }
-
- #[tokio::test]
- async fn merge_measurements_merges_tag_with_new_value() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["gadget".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- assert_eq!(m1.tags.len(), 1);
- assert_eq!(m1.fields.len(), 1);
- assert_eq!(
- m1.tags.values().next().unwrap().values,
- HashSet::from([
- "server".to_string(),
- "desktop".to_string(),
- "gadget".to_string()
- ])
- );
- }
-
- #[tokio::test]
- async fn merge_measurements_merges_tag_with_new_and_old_values() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["gadget".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- assert_eq!(m1.tags.len(), 1);
- assert_eq!(m1.fields.len(), 1);
- assert_eq!(
- m1.tags.values().next().unwrap().values,
- HashSet::from([
- "server".to_string(),
- "desktop".to_string(),
- "gadget".to_string()
- ])
- );
- }
-
- #[tokio::test]
- async fn merge_measurements_merges_field_with_new_type() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Integer".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- assert_eq!(m1.tags.len(), 1);
- assert_eq!(m1.fields.len(), 1);
- assert_eq!(
- m1.fields.values().next().unwrap().types,
- HashSet::from(["Float".to_string(), "Integer".to_string(),])
- );
- }
-
- #[tokio::test]
- async fn merge_measurements_merges_field_with_new_and_old_types() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string(), "Integer".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- assert_eq!(m1.tags.len(), 1);
- assert_eq!(m1.fields.len(), 1);
- assert_eq!(
- m1.fields.values().next().unwrap().types,
- HashSet::from(["Float".to_string(), "Integer".to_string(),])
- );
- }
-
- #[tokio::test]
- async fn merge_measurements_test_time_merge_1() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "sensor".to_string(),
- AggregateTSMTag {
- name: "sensor".to_string(),
- values: HashSet::from(["top".to_string(), "bottom".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- // time range falls entirely within the first one
- earliest_time: DateTime::parse_from_rfc3339("2022-04-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-05-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- // result should always be the widest range, i.e. = the first range
- assert_eq!(m1.earliest_time.to_rfc3339(), "2022-01-01T00:00:00+00:00");
- assert_eq!(m1.latest_time.to_rfc3339(), "2022-07-07T06:00:00+00:00");
- }
-
- #[tokio::test]
- async fn merge_measurements_test_time_merge_2() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-02-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "sensor".to_string(),
- AggregateTSMTag {
- name: "sensor".to_string(),
- values: HashSet::from(["top".to_string(), "bottom".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- // time range falls entirely outside the first one
- earliest_time: DateTime::parse_from_rfc3339("2022-06-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- // result should always be the widest range, i.e. = from start of first to end of second
- assert_eq!(m1.earliest_time.to_rfc3339(), "2022-01-01T00:00:00+00:00");
- assert_eq!(m1.latest_time.to_rfc3339(), "2022-07-07T06:00:00+00:00");
- }
-
- #[tokio::test]
- async fn merge_measurements_test_time_merge_3() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-04-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-05-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "sensor".to_string(),
- AggregateTSMTag {
- name: "sensor".to_string(),
- values: HashSet::from(["top".to_string(), "bottom".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- // time range falls before the first one
- earliest_time: DateTime::parse_from_rfc3339("2022-02-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-03-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- // result should always be the widest range, i.e. = from start of second to end of first
- assert_eq!(m1.earliest_time.to_rfc3339(), "2022-02-01T00:00:00+00:00");
- assert_eq!(m1.latest_time.to_rfc3339(), "2022-05-07T06:00:00+00:00");
- }
-
- #[tokio::test]
- async fn merge_measurements_test_time_merge_4() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-04-01T00:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "sensor".to_string(),
- AggregateTSMTag {
- name: "sensor".to_string(),
- values: HashSet::from(["top".to_string(), "bottom".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- // time range starts when the first one ends
- earliest_time: DateTime::parse_from_rfc3339("2022-04-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
- // result should always be the widest range, i.e. = from start of first to end of second
- assert_eq!(m1.earliest_time.to_rfc3339(), "2022-01-01T00:00:00+00:00");
- assert_eq!(m1.latest_time.to_rfc3339(), "2022-07-07T06:00:00+00:00");
- }
-
- #[tokio::test]
- async fn merge_measurements_test_time_merge_5() {
- let mut m1 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let m2 = AggregateTSMMeasurement {
- tags: HashMap::from([(
- "sensor".to_string(),
- AggregateTSMTag {
- name: "sensor".to_string(),
- values: HashSet::from(["top".to_string(), "bottom".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- // time range is the same as the first one
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- do_merge_measurement(&mut m1, &m2);
-        // result should always be the widest range; here both input ranges are identical
- assert_eq!(m1.earliest_time.to_rfc3339(), "2022-01-01T00:00:00+00:00");
- assert_eq!(m1.latest_time.to_rfc3339(), "2022-07-07T06:00:00+00:00");
- }
-
- #[tokio::test]
- async fn merge_schema_adds_missing_measurement() {
- let s1 = AggregateTSMSchema {
- org_id: "myorg".to_string(),
- bucket_id: "mybucket".to_string(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00")
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- },
- )]),
- };
- let s2 = AggregateTSMSchema {
- org_id: "myorg".to_string(),
- bucket_id: "mybucket".to_string(),
- measurements: HashMap::from([(
- "weather".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "location".to_string(),
- AggregateTSMTag {
- name: "location".to_string(),
- values: HashSet::from(["london".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00")
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- },
- )]),
- };
- let merged = do_merge_schema("myorg", "mybucket", &s1, &s2);
- assert_eq!(merged.org_id, "myorg".to_string());
- assert_eq!(merged.bucket_id, "mybucket".to_string());
- assert_eq!(merged.measurements.len(), 2);
- let mut measurement_names = merged.measurements.keys().cloned().collect::<Vec<_>>();
- measurement_names.sort();
- assert_eq!(
- measurement_names,
- vec!["cpu".to_string(), "weather".to_string()]
- );
- }
-
- #[tokio::test]
- async fn merge_schema_merges_measurement() {
- let s1 = AggregateTSMSchema {
- org_id: "myorg".to_string(),
- bucket_id: "mybucket".to_string(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00")
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-04-07T06:00:00+00:00").unwrap(),
- },
- )]),
- };
- let s2 = AggregateTSMSchema {
- org_id: "myorg".to_string(),
- bucket_id: "mybucket".to_string(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["gadget".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Integer".to_string(), "Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-05-01T00:00:00+00:00")
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- },
- )]),
- };
- let merged = do_merge_schema("myorg", "mybucket", &s1, &s2);
- assert_eq!(merged.org_id, "myorg".to_string());
- assert_eq!(merged.bucket_id, "mybucket".to_string());
- assert_eq!(merged.measurements.len(), 1);
- let measurement = merged.measurements.get("cpu").unwrap();
- assert_eq!(
- measurement.tags.keys().cloned().collect::<Vec<_>>(),
- vec!["host".to_string()]
- );
- assert_eq!(
- measurement.tags.values().cloned().collect::<Vec<_>>(),
- vec![AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from([
- "server".to_string(),
- "desktop".to_string(),
- "gadget".to_string()
- ])
- }]
- );
- assert_eq!(
- measurement.fields.keys().cloned().collect::<Vec<_>>(),
- vec!["usage".to_string()]
- );
- assert_eq!(
- measurement.fields.values().cloned().collect::<Vec<_>>(),
- vec![AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Integer".to_string(), "Float".to_string()])
- }]
- );
- assert_eq!(
- measurement.earliest_time.to_rfc3339(),
- "2022-01-01T00:00:00+00:00"
- );
- assert_eq!(
- measurement.latest_time.to_rfc3339(),
- "2022-07-07T06:00:00+00:00"
- );
- }
-
- #[tokio::test]
- async fn merge_schema_batch() {
- let org = "myorg".to_string();
- let bucket = "mybucket".to_string();
- let merger = SchemaMerger::new(
- org.clone(),
- bucket.clone(),
- vec![
- AggregateTSMSchema {
- org_id: org.clone(),
- bucket_id: bucket.clone(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from([
- "server".to_string(),
- "desktop".to_string(),
- ]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2021-01-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2021-03-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- AggregateTSMSchema {
- org_id: org.clone(),
- bucket_id: bucket.clone(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["gadget".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Integer".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2021-02-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2021-07-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- AggregateTSMSchema {
- org_id: org.clone(),
- bucket_id: bucket.clone(),
- measurements: HashMap::from([(
- "weather".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "location".to_string(),
- AggregateTSMTag {
- name: "location".to_string(),
- values: HashSet::from(["london".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2022-01-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-05-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- AggregateTSMSchema {
- org_id: org,
- bucket_id: bucket,
- measurements: HashMap::from([(
- "weather".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "location".to_string(),
- AggregateTSMTag {
- name: "location".to_string(),
- values: HashSet::from(["berlin".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Integer".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2022-04-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- ],
- );
- let json = r#"
- {
- "org_id": "myorg",
- "bucket_id": "mybucket",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop", "gadget"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float", "Integer"] }
- ],
- "earliest_time": "2021-01-01T00:00:00.00Z",
- "latest_time": "2021-07-07T06:00:00.00Z"
- },
- "weather": {
- "tags": [
- { "name": "location", "values": ["london", "berlin"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float", "Integer"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let expected: AggregateTSMSchema = json.try_into().unwrap();
- assert_eq!(merger.merge().unwrap(), expected);
- }
-
- #[tokio::test]
- async fn merge_schema_batch_with_override() {
- let json = r#"
- {
- "measurements": {
- "cpu": {
- "fields": [
- { "name": "usage", "type": "Float" }
- ]
- },
- "weather": {
- "fields": [
- { "name": "temperature", "type": "Float" }
- ]
- }
- }
- }
- "#;
- let override_schema: AggregateTSMSchemaOverride = json.try_into().unwrap();
- let org = "myorg".to_string();
- let bucket = "mybucket".to_string();
- let merger = SchemaMerger::new(
- org.clone(),
- bucket.clone(),
- vec![
- AggregateTSMSchema {
- org_id: org.clone(),
- bucket_id: bucket.clone(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from([
- "server".to_string(),
- "desktop".to_string(),
- ]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2022-01-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- AggregateTSMSchema {
- org_id: org.clone(),
- bucket_id: bucket.clone(),
- measurements: HashMap::from([(
- "cpu".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["gadget".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Integer".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2022-01-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- AggregateTSMSchema {
- org_id: org.clone(),
- bucket_id: bucket.clone(),
- measurements: HashMap::from([(
- "weather".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "location".to_string(),
- AggregateTSMTag {
- name: "location".to_string(),
- values: HashSet::from(["london".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2022-01-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- AggregateTSMSchema {
- org_id: org,
- bucket_id: bucket,
- measurements: HashMap::from([(
- "weather".to_string(),
- AggregateTSMMeasurement {
- tags: HashMap::from([(
- "location".to_string(),
- AggregateTSMTag {
- name: "location".to_string(),
- values: HashSet::from(["berlin".to_string()]),
- },
- )]),
- fields: HashMap::from([(
- "temperature".to_string(),
- AggregateTSMField {
- name: "temperature".to_string(),
- types: HashSet::from(["Integer".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339(
- "2022-01-01T00:00:00+00:00",
- )
- .unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00")
- .unwrap(),
- },
- )]),
- },
- ],
- )
- .with_schema_override(override_schema);
- let json = r#"
- {
- "org_id": "myorg",
- "bucket_id": "mybucket",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop", "gadget"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- },
- "weather": {
- "tags": [
- { "name": "location", "values": ["london", "berlin"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let expected: AggregateTSMSchema = json.try_into().unwrap();
- assert_eq!(merger.merge().unwrap(), expected);
- }
-
- #[tokio::test]
- async fn override_schema() {
- let json = r#"
- {
- "org_id": "myorg",
- "bucket_id": "mybucket",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop", "gadget"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float", "Integer"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- },
- "weather": {
- "tags": [
- { "name": "location", "values": ["london", "berlin"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float", "Integer"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let mut merged_schema: AggregateTSMSchema = json.try_into().unwrap();
- let json = r#"
- {
- "measurements": {
- "cpu": {
- "fields": [
- { "name": "usage", "type": "Float" }
- ]
- },
- "weather": {
- "fields": [
- { "name": "temperature", "type": "Integer" }
- ]
- }
- }
- }
- "#;
- let override_schema: AggregateTSMSchemaOverride = json.try_into().unwrap();
- do_schema_override(&mut merged_schema, &override_schema);
- assert_eq!(
- merged_schema
- .measurements
- .get("cpu")
- .unwrap()
- .fields
- .get("usage")
- .unwrap()
- .types,
- HashSet::from(["Float".to_string()])
- );
- assert_eq!(
- merged_schema
- .measurements
- .get("weather")
- .unwrap()
- .fields
- .get("temperature")
- .unwrap()
- .types,
- HashSet::from(["Integer".to_string()])
- );
- }
-}
diff --git a/import_export/src/tsm/tsm_schema/mod.rs b/import_export/src/tsm/tsm_schema/mod.rs
deleted file mode 100644
index 73aefb7a419..00000000000
--- a/import_export/src/tsm/tsm_schema/mod.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-pub mod fetch;
-pub mod merge;
-pub mod update_catalog;
-pub mod validate;
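The deleted `mod.rs` shows how these pieces composed into one pipeline: `fetch` the per-file aggregate schemas from object storage, `merge` them (optionally applying an override), `validate`, and `update_catalog`. A minimal sketch of that end-to-end flow against the APIs in the deleted files; the `.schema.json` suffix and the boxed error type are assumptions, and the `validate` step is omitted because its API is not shown here.

use std::{error::Error, sync::Arc};

use iox_catalog::interface::Catalog;
use object_store::{path::Path, DynObjectStore};

use crate::tsm::{
    tsm_schema::{fetch::fetch_schema, merge::SchemaMerger, update_catalog::update_iox_catalog},
    AggregateTSMSchemaOverride,
};

// Sketch of the flow the deleted modules supported: fetch -> merge -> update catalog.
async fn merge_and_update(
    object_store: Arc<DynObjectStore>,
    catalog: Arc<dyn Catalog>,
    prefix: Option<&Path>,
    org_id: String,
    bucket_id: String,
    schema_override: Option<AggregateTSMSchemaOverride>,
) -> Result<(), Box<dyn Error>> {
    // fetch all per-file aggregate schemas; ".schema.json" is an assumed suffix
    let schemas = fetch_schema(object_store, prefix, ".schema.json").await?;
    // merge them into one schema, applying any field-type overrides
    let mut merger = SchemaMerger::new(org_id, bucket_id, schemas);
    if let Some(schema_override) = schema_override {
        merger = merger.with_schema_override(schema_override);
    }
    let merged = merger.merge()?;
    // (a real pipeline would validate `merged` here via the `validate` module)
    // push the merged schema into the IOx catalog, creating the namespace if needed
    update_iox_catalog(&merged, catalog).await?;
    Ok(())
}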
diff --git a/import_export/src/tsm/tsm_schema/update_catalog.rs b/import_export/src/tsm/tsm_schema/update_catalog.rs
deleted file mode 100644
index 234f4619335..00000000000
--- a/import_export/src/tsm/tsm_schema/update_catalog.rs
+++ /dev/null
@@ -1,1063 +0,0 @@
-use crate::tsm::{AggregateTSMMeasurement, AggregateTSMSchema};
-use chrono::{format::StrftimeItems, offset::FixedOffset, DateTime, Duration};
-use data_types::{
- ColumnType, Namespace, NamespaceName, NamespaceSchema, OrgBucketMappingError, Partition,
- PartitionKey, TableSchema,
-};
-use iox_catalog::interface::{
- get_schema_by_name, CasFailure, Catalog, RepoCollection, SoftDeletedRows,
-};
-use schema::{
- sort::{adjust_sort_key_columns, SortKey, SortKeyBuilder},
- InfluxColumnType, InfluxFieldType, TIME_COLUMN_NAME,
-};
-use std::{collections::HashMap, fmt::Write, ops::DerefMut, sync::Arc};
-use thiserror::Error;
-
-#[derive(Debug, Error)]
-pub enum UpdateCatalogError {
- #[error("Error returned from the Catalog: {0}")]
- CatalogError(#[from] iox_catalog::interface::Error),
-
- #[error("Error returned from the Catalog: failed to cas sort key update")]
- SortKeyCasError,
-
- #[error("Couldn't construct namespace from org and bucket: {0}")]
- InvalidOrgBucket(#[from] OrgBucketMappingError),
-
- #[error("No namespace named {0} in Catalog")]
- NamespaceNotFound(String),
-
- #[error("Failed to update schema in IOx Catalog: {0}")]
- SchemaUpdateError(String),
-
- #[error("Error creating namespace: {0}")]
- NamespaceCreationError(String),
-
- #[error("Time calculation error when deriving partition key: {0}")]
- PartitionKeyCalculationError(String),
-}
-
-/// Given a merged schema, update the IOx catalog to either merge that schema into the existing one
-/// for the namespace, or create the namespace and schema using the merged schema.
-/// Will error if the namespace needs to be created but the user hasn't explicitly set the
-/// retention setting, allowing the user to not provide it if it's not needed.
-pub async fn update_iox_catalog(
- merged_tsm_schema: &AggregateTSMSchema,
- catalog: Arc<dyn Catalog>,
-) -> Result<(), UpdateCatalogError> {
- let namespace_name =
- NamespaceName::from_org_and_bucket(&merged_tsm_schema.org_id, &merged_tsm_schema.bucket_id)
- .map_err(UpdateCatalogError::InvalidOrgBucket)?;
- let mut repos = catalog.repositories().await;
- let iox_schema = match get_schema_by_name(
- namespace_name.as_str(),
- repos.deref_mut(),
- SoftDeletedRows::AllRows,
- )
- .await
- {
- Ok(iox_schema) => iox_schema,
- Err(iox_catalog::interface::Error::NamespaceNotFoundByName { .. }) => {
- // create the namespace
- let _namespace = create_namespace(namespace_name.as_str(), repos.deref_mut()).await?;
- // fetch the newly-created schema (which will be empty except for the time column,
- // which won't impact the merge we're about to do)
- match get_schema_by_name(
- namespace_name.as_str(),
- repos.deref_mut(),
- SoftDeletedRows::AllRows,
- )
- .await
- {
- Ok(iox_schema) => iox_schema,
- Err(e) => return Err(UpdateCatalogError::CatalogError(e)),
- }
- }
- Err(e) => {
- return Err(UpdateCatalogError::CatalogError(e));
- }
- };
-
- update_catalog_schema_with_merged(iox_schema, merged_tsm_schema, repos.deref_mut()).await?;
- Ok(())
-}
-
-async fn create_namespace<R>(name: &str, repos: &mut R) -> Result<Namespace, UpdateCatalogError>
-where
- R: RepoCollection + ?Sized,
-{
- let namespace_name = NamespaceName::new(name)
- .map_err(|_| UpdateCatalogError::NamespaceCreationError(name.to_string()))?;
- match repos.namespaces().create(&namespace_name, None).await {
- Ok(ns) => Ok(ns),
- Err(iox_catalog::interface::Error::NameExists { .. }) => {
- // presumably it got created in the meantime?
- repos
- .namespaces()
- .get_by_name(name, SoftDeletedRows::ExcludeDeleted)
- .await
- .map_err(UpdateCatalogError::CatalogError)?
- .ok_or_else(|| UpdateCatalogError::NamespaceNotFound(name.to_string()))
- }
- Err(e) => {
- eprintln!("Failed to create namespace");
- Err(UpdateCatalogError::CatalogError(e))
- }
- }
-}
-
-/// Merge our aggregate TSM schema into the IOx schema for the namespace in the catalog.
-/// This is basically the same as iox_catalog::validate_mutable_batch() but operates on
-/// AggregateTSMSchema instead of a MutableBatch (we don't have any data, only a schema)
-async fn update_catalog_schema_with_merged<R>(
- iox_schema: NamespaceSchema,
- merged_tsm_schema: &AggregateTSMSchema,
- repos: &mut R,
-) -> Result<(), UpdateCatalogError>
-where
- R: RepoCollection + ?Sized,
-{
- for (measurement_name, measurement) in &merged_tsm_schema.measurements {
- // measurement name -> table name - does it exist in the schema?
- let table = match iox_schema.tables.get(measurement_name) {
- Some(t) => t.clone(),
- None => {
- // it doesn't; create it and add a time column
- let mut table = repos
- .tables()
- .create_or_get(measurement_name, iox_schema.id)
- .await
- .map(|t| TableSchema::new_empty_from(&t))?;
- let time_col = repos
- .columns()
- .create_or_get("time", table.id, ColumnType::Time)
- .await?;
- table.add_column(time_col);
- table
- }
- };
- // batch of columns to add into the schema at the end
- let mut column_batch = HashMap::new();
- // fields and tags are both columns; tag is a special type of column.
- // check that the schema has all these columns or update accordingly.
- for tag in measurement.tags.values() {
- match table.columns.get(tag.name.as_str()) {
- Some(c) if c.is_tag() => {
- // nothing to do, all good
- }
- Some(_) => {
- // a column that isn't a tag exists; not good
- return Err(UpdateCatalogError::SchemaUpdateError(format!(
- "a non-tag column with name {} already exists in the schema",
- tag.name.clone()
- )));
- }
- None => {
- // column doesn't exist; add it
- let old = column_batch.insert(tag.name.as_str(), ColumnType::Tag);
- assert!(
- old.is_none(),
- "duplicate column name `{}` in new column batch shouldn't be possible",
- tag.name
- );
- }
- }
- }
- for field in measurement.fields.values() {
- // we validated this in the Command with types_are_valid() so these two error checks
- // should never fire
- let field_type = field.types.iter().next().ok_or_else(|| {
- UpdateCatalogError::SchemaUpdateError(format!(
- "field with no type cannot be converted into an IOx field: {}",
- field.name
- ))
- })?;
- let influx_column_type =
- InfluxColumnType::Field(InfluxFieldType::try_from(field_type).map_err(|e| {
- UpdateCatalogError::SchemaUpdateError(format!(
- "error converting field {} with type {} to an IOx field: {}",
- field.name, field_type, e,
- ))
- })?);
- match table.columns.get(field.name.as_str()) {
- Some(c) if c.matches_type(influx_column_type) => {
- // nothing to do, all good
- }
- Some(_) => {
-                    // a column with that name exists but with a different type; not good
- return Err(UpdateCatalogError::SchemaUpdateError(format!(
- "a column with name {} already exists in the schema with a different type",
- field.name
- )));
- }
- None => {
- // column doesn't exist; add it
- let old = column_batch
- .insert(field.name.as_str(), ColumnType::from(influx_column_type));
- assert!(
- old.is_none(),
- "duplicate column name `{}` in new column batch shouldn't be possible",
- field.name
- );
- }
- }
- }
- if !column_batch.is_empty() {
- // add all the new columns we have to create for this table in one batch.
- // it would have been nice to call this once outside the loop and thus put less
-            // pressure on the catalog, but because ColumnUpsertRequest takes a slice I can't do
-            // that with short-lived loop variables.
-            // since this is a CLI tool rather than something called a lot on the write path, I
- // figure it's okay.
- repos
- .columns()
- .create_or_get_many_unchecked(table.id, column_batch)
- .await?;
- }
- // create a partition for every day in the date range.
- // N.B. this will need updating if we someday support partitioning by inputs other than
- // date, but this is what the router logic currently does so that would need to change too.
- let partition_keys =
- get_partition_keys_for_range(measurement.earliest_time, measurement.latest_time)?;
- for partition_key in partition_keys {
- // create the partition if it doesn't exist; new partitions get an empty sort key which
-            // gets matched as `None` in the code below
- let partition = repos
- .partitions()
- .create_or_get(partition_key, table.id)
- .await
- .map_err(UpdateCatalogError::CatalogError)?;
- // get the sort key from the partition, if it exists. create it or update it as
- // necessary
- if let (_metadata_sort_key, Some(sort_key)) = get_sort_key(&partition, measurement) {
- let sort_key = sort_key.to_columns().collect::<Vec<_>>();
- repos
- .partitions()
- .cas_sort_key(partition.id, Some(partition.sort_key), &sort_key)
- .await
- .map_err(|e| match e {
- CasFailure::ValueMismatch(_) => UpdateCatalogError::SortKeyCasError,
- CasFailure::QueryError(e) => UpdateCatalogError::CatalogError(e),
- })?;
- }
- }
- }
- Ok(())
-}
-
-fn get_sort_key(
- partition: &Partition,
- measurement: &AggregateTSMMeasurement,
-) -> (SortKey, Option<SortKey>) {
- let metadata_sort_key = partition.sort_key();
- match metadata_sort_key.as_ref() {
- Some(sk) => {
- // the partition already has a sort key; check if there are any modifications required
- // to it based on the primary key of this measurement. the second term of the tuple
- // returned contains the updated sort key for the catalog.
- let primary_key = compute_measurement_primary_key(measurement);
- adjust_sort_key_columns(sk, &primary_key)
- }
- None => {
- // the partition doesn't yet have a sort key (because it's newly created and set to
- // empty), so compute the optimal sort key based on the measurement schema. this will
- // use the cardinality of the tags in the aggregate TSM schema to set the sort key. IOx
- // makes the gamble that the first data written will be representative and that the
- // sort key calculated here will remain sufficiently optimal.
- // second term of the tuple is the same as the first because we are creating the sort
- // key.
- let sort_key = compute_measurement_sort_key(measurement);
- (sort_key.clone(), Some(sort_key))
- }
- }
-}
-
-fn compute_measurement_primary_key(measurement: &AggregateTSMMeasurement) -> Vec<&str> {
- let mut primary_keys: Vec<_> = measurement.tags.keys().map(|k| k.as_str()).collect();
- primary_keys.sort();
- primary_keys.push("time");
- primary_keys
-}
-
-// based on schema::sort::compute_sort_key, but works with AggregateTSMMeasurement (which has
-// already done the tag value collation work for us) rather than MutableBatch
-fn compute_measurement_sort_key(measurement: &AggregateTSMMeasurement) -> SortKey {
- // use the number of tag values from the measurement to compute cardinality; then sort
- let mut cardinalities: HashMap<String, u64> = HashMap::new();
- measurement.tags.values().for_each(|tag| {
- cardinalities.insert(
- tag.name.clone(),
- tag.values.len().try_into().expect("usize -> u64 overflow"),
- );
- });
- let mut cardinalities: Vec<_> = cardinalities.into_iter().collect();
- cardinalities.sort_by_cached_key(|x| (x.1, x.0.clone()));
-
- // build a sort key from the tag cardinality data
- let mut builder = SortKeyBuilder::with_capacity(cardinalities.len() + 1);
- for (col, _) in cardinalities {
- builder = builder.with_col(col)
- }
- builder = builder.with_col(TIME_COLUMN_NAME);
- builder.build()
-}
-
-fn get_partition_keys_for_range(
- earliest_time: DateTime<FixedOffset>,
- latest_time: DateTime<FixedOffset>,
-) -> Result<Vec<PartitionKey>, UpdateCatalogError> {
- if latest_time.lt(&earliest_time) {
- // we have checked this elsewhere but just guarding against refactors!
- return Err(UpdateCatalogError::PartitionKeyCalculationError(
- "latest time earlier than earliest time".to_string(),
- ));
- }
- let mut keys = vec![];
- let mut d = earliest_time;
- while d <= latest_time {
- keys.push(datetime_to_partition_key(&d)?);
- d += Duration::days(1);
- }
- // the above while loop logic will miss the end date for a range that is less than a day but
- // crosses a date boundary, so...
- keys.push(datetime_to_partition_key(&latest_time)?);
- keys.dedup();
- Ok(keys)
-}
-
-fn datetime_to_partition_key(
- datetime: &DateTime<FixedOffset>,
-) -> Result<PartitionKey, UpdateCatalogError> {
- let mut partition_key = String::new();
- write!(
- partition_key,
- "{}",
- datetime.format_with_items(StrftimeItems::new("%Y-%m-%d")),
- )
- .map_err(|e| UpdateCatalogError::PartitionKeyCalculationError(e.to_string()))?;
- Ok(partition_key.into())
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::tsm::{AggregateTSMField, AggregateTSMTag};
- use assert_matches::assert_matches;
- use data_types::{PartitionId, TableId};
- use iox_catalog::{
- mem::MemCatalog,
- test_helpers::{arbitrary_namespace, arbitrary_table},
- };
- use std::collections::HashSet;
-
- #[tokio::test]
- async fn needs_creating() {
- // init a test catalog stack
- let metrics = Arc::new(metric::Registry::default());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
-
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let agg_schema: AggregateTSMSchema = json.try_into().unwrap();
- update_iox_catalog(&agg_schema, Arc::clone(&catalog))
- .await
- .expect("schema update worked");
- let mut repos = catalog.repositories().await;
- let iox_schema = get_schema_by_name(
- "1234_5678",
- repos.deref_mut(),
- SoftDeletedRows::ExcludeDeleted,
- )
- .await
- .expect("got schema");
- assert_eq!(iox_schema.tables.len(), 1);
- let table = iox_schema.tables.get("cpu").expect("got table");
- assert_eq!(table.column_count(), 3); // one tag & one field, plus time
- let tag = table.columns.get("host").expect("got tag");
- assert!(tag.is_tag());
- let field = table.columns.get("usage").expect("got field");
- assert_eq!(
- field.column_type,
- InfluxColumnType::Field(InfluxFieldType::Float)
- );
- // check that the partitions were created and the sort keys are correct
- let partitions = repos
- .partitions()
- .list_by_table_id(table.id)
- .await
- .expect("got partitions");
- // number of days in the date range of the schema
- assert_eq!(partitions.len(), 188);
- // check the sort keys of the first and last as a sanity check (that code is tested more
- // thoroughly below)
- let (first_partition, last_partition) = {
- let mut partitions_iter = partitions.into_iter();
- (
- partitions_iter.next().expect("partitions not empty"),
- partitions_iter.last().expect("partitions not empty"),
- )
- };
- let first_sort_key = first_partition.sort_key;
- let last_sort_key = last_partition.sort_key;
- // ensure sort key is updated; new columns get appended after existing ones only
- assert_eq!(first_sort_key, vec!["host", "time"]);
- assert_eq!(last_sort_key, vec!["host", "time"]);
- }
-
- #[tokio::test]
- async fn needs_merging() {
- // init a test catalog stack
- let metrics = Arc::new(metric::Registry::default());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
- // We need txn to go out of scope to release the lock before update_iox_catalog
- {
- let mut txn = catalog.repositories().await;
- // create namespace, table and columns for weather measurement
- let namespace = arbitrary_namespace(&mut *txn, "1234_5678").await;
- let table = arbitrary_table(&mut *txn, "weather", &namespace).await;
- let time_col = txn
- .columns()
- .create_or_get("time", table.id, ColumnType::Time)
- .await
- .expect("column created");
- let mut table = TableSchema::new_empty_from(&table);
- table.add_column(time_col);
- let location_col = txn
- .columns()
- .create_or_get("city", table.id, ColumnType::Tag)
- .await
- .expect("column created");
- let temperature_col = txn
- .columns()
- .create_or_get("temperature", table.id, ColumnType::F64)
- .await
- .expect("column created");
- table.add_column(location_col);
- table.add_column(temperature_col);
- }
-
- // merge with aggregate schema that has some overlap
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "weather": {
- "tags": [
- { "name": "country", "values": ["United Kingdom"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float"] },
- { "name": "humidity", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let agg_schema: AggregateTSMSchema = json.try_into().unwrap();
- update_iox_catalog(&agg_schema, Arc::clone(&catalog))
- .await
- .expect("schema update worked");
- let mut repos = catalog.repositories().await;
- let iox_schema = get_schema_by_name(
- "1234_5678",
- repos.deref_mut(),
- SoftDeletedRows::ExcludeDeleted,
- )
- .await
- .expect("got schema");
- assert_eq!(iox_schema.tables.len(), 1);
- let table = iox_schema.tables.get("weather").expect("got table");
- assert_eq!(table.column_count(), 5); // two tags, two fields, plus time
- let tag1 = table.columns.get("city").expect("got tag");
- assert!(tag1.is_tag());
- let tag2 = table.columns.get("country").expect("got tag");
- assert!(tag2.is_tag());
- let field1 = table.columns.get("temperature").expect("got field");
- assert_eq!(
- field1.column_type,
- InfluxColumnType::Field(InfluxFieldType::Float)
- );
- let field2 = table.columns.get("humidity").expect("got field");
- assert_eq!(
- field2.column_type,
- InfluxColumnType::Field(InfluxFieldType::Float)
- );
- }
-
- #[tokio::test]
- async fn needs_merging_duplicate_tag_field_name() {
- // init a test catalog stack
- let metrics = Arc::new(metric::Registry::default());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
- // We need txn to go out of scope to release the lock before update_iox_catalog
- {
- let mut txn = catalog.repositories().await;
- // create namespace, table and columns for weather measurement
- let namespace = arbitrary_namespace(&mut *txn, "1234_5678").await;
- let table = arbitrary_table(&mut *txn, "weather", &namespace).await;
- let time_col = txn
- .columns()
- .create_or_get("time", table.id, ColumnType::Time)
- .await
- .expect("column created");
- let mut table = TableSchema::new_empty_from(&table);
- table.add_column(time_col);
- let temperature_col = txn
- .columns()
- .create_or_get("temperature", table.id, ColumnType::F64)
- .await
- .expect("column created");
- table.add_column(temperature_col);
- }
-
- // merge with aggregate schema that has some issue that will trip a catalog error
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "weather": {
- "tags": [
- { "name": "temperature", "values": ["unseasonably_warm"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let agg_schema: AggregateTSMSchema = json.try_into().unwrap();
- let err = update_iox_catalog(&agg_schema, Arc::clone(&catalog))
- .await
- .expect_err("should fail catalog update");
- assert_matches!(err, UpdateCatalogError::SchemaUpdateError(_));
- assert!(err
- .to_string()
- .ends_with("a non-tag column with name temperature already exists in the schema"));
- }
-
- #[tokio::test]
- async fn needs_merging_column_exists_different_type() {
- // init a test catalog stack
- let metrics = Arc::new(metric::Registry::default());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
-
- // We need txn to go out of scope to release the lock before update_iox_catalog
- {
- let mut txn = catalog.repositories().await;
- // create namespace, table and columns for weather measurement
- let namespace = arbitrary_namespace(&mut *txn, "1234_5678").await;
- let table = arbitrary_table(&mut *txn, "weather", &namespace).await;
- let time_col = txn
- .columns()
- .create_or_get("time", table.id, ColumnType::Time)
- .await
- .expect("column created");
- let mut table = TableSchema::new_empty_from(&table);
- table.add_column(time_col);
- let temperature_col = txn
- .columns()
- .create_or_get("temperature", table.id, ColumnType::F64)
- .await
- .expect("column created");
- table.add_column(temperature_col);
- }
-
- // merge with aggregate schema that has some issue that will trip a catalog error
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "weather": {
- "tags": [
- ],
- "fields": [
- { "name": "temperature", "types": ["Integer"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let agg_schema: AggregateTSMSchema = json.try_into().unwrap();
- let err = update_iox_catalog(&agg_schema, Arc::clone(&catalog))
- .await
- .expect_err("should fail catalog update");
- assert_matches!(err, UpdateCatalogError::SchemaUpdateError(_));
- assert!(err.to_string().ends_with(
- "a column with name temperature already exists in the schema with a different type"
- ));
- }
-
- #[tokio::test]
- async fn partition_keys_from_datetime_range_midday_to_midday() {
- let earliest_time = DateTime::parse_from_rfc3339("2022-10-30T12:00:00+00:00")
- .expect("rfc3339 date parsing failed");
- let latest_time = DateTime::parse_from_rfc3339("2022-11-01T12:00:00+00:00")
- .expect("rfc3339 date parsing failed");
- let keys = get_partition_keys_for_range(earliest_time, latest_time)
- .expect("error creating partition keys in test");
- assert_eq!(
- keys,
- vec![
- "2022-10-30".into(),
- "2022-10-31".into(),
- "2022-11-01".into()
- ]
- );
- }
-
- #[tokio::test]
- async fn partition_keys_from_datetime_range_within_a_day() {
- let earliest_time = DateTime::parse_from_rfc3339("2022-10-31T00:00:00+00:00")
- .expect("rfc3339 date parsing failed");
- let latest_time = DateTime::parse_from_rfc3339("2022-10-31T12:00:00+00:00")
- .expect("rfc3339 date parsing failed");
- let keys = get_partition_keys_for_range(earliest_time, latest_time)
- .expect("error creating partition keys in test");
- assert_eq!(keys, vec!["2022-10-31".into(),]);
- }
-
- #[tokio::test]
- async fn partition_keys_from_datetime_range_equal() {
- let earliest_time = DateTime::parse_from_rfc3339("2022-10-31T23:59:59+00:00")
- .expect("rfc3339 date parsing failed");
- let latest_time = DateTime::parse_from_rfc3339("2022-10-31T23:59:59+00:00")
- .expect("rfc3339 date parsing failed");
- let keys = get_partition_keys_for_range(earliest_time, latest_time)
- .expect("error creating partition keys in test");
- assert_eq!(keys, vec!["2022-10-31".into(),]);
- }
-
- #[tokio::test]
- async fn partition_keys_from_datetime_range_across_day_boundary() {
- let earliest_time = DateTime::parse_from_rfc3339("2022-10-31T23:59:59+00:00")
- .expect("rfc3339 date parsing failed");
- let latest_time = DateTime::parse_from_rfc3339("2022-11-01T00:00:01+00:00")
- .expect("rfc3339 date parsing failed");
- let keys = get_partition_keys_for_range(earliest_time, latest_time)
- .expect("error creating partition keys in test");
- assert_eq!(keys, vec!["2022-10-31".into(), "2022-11-01".into()]);
- }
-
- #[tokio::test]
- async fn compute_primary_key_not_sorted() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- // add something sorted after time lexicographically to tickle a bug as i write a
- // failing test
- (
- "zazzle".to_string(),
- AggregateTSMTag {
- name: "zazzle".to_string(),
- values: HashSet::from(["true".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let pk = compute_measurement_primary_key(&m);
- assert_eq!(pk, vec!["arch", "host", "zazzle", "time"]);
- }
-
- #[tokio::test]
- async fn compute_primary_key_already_sorted() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let pk = compute_measurement_primary_key(&m);
- assert_eq!(pk, vec!["arch", "host", "time"]);
- }
-
- #[tokio::test]
- async fn compute_sort_key_not_sorted() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let sk = compute_measurement_sort_key(&m);
- let sk = sk.to_columns().collect::<Vec<_>>();
- assert_eq!(sk, vec!["host", "arch", "time"]);
- }
-
- #[tokio::test]
- async fn compute_sort_key_already_sorted() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let sk = compute_measurement_sort_key(&m);
- let sk = sk.to_columns().collect::<Vec<_>>();
- assert_eq!(sk, vec!["host", "arch", "time"]);
- }
-
- #[tokio::test]
- async fn compute_sort_key_already_sorted_more_tags() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- (
- "os".to_string(),
- AggregateTSMTag {
- name: "os".to_string(),
- values: HashSet::from([
- "linux".to_string(),
- "windows".to_string(),
- "osx".to_string(),
- "freebsd".to_string(),
- ]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let sk = compute_measurement_sort_key(&m);
- let sk = sk.to_columns().collect::<Vec<_>>();
- assert_eq!(sk, vec!["host", "arch", "os", "time"]);
- }
-
- #[tokio::test]
- async fn get_sort_key_was_empty() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let partition = Partition {
- id: PartitionId::new(1),
- table_id: TableId::new(1),
- partition_key: PartitionKey::from("2022-06-21"),
- sort_key: Vec::new(),
- new_file_at: None,
- };
- let sort_key = get_sort_key(&partition, &m).1.unwrap();
- let sort_key = sort_key.to_columns().collect::<Vec<_>>();
- // ensure sort key is updated with the computed one
- assert_eq!(sort_key, vec!["host", "arch", "time"]);
- }
-
- #[tokio::test]
- async fn get_sort_key_no_change() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let partition = Partition {
- id: PartitionId::new(1),
- table_id: TableId::new(1),
- partition_key: PartitionKey::from("2022-06-21"),
-            // N.B. sort key is already what it will be computed to; here we're testing the `adjust_sort_key_columns` code path
- sort_key: vec!["host".to_string(), "arch".to_string(), "time".to_string()],
- new_file_at: None,
- };
- // ensure sort key is unchanged
- let _maybe_updated_sk = get_sort_key(&partition, &m).1;
- assert_matches!(None::<SortKey>, _maybe_updated_sk);
- }
-
- #[tokio::test]
- async fn get_sort_key_with_changes_1() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let partition = Partition {
- id: PartitionId::new(1),
- table_id: TableId::new(1),
- partition_key: PartitionKey::from("2022-06-21"),
- // N.B. is missing host so will need updating
- sort_key: vec!["arch".to_string(), "time".to_string()],
- new_file_at: None,
- };
- let sort_key = get_sort_key(&partition, &m).1.unwrap();
- let sort_key = sort_key.to_columns().collect::<Vec<_>>();
- // ensure sort key is updated; host would have been sorted first but it got added later so
- // it won't be
- assert_eq!(sort_key, vec!["arch", "host", "time"]);
- }
-
- #[tokio::test]
- async fn get_sort_key_with_changes_2() {
- let m = AggregateTSMMeasurement {
- tags: HashMap::from([
- (
- "arch".to_string(),
- AggregateTSMTag {
- name: "arch".to_string(),
- values: HashSet::from([
- "amd64".to_string(),
- "x86".to_string(),
- "i386".to_string(),
- ]),
- },
- ),
- (
- "host".to_string(),
- AggregateTSMTag {
- name: "host".to_string(),
- values: HashSet::from(["server".to_string(), "desktop".to_string()]),
- },
- ),
- ]),
- fields: HashMap::from([(
- "usage".to_string(),
- AggregateTSMField {
- name: "usage".to_string(),
- types: HashSet::from(["Float".to_string()]),
- },
- )]),
- earliest_time: DateTime::parse_from_rfc3339("2022-01-01T00:00:00+00:00").unwrap(),
- latest_time: DateTime::parse_from_rfc3339("2022-07-07T06:00:00+00:00").unwrap(),
- };
- let partition = Partition {
- id: PartitionId::new(1),
- table_id: TableId::new(1),
- partition_key: PartitionKey::from("2022-06-21"),
- // N.B. is missing arch so will need updating
- sort_key: vec!["host".to_string(), "time".to_string()],
- new_file_at: None,
- };
- let sort_key = get_sort_key(&partition, &m).1.unwrap();
- let sort_key = sort_key.to_columns().collect::<Vec<_>>();
- // ensure sort key is updated; new columns get appended after existing ones only
- assert_eq!(sort_key, vec!["host", "arch", "time"]);
- }
-}
diff --git a/import_export/src/tsm/tsm_schema/validate.rs b/import_export/src/tsm/tsm_schema/validate.rs
deleted file mode 100644
index e86e9bee664..00000000000
--- a/import_export/src/tsm/tsm_schema/validate.rs
+++ /dev/null
@@ -1,136 +0,0 @@
-use crate::tsm::AggregateTSMSchema;
-use thiserror::Error;
-
-// Possible validation errors
-#[derive(Debug, Error)]
-pub enum ValidationError {
- #[error("Measurement '{measurement}' has a tag and field with the same name: {name}")]
- TagAndFieldSameName { measurement: String, name: String },
-
- #[error(
- "Measurement '{measurement}' has field '{name}' with multiple types: {:?}",
- types
- )]
- FieldWithMultipleTypes {
- measurement: String,
- name: String,
- types: Vec<String>,
- },
-}
-
-pub fn validate_schema(schema: &AggregateTSMSchema) -> Result<(), Vec<ValidationError>> {
- let mut errors: Vec<ValidationError> = vec![];
- for (measurement_name, measurement) in &schema.measurements {
- if let Some(tag_name) = measurement
- .tags
- .keys()
- .find(|&t| measurement.fields.contains_key(t))
- {
- errors.push(ValidationError::TagAndFieldSameName {
- measurement: measurement_name.clone(),
- name: tag_name.clone(),
- });
- }
- if let Some(field) = measurement.fields.values().find(|f| f.types.len() > 1) {
- errors.push(ValidationError::FieldWithMultipleTypes {
- measurement: measurement_name.clone(),
- name: field.name.clone(),
- types: field.types.iter().cloned().collect::<Vec<_>>(),
- });
- }
- }
- if !errors.is_empty() {
- Err(errors)
- } else {
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use assert_matches::assert_matches;
-
- #[tokio::test]
- async fn good() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "cpu": {
- "tags": [
- { "name": "host", "values": ["server", "desktop"] }
- ],
- "fields": [
- { "name": "usage", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- assert_matches!(validate_schema(&schema), Ok(_));
- }
-
- #[tokio::test]
- async fn tag_and_field_same_name() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "weather": {
- "tags": [
- { "name": "temperature", "values": ["true"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- let errors = validate_schema(&schema).expect_err("should fail to validate schema");
- assert_eq!(errors.len(), 1);
- assert_matches!(
- errors.get(0),
- Some(ValidationError::TagAndFieldSameName { .. })
- );
- }
-
- #[tokio::test]
- async fn field_with_multiple_types() {
- let json = r#"
- {
- "org_id": "1234",
- "bucket_id": "5678",
- "measurements": {
- "weather": {
- "tags": [
- { "name": "location", "values": ["London", "Berlin"] }
- ],
- "fields": [
- { "name": "temperature", "types": ["Float", "Integer"] }
- ],
- "earliest_time": "2022-01-01T00:00:00.00Z",
- "latest_time": "2022-07-07T06:00:00.00Z"
- }
- }
- }
- "#;
- let schema: AggregateTSMSchema = json.try_into().unwrap();
- let errors = validate_schema(&schema).expect_err("should fail to validate schema");
- assert_eq!(errors.len(), 1);
- assert_matches!(
- errors.get(0),
- Some(ValidationError::FieldWithMultipleTypes { .. })
- );
- }
-}
diff --git a/influxdb_iox/src/commands/import/mod.rs b/influxdb_iox/src/commands/import/mod.rs
deleted file mode 100644
index 2fb94ba509b..00000000000
--- a/influxdb_iox/src/commands/import/mod.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use thiserror::Error;
-
-mod schema;
-
-#[derive(Debug, Error)]
-pub enum ImportError {
- #[error("Error in schema command: {0}")]
- SchemaError(#[from] schema::SchemaCommandError),
-}
-
-#[derive(Debug, clap::Parser)]
-pub struct Config {
- #[clap(subcommand)]
- command: Command,
-}
-
-#[derive(clap::Parser, Debug)]
-pub enum Command {
- /// Operations related to schema analysis.
- #[clap(subcommand)]
- Schema(Box<schema::Config>),
-}
-
-/// Handle variants of the schema command.
-pub async fn command(config: Config) -> Result<(), ImportError> {
- match config.command {
- Command::Schema(schema_config) => schema::command(*schema_config)
- .await
- .map_err(ImportError::SchemaError),
- }
-}
diff --git a/influxdb_iox/src/commands/import/schema.rs b/influxdb_iox/src/commands/import/schema.rs
deleted file mode 100644
index 41cb2ea264e..00000000000
--- a/influxdb_iox/src/commands/import/schema.rs
+++ /dev/null
@@ -1,175 +0,0 @@
-use std::{
- fmt::{Display, Formatter},
- fs,
- path::PathBuf,
- sync::Arc,
-};
-
-use clap::Parser;
-use clap_blocks::{
- catalog_dsn::CatalogDsnConfig,
- object_store::{make_object_store, ObjectStoreConfig},
-};
-use iox_time::{SystemProvider, TimeProvider};
-use object_store::{path::Path, DynObjectStore};
-use object_store_metrics::ObjectStoreMetrics;
-use thiserror::Error;
-
-use import_export::tsm::{
- fetch_schema, update_iox_catalog, validate_schema, AggregateTSMSchemaOverride, FetchError,
- SchemaMergeError, SchemaMerger, UpdateCatalogError, ValidationError,
-};
-
-use crate::process_info::setup_metric_registry;
-
-// Possible errors from schema commands
-#[derive(Debug, Error)]
-pub enum SchemaCommandError {
- #[error("Cannot parse object store config: {0}")]
- ObjectStoreParsing(#[from] clap_blocks::object_store::ParseError),
-
- #[error("Catalog DSN error: {0}")]
- CatalogDsn(#[from] clap_blocks::catalog_dsn::Error),
-
- #[error("Error fetching schemas from object storage: {0}")]
- Fetching(#[from] FetchError),
-
- #[error("Error merging schemas: {0}")]
- Merging(#[from] SchemaMergeError),
-
- #[error("Schema conflicts during merge:\n{0}")]
- Validating(#[from] ValidationErrors),
-
- #[error("Merged schema must have one valid Influx type only")]
- InvalidFieldTypes(),
-
- #[error("Error updating IOx catalog with merged schema: {0}")]
- UpdateCatalogError(#[from] UpdateCatalogError),
-
- #[error("Error reading schema override file from disk: {0}")]
- SchemaOverrideFileReadError(#[from] std::io::Error),
-
- #[error("Error parsing schema override file: {0}")]
- SchemaOverrideParseError(#[from] serde_json::Error),
-}
-
-#[derive(Debug, Error)]
-pub struct ValidationErrors(Vec<ValidationError>);
-
-impl Display for ValidationErrors {
- fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- self.0.iter().fold(Ok(()), |result, e| {
- result.and_then(|_| writeln!(f, "- {e}"))
- })
- }
-}
-
-#[derive(Parser, Debug)]
-pub enum Config {
- /// Merge schemas produced in the IOx prestep
- Merge(MergeConfig),
-}
-
-/// Merge schema created in pre-step
-#[derive(Parser, Debug)]
-pub struct MergeConfig {
- #[clap(flatten)]
- object_store: ObjectStoreConfig,
-
- #[clap(flatten)]
- catalog_dsn: CatalogDsnConfig,
-
- #[clap(long)]
-    /// Retention setting (used only if we need to create the namespace)
- retention: Option<String>,
-
- #[clap(long)]
- /// The Org ID of the schemas to merge
- org_id: String,
-
- #[clap(long)]
- /// The Bucket ID of the schemas to merge
- bucket_id: String,
-
- #[clap(long)]
- /// The filename prefix to look for in the object store bucket.
- /// Convention is to use `{org_id}/{bucket_id}/{job_name}/`
- prefix: String,
-
- #[clap(long, default_value = ".schema.json")]
- /// The filename suffix to look for in the object store bucket
- suffix: String,
-
- #[clap(long)]
- /// Filename of schema override file used to instruct this tool on how to resolve schema
- /// conflicts in the TSM schemas before updating the schema in the IOx catalog.
- schema_override_file: Option<PathBuf>,
-}
-
-/// Entry-point for the schema command
-pub async fn command(config: Config) -> Result<(), SchemaCommandError> {
- match config {
- Config::Merge(merge_config) => {
- let time_provider = Arc::new(SystemProvider::new()) as Arc<dyn TimeProvider>;
- let metrics = setup_metric_registry();
-
- let object_store = make_object_store(&merge_config.object_store)
- .map_err(SchemaCommandError::ObjectStoreParsing)?;
- // Decorate the object store with a metric recorder.
- let object_store: Arc<DynObjectStore> = Arc::new(ObjectStoreMetrics::new(
- object_store,
- time_provider,
- &metrics,
- ));
-
- let catalog = merge_config
- .catalog_dsn
- .get_catalog("import", Arc::clone(&metrics))
- .await?;
-
- // fetch the TSM schemas and merge into one aggregate schema
- let schemas = fetch_schema(
- Arc::clone(&object_store),
- Some(&Path::from(merge_config.prefix)),
- &merge_config.suffix,
- )
- .await?;
- let mut merger = SchemaMerger::new(
- merge_config.org_id.clone(),
- merge_config.bucket_id.clone(),
- schemas,
- );
-
- // load a schema override file, if provided, to resolve field type conflicts
- if let Some(schema_override_file) = merge_config.schema_override_file {
- let data = fs::read(schema_override_file)
- .map_err(SchemaCommandError::SchemaOverrideFileReadError)?;
- let schema_override: AggregateTSMSchemaOverride = data
- .try_into()
- .map_err(SchemaCommandError::SchemaOverrideParseError)?;
- merger = merger.with_schema_override(schema_override);
- }
-
- // note that this will also apply the schema override, if the user provided one
- let merged_tsm_schema = merger.merge().map_err(SchemaCommandError::Merging)?;
- // just print the merged schema for now; we'll do more with this in future PRs
- println!("Merged schema:\n{merged_tsm_schema:?}");
-
- // don't proceed unless we produce a valid merged schema
- if let Err(errors) = validate_schema(&merged_tsm_schema) {
- return Err(SchemaCommandError::Validating(ValidationErrors(errors)));
- }
-
- // From here we can happily .unwrap() the field types knowing they're valid
- if !merged_tsm_schema.types_are_valid() {
- return Err(SchemaCommandError::InvalidFieldTypes());
- }
-
- // given we have a valid aggregate TSM schema, fetch the schema for the namespace from
- // the IOx catalog, if it exists, and update it with our aggregate schema
- update_iox_catalog(&merged_tsm_schema, Arc::clone(&catalog)).await?;
-
- Ok(())
- }
- }
-}
diff --git a/influxdb_iox/src/main.rs b/influxdb_iox/src/main.rs
index b71da1bb9c2..114ed69c712 100644
--- a/influxdb_iox/src/main.rs
+++ b/influxdb_iox/src/main.rs
@@ -35,7 +35,6 @@ use tokio::runtime::Runtime;
mod commands {
pub mod catalog;
pub mod debug;
- pub mod import;
pub mod namespace;
pub mod query;
pub mod query_ingester;
@@ -209,9 +208,6 @@ enum Command {
/// Query the ingester only
QueryIngester(commands::query_ingester::Config),
- /// Commands related to the bulk ingest of data
- Import(commands::import::Config),
-
/// Various commands for namespace manipulation
Namespace(commands::namespace::Config),
}
@@ -373,13 +369,6 @@ fn main() -> Result<(), std::io::Error> {
std::process::exit(ReturnCode::Failure as _)
}
}
- Some(Command::Import(config)) => {
- let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
- if let Err(e) = commands::import::command(config).await {
- eprintln!("{e}");
- std::process::exit(ReturnCode::Failure as _)
- }
- }
Some(Command::Namespace(config)) => {
let _tracing_guard = handle_init_logs(init_simple_logs(log_verbose_count));
let connection = connection(grpc_host).await;
| commit hash | date | author | commit message | is merge | masked commit message | type | git diff (truncated) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| a82dc6f5f0e9a5b0a80dab59ecd2498c6bf0c55d | 2021-10-19 22:44:08 | Andrew Lamb | chore: Update datafusion + arrow (#2903) | false | Update datafusion + arrow (#2903) | chore | "diff --git a/Cargo.lock b/Cargo.lock\nindex 8b8fdc08ef8..c487cc1a412 100644\n--- a/Cargo.lock\n+++ (...TRUNCATED) |
| f9d0e3714467400fa09bcf6f4b4d246cc9530b61 | 2022-08-08 15:09:26 | Andrew Lamb | chore: reduce h2 and hyper logging level in tests (#5332) | false | reduce h2 and hyper logging level in tests (#5332) | chore | "diff --git a/.circleci/config.yml b/.circleci/config.yml\nindex c0cf2618fd5..11b53bb7100 100644\n--(...TRUNCATED) |
| e573ca006141f3d2f168e8e56aa6c0ea35e18e44 | 2020-05-26 06:27:39 | Jake Goulding | test: Combine duplicate tests | false | Combine duplicate tests | test | "diff --git a/src/line_parser.rs b/src/line_parser.rs\nindex 0aac3989592..88daea6981f 100644\n--- a/(...TRUNCATED) |
| b5ce0e4c4d5fb84f0f90d9f4bc1892eefa5c37fc | 2023-02-03 18:56:32 | Dom Dwyer | refactor: remove test-only checksum | false | remove test-only checksum | refactor | "diff --git a/wal/src/blocking/reader.rs b/wal/src/blocking/reader.rs\nindex 80f6bb27315..29ae7fb2f9(...TRUNCATED) |
| b4b7822f2b41edefb4aa3f1800077bb01548de21 | 2023-07-25 18:14:37 | Dom Dwyer | perf: cache summary statistics in partition FSM | false | cache summary statistics in partition FSM | perf | "diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs\nindex b76efaae636..a0dbd14e593 100644\n(...TRUNCATED) |
| 61bc6df75b976568bc2ee8cc11fbe8e28e425027 | 2018-08-27 21:16:13 | Nathaniel Cook | fix: Fix missing builtins in REPL | false | Fix missing builtins in REPL | fix | "diff --git a/query/compile.go b/query/compile.go\nindex 5d45cb70bfc..2aa4dfb9866 100644\n--- a/quer(...TRUNCATED) |
End of preview.
No dataset card yet.
Downloads last month: 11
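
For anyone who wants to poke at commit rows like the ones previewed above, here is a minimal sketch of how such a CSV split could be inspected locally with pandas. The file name `train.csv` and the column names `type` and `is_merge` are assumptions about how the fields shown in the table are stored, not something confirmed by this preview; check `df.columns` and adjust accordingly.

```python
# Minimal sketch: inspect a local CSV of commit rows like the ones previewed above.
# Assumptions (not confirmed by the preview): the split is saved as "train.csv" in
# the working directory, and the commit-type / merge-flag fields are named "type"
# and "is_merge". Rename below to match whatever df.columns actually reports.
import pandas as pd

df = pd.read_csv("train.csv")

# Splits of a dataset can disagree on column names, so list them first
# instead of hard-coding a schema.
print(df.columns.tolist())

# Count commits per conventional-commit type (chore, fix, perf, refactor, test, ...).
if "type" in df.columns:
    print(df["type"].value_counts())

# Drop merge commits, handling either a real boolean column or "true"/"false" strings.
if "is_merge" in df.columns:
    df = df[~df["is_merge"].astype(str).str.lower().eq("true")]

print(f"{len(df)} commit rows after filtering")
```

The `git_diff` strings are shortened here for display only; in the underlying data that column holds the full unified diff, like the long one reproduced earlier on this page.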