Fully asynchronous, future-enabled Apache Druid client library for the Rust programming language.
The library provides a statically typed API for Druid Native Queries.
The library is hosted on crates.io.
```toml
[dependencies]
druid-io = "*"
```
Connect to a Druid cluster through a statically provided list of brokers:
```rust
let druid_client = DruidClient::new(vec!["localhost:8082".to_string()]);
```
Connect to a Druid cluster through ZooKeeper — supports auto-discovery of new brokers and load balancing:
```rust
TODO: ```
See the Apache Druid Timeseries query documentation.
```rust
#[derive(Serialize, Deserialize, Debug)]
pub struct TimeAggr {
    count: usize,
    count_fraction: f32,
    user: String,
}
let timeseries = Timeseries {
datasource: DataSource::table("wikipedia"),
limit: Some(10),
descending: false,
granularity: Granularity::All,
filter: Some(Filter::selector("user", "Taffe316")),
aggregations: vec![
Aggregation::count("count"),
Aggregation::StringFirst {
name: "user".into(),
field_name: "user".into(),
max_string_bytes: 1024,
},
],
post_aggregations: vec![PostAggregation::Arithmetic {
name: "count_fraction".into(),
function: "/".into(),
fields: vec![
PostAggregator::field_access("count_percent", "count"),
PostAggregator::constant("hundred", 100.into()),
],
ordering: None,
}],
intervals: vec!["-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z".into()],
context: context,
};
let result = tokio_test::block_on(druid_client.timeseries::<TimeAggr>(&timeseries));
```
See Apache Druid TopN query documentation
```rust
#[derive(Serialize, Deserialize, Debug)]
struct WikiPage {
    page: String,
    user: Option<String>,
}
let topn = TopN {
let topn = TopN {
datasource: DataSource::table("wikipedia"),
dimension: Dimension::default("page"),
threshold: 10,
metric: "count".into(),
aggregations: vec![
Aggregation::count("count"),
Aggregation::StringFirst {
name: "user".into(),
field_name: "user".into(),
max_string_bytes: 1024,
},
],
intervals: vec!["-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z".into()],
granularity: Granularity::All,
context: Default::default(),
};
let druid_client = DruidClient::new(vec!["localhost:8082".to_string()]);
let result = tokio_test::block_on(druid_client.topn::<WikiPage>(&topn));
```
See Apache Druid GroupBy query documentation
```rust
let groupby = GroupBy {
datasource: DataSource::table("wikipedia"),
dimensions: vec![Dimension::Default {
dimension: "page".into(),
output_name: "page".into(),
output_type: OutputType::STRING,
}],
limit_spec: Some(LimitSpec {
limit: 10,
columns: vec![OrderByColumnSpec::new(
"page",
Ordering::Descending,
SortingOrder::Alphanumeric,
)],
}),
granularity: Granularity::All,
filter: Some(Filter::selector("user", "Taffe316")),
aggregations: vec![
Aggregation::count("count"),
Aggregation::StringFirst {
name: "user".into(),
field_name: "user".into(),
max_string_bytes: 1024,
},
],
post_aggregations: vec![PostAggregation::Arithmetic {
name: "count_fraction".into(),
function: "/".into(),
fields: vec![
PostAggregator::field_access("count_percent", "count"),
PostAggregator::constant("hundred", 100.into()),
],
ordering: None,
}],
having: Some(HavingSpec::greater_than("count_fraction", 0.01.into())),
intervals: vec!["-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z".into()],
subtotals_spec: Default::default(),
context: Default::default(),
};
let result = tokio_test::block_on(druid_client.group_by::<WikiPage>(&groupby));
```
See Apache Druid TimeBoundary query documentation
Let's try something more complex: inner join
```rust
#[derive(Serialize, Deserialize, Debug)]
struct ScanEvent {
    #[serde(rename(deserialize = "__time"))]
    time: usize,
    city_name: Option<String>,
}
let result = tokio_test::block_on(druid_client.scan::<ScanEvent>(&scan));
```