Specify and implement reverse for ReadIndex and ReadBatch

parent 3ac6970a24 · commit 3c8e4df397
7 changed files with 40 additions and 5 deletions
@@ -379,6 +379,7 @@ Query parameters:
 | `start` | `null` | First partition key to list, in lexicographical order |
 | `end` | `null` | Last partition key to list (excluded) |
 | `limit` | `null` | Maximum number of partition keys to list |
+| `reverse` | `false` | Iterate in reverse lexicographical order |

 The response consists of a JSON object that repeats the parameters of the query and gives the result (see below).
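For illustration only, here is a minimal sketch of what a reverse index listing could look like from a client's point of view. The endpoint address, bucket name and prefix below are invented for the example, and request signing is omitted entirely.

```rust
// Hypothetical helper: build the URL for a ReadIndex call that lists at most
// 10 partition keys starting with "mailbox:", in reverse lexicographical order.
// The endpoint address and bucket name are placeholders, not taken from the spec.
fn read_index_url(endpoint: &str, bucket: &str) -> String {
    format!("{endpoint}/{bucket}?prefix=mailbox:&limit=10&reverse=true")
}

fn main() {
    println!("{}", read_index_url("http://localhost:3904", "mybucket"));
}
```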
@@ -415,6 +416,7 @@ HTTP/1.1 200 OK
   start: null,
   end: null,
   limit: null,
+  reverse: false,
   partitionKeys: [
     { pk: "keys", n: 3043 },
     { pk: "mailbox:INBOX", n: 42 },
@@ -485,13 +487,14 @@ JSON struct with the following fields:
 | `start` | `null` | The sort key of the first item to read |
 | `end` | `null` | The sort key of the last item to read (excluded) |
 | `limit` | `null` | The maximum number of items to return |
+| `reverse` | `false` | Iterate in reverse lexicographical order on sort keys |
 | `singleItem` | `false` | Whether to return only the item with sort key `start` |
 | `conflictsOnly` | `false` | Whether to return only items that have several concurrent values |
 | `tombstones` | `false` | Whether or not to return tombstone lines to indicate the presence of old deleted items |


 For each of the searches, triplets are listed and returned separately. The
-semantics of `prefix`, `start`, `end` and `limit` are the same as for ReadIndex. The
+semantics of `prefix`, `start`, `end`, `limit` and `reverse` are the same as for ReadIndex. The
 additional parameter `singleItem` allows retrieving a single item, whose sort key
 is the one given in `start`. Parameters `conflictsOnly` and `tombstones`
 control additional filters on the items that are returned.
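To make the defaults concrete, here is a sketch of a single search object as it might appear in a ReadBatch request body. The `partitionKey` field name is an assumption (it is not part of the excerpt above); every parameter left out falls back to the default listed in the table, including `reverse: false`.

```rust
use serde_json::json;

fn main() {
    // One search of a ReadBatch body with reverse iteration enabled.
    // Omitted fields keep the defaults listed in the table above.
    let search = json!({
        "partitionKey": "mailbox:INBOX",   // assumed field name, not shown in the excerpt
        "start": "001892831",
        "reverse": true
    });
    println!("{}", serde_json::to_string_pretty(&search).unwrap());
}
```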
@@ -553,6 +556,7 @@ HTTP/1.1 200 OK
   start: null,
   end: null,
   limit: null,
+  reverse: false,
   conflictsOnly: false,
   tombstones: false,
   singleItem: false,
@@ -570,6 +574,7 @@ HTTP/1.1 200 OK
   start: "001892831",
   end: null,
   limit: 3,
+  reverse: false,
   conflictsOnly: false,
   tombstones: false,
   singleItem: false,
@@ -589,6 +594,7 @@ HTTP/1.1 200 OK
   conflictsOnly: false,
   tombstones: false,
   limit: null,
+  reverse: false,
   singleItem: true,
   items: [
     { sk: "0", ct: "opaquetoken999", v: ["b64binarystuff999"] },
@@ -164,7 +164,8 @@ impl ApiHandler for K2VApiServer {
 				start,
 				end,
 				limit,
-			} => handle_read_index(garage, bucket_id, prefix, start, end, limit).await,
+				reverse,
+			} => handle_read_index(garage, bucket_id, prefix, start, end, limit, reverse).await,
 			Endpoint::InsertBatch {} => handle_insert_batch(garage, bucket_id, req).await,
 			Endpoint::ReadBatch {} => handle_read_batch(garage, bucket_id, req).await,
 			Endpoint::DeleteBatch {} => handle_delete_batch(garage, bucket_id, req).await,
@@ -115,7 +115,7 @@ async fn handle_read_batch_query(
 		&query.end,
 		query.limit,
 		Some(filter),
-		EnumerationOrder::Forward,
+		EnumerationOrder::from_reverse(query.reverse),
 	)
 	.await?;
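The call above replaces the hard-coded `EnumerationOrder::Forward` with a value derived from the query. The following is only a sketch of what `EnumerationOrder::from_reverse` is assumed to do; its actual definition lives elsewhere in the codebase and is not part of this diff.

```rust
// Assumed shape of the enumeration-order helper used above: a plain mapping
// from the `reverse` flag onto the order in which read_range walks keys.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EnumerationOrder {
    Forward,
    Reverse,
}

impl EnumerationOrder {
    pub fn from_reverse(reverse: bool) -> Self {
        if reverse {
            Self::Reverse
        } else {
            Self::Forward
        }
    }
}
```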
@@ -133,6 +133,7 @@ async fn handle_read_batch_query(
 		start: query.start,
 		end: query.end,
 		limit: query.limit,
+		reverse: query.reverse,
 		single_item: query.single_item,
 		conflicts_only: query.conflicts_only,
 		tombstones: query.tombstones,
@@ -278,6 +279,8 @@ struct ReadBatchQuery {
 	end: Option<String>,
 	#[serde(default)]
 	limit: Option<u64>,
+	#[serde(default)]
+	reverse: bool,
 	#[serde(default, rename = "singleItem")]
 	single_item: bool,
 	#[serde(default, rename = "conflictsOnly")]
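The `#[serde(default)]` attribute is what makes `reverse` optional in the request body. A self-contained sketch with a simplified stand-in struct (not the real `ReadBatchQuery`) shows that an omitted `reverse` deserializes to `false`:

```rust
use serde::Deserialize;

// Simplified stand-in for ReadBatchQuery, only to illustrate the serde attributes:
// fields marked #[serde(default)] take their Default value when absent from the body.
#[derive(Deserialize, Debug)]
struct QuerySketch {
    start: Option<String>,
    #[serde(default)]
    reverse: bool,
    #[serde(default, rename = "singleItem")]
    single_item: bool,
}

fn main() {
    let q: QuerySketch = serde_json::from_str(r#"{ "start": "001892831" }"#).unwrap();
    assert_eq!(q.reverse, false);       // defaulted
    assert_eq!(q.single_item, false);   // defaulted
    println!("{q:?}");
}
```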
@@ -294,6 +297,7 @@ struct ReadBatchResponse {
 	start: Option<String>,
 	end: Option<String>,
 	limit: Option<u64>,
+	reverse: bool,
 	#[serde(rename = "singleItem")]
 	single_item: bool,
 	#[serde(rename = "conflictsOnly")]
@@ -22,7 +22,10 @@ pub async fn handle_read_index(
 	start: Option<String>,
 	end: Option<String>,
 	limit: Option<u64>,
+	reverse: Option<bool>,
 ) -> Result<Response<Body>, Error> {
+	let reverse = reverse.unwrap_or(false);
+
 	let ring: Arc<Ring> = garage.system.ring.borrow().clone();

 	let (partition_keys, more, next_start) = read_range(
@@ -33,7 +36,7 @@ pub async fn handle_read_index(
 		&end,
 		limit,
 		Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())),
-		EnumerationOrder::Forward,
+		EnumerationOrder::from_reverse(reverse),
 	)
 	.await?;
@@ -47,6 +50,7 @@ pub async fn handle_read_index(
 		start,
 		end,
 		limit,
+		reverse,
 		partition_keys: partition_keys
 			.into_iter()
 			.map(|part| {
@@ -76,6 +80,7 @@ struct ReadIndexResponse {
 	start: Option<String>,
 	end: Option<String>,
 	limit: Option<u64>,
+	reverse: bool,

 	#[serde(rename = "partitionKeys")]
 	partition_keys: Vec<ReadIndexResponseEntry>,
@@ -39,6 +39,7 @@ pub enum Endpoint {
 		start: Option<String>,
 		end: Option<String>,
 		limit: Option<u64>,
+		reverse: Option<bool>,
 	},
 	ReadItem {
 		partition_key: String,
@@ -101,7 +102,7 @@ impl Endpoint {
 				EMPTY => ReadItem (query::sort_key),
 			],
 			no_key: [
-				EMPTY => ReadIndex (query_opt::prefix, query_opt::start, query_opt::end, opt_parse::limit),
+				EMPTY => ReadIndex (query_opt::prefix, query_opt::start, query_opt::end, opt_parse::limit, opt_parse::reverse),
 			]
 		}
 	}
@@ -236,6 +237,7 @@ generateQueryParameters! {
 	"causality_token" => causality_token,
 	"end" => end,
 	"limit" => limit,
+	"reverse" => reverse,
 	"sort_key" => sort_key,
 	"timeout" => timeout
 }
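`opt_parse::reverse` feeds the router's `generateQueryParameters!` machinery. A rough sketch of what parsing an optional boolean query parameter presumably amounts to (the real macro is more general and is not shown in this diff):

```rust
// Hypothetical helper: parse an optional query-string value into Option<bool>,
// leaving None when the parameter is absent so the handler can default to false.
fn opt_parse_bool(raw: Option<&str>) -> Result<Option<bool>, String> {
    match raw {
        None => Ok(None),
        Some(s) => s
            .parse::<bool>()
            .map(Some)
            .map_err(|e| format!("invalid value for `reverse`: {e}")),
    }
}

fn main() {
    assert_eq!(opt_parse_bool(None), Ok(None));
    assert_eq!(opt_parse_bool(Some("true")), Ok(Some(true)));
    assert!(opt_parse_bool(Some("maybe")).is_err());
}
```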
@@ -112,6 +112,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -132,6 +133,7 @@ async fn test_batch() {
 			"start": "c",
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -150,6 +152,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": 1,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -165,6 +168,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -267,6 +271,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -286,6 +291,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -302,6 +308,7 @@ async fn test_batch() {
 			"start": null,
 			"end": "d.2",
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -317,6 +324,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": 1,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -332,6 +340,7 @@ async fn test_batch() {
 			"start": "d.2",
 			"end": null,
 			"limit": 1,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -347,6 +356,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": 2,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,

@@ -427,6 +437,7 @@ async fn test_batch() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"conflictsOnly": false,
 			"tombstones": false,
 			"singleItem": false,
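All of the assertions above exercise the default `reverse: false`. For completeness, a hedged sketch (not taken from the test suite) of the response shape one might expect when querying the same kind of data with `reverse: true`: the documented parameters are echoed back and items come out in descending sort-key order. Field names follow the assertions above; the partition key and item values are invented and abridged.

```rust
use serde_json::json;

fn main() {
    // Illustrative expectation only: a reverse search over sort keys "a" < "b" < "c"
    // would echo the query parameters and list the items in descending order.
    let expected = json!({
        "partitionKey": "root",          // placeholder partition key
        "start": null,
        "end": null,
        "limit": null,
        "reverse": true,
        "conflictsOnly": false,
        "tombstones": false,
        "singleItem": false,
        "items": [
            { "sk": "c" },
            { "sk": "b" },
            { "sk": "a" }
        ]
    });
    println!("{}", serde_json::to_string_pretty(&expected).unwrap());
}
```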
@@ -27,6 +27,7 @@ async fn test_items_and_indices() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"partitionKeys": [],
 			"more": false,
 			"nextStart": null

@@ -100,6 +101,7 @@ async fn test_items_and_indices() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"partitionKeys": [
 				{
 					"pk": "root",

@@ -167,6 +169,7 @@ async fn test_items_and_indices() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"partitionKeys": [
 				{
 					"pk": "root",

@@ -234,6 +237,7 @@ async fn test_items_and_indices() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"partitionKeys": [
 				{
 					"pk": "root",

@@ -302,6 +306,7 @@ async fn test_items_and_indices() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"partitionKeys": [
 				{
 					"pk": "root",

@@ -323,6 +328,7 @@ async fn test_items_and_indices() {
 			"start": null,
 			"end": null,
 			"limit": null,
+			"reverse": false,
 			"partitionKeys": [],
 			"more": false,
 			"nextStart": null