//! Module containing types related to computing nodes which should receive a copy of data blocks
//! and metadata

use std::collections::{HashMap, HashSet};
use std::convert::TryInto;

use serde::{Deserialize, Serialize};

use garage_util::data::*;

/// A partition id, which is stored on 16 bits
/// i.e. we have up to 2**16 partitions.
/// (in practice we have exactly 2**PARTITION_BITS partitions)
pub type Partition = u16;

// TODO: make this constant parameterizable in the config file
// For deployments with many nodes it might make sense to bump
// it up to 10.
// Maximum value: 16
/// How many bits from the hash are used to make partitions. Higher numbers mean more fairness in
/// presence of numerous nodes, but an exponentially bigger ring. Max 16
pub const PARTITION_BITS: usize = 8;

const PARTITION_MASK_U16: u16 = ((1 << PARTITION_BITS) - 1) << (16 - PARTITION_BITS);
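// For instance, with PARTITION_BITS = 8 this mask is 0xff00: only the topmost
// PARTITION_BITS bits of the 16-bit hash prefix determine which partition an
// item belongs to.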

/// The user-defined configuration of the cluster's nodes
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct NetworkConfig {
	/// Map of each node's id to its configuration
	pub members: HashMap<Uuid, NetworkConfigEntry>,
	/// Version of this config
	pub version: u64,
}

impl NetworkConfig {
	pub(crate) fn new() -> Self {
		Self {
			members: HashMap::new(),
			version: 0,
		}
	}
}

/// The overall configuration of one (possibly remote) node
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct NetworkConfigEntry {
	/// Datacenter or zone to which this entry belongs. This information may be used to
	/// achieve better geodistribution
	pub zone: String,
	/// The (relative) capacity of the node
	/// If this is set to None, the node does not participate in storing data for the system
	/// and is only active as an API gateway to other nodes
	pub capacity: Option<u32>,
	/// A tag to recognize the entry, only used for display purposes
	pub tag: String,
}

impl NetworkConfigEntry {
	pub fn capacity_string(&self) -> String {
		match self.capacity {
			Some(c) => format!("{}", c),
			None => "gateway".to_string(),
		}
	}
}

/// A ring distributing objects fairly between nodes
#[derive(Clone)]
pub struct Ring {
	/// The replication factor for this ring
	pub replication_factor: usize,

	/// The network configuration used to generate this ring
	pub config: NetworkConfig,

	// Internal order of nodes used to make a more compact representation of the ring
	nodes: Vec<Uuid>,

	// The list of entries in the ring
	ring: Vec<RingEntry>,
}

// Type to store compactly the id of a node in the system
// Change this to u16 the day we want to have more than 256 nodes in a cluster
type CompactNodeType = u8;

// The maximum number of times an object might get replicated
// This must be at least 3 because Garage supports 3-way replication
// Here we use 6 so that the size of a ring entry is 8 bytes
// (2 bytes partition id, 6 bytes node numbers as u8s)
const MAX_REPLICATION: usize = 6;

/// An entry in the ring
#[derive(Clone, Debug)]
struct RingEntry {
	// The first two bytes of the first hash that goes in this partition
	// (the next bytes are zeroes)
	hash_prefix: u16,
	// The nodes that store this partition, stored as a list of positions in the `nodes`
	// field of the Ring structure
	// Only items 0 up to ring.replication_factor - 1 are used, others are zeros
	nodes_buf: [CompactNodeType; MAX_REPLICATION],
}

impl Ring {
	// TODO this function MUST be refactored: it's 100 lines long, with a 50-line loop going up to 6
	// levels of nesting. It is basically impossible to test, maintain, or understand for an
	// outsider.
	pub(crate) fn new(config: NetworkConfig, replication_factor: usize) -> Self {
		// Create a vector of partition indices (0 to 2**PARTITION_BITS-1)
		let partitions_idx = (0usize..(1usize << PARTITION_BITS)).collect::<Vec<_>>();

		let zones = config
			.members
			.iter()
			.filter(|(_id, info)| info.capacity.is_some())
			.map(|(_id, info)| info.zone.as_str())
			.collect::<HashSet<&str>>();
		let n_zones = zones.len();

		// Prepare ring
		let mut partitions: Vec<Vec<(&Uuid, &NetworkConfigEntry)>> = partitions_idx
			.iter()
			.map(|_i| Vec::new())
			.collect::<Vec<_>>();

		// Create MagLev priority queues for each node
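		// Each node builds a pseudo-random preference order over all partitions by
		// hashing (partition index, node id) pairs and sorting partitions by that hash.
		// Nodes will later pop partitions from this list in order (MagLev-style).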
		let mut queues = config
			.members
			.iter()
			.filter(|(_id, info)| info.capacity.is_some())
			.map(|(node_id, node_info)| {
				let mut parts = partitions_idx
					.iter()
					.map(|i| {
						let part_data =
							[&u16::to_be_bytes(*i as u16)[..], node_id.as_slice()].concat();
						(*i, fasthash(&part_data[..]))
					})
					.collect::<Vec<_>>();
				parts.sort_by_key(|(_i, h)| *h);
				let parts_i = parts.iter().map(|(i, _h)| *i).collect::<Vec<_>>();
				(node_id, node_info, parts_i, 0)
			})
			.collect::<Vec<_>>();

		let max_capacity = config
			.members
			.iter()
			.filter_map(|(_, node_info)| node_info.capacity)
			.fold(0, std::cmp::max);

		assert!(replication_factor <= MAX_REPLICATION);

		// Fill up ring
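		// For each replication level `rep`, nodes take turns popping partitions from
		// their preference lists. A partition accepts a node only if it brings a new
		// zone while some zones are still missing, or, once all zones are represented,
		// if the node does not already store that partition. The `i_round` check below
		// lets nodes with a higher capacity claim proportionally more partitions per pass.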
		for rep in 0..replication_factor {
			queues.sort_by_key(|(ni, _np, _q, _p)| {
				let queue_data = [&u16::to_be_bytes(rep as u16)[..], ni.as_slice()].concat();
				fasthash(&queue_data[..])
			});

			for (_, _, _, pos) in queues.iter_mut() {
				*pos = 0;
			}

			let mut remaining = partitions_idx.len();
			while remaining > 0 {
				let remaining0 = remaining;
				for i_round in 0..max_capacity {
					for (node_id, node_info, q, pos) in queues.iter_mut() {
						if i_round >= node_info.capacity.unwrap() {
							continue;
						}
						for (pos2, &qv) in q.iter().enumerate().skip(*pos) {
							if partitions[qv].len() != rep {
								continue;
							}
							let p_zns = partitions[qv]
								.iter()
								.map(|(_id, info)| info.zone.as_str())
								.collect::<HashSet<&str>>();
							if (p_zns.len() < n_zones
								&& !p_zns.contains(&node_info.zone.as_str()))
								|| (p_zns.len() == n_zones
									&& !partitions[qv].iter().any(|(id, _i)| id == node_id))
							{
								partitions[qv].push((node_id, node_info));
								remaining -= 1;
								*pos = pos2 + 1;
								break;
							}
						}
					}
				}
				if remaining == remaining0 {
					// No progress made, exit
					warn!("Could not build ring, not enough nodes configured.");
					return Self {
						replication_factor,
						config,
						nodes: vec![],
						ring: vec![],
					};
				}
			}
		}

		// Make a canonical order for nodes
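		// nodes_rev below maps each node id back to its index in this canonical order,
		// so that each ring entry can refer to a node with a single byte (CompactNodeType)
		// instead of a full 32-byte id.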
		let nodes = config
			.members
			.iter()
			.filter(|(_id, info)| info.capacity.is_some())
			.map(|(id, _)| *id)
			.collect::<Vec<_>>();
		let nodes_rev = nodes
			.iter()
			.enumerate()
			.map(|(i, id)| (*id, i as CompactNodeType))
			.collect::<HashMap<Uuid, CompactNodeType>>();

		let ring = partitions
			.iter()
			.enumerate()
			.map(|(i, nodes)| {
				let top = (i as u16) << (16 - PARTITION_BITS);
				let nodes = nodes
					.iter()
					.map(|(id, _info)| *nodes_rev.get(id).unwrap())
					.collect::<Vec<CompactNodeType>>();
				assert!(nodes.len() == replication_factor);
				let mut nodes_buf = [0u8; MAX_REPLICATION];
				nodes_buf[..replication_factor].copy_from_slice(&nodes[..]);
				RingEntry {
					hash_prefix: top,
					nodes_buf,
				}
			})
			.collect::<Vec<_>>();

		Self {
			replication_factor,
			config,
			nodes,
			ring,
		}
	}

	/// Get the partition in which data would fall
	pub fn partition_of(&self, position: &Hash) -> Partition {
		let top = u16::from_be_bytes(position.as_slice()[0..2].try_into().unwrap());
		top >> (16 - PARTITION_BITS)
	}

	/// Get the list of partitions and the first hash of a partition key that would fall in it
	pub fn partitions(&self) -> Vec<(Partition, Hash)> {
		let mut ret = vec![];

		for (i, entry) in self.ring.iter().enumerate() {
			let mut location = [0u8; 32];
			location[..2].copy_from_slice(&u16::to_be_bytes(entry.hash_prefix)[..]);
			ret.push((i as u16, location.into()));
		}
		if !ret.is_empty() {
			assert_eq!(ret[0].1, [0u8; 32].into());
		}

		ret
	}

	/// Walk the ring to find the n servers on which data should be replicated
	pub fn get_nodes(&self, position: &Hash, n: usize) -> Vec<Uuid> {
		if self.ring.len() != 1 << PARTITION_BITS {
			warn!("Ring not yet ready, read/writes will be lost!");
			return vec![];
		}

		let partition_idx = self.partition_of(position) as usize;
		let partition = &self.ring[partition_idx];

		let top = u16::from_be_bytes(position.as_slice()[0..2].try_into().unwrap());
		// Check that we haven't messed up our partition table, i.e. that this partition
		// table entry indeed corresponds to the item we are storing
		assert_eq!(
			partition.hash_prefix & PARTITION_MASK_U16,
			top & PARTITION_MASK_U16
		);

		assert!(n <= self.replication_factor);
		partition.nodes_buf[..n]
			.iter()
			.map(|i| self.nodes[*i as usize])
			.collect::<Vec<_>>()
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn test_ring_entry_size() {
		assert_eq!(std::mem::size_of::<RingEntry>(), 8);
	}
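
	// A minimal sketch of how partition_of maps a hash to a partition: only the top
	// PARTITION_BITS bits of the hash are used. The empty NetworkConfig is a
	// hypothetical setup; the resulting ring is empty, but partition_of only looks
	// at the hash.
	#[test]
	fn test_partition_of() {
		let ring = Ring::new(NetworkConfig::new(), 3);
		let mut h = [0u8; 32];
		h[0] = 0x12;
		h[1] = 0x34;
		assert_eq!(ring.partition_of(&h.into()), 0x1234 >> (16 - PARTITION_BITS));
	}

	// A sketch of a full ring build on a hypothetical 4-node, 3-zone layout with
	// replication factor 3, assuming Uuid (like Hash) can be built from a [u8; 32]
	// via .into(). It checks that every partition gets an entry and that get_nodes
	// returns 3 distinct nodes for an arbitrary hash.
	#[test]
	fn test_ring_small_cluster() {
		use std::collections::{HashMap, HashSet};

		let mut members = HashMap::new();
		for (i, zone) in ["dc1", "dc1", "dc2", "dc3"].iter().enumerate() {
			let mut id = [0u8; 32];
			id[0] = (i + 1) as u8;
			members.insert(
				id.into(),
				NetworkConfigEntry {
					zone: zone.to_string(),
					capacity: Some(1),
					tag: format!("node{}", i + 1),
				},
			);
		}
		let config = NetworkConfig { members, version: 1 };
		let ring = Ring::new(config, 3);

		// One ring entry per partition
		assert_eq!(ring.ring.len(), 1 << PARTITION_BITS);
		assert_eq!(ring.partitions().len(), 1 << PARTITION_BITS);

		// Data is replicated on 3 distinct nodes
		let nodes = ring.get_nodes(&[0x42u8; 32].into(), 3);
		assert_eq!(nodes.len(), 3);
		assert_eq!(nodes.iter().collect::<HashSet<_>>().len(), 3);
	}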
}