// Standard headers for the libc calls used directly in this file (printf,
// malloc, memset, assert, clock_gettime); they may also be pulled in
// transitively by the project headers below.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/timerfd.h>

#include "algo_utils.h"
#include "utils.h"
#include "url.h"
#include "proxy.h"
#include "timer.h"

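// Per-slot timer state stored in rr_ctx.wait[]. The same slot type carries two
// kinds of timers: deferred-delivery waits armed by rr_pkt_register() and
// fired by expired_wait() (on, id, link_num), and link-health timeouts armed
// by rr_pkt_manage_links() and fired by on_timeout_health() (health_id,
// prevlink, min_blocked_pkt).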
struct timer_info {
  uint8_t on;          // deferred-delivery wait: armed by rr_pkt_register
  uint16_t id;
  uint8_t link_num;
  uint16_t health_id;  // link-health timeout: armed by rr_pkt_manage_links
  uint8_t prevlink;
  uint16_t min_blocked_pkt;
  struct algo_ctx* algo;
};

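// A packet that arrived out of order and is parked in rr_ctx.real[] until
// every lower id has been delivered or given up on. idx identifies the
// application buffer slot used as key by mv_buffer_rtoa()/get_app_buffer().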
struct deferred_pkt {
  int link_fd;
  int idx;
  uint16_t id;
  uint8_t on;
  struct algo_ctx* algo;
};

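// Round-robin algorithm state. Packet ids are 16-bit sequence numbers that
// wrap around, so they are compared with the project's ring_* helpers
// (e.g. ring_lt(65535, 0) is expected to hold) rather than with plain < or >.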
struct rr_ctx {
  uint8_t my_links;        // links the remote peer allows us to use
  uint16_t my_links_ver;   // id of the packet that last updated my_links
  uint8_t remote_links;    // links we consider alive on the remote side
  int64_t mjit;            // max jitter budget in milliseconds
  uint16_t health_id;
  uint16_t health_id_late; // health ids at or below this are ignored
  uint16_t content_id;
  uint16_t recv_id;        // last content id delivered in order
  uint16_t recv_id_late;   // ids at or below this will not reactivate a link
  uint16_t sent_id;
  uint8_t current_link;
  struct internet_packet prev_packet;
  struct timespec emit_time;
  struct deferred_pkt real[PACKET_BUFFER_SIZE];
  struct timer_info wait[PACKET_BUFFER_SIZE];
};

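// Debug helper: prints both link bitfields, one character per link
// ("U" = usable, "-" = unavailable).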
void show_link_availability(struct rr_ctx* rr) {
  printf("Links availability: my_links[");
  for (int i = 0; i < 8; i++) {
    if (rr->my_links & 1 << i) printf("U");
    else printf("-");
  }
  printf("], rem_links[");
  for (int i = 0; i < 8; i++) {
    if (rr->remote_links & 1 << i) printf("U");
    else printf("-");
  }
  printf("]\n");
}

void expired_wait (struct evt_core_ctx* ctx, void* user);
void expired_late (struct evt_core_ctx* ctx, void* user);
void on_timeout_health (struct evt_core_ctx* ctx, void* user);

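// Registers an incoming CMD_CLEAR packet: updates the link bitfields from the
// feedback it carries, drops duplicates and late packets, arms a wait timer if
// the packet arrived ahead of its predecessor, and parks the packet in real[]
// until rr_pkt_unroll() can deliver it in order.
//
// Indexing example (illustrative only, PACKET_BUFFER_SIZE is defined elsewhere
// in the project): with PACKET_BUFFER_SIZE == 64, packet id 130 is parked in
// real[130 % 64] = real[2] and its missing predecessor id 129 is waited for in
// wait[129 % 64] = wait[1].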
void rr_pkt_register(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;
  char buffer[16];
  url_get_port (buffer, fdinfo->url);
  int link_num = atoi(buffer) - 7500; // @FIXME Hardcoded
  uint16_t real_idx = bp->ip.ap.str.id % PACKET_BUFFER_SIZE;
  uint16_t wait_idx = (bp->ip.ap.str.id - 1) % PACKET_BUFFER_SIZE;

  //printf("Selected url %s for pkt %d to be queued for delivery\n", fdinfo->url, bp->ip.ap.str.id);

  // 0. Update remote links
  if (ring_lt(rr->recv_id_late, bp->ip.ap.str.id) && !(rr->remote_links & 1 << link_num)) {
    printf("Activate link=%d | ", link_num);
    rr->remote_links |= 1 << link_num; // Make sure that the link is marked as working
    show_link_availability (rr);
  }

  // 1. Update my links I can use thanks to target feedback
  if (bp->ip.ap.str.id > rr->my_links_ver && bp->ip.ap.str.bitfield != rr->my_links) {
    rr->my_links = bp->ip.ap.str.bitfield;
    rr->my_links_ver = bp->ip.ap.str.id;
    printf("Update my links | ");
    show_link_availability (rr);
  }

  // 2. If the packet arrived too late or is already queued, we discard it
  if (ring_ge(rr->recv_id, bp->ip.ap.str.id) || rr->real[real_idx].id == bp->ip.ap.str.id) {
    // Packet has already been delivered or dropped, we free the buffer
    fprintf(stderr, "Packet %d arrived too late (current: %d) or already received\n", bp->ip.ap.str.id, rr->recv_id);
    mv_buffer_wtof (app_ctx, fdinfo);
    return;
  }

  // 3. If the packet arrived too early, we wait for its predecessors
  //printf("%d < %d = %d\n", rr->recv_id, bp->ip.ap.str.id - 1, ring_lt(rr->recv_id, bp->ip.ap.str.id - 1));
  if (ring_lt(rr->recv_id, bp->ip.ap.str.id - 1)) {
    int64_t timeout = rr->mjit - (int64_t) bp->ip.ap.str.deltat;
    //printf("%ld - %ld = %ld\n", rr->mjit, (int64_t) bp->ip.ap.str.deltat, timeout);
    if (timeout <= 0) timeout = 0;

    if (rr->wait[wait_idx].on && rr->wait[wait_idx].id != bp->ip.ap.str.id - 1) {
      fprintf(stderr, "Waiting array overlap, BUG: [\n");
      for (int i = 0; i < PACKET_BUFFER_SIZE; i++) {
        printf("\t%d => %d\n", rr->wait[i].id, rr->wait[i].on);
      }
      printf("] - could be replaced by drop\n");
      exit(EXIT_FAILURE);
    } else if (!rr->wait[wait_idx].on) {
      rr->wait[wait_idx].on = 1;
      rr->wait[wait_idx].id = bp->ip.ap.str.id - 1;
      rr->wait[wait_idx].link_num = bp->ip.ap.str.prevlink;
      rr->wait[wait_idx].algo = app_ctx;
      set_timeout(ctx, timeout, &rr->wait[wait_idx], expired_wait);
    }
  }

  // 4. We queue the packet to keep it
  if (rr->real[real_idx].on && rr->real[real_idx].id != bp->ip.ap.str.id) {
    fprintf(stderr, "Real array is full for id=%d, idx=%d, BUG: [\n", bp->ip.ap.str.id, real_idx);
    for (int i = 0; i < PACKET_BUFFER_SIZE; i++) {
      printf("\t%d => %d\n", rr->real[i].id, rr->real[i].on);
    }
    printf("] - could be replaced by drop\n");
    exit(EXIT_FAILURE);
  } else if (!rr->real[real_idx].on) {
    rr->real[real_idx].on = 2; // one count for delivery, one for the late timer
    rr->real[real_idx].id = bp->ip.ap.str.id;
    rr->real[real_idx].idx = real_idx;
    rr->real[real_idx].link_fd = fdinfo->fd;
    rr->real[real_idx].algo = app_ctx;
    mv_buffer_rtoa(app_ctx, fdinfo, &rr->real[real_idx].idx);

    // 5. We register a timer for this packet to create a reactivation window for broken links
    set_timeout(ctx, rr->mjit + 1, &rr->real[real_idx], expired_late);

    //printf("%d is added to real as %d\n", bp->ip.ap.str.id, real_idx);
  } else {
    fprintf(stdout, "Packet %d already received (current: %d)\n", bp->ip.ap.str.id, rr->recv_id);
    mv_buffer_wtof (app_ctx, fdinfo);
  }
}

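// Delivers one parked packet to its local UDP destination: decrements the
// slot's reference count, updates recv_id, resolves the udp:write target from
// the port carried in the packet, then hands the buffer to the writer.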
void rr_deliver(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct deferred_pkt* dp) {
  struct evt_core_fdinfo *to_fdinfo = NULL;
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;
  char url[255];

  // 1. Mark the packet as handled
  dp->on--;

  // 2. Get the buffer
  struct buffer_packet* bp = get_app_buffer (app_ctx, &dp->idx);
  assert(bp->ip.ap.headers.cmd == CMD_CLEAR);

  // 3. We update our cursor
  rr->recv_id = bp->ip.ap.content.clear.id;

  // 4. Find its target
  sprintf(url, "udp:write:127.0.0.1:%d", bp->ip.ap.content.clear.port);
  to_fdinfo = evt_core_get_from_url (ctx, url);
  if (to_fdinfo == NULL) {
    fprintf(stderr, "No fd for URL %s in udp:write for tcp-read. Dropping packet :( \n", url);
    //mv_buffer_wtor (app_ctx, fdinfo, bp);
    mv_buffer_atof (app_ctx, &dp->idx);
    return;
  }

  // 5. We move the buffer and notify the target
  //mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo, bp);
  mv_buffer_atow (app_ctx, &dp->idx, to_fdinfo);
  main_on_udp_write(ctx, to_fdinfo);
}

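// Handles a CMD_HEALTH packet: unless it is older than the current recovery
// window, records its metadata in the matching wait[] slot and arms a timer so
// that on_timeout_health() can later blacklist the link it points at and
// release the packets it reported as blocked.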
void rr_pkt_manage_links(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;

  assert(bp->ip.ap.headers.cmd == CMD_HEALTH);

  if (ring_le(bp->ip.ap.content.health.id, rr->health_id_late)) goto release;

  int64_t timeout = rr->mjit - (int64_t) bp->ip.ap.content.health.deltat;
  if (timeout <= 0) timeout = 0;
  uint64_t idx = bp->ip.ap.content.health.id % PACKET_BUFFER_SIZE;

  rr->wait[idx].health_id = bp->ip.ap.content.health.id;
  rr->wait[idx].prevlink = bp->ip.ap.content.health.prevlink;
  rr->wait[idx].min_blocked_pkt = bp->ip.ap.content.health.min_blocked_pkt;
  rr->wait[idx].algo = app_ctx;

  set_timeout (ctx, timeout, &rr->wait[idx], on_timeout_health);

release:
  mv_buffer_rtof(app_ctx, fdinfo);
}

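// Delivers every consecutive packet that is ready, starting at recv_id + 1,
// and stops at the first missing slot. If the link that received a packet has
// been closed in the meantime, the packet is skipped.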
void rr_pkt_unroll(struct evt_core_ctx* ctx, struct algo_ctx* app_ctx) {
  struct rr_ctx* rr = app_ctx->misc;
  struct evt_core_fdinfo* fdinfo = NULL;

  while(1) {
    //printf("Trying to deliver %d\n", rr->recv_id+1);
    struct deferred_pkt* def = &rr->real[(rr->recv_id+1) % PACKET_BUFFER_SIZE];
    if (!def->on) break;
    fdinfo = evt_core_get_from_fd (ctx, def->link_fd);
    if (fdinfo == NULL) {
      fprintf(stderr, "An error occurred as the link seems to be closed for the requested fd\n");
      rr->recv_id++;
      continue;
    }

    rr_deliver(ctx, fdinfo, def);
    //printf("Delivered %d\n", rr->recv_id);
  }
}

//------

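// Entry point for packets read from a TCP link: CMD_CLEAR packets are queued
// and delivered in order, CMD_HEALTH packets feed the link monitoring logic.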
int algo_rr_on_stream(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;

  if (bp->ip.ap.headers.cmd == CMD_CLEAR) {
    // 1. Register packet in our queue
    rr_pkt_register(ctx, fdinfo, bp);

    // 2. Process queue
    rr_pkt_unroll (ctx, app_ctx);
  } else if (bp->ip.ap.headers.cmd == CMD_HEALTH) {
    rr_pkt_manage_links(ctx, fdinfo, bp);
  }

  return 0;

co_error:
  perror("Failed to TCP read");
  exit(EXIT_FAILURE);
}

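// Entry point for datagrams read from the local UDP socket: stamps the packet
// with round-robin metadata (sequence id, time since the previous emission,
// remote link bitfield, previously used link), optionally appends the previous
// packet as redundant data, then rotates over the TCP links until it finds one
// that is both connected and allowed by the current link bitfield.
//
// Rotation example (illustrative, assuming ap.is_healing is enabled,
// ap.links == 8 and both links have an open connection): if current_link is 2
// and link 3 is blacklisted in my_links, a copy is still duplicated onto link
// 3, but the packet itself is moved to link 4, which becomes the new
// current_link.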
int algo_rr_on_datagram(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;
  struct evt_core_fdinfo *to_fdinfo = NULL;
  char url[255];

  // 1. Prepare RR state and packet values
  struct timespec curr;
  int secs, nsecs;
  uint64_t mili_sec;

  if (clock_gettime(CLOCK_MONOTONIC, &curr) == -1){
    perror("clock_gettime error");
    exit(EXIT_FAILURE);
  }

  secs = curr.tv_sec - rr->emit_time.tv_sec;
  nsecs = curr.tv_nsec - rr->emit_time.tv_nsec;
  mili_sec = secs * 1000 + nsecs / 1000000;
  if (mili_sec > rr->mjit) mili_sec = rr->mjit;

  bp->ip.ap.str.id = rr->sent_id;
  bp->ip.ap.str.flags = 0;
  bp->ip.ap.str.deltat = mili_sec;
  bp->ip.ap.str.bitfield = rr->remote_links;
  bp->ip.ap.str.prevlink = rr->current_link;

  if (app_ctx->ap.redundant_data == 1) {
    append_buffer(&bp->ip.ap, 1, &rr->prev_packet.ap); // We append the previous packet
    append_buffer(&rr->prev_packet.ap, 0, &bp->ip.ap); // We store the current packet for next time
    bp->ap_count++;
  }
  //printf("Will send packet id=%d\n", bp->ip.ap.str.id);

  rr->emit_time = curr;
  rr->sent_id++;

  // 2. Try to find someone to send it
  int max = 16;
  uint8_t sel_link = rr->current_link;
  while(max-- >= 0) {
    sel_link = (sel_link + 1) % app_ctx->ap.links;
    sprintf(url, "tcp:write:127.0.0.1:%d", 7500 + sel_link); //@FIXME Hardcoded
    to_fdinfo = evt_core_get_from_url (ctx, url);
    if (to_fdinfo == NULL) continue; // Missing link
    if (app_ctx->ap.is_waiting_bootstrap && !app_ctx->is_rdy) goto not_ready; // Some links are down
    if (!app_ctx->ap.is_healing || rr->my_links & (1 << sel_link)) {
      rr->current_link = sel_link;
      mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo);
      main_on_tcp_write(ctx, to_fdinfo);
      return 0;
    } else {
      dup_buffer_tow(app_ctx, bp, to_fdinfo);
      main_on_tcp_write(ctx, to_fdinfo);
    }
  }

not_ready:
  // 3. We found no link that is up
  fprintf(stderr, "Still bootstrapping or no link to forward data from %s in udp-read. Dropping packet :( \n", fdinfo->url);
  mv_buffer_wtof (app_ctx, fdinfo);
  return 0;

co_error:
  perror("Failed to UDP read");
  exit(EXIT_FAILURE);
}

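// Fired when a health packet's jitter window expires: advances the health
// recovery window, blacklists the announced previous link when the preceding
// health id never showed up, and unrolls the delivery queue up to the minimum
// packet id that was reported as blocked.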
void on_timeout_health (struct evt_core_ctx* ctx, void* raw) {
  struct timer_info* t = raw;
  struct algo_ctx* app_ctx = t->algo;
  struct rr_ctx* rr = app_ctx->misc;

  // 1. Update link recovery window if needed
  if (ring_gt(t->health_id, rr->health_id_late)) rr->health_id_late = t->health_id;

  // 2. Blacklist previous link if needed
  uint16_t prev_health_id = (t->health_id - 1);
  uint16_t prev_health_idx = prev_health_id % PACKET_BUFFER_SIZE;
  struct timer_info* t_old = &rr->wait[prev_health_idx];
  if (t_old->health_id != prev_health_id) {
    printf("Blacklist link=%d | ", t->prevlink);
    rr->remote_links &= 0xff ^ 1 << t->prevlink;
    show_link_availability (rr);
  }

  // 3. Deliver blocked packets
  while (ring_gt(t->min_blocked_pkt, rr->content_id)) {
    rr->content_id++;
    rr_pkt_unroll (ctx, app_ctx);
  }
}

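// Fired when we gave up waiting for a packet's missing predecessor: the link
// that was supposed to carry it (str.prevlink) is blacklisted and every packet
// up to the waited id is either delivered (if parked) or skipped.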
void expired_wait(struct evt_core_ctx* ctx, void* user) {
  struct timer_info* pkt = user;
  struct rr_ctx* rr = pkt->algo->misc;

  // 1. Release lock
  pkt->on = 0;

  // 2. We will not reactivate the link for this packet
  if (ring_lt(rr->recv_id_late, pkt->id)) rr->recv_id_late = pkt->id;

  /*
  // 3. Stop if packet has been received and delivered
  if (ring_le (pkt->id, rr->recv_id)) return;

  printf("Timer reached for packet %d\n", pkt->id);
  */

  // 4. BLACKLIST LINK
  printf("Blacklist link=%d | ", pkt->link_num);
  rr->remote_links &= 0xff ^ 1 << pkt->link_num;
  show_link_availability (rr);

  // 5. Deliver following packets
  while (ring_lt(rr->recv_id, pkt->id)) {
    rr->recv_id++;
    rr_pkt_unroll (ctx, pkt->algo);
  }
}

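// Fired once the reactivation window of a parked packet closes: drops the
// timer's reference on the real[] slot and advances recv_id_late so this
// packet can no longer reactivate its link.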
void expired_late(struct evt_core_ctx* ctx, void* user) {
  struct deferred_pkt* pkt = user;
  struct rr_ctx* rr = pkt->algo->misc;

  pkt->on--;
  if (ring_lt(rr->recv_id_late, pkt->id)) rr->recv_id_late = pkt->id;
}

int algo_rr_on_err(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  // We do nothing
  return 0;
}

void algo_rr_free(void* v) {
  struct rr_ctx* rr = v;
  free(rr);
}

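// Allocates and zero-initialises the round-robin context (200 ms jitter
// budget, all links assumed up, sequence numbers starting at 1) and registers
// the timer machinery on the event loop.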
void algo_rr_init(struct evt_core_ctx* ctx, struct algo_ctx* app_ctx, struct algo_params* ap) {
  struct rr_ctx* rr = malloc(sizeof(struct rr_ctx));
  if (rr == NULL) {
    perror("malloc failed for rr_init.");
    exit(EXIT_FAILURE);
  }
  memset(rr, 0, sizeof(struct rr_ctx));
  rr->mjit = 200;
  rr->my_links = 0xff;
  rr->remote_links = 0xff;
  rr->sent_id = 1;
  rr->recv_id = 0;
  rr->recv_id_late = 0;
  app_ctx->misc = rr;
  app_ctx->free_misc = algo_rr_free;

  init_timer(ctx);
}