diff --git a/.flake-modules/go-forwarder.nix b/.flake-modules/go-forwarder.nix
deleted file mode 100644
index 34a38cf1..00000000
--- a/.flake-modules/go-forwarder.nix
+++ /dev/null
@@ -1,71 +0,0 @@
-# Configures the Golang support and builds the forwarder
-# TODO: split this up in the future as this is unrelated tasks??
-
-# Top-level parameters that are bound to the provider flake
-# These are passed from `flake.nix` using importApply
-{
- localSelf,
- flake-parts-lib,
- nixpkgs-lib,
- ...
-}:
-
-# These values would bind to the consumer flake when this flake module is imported:
-{
- config,
- self,
- inputs,
- getSystem,
- moduleWithSystem,
- withSystem,
- ...
-}:
-
-# The actual flake-parts module configuration
-{
- perSystem =
- {
- config,
- self',
- inputs',
- pkgs,
- system,
- ...
- }:
- let
-# flakeRoot = nixpkgs-lib.getExe config.flake-root.package;
-#
-# # Build the networking/forwarder Go utility.
-# forwarder = pkgs.buildGoModule {
-# pname = "exo-forwarder";
-# version = "0.1.0";
-# src = "${flakeRoot}/networking/forwarder";
-#
-# vendorHash = "sha256-BXIGg2QYqHDz2TNe8hLAGC6jVlffp9766H+WdkkuVgA=";
-#
-# # Only the main package at the repository root needs building.
-# subPackages = [ "." ];
-# };
- in
- {
- packages = {
-# inherit forwarder;
- };
-
- apps = {
-# forwarder = {
-# type = "app";
-# program = "${forwarder}/bin/forwarder";
-# };
- };
-
- make-shells.default = {
- # Go 1.24 compiler – align with go.mod
- packages = [ pkgs.go_1_24 ];
- shellHook = ''
- GOPATH="''$(${nixpkgs-lib.getExe config.flake-root.package})"/.go_cache
- export GOPATH
- '';
- };
- };
-}
diff --git a/.flake-modules/just-flake.nix b/.flake-modules/just-flake.nix
deleted file mode 100644
index 2208a58c..00000000
--- a/.flake-modules/just-flake.nix
+++ /dev/null
@@ -1,54 +0,0 @@
-# Provides pretty banner & command index for this flake
-
-# Top-level parameters that are bound to the provider flake
-# These are passed from `flake.nix` using importApply
-{
- localSelf,
- flake-parts-lib,
- nixpkgs-lib,
- just-flake,
- ...
-}:
-
-# These values would bind to the consumer flake when this flake module is imported:
-{
- config,
- self,
- inputs,
- getSystem,
- moduleWithSystem,
- withSystem,
- ...
-}:
-
-# The actual flake-parts module configuration
-{
- imports = [ just-flake.flakeModule ];
- perSystem =
- {
- config,
- self',
- inputs',
- pkgs,
- system,
- ...
- }:
- {
- just-flake.features = {
- # treefmt.enable = true;
- # rust.enable = true;
- # convco.enable = true;
- # hello = {
- # enable = true;
- # justfile = ''
- # hello:
- # echo Hello World
- # '';
- # };
- };
-
- make-shells.default = {
- inputsFrom = [ config.just-flake.outputs.devShell ];
- };
- };
-}
diff --git a/.flake-modules/macmon.nix b/.flake-modules/macmon.nix
deleted file mode 100644
index 5df0cdf4..00000000
--- a/.flake-modules/macmon.nix
+++ /dev/null
@@ -1,30 +0,0 @@
-# Provides macmon binary for the worker.
-
-# These values would bind to the consumer flake when this flake module is imported:
-{
- config,
- self,
- inputs,
- getSystem,
- moduleWithSystem,
- withSystem,
- ...
-}:
-
-# The actual flake-parts module configuration
-{
- perSystem =
- {
- config,
- self',
- inputs',
- pkgs,
- system,
- ...
- }:
- {
- make-shells.default = {
- packages = if (system == "aarch64-darwin") then ([ pkgs.macmon ]) else ([]);
- };
- };
-}
diff --git a/.gitignore b/.gitignore
index 310df30d..19b4dd09 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,8 +12,6 @@ hosts*.json
# TODO figure out how to properly solve the issue with these target directories showing up
networking/target/
networking/topology/target/
-rust/target/
-rust/Cargo.lock
build/
dist/
@@ -26,4 +24,4 @@ dist/
just-flake.just
# for the gitingest enthusiasts
-digest.txt
\ No newline at end of file
+digest.txt
diff --git a/.idea/exo-v2.iml b/.idea/exo-v2.iml
index 5357eaa9..aa638174 100644
--- a/.idea/exo-v2.iml
+++ b/.idea/exo-v2.iml
@@ -10,11 +10,19 @@
+
+
+
+
+
+
+
+
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
index 94a25f7f..35eb1ddf 100644
--- a/.idea/vcs.xml
+++ b/.idea/vcs.xml
@@ -1,6 +1,6 @@
-
+
\ No newline at end of file
diff --git a/copy_model.sh b/copy_model.sh
new file mode 100755
index 00000000..f5c985aa
--- /dev/null
+++ b/copy_model.sh
@@ -0,0 +1,133 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# copy_model.sh: clone ~/.exo/models from SOURCE to one or more TARGETS using scp -3.
+# Username defaults:
+# - If host is "aN" and no user given, username defaults to "aN".
+# - Otherwise defaults to $(whoami), unless you pass user@host.
+#
+# Examples:
+# ./copy_model.sh a1 a2 a3
+# ./copy_model.sh a1 frank@a2 192.168.1.3
+
+if [ $# -lt 2 ]; then
+ echo "Usage: $0 SOURCE TARGET [TARGET...]" >&2
+ exit 2
+fi
+
+SOURCE="$1"
+shift
+TARGETS=("$@")
+
+DEFAULT_USER="$(whoami)"
+MODELS_REL=".exo/models" # relative under $HOME
+
+timestamp() { date "+%Y-%m-%d %H:%M:%S"; }
+
+split_user_host() {
+ local in="$1"
+ if [[ "$in" == *"@"* ]]; then
+ printf "%s|%s" "${in%%@*}" "${in#*@}"
+ else
+ printf "|%s" "$in"
+ fi
+}
+
+resolve_ip() {
+ local hostish="$1"
+ if [[ "$hostish" =~ ^a([0-9]+)$ ]]; then
+ echo "192.168.1.${BASH_REMATCH[1]}"
+ else
+ echo "$hostish"
+ fi
+}
+
+default_user_for() {
+ local hostish="$1"
+ if [[ "$hostish" =~ ^a([0-9]+)$ ]]; then
+ echo "$hostish"
+ else
+ echo "$DEFAULT_USER"
+ fi
+}
+
+SSH_OPTS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o ConnectTimeout=10)
+SSHPASS_BIN="$(command -v sshpass || true)"
+SCP_BIN="${SCP_BIN:-scp}"
+
+read -rs -p "Password for all hosts: " PASS
+echo
+if [ -n "$SSHPASS_BIN" ]; then
+ echo "$(timestamp) sshpass found: will provide the password non-interactively."
+else
+ echo "$(timestamp) WARNING: sshpass not found — you’ll be prompted by scp/ssh per hop unless keys are set up."
+fi
+
+# Build source endpoint (default username logic)
+IFS='|' read -r SRC_USER_RAW SRC_HOSTISH <<<"$(split_user_host "$SOURCE")"
+SRC_USER="${SRC_USER_RAW:-$(default_user_for "$SRC_HOSTISH")}"
+SRC_IP="$(resolve_ip "$SRC_HOSTISH")"
+SRC_HOST="${SRC_USER}@${SRC_IP}"
+
+echo "$(timestamp) Source: ${SRC_HOST}:~/${MODELS_REL}"
+echo "$(timestamp) Targets: ${#TARGETS[@]}"
+
+# Helper to run a simple remote command via ssh (for mkdir -p checks)
+ssh_run() {
+ local host="$1"
+ shift
+ if [ -n "$SSHPASS_BIN" ]; then
+ sshpass -p "$PASS" ssh "${SSH_OPTS[@]}" "$host" "$@"
+ else
+ ssh "${SSH_OPTS[@]}" "$host" "$@"
+ fi
+}
+
+# Ensure source dir exists (create if missing, per your request)
+ssh_run "$SRC_HOST" "mkdir -p ~/${MODELS_REL}"
+
+failures=0
+count=0
+for T in "${TARGETS[@]}"; do
+ count=$((count + 1))
+ IFS='|' read -r T_USER_RAW T_HOSTISH <<<"$(split_user_host "$T")"
+ T_USER="${T_USER_RAW:-$(default_user_for "$T_HOSTISH")}"
+ T_IP="$(resolve_ip "$T_HOSTISH")"
+ T_HOST="${T_USER}@${T_IP}"
+
+ echo "============================================================"
+ echo "$(timestamp) [${count}/${#TARGETS[@]}] ${SRC_HOST} ==> ${T_HOST}"
+ echo "$(timestamp) Ensuring destination directory exists…"
+ ssh_run "$T_HOST" "mkdir -p ~/${MODELS_REL%/*}" # ~/.exo
+
+ # Copy the whole "models" directory into ~/.exo on the target.
+ # scp -3 = copy between two remotes via local; -r recursive; -p preserve times/modes
+ if [ -n "$SSHPASS_BIN" ]; then
+ echo "$(timestamp) Running: scp -3 -rp ${SRC_HOST}:~/${MODELS_REL} ${T_HOST}:~/.exo/"
+ if sshpass -p "$PASS" "$SCP_BIN" "${SSH_OPTS[@]}" -3 -rp \
+ "${SRC_HOST}:~/${MODELS_REL}" \
+ "${T_HOST}:~/.exo/"; then
+ echo "$(timestamp) [${count}] Done: ${T_HOST}"
+ else
+ echo "$(timestamp) [${count}] ERROR during scp to ${T_HOST}" >&2
+ failures=$((failures + 1))
+ fi
+ else
+ echo "$(timestamp) Running: scp -3 -rp ${SRC_HOST}:~/${MODELS_REL} ${T_HOST}:~/.exo/"
+ if "$SCP_BIN" "${SSH_OPTS[@]}" -3 -rp \
+ "${SRC_HOST}:~/${MODELS_REL}" \
+ "${T_HOST}:~/.exo/"; then
+ echo "$(timestamp) [${count}] Done: ${T_HOST}"
+ else
+ echo "$(timestamp) [${count}] ERROR during scp to ${T_HOST}" >&2
+ failures=$((failures + 1))
+ fi
+ fi
+done
+
+echo "============================================================"
+if [ "$failures" -eq 0 ]; then
+ echo "$(timestamp) All transfers completed successfully."
+else
+ echo "$(timestamp) Completed with ${failures} failure(s)."
+fi
diff --git a/dashboard/index.html b/dashboard/index.html
index 433746fe..85f94589 100644
--- a/dashboard/index.html
+++ b/dashboard/index.html
@@ -943,7 +943,7 @@
}
const result = await response.json();
- showLaunchStatus(`Instance launched successfully: ${result.instance_id}`, 'success');
+ showLaunchStatus('Instance launched successfully');
// Reset form
modelSelect.value = '';
diff --git a/flake.lock b/flake.lock
index bc30d2b3..35076eff 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,5 +1,26 @@
{
"nodes": {
+ "fenix": {
+ "inputs": {
+ "nixpkgs": [
+ "nixpkgs"
+ ],
+ "rust-analyzer-src": "rust-analyzer-src"
+ },
+ "locked": {
+ "lastModified": 1755585599,
+ "narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
+ "owner": "nix-community",
+ "repo": "fenix",
+ "rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-community",
+ "repo": "fenix",
+ "type": "github"
+ }
+ },
"flake-compat": {
"flake": false,
"locked": {
@@ -102,12 +123,30 @@
},
"root": {
"inputs": {
+ "fenix": "fenix",
"flake-parts": "flake-parts",
"flake-root": "flake-root",
"just-flake": "just-flake",
"make-shell": "make-shell",
"nixpkgs": "nixpkgs"
}
+ },
+ "rust-analyzer-src": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1755504847,
+ "narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
+ "owner": "rust-lang",
+ "repo": "rust-analyzer",
+ "rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
+ "type": "github"
+ },
+ "original": {
+ "owner": "rust-lang",
+ "ref": "nightly",
+ "repo": "rust-analyzer",
+ "type": "github"
+ }
}
},
"root": "root",
diff --git a/flake.nix b/flake.nix
index 0098a869..b1f69a86 100644
--- a/flake.nix
+++ b/flake.nix
@@ -20,47 +20,39 @@
# Provides flake integration with [Just](https://just.systems/man/en/)
just-flake.url = "github:juspay/just-flake";
+
+ # Provides Rust dev-env integration:
+ fenix = {
+ url = "github:nix-community/fenix";
+ inputs.nixpkgs.follows = "nixpkgs";
+ };
};
+ # TODO: figure out caching story
+ # nixConfig = {
+ # # nix community cachix
+ # extra-trusted-public-keys = "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=";
+ # extra-substituters = "https://nix-community.cachix.org";
+ # };
+
outputs =
inputs@{
flake-parts,
...
}:
flake-parts.lib.mkFlake { inherit inputs; } (
- {
- flake-parts-lib,
- self,
- ...
- }:
- let
- nixpkgs-lib = inputs.nixpkgs.lib;
-
- # A wraper around importApply that supplies default parameters
- importApply' =
- path: extraParams:
- (flake-parts-lib.importApply path (
- nixpkgs-lib.recursiveUpdate {
- localSelf = self;
- inherit flake-parts-lib;
- inherit nixpkgs-lib;
- } extraParams
- ));
-
- # instantiate all the flake modules, passing custom arguments to them as needed
- flakeModules = {
- flakeRoot = importApply' ./.flake-modules/flake-root.nix { inherit (inputs) flake-root; };
- justFlake = importApply' ./.flake-modules/just-flake.nix { inherit (inputs) just-flake; };
- goForwarder = importApply' ./.flake-modules/go-forwarder.nix { };
- };
- in
+ { flake-parts-lib, self, ... }:
{
imports = [
inputs.make-shell.flakeModules.default
- flakeModules.flakeRoot
- flakeModules.justFlake
- flakeModules.goForwarder
- ./.flake-modules/macmon.nix
+
+ ./nix/modules/pkgs-init.nix # nixpkgs overlays manager
+ ./nix/modules/flake-root.nix
+ ./nix/modules/just-flake.nix
+ ./nix/modules/macmon.nix
+ ./nix/modules/python.nix
+ ./nix/modules/rust.nix
+ ./nix/modules/go-forwarder.nix
];
systems = [
"x86_64-linux"
@@ -75,55 +67,31 @@
system,
...
}:
- let
- buildInputs = with pkgs; [
- ];
- nativeBuildInputs = with pkgs; [
- ];
- in
{
# Per-system attributes can be defined here. The self' and inputs'
# module parameters provide easy access to attributes of the same
# system.
# NOTE: pkgs is equivalent to inputs'.nixpkgs.legacyPackages.hello;
- apps = {
- python-lsp = {
- type = "app";
- program = "${pkgs.basedpyright}/bin/basedpyright-langserver";
- };
- default = self'.apps.forwarder;
- };
+ apps = { };
make-shells.default = {
packages = [
- pkgs.python313
- pkgs.uv
pkgs.protobuf
- pkgs.basedpyright
- pkgs.ruff
];
- nativeBuildInputs =
- with pkgs;
- [
- nixpkgs-fmt
- cmake
- ]
- ++ buildInputs
- ++ nativeBuildInputs;
-
- # Arguments which are intended to be environment variables in the shell environment
- # should be changed to attributes of the `env` option
- env = {
- # fixes libstdc++.so issues and libgl.so issues
- LD_LIBRARY_PATH = "${pkgs.stdenv.cc.cc.lib}/lib";
- };
+ nativeBuildInputs = with pkgs; [
+ nixpkgs-fmt
+ ];
shellHook = ''
export GO_BUILD_DIR=$(git rev-parse --show-toplevel)/build;
export DASHBOARD_DIR=$(git rev-parse --show-toplevel)/dashboard;
'';
+ # Arguments which are intended to be environment variables in the shell environment
+ # should be changed to attributes of the `env` option
+ env = { };
+
# Arbitrary mkDerivation arguments should be changed to be attributes of the `additionalArguments` option
additionalArguments = { };
};
diff --git a/kill_remote.sh b/kill_remote.sh
new file mode 100755
index 00000000..727b3261
--- /dev/null
+++ b/kill_remote.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+###############################################################################
+# Args & prerequisites
+###############################################################################
+if [[ $# -gt 1 ]]; then
+ echo "Usage: $0 [hosts_file]" >&2
+ exit 1
+fi
+HOSTS_FILE=${1:-hosts.txt}
+
+###############################################################################
+# Load hosts.txt (works on macOS Bash 3.2 and Bash 4+)
+###############################################################################
+if [[ ! -f "$HOSTS_FILE" ]]; then
+ echo "Error: $HOSTS_FILE not found"
+ exit 1
+fi
+
+if builtin command -v mapfile >/dev/null 2>&1; then
+ mapfile -t HOSTS <"$HOSTS_FILE"
+else
+ HOSTS=()
+ while IFS= read -r h; do
+ [[ -n "$h" ]] && HOSTS+=("$h")
+ done <"$HOSTS_FILE"
+fi
+[[ ${#HOSTS[@]} -gt 0 ]] || {
+ echo "No hosts found in $HOSTS_FILE"
+ exit 1
+}
+
+###############################################################################
+# Helper – run a remote command and capture rc/stderr/stdout
+###############################################################################
+ssh_opts=(-o StrictHostKeyChecking=no
+ -o LogLevel=ERROR)
+
+run_remote() { # $1 host $2 command
+ local host=$1 cmd=$2 rc
+ if ssh "${ssh_opts[@]}" "$host" "$cmd"; then
+ rc=0
+ else
+ rc=$?
+ fi
+ return $rc
+}
+
+###############################################################################
+# Kill exo everywhere (parallel)
+###############################################################################
+echo "=== Killing exo on ${#HOSTS[@]} host(s) ==="
+fail=0
+pids=()
+for h in "${HOSTS[@]}"; do
+  run_remote "$h" 'pkill -f exo || true' & pids+=("$!")
+done
+# "|| fail=1" after a backgrounded subshell runs in the child, never the parent;
+for pid in "${pids[@]}"; do wait "$pid" || fail=1; done
+((fail == 0)) || {
+ echo "❌ Some hosts could not be reached—check SSH access."
+ exit 1
+}
+echo "✓ exo processes killed on all reachable hosts."
diff --git a/.flake-modules/flake-root.nix b/nix/modules/flake-root.nix
similarity index 55%
rename from .flake-modules/flake-root.nix
rename to nix/modules/flake-root.nix
index 02ca1735..6b000405 100644
--- a/.flake-modules/flake-root.nix
+++ b/nix/modules/flake-root.nix
@@ -2,39 +2,14 @@
# 1. ${lib.getExe config.flake-root.package}
# 2. $FLAKE_ROOT environment-varible
-# Top-level parameters that are bound to the provider flake
-# These are passed from `flake.nix` using importApply
-{
- localSelf,
- flake-parts-lib,
- nixpkgs-lib,
- flake-root,
- ...
-}:
-
# These values would bind to the consumer flake when this flake module is imported:
-{
- config,
- self,
- inputs,
- getSystem,
- moduleWithSystem,
- withSystem,
- ...
-}:
+{ inputs, ... }:
# The actual flake-parts module configuration
{
- imports = [ flake-root.flakeModule ];
+ imports = [ inputs.flake-root.flakeModule ];
perSystem =
- {
- config,
- self',
- inputs',
- pkgs,
- system,
- ...
- }:
+ { config, ... }:
{
flake-root.projectRootFile = "flake.nix"; # Not necessary, as flake.nix is the default
diff --git a/nix/modules/go-forwarder.nix b/nix/modules/go-forwarder.nix
new file mode 100644
index 00000000..1ef6857c
--- /dev/null
+++ b/nix/modules/go-forwarder.nix
@@ -0,0 +1,19 @@
+{
+ perSystem =
+ {
+ config,
+ pkgs,
+ lib,
+ ...
+ }:
+ {
+ make-shells.default = {
+ # Go 1.24 compiler – align with go.mod
+ packages = [ pkgs.go_1_24 ];
+ shellHook = ''
+ GOPATH="''$(${lib.getExe config.flake-root.package})"/.go_cache
+ export GOPATH
+ '';
+ };
+ };
+}
diff --git a/nix/modules/just-flake.nix b/nix/modules/just-flake.nix
new file mode 100644
index 00000000..e7a0d2db
--- /dev/null
+++ b/nix/modules/just-flake.nix
@@ -0,0 +1,26 @@
+# Provides pretty banner & command index for this flake
+
+{ inputs, ... }:
+{
+ imports = [ inputs.just-flake.flakeModule ];
+ perSystem =
+ { config, ... }:
+ {
+ just-flake.features = {
+ # treefmt.enable = true;
+ # rust.enable = true;
+ # convco.enable = true;
+ # hello = {
+ # enable = true;
+ # justfile = ''
+ # hello:
+ # echo Hello World
+ # '';
+ # };
+ };
+
+ make-shells.default = {
+ inputsFrom = [ config.just-flake.outputs.devShell ];
+ };
+ };
+}
diff --git a/nix/modules/macmon.nix b/nix/modules/macmon.nix
new file mode 100644
index 00000000..23fa9457
--- /dev/null
+++ b/nix/modules/macmon.nix
@@ -0,0 +1,12 @@
+# Provides macmon binary for the worker.
+# macmon is a macOS-only tool, so it is only added on Darwin systems.
+{
+  perSystem =
+    { lib, pkgs, ... }:
+    {
+      make-shells.default = {
+        # optionals yields [] on non-Darwin; no mkMerge/mkIf wrapper needed.
+        packages = lib.optionals pkgs.stdenv.isDarwin [ pkgs.macmon ];
+      };
+    };
+}
diff --git a/nix/modules/pkgs-init.nix b/nix/modules/pkgs-init.nix
new file mode 100644
index 00000000..f75c5944
--- /dev/null
+++ b/nix/modules/pkgs-init.nix
@@ -0,0 +1,54 @@
+# Single module responsible for collecting all overlays and instantiating in one go
+#
+# Other modules contribute overlays via the `pkgs-init.overlays` option; this
+# module then performs a single `import nixpkgs` per system with all overlays
+# applied, and rebinds the result as the module-level `pkgs` argument.
+
+{
+  flake-parts-lib,
+  inputs,
+  ...
+}:
+let
+  inherit (flake-parts-lib) mkPerSystemOption;
+in
+{
+  options.perSystem = mkPerSystemOption (
+    {
+      system,
+      config,
+      lib,
+      ...
+    }:
+    {
+      options.pkgs-init.overlays = lib.mkOption {
+        description = ''
+          List of nixpkgs overlays (functions of the form: final: prev: { ... }).
+          Any module can append. Order matters.
+        '';
+        default = [ ];
+        example = [
+          (final: prev: {
+            my-hello = prev.hello;
+          })
+        ];
+        type = lib.types.listOf lib.types.unspecified;
+      };
+      options.pkgs-init.importArgs = lib.mkOption {
+        description = "Extra arguments merged into the nixpkgs import call.";
+        default = { };
+        type = lib.types.attrs;
+      };
+      config = {
+        # Instantiate nixpkgs once per system with every collected overlay.
+        _module.args.pkgs = import inputs.nixpkgs (
+          {
+            inherit system;
+            overlays = config.pkgs-init.overlays;
+          }
+          // config.pkgs-init.importArgs
+        );
+      };
+    }
+  );
+}
diff --git a/nix/modules/python.nix b/nix/modules/python.nix
new file mode 100644
index 00000000..ccda8358
--- /dev/null
+++ b/nix/modules/python.nix
@@ -0,0 +1,20 @@
+# Configures Python shell
+
+{
+ perSystem =
+ { pkgs, ... }:
+ {
+ make-shells.default = {
+ packages = [
+ pkgs.python313
+ pkgs.uv
+ pkgs.ruff
+ pkgs.basedpyright
+ ];
+
+ shellHook = ''
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${pkgs.python313}/lib
+ '';
+ };
+ };
+}
diff --git a/nix/modules/rust.nix b/nix/modules/rust.nix
new file mode 100644
index 00000000..1eb4865d
--- /dev/null
+++ b/nix/modules/rust.nix
@@ -0,0 +1,25 @@
+# Configures Rust shell
+
+{ inputs, ... }:
+{
+ perSystem =
+ { pkgs, ... }:
+ {
+ pkgs-init.overlays = [
+ inputs.fenix.overlays.default
+ ];
+
+ make-shells.default = {
+ packages = [
+ (pkgs.fenix.complete.withComponents [
+ "cargo"
+ "rustc"
+ "clippy"
+ "rustfmt"
+ "rust-src"
+ ])
+ pkgs.rustup # literally only added to make RustRover happy (otherwise useless)
+ ];
+ };
+ };
+}
diff --git a/pyproject.toml b/pyproject.toml
index ba64ebba..8759a9d7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,6 +33,9 @@ dependencies = [
"cobs>=1.2.2",
"loguru>=0.7.3",
"textual>=5.3.0",
+ "exo_pyo3_bindings", # rust bindings
+ "anyio>=4.10.0",
+ "bidict>=0.23.1",
]
[project.scripts]
@@ -61,8 +64,12 @@ darwin = [
[tool.uv.workspace]
members = [
"scripts",
+ "rust/exo_pyo3_bindings",
]
+[tool.uv.sources]
+exo_pyo3_bindings = { workspace = true }
+
[build-system]
requires = ["uv_build>=0.8.9,<0.9.0"]
build-backend = "uv_build"
@@ -87,7 +94,7 @@ reportUnnecessaryTypeIgnoreComment = "error"
pythonVersion = "3.13"
pythonPlatform = "Darwin"
-exclude = ["**/.venv", "**/venv", "**/__pycache__", "**/exo_scripts"]
+exclude = ["**/.venv", "**/venv", "**/__pycache__", "**/exo_scripts", "**/.direnv", "**/rust"]
stubPath = "typings"
[[tool.basedpyright.executionEnvironments]]
diff --git a/remote_git.sh b/remote_git.sh
index 5c9c003d..73ce84bd 100755
--- a/remote_git.sh
+++ b/remote_git.sh
@@ -4,47 +4,49 @@ set -euo pipefail
###############################################################################
# Args & prerequisites
###############################################################################
-if [[ $# -lt 2 ]]; then
- echo "Usage: $0 [git_args...]" >&2
+if [[ $# -lt 1 ]]; then
+ echo "Usage: $0 [git_args...]" >&2
echo "Examples:" >&2
- echo " $0 mypassword pull" >&2
- echo " $0 mypassword checkout main" >&2
- echo " $0 mypassword status" >&2
- echo " $0 mypassword fetch --all" >&2
+ echo " $0 pull" >&2
+ echo " $0 checkout main" >&2
+ echo " $0 status" >&2
+ echo " $0 fetch --all" >&2
exit 1
fi
-PASSWORD=$1
-shift # Remove password from args
-GIT_CMD="$*" # Remaining args form the git command
-HOSTS_FILE=${HOSTS_FILE:-hosts.json}
-
-for prog in jq sshpass; do
- command -v "$prog" >/dev/null ||
- { echo "Error: $prog not installed."; exit 1; }
-done
+GIT_CMD="$*" # All args form the git command
+HOSTS_FILE=${HOSTS_FILE:-hosts.txt}
###############################################################################
-# Load hosts.json (works on macOS Bash 3.2 and Bash 4+)
+# Load hosts.txt (works on macOS Bash 3.2 and Bash 4+)
###############################################################################
+if [[ ! -f "$HOSTS_FILE" ]]; then
+ echo "Error: $HOSTS_FILE not found"
+ exit 1
+fi
+
if builtin command -v mapfile >/dev/null 2>&1; then
- mapfile -t HOSTS < <(jq -r '.[]' "$HOSTS_FILE")
+ mapfile -t HOSTS <"$HOSTS_FILE"
else
HOSTS=()
- while IFS= read -r h; do HOSTS+=("$h"); done < <(jq -r '.[]' "$HOSTS_FILE")
+ while IFS= read -r h; do
+ [[ -n "$h" ]] && HOSTS+=("$h")
+ done <"$HOSTS_FILE"
fi
-[[ ${#HOSTS[@]} -gt 0 ]] || { echo "No hosts found in $HOSTS_FILE"; exit 1; }
+[[ ${#HOSTS[@]} -gt 0 ]] || {
+ echo "No hosts found in $HOSTS_FILE"
+ exit 1
+}
###############################################################################
# Helper – run a remote command and capture rc/stderr/stdout
###############################################################################
ssh_opts=(-o StrictHostKeyChecking=no
- -o NumberOfPasswordPrompts=1 # allow sshpass to answer exactly once
- -o LogLevel=ERROR)
+ -o LogLevel=ERROR)
-run_remote () { # $1 host $2 command
+run_remote() { # $1 host $2 command
local host=$1 cmd=$2 rc
- if sshpass -p "$PASSWORD" ssh "${ssh_opts[@]}" "$host" "$cmd"; then
+ if ssh "${ssh_opts[@]}" "$host" "$cmd"; then
rc=0
else
rc=$?
@@ -72,9 +74,9 @@ done
wait
echo ""
-if (( fail == 0 )); then
+if ((fail == 0)); then
echo "🎉 Git command executed successfully on all hosts!"
else
echo "⚠️ Some hosts failed—see above."
exit 1
-fi
\ No newline at end of file
+fi
diff --git a/run_remote.sh b/run_remote.sh
index 87ee2638..2b654e10 100755
--- a/run_remote.sh
+++ b/run_remote.sh
@@ -4,38 +4,42 @@ set -euo pipefail
###############################################################################
# Args & prerequisites
###############################################################################
-if [[ $# -lt 1 || $# -gt 2 ]]; then
- echo "Usage: $0 [hosts_file]" >&2 ; exit 1
+if [[ $# -gt 1 ]]; then
+ echo "Usage: $0 [hosts_file]" >&2
+ exit 1
fi
-PASSWORD=$1
-HOSTS_FILE=${2:-hosts.json}
-
-for prog in jq sshpass; do
- command -v "$prog" >/dev/null ||
- { echo "Error: $prog not installed."; exit 1; }
-done
+HOSTS_FILE=${1:-hosts.txt}
###############################################################################
-# Load hosts.json (works on macOS Bash 3.2 and Bash 4+)
+# Load hosts.txt (works on macOS Bash 3.2 and Bash 4+)
###############################################################################
+if [[ ! -f "$HOSTS_FILE" ]]; then
+ echo "Error: $HOSTS_FILE not found"
+ exit 1
+fi
+
if builtin command -v mapfile >/dev/null 2>&1; then
- mapfile -t HOSTS < <(jq -r '.[]' "$HOSTS_FILE")
+ mapfile -t HOSTS <"$HOSTS_FILE"
else
HOSTS=()
- while IFS= read -r h; do HOSTS+=("$h"); done < <(jq -r '.[]' "$HOSTS_FILE")
+ while IFS= read -r h; do
+ [[ -n "$h" ]] && HOSTS+=("$h")
+ done <"$HOSTS_FILE"
fi
-[[ ${#HOSTS[@]} -gt 0 ]] || { echo "No hosts found in $HOSTS_FILE"; exit 1; }
+[[ ${#HOSTS[@]} -gt 0 ]] || {
+ echo "No hosts found in $HOSTS_FILE"
+ exit 1
+}
###############################################################################
# Helper – run a remote command and capture rc/stderr/stdout
###############################################################################
ssh_opts=(-o StrictHostKeyChecking=no
- -o NumberOfPasswordPrompts=1 # allow sshpass to answer exactly once
- -o LogLevel=ERROR)
+ -o LogLevel=ERROR)
-run_remote () { # $1 host $2 command
+run_remote() { # $1 host $2 command
local host=$1 cmd=$2 rc
- if sshpass -p "$PASSWORD" ssh "${ssh_opts[@]}" "$host" "$cmd"; then
+ if ssh "${ssh_opts[@]}" "$host" "$cmd"; then
rc=0
else
rc=$?
@@ -54,26 +58,42 @@ for h in "${HOSTS[@]}"; do
) || fail=1 &
done
wait
-(( fail == 0 )) || { echo "❌ Some hosts could not be reached—check password or SSH access."; exit 1; }
+((fail == 0)) || {
+ echo "❌ Some hosts could not be reached—check SSH access."
+ exit 1
+}
echo "✓ exo processes killed on all reachable hosts."
-
+#
###############################################################################
-# Phase 2 – start new exo processes (parallel, with sudo -S)
+# Phase 2 – cleanup database files (parallel)
###############################################################################
-echo "=== Stage 2: starting new exo processes ==="
+echo "=== Stage 2: cleaning up database files ==="
fail=0
-for i in "${!HOSTS[@]}"; do
- h=${HOSTS[$i]}
-
- # one liner that pre-caches sudo and then runs the script
- if [[ $i -eq 0 ]]; then
- remote_cmd="cd ~/exo && ./run.sh -c"
- else
- remote_cmd="cd ~/exo && ./run.sh -rc"
- fi
-
- ( run_remote "$h" "$remote_cmd" ) || fail=1 &
+for h in "${HOSTS[@]}"; do
+ (
+ run_remote "$h" 'rm -f ~/.exo/*db* || true'
+ ) || fail=1 &
done
wait
-(( fail == 0 )) && echo "🎉 Deployment finished!" || \
- { echo "⚠️ Some starts failed—see above."; exit 1; }
+((fail == 0)) || {
+ echo "❌ Some hosts failed database cleanup."
+ exit 1
+}
+echo "✓ Database files cleaned on all hosts."
+
+###############################################################################
+# Phase 3 – start new exo processes in Terminal windows (parallel)
+###############################################################################
+echo "=== Stage 3: starting new exo processes ==="
+fail=0
+pids=()
+for h in "${HOSTS[@]}"; do
+  # Use osascript to open Terminal windows on remote Mac
+  remote_cmd="osascript -e \"tell app \\\"Terminal\\\" to do script \\\"cd ~/exo; nix develop --command uv run exo\\\"\""
+  run_remote "$h" "$remote_cmd" & pids+=("$!")
+done
+for pid in "${pids[@]}"; do wait "$pid" || fail=1; done
+((fail == 0)) && echo "🎉 Deployment finished!" || {
+  echo "⚠️ Some starts failed—see above."
+  exit 1
+}
diff --git a/rust/.gitignore b/rust/.gitignore
new file mode 100644
index 00000000..1256dafb
--- /dev/null
+++ b/rust/.gitignore
@@ -0,0 +1,15 @@
+# Generated by Cargo
+# will have compiled files and executables
+debug
+target
+Cargo.lock
+
+# These are backup files generated by rustfmt
+**/*.rs.bk
+
+# MSVC Windows builds of rustc generate these, which store debugging information
+*.pdb
+
+# Generated by cargo mutants
+# Contains mutation testing data
+**/mutants.out*/
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
new file mode 100644
index 00000000..f45941f4
--- /dev/null
+++ b/rust/Cargo.toml
@@ -0,0 +1,165 @@
+[workspace]
+resolver = "3"
+members = [
+ "networking",
+ "exo_pyo3_bindings",
+ "system_custodian",
+ "util",
+]
+
+[workspace.package]
+version = "0.0.1"
+edition = "2024"
+
+[profile.dev]
+opt-level = 1
+debug = true
+
+[profile.release]
+opt-level = 3
+
+# Common shared dependencies configured once at the workspace
+# level, to be re-used more easily across workspace member crates.
+#
+# Common configurations include versions, paths, features, etc.
+[workspace.dependencies]
+## Crate members as common dependencies
+networking = { path = "networking" }
+system_custodian = { path = "system_custodian" }
+util = { path = "util" }
+
+# Proc-macro authoring tools
+syn = "2.0"
+quote = "1.0"
+proc-macro2 = "1.0"
+darling = "0.20"
+
+# Macro dependencies
+extend = "1.2"
+delegate = "0.13"
+impl-trait-for-tuples = "0.2"
+clap = "4.5"
+derive_more = { version = "2.0.1", features = ["display"] }
+pin-project = "1"
+
+# Utility dependencies
+itertools = "0.14"
+thiserror = "2"
+internment = "0.8"
+recursion = "0.5"
+regex = "1.11"
+once_cell = "1.21"
+thread_local = "1.1"
+bon = "3.4"
+generativity = "1.1"
+anyhow = "1.0"
+keccak-const = "0.2"
+
+# Functional generics/lenses frameworks
+frunk_core = "0.4"
+frunk = "0.4"
+frunk_utils = "0.2"
+frunk-enum-core = "0.3"
+
+# Async dependencies
+tokio = "1.46"
+futures = "0.3"
+futures-util = "0.3"
+futures-timer = "3.0"
+
+# Data structures
+either = "1.15"
+ordered-float = "5.0"
+ahash = "0.8"
+
+# Tracing/logging
+log = "0.4"
+
+# networking
+libp2p = "0.56"
+libp2p-tcp = "0.44"
+
+[workspace.lints.rust]
+static_mut_refs = "warn" # could be tightened to "deny" later
+incomplete_features = "allow"
+
+# Clippy's lint category level configurations;
+# every member crate needs to inherit these by adding
+#
+# ```toml
+# [lints]
+# workspace = true
+# ```
+#
+# to their `Cargo.toml` files
+[workspace.lints.clippy]
+# Clippy lint categories meant to be enabled all at once
+correctness = { level = "deny", priority = -1 }
+suspicious = { level = "warn", priority = -1 }
+style = { level = "warn", priority = -1 }
+complexity = { level = "warn", priority = -1 }
+perf = { level = "warn", priority = -1 }
+pedantic = { level = "warn", priority = -1 }
+nursery = { level = "warn", priority = -1 }
+cargo = { level = "warn", priority = -1 }
+
+# Individual Clippy lints from the `restriction` category
+arithmetic_side_effects = "warn"
+as_conversions = "warn"
+assertions_on_result_states = "warn"
+clone_on_ref_ptr = "warn"
+decimal_literal_representation = "warn"
+default_union_representation = "warn"
+deref_by_slicing = "warn"
+disallowed_script_idents = "deny"
+else_if_without_else = "warn"
+empty_enum_variants_with_brackets = "warn"
+empty_structs_with_brackets = "warn"
+error_impl_error = "warn"
+exit = "deny"
+expect_used = "warn"
+float_cmp_const = "warn"
+get_unwrap = "warn"
+if_then_some_else_none = "warn"
+impl_trait_in_params = "warn"
+indexing_slicing = "warn"
+infinite_loop = "warn"
+let_underscore_must_use = "warn"
+let_underscore_untyped = "warn"
+lossy_float_literal = "warn"
+mem_forget = "warn"
+missing_inline_in_public_items = "warn"
+multiple_inherent_impl = "warn"
+multiple_unsafe_ops_per_block = "warn"
+mutex_atomic = "warn"
+non_zero_suggestions = "warn"
+panic = "warn"
+partial_pub_fields = "warn"
+pattern_type_mismatch = "warn"
+pub_without_shorthand = "warn"
+rc_buffer = "warn"
+rc_mutex = "warn"
+redundant_type_annotations = "warn"
+renamed_function_params = "warn"
+rest_pat_in_fully_bound_structs = "warn"
+same_name_method = "warn"
+self_named_module_files = "deny"
+semicolon_inside_block = "warn"
+shadow_same = "warn"
+shadow_unrelated = "warn"
+str_to_string = "warn"
+string_add = "warn"
+string_lit_chars_any = "warn"
+string_to_string = "warn"
+tests_outside_test_module = "warn"
+todo = "warn"
+try_err = "warn"
+undocumented_unsafe_blocks = "warn"
+unnecessary_safety_comment = "warn"
+unnecessary_safety_doc = "warn"
+unneeded_field_pattern = "warn"
+unseparated_literal_suffix = "warn"
+unused_result_ok = "warn"
+unused_trait_names = "warn"
+unwrap_used = "warn"
+verbose_file_reads = "warn"
diff --git a/rust/clippy.toml b/rust/clippy.toml
new file mode 100644
index 00000000..6d5a6187
--- /dev/null
+++ b/rust/clippy.toml
@@ -0,0 +1,2 @@
+# We can manually exclude false-positive duplicate-crate lint errors here (for packages that appear twice in the dependency tree):
+#allowed-duplicate-crates = ["hashbrown"]
\ No newline at end of file
diff --git a/rust/exo_pyo3_bindings/Cargo.toml b/rust/exo_pyo3_bindings/Cargo.toml
new file mode 100644
index 00000000..4895ecf4
--- /dev/null
+++ b/rust/exo_pyo3_bindings/Cargo.toml
@@ -0,0 +1,77 @@
+[package]
+name = "exo_pyo3_bindings"
+version = { workspace = true }
+edition = { workspace = true }
+publish = false
+
+[lib]
+doctest = false
+path = "src/lib.rs"
+name = "exo_pyo3_bindings"
+
+# "cdylib" needed to produce shared library for Python to import
+# "rlib" needed for stub-gen to run
+crate-type = ["cdylib", "rlib"]
+
+[[bin]]
+path = "src/bin/stub_gen.rs"
+name = "stub_gen"
+doc = false
+
+[lints]
+workspace = true
+
+[dependencies]
+networking = { workspace = true }
+
+# interop
+pyo3 = { version = "0.25.1", features = [# TODO: migrate to v0.26 soon!!
+ # "abi3-py311", # tells pyo3 (and maturin) to build using the stable ABI with minimum Python version 3.11
+ "nightly", # enables better-supported GIL integration
+ "experimental-async", # async support in #[pyfunction] & #[pymethods]
+ #"experimental-inspect", # inspection of generated binary => easier to automate type-hint generation
+ #"py-clone", # adding Clone-ing of `Py` without GIL (may cause panics - remove if panics happen)
+ "multiple-pymethods", # allows multiple #[pymethods] sections per class
+
+ # integrations with other libraries
+ "arc_lock", "bigdecimal", "either", "hashbrown", "indexmap", "num-bigint", "num-complex", "num-rational",
+ "ordered-float", "rust_decimal", "smallvec",
+ # "anyhow", "chrono", "chrono-local", "chrono-tz", "eyre", "jiff-02", "lock_api", "parking-lot", "time", "serde",
+] }
+pyo3-stub-gen = { version = "0.13.1" }
+pyo3-async-runtimes = { version = "0.25", features = ["attributes", "tokio-runtime", "testing"] }
+
+# macro dependencies
+extend = { workspace = true }
+delegate = { workspace = true }
+impl-trait-for-tuples = { workspace = true }
+derive_more = { workspace = true }
+pin-project = { workspace = true }
+
+# async runtime
+tokio = { workspace = true, features = ["full", "tracing"] }
+futures = { workspace = true }
+
+# utility dependencies
+once_cell = "1.21.3"
+thread_local = "1.1.9"
+util = { workspace = true }
+thiserror = { workspace = true }
+#internment = { workspace = true }
+#recursion = { workspace = true }
+#generativity = { workspace = true }
+#itertools = { workspace = true }
+
+
+# Tracing
+#tracing = "0.1"
+#tracing-subscriber = "0.3"
+#console-subscriber = "0.1.5"
+#tracing-log = "0.2.0"
+log = { workspace = true }
+env_logger = "0.11"
+pyo3-log = "0.12"
+
+
+# Networking
+libp2p = { workspace = true, features = ["full"] }
diff --git a/rust/exo_pyo3_bindings/README.md b/rust/exo_pyo3_bindings/README.md
new file mode 100644
index 00000000..e739dd89
--- /dev/null
+++ b/rust/exo_pyo3_bindings/README.md
@@ -0,0 +1 @@
+TODO: do something here....
diff --git a/rust/exo_pyo3_bindings/exo_pyo3_bindings.pyi b/rust/exo_pyo3_bindings/exo_pyo3_bindings.pyi
new file mode 100644
index 00000000..cf2214cd
--- /dev/null
+++ b/rust/exo_pyo3_bindings/exo_pyo3_bindings.pyi
@@ -0,0 +1,207 @@
+# This file is automatically generated by pyo3_stub_gen
+# ruff: noqa: E501, F401
+
+import builtins
+from enum import Enum
+
+class ConnectionUpdate:
+ @property
+ def update_type(self) -> ConnectionUpdateType:
+ r"""
+ Whether this is a connection or disconnection event
+ """
+ @property
+ def peer_id(self) -> PeerId:
+ r"""
+ Identity of the peer that we have connected to or disconnected from.
+ """
+ @property
+ def remote_ipv4(self) -> builtins.str:
+ r"""
+ Remote connection's IPv4 address.
+ """
+ @property
+ def remote_tcp_port(self) -> builtins.int:
+ r"""
+ Remote connection's TCP port.
+ """
+
+class Keypair:
+    r"""
+    Identity keypair of a node.
+    """
+    @staticmethod
+    def generate_ed25519() -> Keypair:
+        r"""
+        Generate a new Ed25519 keypair.
+        """
+    @staticmethod
+    def generate_ecdsa() -> Keypair:
+        r"""
+        Generate a new ECDSA keypair.
+        """
+    @staticmethod
+    def generate_secp256k1() -> Keypair:
+        r"""
+        Generate a new Secp256k1 keypair.
+        """
+    @staticmethod
+    def from_protobuf_encoding(bytes:bytes) -> Keypair:
+        r"""
+        Decode a private key from a protobuf structure and parse it as a `Keypair`.
+        """
+    @staticmethod
+    def rsa_from_pkcs8(bytes:bytes) -> Keypair:
+        r"""
+        Decode a keypair from a DER-encoded secret key in PKCS#8 `PrivateKeyInfo`
+        format (i.e. unencrypted) as defined in [RFC5208].
+
+        [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5
+        """
+    @staticmethod
+    def secp256k1_from_der(bytes:bytes) -> Keypair:
+        r"""
+        Decode a keypair from a DER-encoded Secp256k1 secret key in an `ECPrivateKey`
+        structure as defined in [RFC5915].
+
+        [RFC5915]: https://tools.ietf.org/html/rfc5915
+        """
+    @staticmethod
+    def ed25519_from_bytes(bytes:bytes) -> Keypair: ...
+    def to_protobuf_encoding(self) -> bytes:
+        r"""
+        Encode a private key as protobuf structure.
+        """
+    def to_peer_id(self) -> PeerId:
+        r"""
+        Convert the `Keypair` into the corresponding `PeerId`.
+        """
+
+class Multiaddr:
+ r"""
+ Representation of a Multiaddr.
+ """
+ @staticmethod
+ def empty() -> Multiaddr:
+ r"""
+ Create a new, empty multiaddress.
+ """
+ @staticmethod
+ def with_capacity(n:builtins.int) -> Multiaddr:
+ r"""
+ Create a new, empty multiaddress with the given capacity.
+ """
+ @staticmethod
+ def from_bytes(bytes:bytes) -> Multiaddr:
+ r"""
+ Parse a `Multiaddr` value from its byte slice representation.
+ """
+ @staticmethod
+ def from_string(string:builtins.str) -> Multiaddr:
+ r"""
+ Parse a `Multiaddr` value from its string representation.
+ """
+ def len(self) -> builtins.int:
+ r"""
+ Return the length in bytes of this multiaddress.
+ """
+ def is_empty(self) -> builtins.bool:
+ r"""
+ Returns true if the length of this multiaddress is 0.
+ """
+ def to_bytes(self) -> bytes:
+ r"""
+ Return a copy of this [`Multiaddr`]'s byte representation.
+ """
+ def to_string(self) -> builtins.str:
+ r"""
+ Convert a Multiaddr to a string.
+ """
+
+class NetworkingHandle:
+    def __new__(cls, identity:Keypair) -> NetworkingHandle: ...
+    async def connection_update_recv(self) -> ConnectionUpdate:
+        r"""
+        Receives the next `ConnectionUpdate` from networking.
+        """
+    async def connection_update_recv_many(self, limit:builtins.int) -> builtins.list[ConnectionUpdate]:
+        r"""
+        Receives at most `limit` `ConnectionUpdate`s from networking and returns them.
+
+        For `limit = 0`, an empty collection of `ConnectionUpdate`s will be returned immediately.
+        For `limit > 0`, if there are no `ConnectionUpdate`s in the channel's queue this method
+        will sleep until a `ConnectionUpdate` is sent.
+        """
+    async def gossipsub_subscribe(self, topic:builtins.str) -> builtins.bool:
+        r"""
+        Subscribe to a `GossipSub` topic.
+
+        Returns `True` if the subscription worked. Returns `False` if we were already subscribed.
+        """
+    async def gossipsub_unsubscribe(self, topic:builtins.str) -> builtins.bool:
+        r"""
+        Unsubscribes from a `GossipSub` topic.
+
+        Returns `True` if we were subscribed to this topic. Returns `False` if we were not subscribed.
+        """
+    async def gossipsub_publish(self, topic:builtins.str, data:bytes) -> None:
+        r"""
+        Publishes a message to the given topic on the `GossipSub` network.
+
+        If no peers are found that subscribe to this topic, throws `NoPeersSubscribedToTopicError` exception.
+        """
+    async def gossipsub_recv(self) -> tuple[builtins.str, bytes]:
+        r"""
+        Receives the next message from the `GossipSub` network.
+        """
+    async def gossipsub_recv_many(self, limit:builtins.int) -> builtins.list[tuple[builtins.str, bytes]]:
+        r"""
+        Receives at most `limit` messages from the `GossipSub` network and returns them.
+
+        For `limit = 0`, an empty collection of messages will be returned immediately.
+        For `limit > 0`, if there are no messages in the channel's queue this method
+        will sleep until a message is sent.
+        """
+
+class NoPeersSubscribedToTopicError(builtins.Exception):
+ def __new__(cls, *args) -> NoPeersSubscribedToTopicError: ...
+ def __repr__(self) -> builtins.str: ...
+ def __str__(self) -> builtins.str: ...
+
+class PeerId:
+ r"""
+ Identifier of a peer of the network.
+
+ The data is a `CIDv0` compatible multihash of the protobuf encoded public key of the peer
+ as specified in [specs/peer-ids](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md).
+ """
+ @staticmethod
+ def random() -> PeerId:
+ r"""
+ Generates a random peer ID from a cryptographically secure PRNG.
+
+ This is useful for randomly walking on a DHT, or for testing purposes.
+ """
+ @staticmethod
+ def from_bytes(bytes:bytes) -> PeerId:
+ r"""
+ Parses a `PeerId` from bytes.
+ """
+ def to_bytes(self) -> bytes:
+ r"""
+ Returns a raw bytes representation of this `PeerId`.
+ """
+ def to_base58(self) -> builtins.str:
+ r"""
+ Returns a base-58 encoded string of this `PeerId`.
+ """
+ def __repr__(self) -> builtins.str: ...
+ def __str__(self) -> builtins.str: ...
+
+class ConnectionUpdateType(Enum):
+ r"""
+ Connection or disconnection event discriminant type.
+ """
+ Connected = ...
+ Disconnected = ...
+
diff --git a/rust/exo_pyo3_bindings/pyproject.toml b/rust/exo_pyo3_bindings/pyproject.toml
new file mode 100644
index 00000000..f1d24cf9
--- /dev/null
+++ b/rust/exo_pyo3_bindings/pyproject.toml
@@ -0,0 +1,32 @@
+[build-system]
+requires = ["maturin>=1.0,<2.0"]
+build-backend = "maturin"
+
+[project]
+name = "exo_pyo3_bindings"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+authors = [
+ { name = "Andrei Cravtov", email = "the.andrei.cravtov@gmail.com" }
+]
+requires-python = ">=3.13"
+dependencies = []
+
+[dependency-groups]
+dev = [
+ "exo_pyo3_bindings",
+ "pytest>=8.4.0",
+ "pytest-asyncio>=1.0.0",
+]
+
+[tool.maturin]
+#purelib = true
+#python-source = "python"
+module-name = "exo_pyo3_bindings"
+features = ["pyo3/extension-module", "pyo3/experimental-async"]
+
+[tool.pytest.ini_options]
+log_cli = true
+log_cli_level = "INFO"
+asyncio_mode = "auto"
\ No newline at end of file
diff --git a/rust/exo_pyo3_bindings/src/allow_threading.rs b/rust/exo_pyo3_bindings/src/allow_threading.rs
new file mode 100644
index 00000000..3106e535
--- /dev/null
+++ b/rust/exo_pyo3_bindings/src/allow_threading.rs
@@ -0,0 +1,40 @@
+//! SEE: https://pyo3.rs/v0.26.0/async-await.html#detaching-from-the-interpreter-across-await
+//!
+
+use pin_project::pin_project;
+use pyo3::marker::Ungil;
+use pyo3::prelude::*;
+use std::{
+ future::Future,
+ pin::{Pin, pin},
+ task::{Context, Poll},
+};
+
+/// SEE: https://pyo3.rs/v0.26.0/async-await.html#detaching-from-the-interpreter-across-await
+///
+/// Wrapper future that detaches from the Python interpreter (releases the
+/// GIL) while the inner future is being polled, so Python threads can make
+/// progress across `.await` points.
+///
+/// NOTE: the generic parameter lists were lost in this patch's text
+/// (`<F>` etc. stripped); restored here to match the documented pyo3 pattern.
+#[pin_project]
+#[repr(transparent)]
+pub(crate) struct AllowThreads<F>(#[pin] F);
+
+impl<F> AllowThreads<F>
+where
+    Self: Future,
+{
+    /// Wrap `f`; only constructible when the wrapper actually is a future.
+    pub fn new(f: F) -> Self {
+        Self(f)
+    }
+}
+
+impl<F> Future for AllowThreads<F>
+where
+    F: Future + Ungil,
+    F::Output: Ungil,
+{
+    type Output = F::Output;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let waker = cx.waker();
+        // Acquire the GIL only to immediately release it around the inner
+        // poll: `allow_threads` must be entered from a GIL-holding context.
+        Python::with_gil(|py| {
+            py.allow_threads(|| self.project().0.poll(&mut Context::from_waker(waker)))
+        })
+    }
+}
diff --git a/rust/exo_pyo3_bindings/src/bin/stub_gen.rs b/rust/exo_pyo3_bindings/src/bin/stub_gen.rs
new file mode 100644
index 00000000..3e30f493
--- /dev/null
+++ b/rust/exo_pyo3_bindings/src/bin/stub_gen.rs
@@ -0,0 +1,8 @@
+use pyo3_stub_gen::Result;
+
+/// Entry point for generating the Python `.pyi` stub file from the bindings.
+fn main() -> Result<()> {
+    // Default to `info`-level logging unless RUST_LOG overrides it.
+    let env = env_logger::Env::default().filter_or("RUST_LOG", "info");
+    env_logger::Builder::from_env(env).init();
+
+    // Collect stub information from the bindings crate and write the stubs.
+    exo_pyo3_bindings::stub_info()?.generate()?;
+    Ok(())
+}
diff --git a/rust/exo_pyo3_bindings/src/examples/mod.rs b/rust/exo_pyo3_bindings/src/examples/mod.rs
new file mode 100644
index 00000000..bde14199
--- /dev/null
+++ b/rust/exo_pyo3_bindings/src/examples/mod.rs
@@ -0,0 +1,240 @@
+//! This module exists to hold examples of some pyo3 patterns that may be too complex to
+//! re-create from scratch, but too inhomogeneous to create an abstraction/wrapper around.
+//!
+//! Pattern examples include:
+//! - Async task handles: with GC-integrated cleanup
+//! - Sync/async callbacks from Python: with proper event-loop handling
+//!
+//! Mutability pattern: https://pyo3.rs/v0.26.0/async-await.html#send--static-constraint
+//! - Store mutable fields in tokio's `Mutex`
+//! - For async code: take `&self` and `.lock().await`
+//! - For sync code: take `&mut self` and `.get_mut()`
+
+use crate::ext::{PyResultExt as _, ResultExt as _, TokioRuntimeExt as _};
+use futures::FutureExt as _;
+use futures::future::BoxFuture;
+use pyo3::exceptions::PyRuntimeError;
+use pyo3::prelude::{PyModule, PyModuleMethods as _};
+use pyo3::{
+ Bound, Py, PyAny, PyErr, PyResult, PyTraverseError, PyVisit, Python, pyclass, pymethods,
+};
+use std::time::Duration;
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::error::TryRecvError;
+
+/// Asserts that a tokio runtime context is active: `Handle::current()`
+/// panics when called outside one. Used to force runtime initialization
+/// before the background task is spawned.
+fn needs_tokio_runtime() {
+    tokio::runtime::Handle::current();
+}
+
+/// Boxed synchronous callback invoked from inside the background task.
+/// (Generic parameters restored — they were stripped from this patch's text.)
+type SyncCallback = Box<dyn Fn() + Send + Sync>;
+/// Boxed asynchronous callback: produces a boxed future to be awaited.
+type AsyncCallback = Box<dyn Fn() -> BoxFuture<'static, ()> + Send + Sync>;
+
+/// Commands sent from the Python-facing handle to the background task.
+enum AsyncTaskMessage {
+    SyncCallback(SyncCallback),
+    AsyncCallback(AsyncCallback),
+}
+
+/// Background task: ticks once per second, invokes every registered sync and
+/// async callback, and emits a unit event back to the handle after each tick.
+/// Exits when either channel to the handle closes.
+async fn async_task(
+    sender: mpsc::UnboundedSender<()>,
+    mut receiver: mpsc::UnboundedReceiver<AsyncTaskMessage>,
+) {
+    log::info!("RUST: async task started");
+
+    // task state
+    let mut interval = tokio::time::interval(Duration::from_secs(1));
+
+    let mut sync_cbs: Vec<SyncCallback> = vec![];
+    let mut async_cbs: Vec<AsyncCallback> = vec![];
+
+    loop {
+        tokio::select! {
+            // handle incoming messages from task-handle
+            message = receiver.recv() => {
+                // handle closed channel by exiting
+                let Some(message) = message else {
+                    log::info!("RUST: channel closed");
+                    break;
+                };
+
+                // dispatch incoming event
+                match message {
+                    AsyncTaskMessage::SyncCallback(cb) => {
+                        sync_cbs.push(cb);
+                    }
+                    AsyncTaskMessage::AsyncCallback(cb) => {
+                        async_cbs.push(cb);
+                    }
+                }
+            }
+
+            // handle all other events
+            _ = interval.tick() => {
+                log::info!("RUST: async task tick");
+
+                // call back all sync callbacks
+                for cb in &sync_cbs {
+                    cb();
+                }
+
+                // call back all async callbacks
+                for cb in &async_cbs {
+                    cb().await;
+                }
+
+                // Notify the handle. The handle may drop its receiver (GC /
+                // `__clear__`) before this task observes the closed command
+                // channel, so a failed send must end the task gracefully
+                // rather than panic (the original `expect` could race here).
+                if sender.send(()).is_err() {
+                    log::info!("RUST: handle receiver closed");
+                    break;
+                }
+            }
+        }
+    }
+
+    log::info!("RUST: async task stopped");
+}
+
+// #[gen_stub_pyclass]
+#[pyclass(name = "AsyncTaskHandle")]
+#[derive(Debug)]
+struct PyAsyncTaskHandle {
+    /// Command channel towards the background task; `None` only after the
+    /// GC ran `__clear__` (dropping the sender makes the task shut down).
+    sender: Option<mpsc::UnboundedSender<AsyncTaskMessage>>,
+    /// Unit events coming back from the background task, one per tick.
+    receiver: mpsc::UnboundedReceiver<()>,
+}
+
+#[allow(clippy::expect_used)]
+impl PyAsyncTaskHandle {
+    /// Shared borrow of the command channel.
+    ///
+    /// Panics if called after `__clear__` de-initialized the handle.
+    const fn sender(&self) -> &mpsc::UnboundedSender<AsyncTaskMessage> {
+        self.sender
+            .as_ref()
+            .expect("The sender should only be None after de-initialization.")
+    }
+
+    /// Like [`Self::sender`], via a mutable borrow.
+    /// NOTE(review): appears unused and equivalent to `sender` — candidate
+    /// for removal; confirm against callers outside this chunk.
+    const fn sender_mut(&mut self) -> &mpsc::UnboundedSender<AsyncTaskMessage> {
+        self.sender
+            .as_mut()
+            .expect("The sender should only be None after de-initialization.")
+    }
+
+    /// Bundle freshly created channel endpoints into a handle.
+    const fn new(
+        sender: mpsc::UnboundedSender<AsyncTaskMessage>,
+        receiver: mpsc::UnboundedReceiver<()>,
+    ) -> Self {
+        Self {
+            sender: Some(sender),
+            receiver,
+        }
+    }
+}
+
+// #[gen_stub_pymethods]
+#[pymethods]
+impl PyAsyncTaskHandle {
+    /// Create both channels and spawn the background task on the pyo3 tokio
+    /// runtime with this thread's Python task-locals attached.
+    #[new]
+    fn py_new(py: Python<'_>) -> PyResult<Self> {
+        use pyo3_async_runtimes::tokio::get_runtime;
+
+        // create communication channel TOWARDS our task
+        let (h_sender, t_receiver) = mpsc::unbounded_channel::<AsyncTaskMessage>();
+
+        // create communication channel FROM our task
+        let (t_sender, h_receiver) = mpsc::unbounded_channel::<()>();
+
+        // perform necessary setup within tokio context - or it crashes
+        let () = get_runtime().block_on(async { needs_tokio_runtime() });
+
+        // spawn tokio task with this thread's task-locals - without this, async callbacks on the new threads will not work!!
+        // Propagate the error instead of discarding it: `spawn_with_scope`
+        // fails when task-locals can't be resolved, and silently ignoring
+        // that would return a handle whose task never runs.
+        _ = get_runtime().spawn_with_scope(py, async move {
+            async_task(t_sender, t_receiver).await;
+        })?;
+        Ok(Self::new(h_sender, h_receiver))
+    }
+
+    /// NOTE: exceptions in callbacks are silently ignored until end of execution
+    fn add_sync_callback(
+        &self,
+        // #[gen_stub(override_type(
+        //     type_repr="collections.abc.Callable[[], None]",
+        //     imports=("collections.abc")
+        // ))]
+        callback: Py<PyAny>,
+    ) -> PyResult<()> {
+        // blocking call to async method -> can do non-blocking if needed
+        self.sender()
+            .send(AsyncTaskMessage::SyncCallback(Box::new(move || {
+                _ = Python::with_gil(|py| callback.call0(py).write_unraisable_with(py));
+            })))
+            .pyerr()?;
+        Ok(())
+    }
+
+    /// NOTE: exceptions in callbacks are silently ignored until end of execution
+    fn add_async_callback(
+        &self,
+        // #[gen_stub(override_type(
+        //     type_repr="collections.abc.Callable[[], collections.abc.Awaitable[None]]",
+        //     imports=("collections.abc")
+        // ))]
+        callback: Py<PyAny>,
+    ) -> PyResult<()> {
+        // blocking call to async method -> can do non-blocking if needed
+        self.sender()
+            .send(AsyncTaskMessage::AsyncCallback(Box::new(move || {
+                let c = Python::with_gil(|py| callback.clone_ref(py));
+                async move {
+                    // Call the Python callable, convert its coroutine into a
+                    // Rust future, and await it; any failure is reported to
+                    // Python as an unraisable exception.
+                    if let Some(f) = Python::with_gil(|py| {
+                        let coroutine = c.call0(py).write_unraisable_with(py)?;
+                        pyo3_async_runtimes::tokio::into_future(coroutine.into_bound(py))
+                            .write_unraisable_with(py)
+                    }) {
+                        _ = f.await.write_unraisable();
+                    }
+                }
+                .boxed()
+            })))
+            .pyerr()?;
+        Ok(())
+    }
+
+    /// Await the next unit event produced by the background task.
+    async fn receive_unit(&mut self) -> PyResult<()> {
+        self.receiver
+            .recv()
+            .await
+            // lazily built error (ok_or_else) — only pays on the error path
+            .ok_or_else(|| {
+                PyErr::new::<PyRuntimeError, _>("cannot receive unit on closed channel")
+            })
+    }
+
+    /// Drain all currently queued unit events and return how many there were.
+    fn drain_units(&mut self) -> PyResult<usize> {
+        let mut cnt = 0;
+        loop {
+            match self.receiver.try_recv() {
+                Err(TryRecvError::Disconnected) => {
+                    return Err(PyErr::new::<PyRuntimeError, _>(
+                        "cannot receive unit on closed channel",
+                    ));
+                }
+                Err(TryRecvError::Empty) => return Ok(cnt),
+                Ok(()) => cnt += 1,
+            }
+        }
+    }
+
+    // #[gen_stub(skip)]
+    const fn __traverse__(&self, _visit: PyVisit<'_>) -> Result<(), PyTraverseError> {
+        Ok(()) // This is needed purely so `__clear__` can work
+    }
+
+    // #[gen_stub(skip)]
+    fn __clear__(&mut self) {
+        // TODO: may or may not need to await a "kill-signal" oneshot channel message,
+        // to ensure that the networking task is done BEFORE exiting the clear function...
+        // but this may require GIL?? and it may not be safe to call GIL here??
+        self.sender = None; // Using Option as a trick to force `sender` channel to be dropped
+    }
+}
+
+/// Register this module's classes on the given Python module.
+/// (Turbofish restored — the `<...>` was stripped from this patch's text.)
+pub fn examples_submodule(m: &Bound<'_, PyModule>) -> PyResult<()> {
+    m.add_class::<PyAsyncTaskHandle>()?;
+
+    Ok(())
+}
diff --git a/rust/exo_pyo3_bindings/src/lib.rs b/rust/exo_pyo3_bindings/src/lib.rs
new file mode 100644
index 00000000..4f591b8c
--- /dev/null
+++ b/rust/exo_pyo3_bindings/src/lib.rs
@@ -0,0 +1,217 @@
+//! TODO: crate documentation
+//!
+//! this is here as a placeholder documentation
+//!
+//!
+
+// enable Rust-unstable features for convenience
+#![feature(trait_alias)]
+#![feature(tuple_trait)]
+#![feature(unboxed_closures)]
+// #![feature(stmt_expr_attributes)]
+// #![feature(assert_matches)]
+// #![feature(async_fn_in_dyn_trait)]
+// #![feature(async_for_loop)]
+// #![feature(auto_traits)]
+// #![feature(negative_impls)]
+
+extern crate core;
+mod allow_threading;
+mod examples;
+pub(crate) mod networking;
+pub(crate) mod pylibp2p;
+
+use crate::networking::networking_submodule;
+use crate::pylibp2p::ident::ident_submodule;
+use crate::pylibp2p::multiaddr::multiaddr_submodule;
+use pyo3::prelude::PyModule;
+use pyo3::prelude::*;
+use pyo3::{Bound, PyResult, pyclass, pymodule};
+use pyo3_stub_gen::define_stub_info_gatherer;
+
+/// Namespace for all the constants used by this crate.
+pub(crate) mod r#const {
+    // NOTE(review): usage is not visible in this chunk — presumably the bound
+    // for mpsc channels created elsewhere in the crate; confirm before tuning.
+    pub const MPSC_CHANNEL_SIZE: usize = 1024;
+}
+
+/// Namespace for all the type/trait aliases used by this crate.
+pub(crate) mod alias {
+    use std::error::Error;
+    use std::marker::Tuple;
+
+    // NOTE(review): the generic parameter lists below appear to have been lost
+    // (`<...>` stripped during extraction). Presumably `SendFn` aliased
+    // `Fn<Args, Output = R> + Send + 'static` over `Args: Tuple`, `AnyError`
+    // was `Box<dyn Error + Send + Sync>`, and `AnyResult` was
+    // `Result<T, AnyError>` — restore the exact forms from version control.
+    pub trait SendFn =
+        Fn + Send + 'static;
+
+    pub type AnyError = Box;
+    pub type AnyResult = Result;
+}
+
+/// Namespace for crate-wide extension traits/methods
+pub(crate) mod ext {
+ use crate::allow_threading::AllowThreads;
+ use extend::ext;
+ use pyo3::exceptions::{PyConnectionError, PyRuntimeError};
+ use pyo3::marker::Ungil;
+ use pyo3::types::PyBytes;
+ use pyo3::{Py, PyErr, PyResult, Python};
+ use tokio::runtime::Runtime;
+ use tokio::sync::mpsc;
+ use tokio::sync::mpsc::error::TryRecvError;
+ use tokio::task::JoinHandle;
+
+    #[ext(pub, name = ByteArrayExt)]
+    impl [u8] {
+        /// Copy this byte slice into a freshly allocated Python `bytes` object.
+        /// (Return generics restored — stripped from this patch's text.)
+        fn pybytes(&self) -> Py<PyBytes> {
+            Python::with_gil(|py| PyBytes::new(py, self).unbind())
+        }
+    }
+
+    #[ext(pub, name = ResultExt)]
+    impl<T, E> Result<T, E>
+    where
+        E: ToString,
+    {
+        /// Map any stringifiable error into a Python `RuntimeError`.
+        fn pyerr(self) -> PyResult<T> {
+            self.map_err(|e| PyRuntimeError::new_err(e.to_string()))
+        }
+    }
+
+    pub trait FutureExt: Future + Sized {
+        /// Wrap this future so the GIL is released while it is polled.
+        /// SEE: https://pyo3.rs/v0.26.0/async-await.html#detaching-from-the-interpreter-across-await
+        fn allow_threads_py(self) -> AllowThreads<Self>
+        where
+            AllowThreads<Self>: Future,
+        {
+            AllowThreads::new(self)
+        }
+    }
+
+    impl<T: Future> FutureExt for T {}
+
+    #[ext(pub, name = PyErrExt)]
+    impl PyErr {
+        /// Canonical error used whenever the receiving side of a channel
+        /// has hung up; raised to Python as a `ConnectionError`.
+        fn receiver_channel_closed() -> Self {
+            PyConnectionError::new_err("Receiver channel closed unexpectedly")
+        }
+    }
+
+    #[ext(pub, name = PyResultExt)]
+    impl<T> PyResult<T> {
+        /// Consume the result; on error, report it to Python as an
+        /// unraisable exception and return `None`.
+        fn write_unraisable(self) -> Option<T> {
+            Python::with_gil(|py| self.write_unraisable_with(py))
+        }
+
+        /// Same as [`Self::write_unraisable`], with the GIL already held.
+        fn write_unraisable_with(self, py: Python<'_>) -> Option<T> {
+            match self {
+                Ok(v) => Some(v),
+                Err(e) => {
+                    // write error back to python
+                    e.write_unraisable(py, None);
+                    None
+                }
+            }
+        }
+    }
+
+    #[ext(pub, name = TokioRuntimeExt)]
+    impl Runtime {
+        /// Spawn `future` on this runtime with the *current* Python
+        /// task-locals attached, so async Python callbacks inside the task
+        /// can find their event loop.
+        fn spawn_with_scope<F>(&self, py: Python<'_>, future: F) -> PyResult<JoinHandle<F::Output>>
+        where
+            F: Future + Send + 'static,
+            F::Output: Send + 'static,
+        {
+            let locals = pyo3_async_runtimes::tokio::get_current_locals(py)?;
+            Ok(self.spawn(pyo3_async_runtimes::tokio::scope(locals, future)))
+        }
+    }
+
+    #[ext(pub, name = TokioMpscSenderExt)]
+    impl<T> mpsc::Sender<T> {
+        /// Sends a value, waiting until there is capacity.
+        ///
+        /// A successful send occurs when it is determined that the other end of the
+        /// channel has not hung up already. An unsuccessful send would be one where
+        /// the corresponding receiver has already been closed; that case is
+        /// mapped to a Python `ConnectionError`.
+        async fn send_py(&self, value: T) -> PyResult<()> {
+            self.send(value)
+                .await
+                .map_err(|_| PyErr::receiver_channel_closed())
+        }
+    }
+
+ #[ext(pub, name = TokioMpscReceiverExt)]
+ impl mpsc::Receiver {
+ /// Receives the next value for this receiver.
+ async fn recv_py(&mut self) -> PyResult {
+ self.recv().await.ok_or_else(PyErr::receiver_channel_closed)
+ }
+
+ /// Receives at most `limit` values for this receiver and returns them.
+ ///
+ /// For `limit = 0`, an empty collection of messages will be returned immediately.
+ /// For `limit > 0`, if there are no messages in the channel's queue this method
+ /// will sleep until a message is sent.
+ async fn recv_many_py(&mut self, limit: usize) -> PyResult> {
+ // get updates from receiver channel
+ let mut updates = Vec::with_capacity(limit);
+ let received = self.recv_many(&mut updates, limit).await;
+
+ // if we received zero items, then the channel was unexpectedly closed
+ if limit != 0 && received == 0 {
+ return Err(PyErr::receiver_channel_closed());
+ }
+
+ Ok(updates)
+ }
+
+ /// Tries to receive the next value for this receiver.
+ fn try_recv_py(&mut self) -> PyResult