mirror of
https://github.com/exo-explore/exo.git
synced 2025-12-23 22:27:50 -05:00
kill go
Fairwell Gelu, Chief Lunch Officer
This commit is contained in:
10
.gitignore
vendored
10
.gitignore
vendored
@@ -2,21 +2,17 @@
|
||||
__pycache__
|
||||
*.so
|
||||
|
||||
hosts.json
|
||||
hosts*.json
|
||||
|
||||
# go cache is project local but not tracked
|
||||
.go_cache
|
||||
nodes.json
|
||||
|
||||
# hide direnv stuff
|
||||
.direnv/
|
||||
# TODO figure out how to properly solve the issue with these target directories showing up
|
||||
networking/target/
|
||||
networking/topology/target/
|
||||
|
||||
build/
|
||||
dist/
|
||||
*.xcuserstate
|
||||
|
||||
*.xcuserstate
|
||||
.DS_Store
|
||||
*/.DS_Store
|
||||
|
||||
|
||||
BIN
app/.DS_Store
vendored
BIN
app/.DS_Store
vendored
Binary file not shown.
BIN
app/exov2/.DS_Store
vendored
BIN
app/exov2/.DS_Store
vendored
Binary file not shown.
@@ -1,550 +0,0 @@
|
||||
// !$*UTF8*$!
|
||||
{
|
||||
archiveVersion = 1;
|
||||
classes = {
|
||||
};
|
||||
objectVersion = 77;
|
||||
objects = {
|
||||
|
||||
/* Begin PBXContainerItemProxy section */
|
||||
E07D64CC2E36127F009BFB4D /* PBXContainerItemProxy */ = {
|
||||
isa = PBXContainerItemProxy;
|
||||
containerPortal = E07D64B22E36127E009BFB4D /* Project object */;
|
||||
proxyType = 1;
|
||||
remoteGlobalIDString = E07D64B92E36127E009BFB4D;
|
||||
remoteInfo = exov2;
|
||||
};
|
||||
E07D64D62E36127F009BFB4D /* PBXContainerItemProxy */ = {
|
||||
isa = PBXContainerItemProxy;
|
||||
containerPortal = E07D64B22E36127E009BFB4D /* Project object */;
|
||||
proxyType = 1;
|
||||
remoteGlobalIDString = E07D64B92E36127E009BFB4D;
|
||||
remoteInfo = exov2;
|
||||
};
|
||||
/* End PBXContainerItemProxy section */
|
||||
|
||||
/* Begin PBXFileReference section */
|
||||
E07D64BA2E36127E009BFB4D /* EXO.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = EXO.app; sourceTree = BUILT_PRODUCTS_DIR; };
|
||||
E07D64CB2E36127F009BFB4D /* exov2Tests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = exov2Tests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
|
||||
E07D64D52E36127F009BFB4D /* exov2UITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = exov2UITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
|
||||
/* End PBXFileReference section */
|
||||
|
||||
/* Begin PBXFileSystemSynchronizedRootGroup section */
|
||||
E07D64BC2E36127E009BFB4D /* exov2 */ = {
|
||||
isa = PBXFileSystemSynchronizedRootGroup;
|
||||
path = exov2;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
E07D64CE2E36127F009BFB4D /* exov2Tests */ = {
|
||||
isa = PBXFileSystemSynchronizedRootGroup;
|
||||
path = exov2Tests;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
E07D64D82E36127F009BFB4D /* exov2UITests */ = {
|
||||
isa = PBXFileSystemSynchronizedRootGroup;
|
||||
path = exov2UITests;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
/* End PBXFileSystemSynchronizedRootGroup section */
|
||||
|
||||
/* Begin PBXFrameworksBuildPhase section */
|
||||
E07D64B72E36127E009BFB4D /* Frameworks */ = {
|
||||
isa = PBXFrameworksBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
E07D64C82E36127F009BFB4D /* Frameworks */ = {
|
||||
isa = PBXFrameworksBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
E07D64D22E36127F009BFB4D /* Frameworks */ = {
|
||||
isa = PBXFrameworksBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXFrameworksBuildPhase section */
|
||||
|
||||
/* Begin PBXGroup section */
|
||||
E07D64B12E36127E009BFB4D = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
E07D64BC2E36127E009BFB4D /* exov2 */,
|
||||
E07D64CE2E36127F009BFB4D /* exov2Tests */,
|
||||
E07D64D82E36127F009BFB4D /* exov2UITests */,
|
||||
E07D64BB2E36127E009BFB4D /* Products */,
|
||||
);
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
E07D64BB2E36127E009BFB4D /* Products */ = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
E07D64BA2E36127E009BFB4D /* EXO.app */,
|
||||
E07D64CB2E36127F009BFB4D /* exov2Tests.xctest */,
|
||||
E07D64D52E36127F009BFB4D /* exov2UITests.xctest */,
|
||||
);
|
||||
name = Products;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
/* End PBXGroup section */
|
||||
|
||||
/* Begin PBXNativeTarget section */
|
||||
E07D64B92E36127E009BFB4D /* exov2 */ = {
|
||||
isa = PBXNativeTarget;
|
||||
buildConfigurationList = E07D64DF2E36127F009BFB4D /* Build configuration list for PBXNativeTarget "exov2" */;
|
||||
buildPhases = (
|
||||
E07D64B62E36127E009BFB4D /* Sources */,
|
||||
E07D64B72E36127E009BFB4D /* Frameworks */,
|
||||
E07D64B82E36127E009BFB4D /* Resources */,
|
||||
);
|
||||
buildRules = (
|
||||
);
|
||||
dependencies = (
|
||||
);
|
||||
fileSystemSynchronizedGroups = (
|
||||
E07D64BC2E36127E009BFB4D /* exov2 */,
|
||||
);
|
||||
name = exov2;
|
||||
packageProductDependencies = (
|
||||
);
|
||||
productName = exov2;
|
||||
productReference = E07D64BA2E36127E009BFB4D /* EXO.app */;
|
||||
productType = "com.apple.product-type.application";
|
||||
};
|
||||
E07D64CA2E36127F009BFB4D /* exov2Tests */ = {
|
||||
isa = PBXNativeTarget;
|
||||
buildConfigurationList = E07D64E22E36127F009BFB4D /* Build configuration list for PBXNativeTarget "exov2Tests" */;
|
||||
buildPhases = (
|
||||
E07D64C72E36127F009BFB4D /* Sources */,
|
||||
E07D64C82E36127F009BFB4D /* Frameworks */,
|
||||
E07D64C92E36127F009BFB4D /* Resources */,
|
||||
);
|
||||
buildRules = (
|
||||
);
|
||||
dependencies = (
|
||||
E07D64CD2E36127F009BFB4D /* PBXTargetDependency */,
|
||||
);
|
||||
fileSystemSynchronizedGroups = (
|
||||
E07D64CE2E36127F009BFB4D /* exov2Tests */,
|
||||
);
|
||||
name = exov2Tests;
|
||||
packageProductDependencies = (
|
||||
);
|
||||
productName = exov2Tests;
|
||||
productReference = E07D64CB2E36127F009BFB4D /* exov2Tests.xctest */;
|
||||
productType = "com.apple.product-type.bundle.unit-test";
|
||||
};
|
||||
E07D64D42E36127F009BFB4D /* exov2UITests */ = {
|
||||
isa = PBXNativeTarget;
|
||||
buildConfigurationList = E07D64E52E36127F009BFB4D /* Build configuration list for PBXNativeTarget "exov2UITests" */;
|
||||
buildPhases = (
|
||||
E07D64D12E36127F009BFB4D /* Sources */,
|
||||
E07D64D22E36127F009BFB4D /* Frameworks */,
|
||||
E07D64D32E36127F009BFB4D /* Resources */,
|
||||
);
|
||||
buildRules = (
|
||||
);
|
||||
dependencies = (
|
||||
E07D64D72E36127F009BFB4D /* PBXTargetDependency */,
|
||||
);
|
||||
fileSystemSynchronizedGroups = (
|
||||
E07D64D82E36127F009BFB4D /* exov2UITests */,
|
||||
);
|
||||
name = exov2UITests;
|
||||
packageProductDependencies = (
|
||||
);
|
||||
productName = exov2UITests;
|
||||
productReference = E07D64D52E36127F009BFB4D /* exov2UITests.xctest */;
|
||||
productType = "com.apple.product-type.bundle.ui-testing";
|
||||
};
|
||||
/* End PBXNativeTarget section */
|
||||
|
||||
/* Begin PBXProject section */
|
||||
E07D64B22E36127E009BFB4D /* Project object */ = {
|
||||
isa = PBXProject;
|
||||
attributes = {
|
||||
BuildIndependentTargetsInParallel = 1;
|
||||
LastSwiftUpdateCheck = 1610;
|
||||
LastUpgradeCheck = 1610;
|
||||
TargetAttributes = {
|
||||
E07D64B92E36127E009BFB4D = {
|
||||
CreatedOnToolsVersion = 16.1;
|
||||
};
|
||||
E07D64CA2E36127F009BFB4D = {
|
||||
CreatedOnToolsVersion = 16.1;
|
||||
TestTargetID = E07D64B92E36127E009BFB4D;
|
||||
};
|
||||
E07D64D42E36127F009BFB4D = {
|
||||
CreatedOnToolsVersion = 16.1;
|
||||
TestTargetID = E07D64B92E36127E009BFB4D;
|
||||
};
|
||||
};
|
||||
};
|
||||
buildConfigurationList = E07D64B52E36127E009BFB4D /* Build configuration list for PBXProject "exov2" */;
|
||||
developmentRegion = en;
|
||||
hasScannedForEncodings = 0;
|
||||
knownRegions = (
|
||||
en,
|
||||
Base,
|
||||
);
|
||||
mainGroup = E07D64B12E36127E009BFB4D;
|
||||
minimizedProjectReferenceProxies = 1;
|
||||
preferredProjectObjectVersion = 77;
|
||||
productRefGroup = E07D64BB2E36127E009BFB4D /* Products */;
|
||||
projectDirPath = "";
|
||||
projectRoot = "";
|
||||
targets = (
|
||||
E07D64B92E36127E009BFB4D /* exov2 */,
|
||||
E07D64CA2E36127F009BFB4D /* exov2Tests */,
|
||||
E07D64D42E36127F009BFB4D /* exov2UITests */,
|
||||
);
|
||||
};
|
||||
/* End PBXProject section */
|
||||
|
||||
/* Begin PBXResourcesBuildPhase section */
|
||||
E07D64B82E36127E009BFB4D /* Resources */ = {
|
||||
isa = PBXResourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
E07D64C92E36127F009BFB4D /* Resources */ = {
|
||||
isa = PBXResourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
E07D64D32E36127F009BFB4D /* Resources */ = {
|
||||
isa = PBXResourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXResourcesBuildPhase section */
|
||||
|
||||
/* Begin PBXSourcesBuildPhase section */
|
||||
E07D64B62E36127E009BFB4D /* Sources */ = {
|
||||
isa = PBXSourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
E07D64C72E36127F009BFB4D /* Sources */ = {
|
||||
isa = PBXSourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
E07D64D12E36127F009BFB4D /* Sources */ = {
|
||||
isa = PBXSourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXSourcesBuildPhase section */
|
||||
|
||||
/* Begin PBXTargetDependency section */
|
||||
E07D64CD2E36127F009BFB4D /* PBXTargetDependency */ = {
|
||||
isa = PBXTargetDependency;
|
||||
target = E07D64B92E36127E009BFB4D /* exov2 */;
|
||||
targetProxy = E07D64CC2E36127F009BFB4D /* PBXContainerItemProxy */;
|
||||
};
|
||||
E07D64D72E36127F009BFB4D /* PBXTargetDependency */ = {
|
||||
isa = PBXTargetDependency;
|
||||
target = E07D64B92E36127E009BFB4D /* exov2 */;
|
||||
targetProxy = E07D64D62E36127F009BFB4D /* PBXContainerItemProxy */;
|
||||
};
|
||||
/* End PBXTargetDependency section */
|
||||
|
||||
/* Begin XCBuildConfiguration section */
|
||||
E07D64DD2E36127F009BFB4D /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ALWAYS_SEARCH_USER_PATHS = NO;
|
||||
ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;
|
||||
CLANG_ANALYZER_NONNULL = YES;
|
||||
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
|
||||
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20";
|
||||
CLANG_ENABLE_MODULES = YES;
|
||||
CLANG_ENABLE_OBJC_ARC = YES;
|
||||
CLANG_ENABLE_OBJC_WEAK = YES;
|
||||
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
|
||||
CLANG_WARN_BOOL_CONVERSION = YES;
|
||||
CLANG_WARN_COMMA = YES;
|
||||
CLANG_WARN_CONSTANT_CONVERSION = YES;
|
||||
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
|
||||
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
|
||||
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
|
||||
CLANG_WARN_EMPTY_BODY = YES;
|
||||
CLANG_WARN_ENUM_CONVERSION = YES;
|
||||
CLANG_WARN_INFINITE_RECURSION = YES;
|
||||
CLANG_WARN_INT_CONVERSION = YES;
|
||||
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
|
||||
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
|
||||
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
|
||||
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
|
||||
CLANG_WARN_STRICT_PROTOTYPES = YES;
|
||||
CLANG_WARN_SUSPICIOUS_MOVE = YES;
|
||||
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
|
||||
CLANG_WARN_UNREACHABLE_CODE = YES;
|
||||
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
|
||||
COPY_PHASE_STRIP = NO;
|
||||
DEBUG_INFORMATION_FORMAT = dwarf;
|
||||
ENABLE_STRICT_OBJC_MSGSEND = YES;
|
||||
ENABLE_TESTABILITY = YES;
|
||||
ENABLE_USER_SCRIPT_SANDBOXING = YES;
|
||||
GCC_C_LANGUAGE_STANDARD = gnu17;
|
||||
GCC_DYNAMIC_NO_PIC = NO;
|
||||
GCC_NO_COMMON_BLOCKS = YES;
|
||||
GCC_OPTIMIZATION_LEVEL = 0;
|
||||
GCC_PREPROCESSOR_DEFINITIONS = (
|
||||
"DEBUG=1",
|
||||
"$(inherited)",
|
||||
);
|
||||
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
|
||||
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
|
||||
GCC_WARN_UNDECLARED_SELECTOR = YES;
|
||||
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
|
||||
GCC_WARN_UNUSED_FUNCTION = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
LOCALIZATION_PREFERS_STRING_CATALOGS = YES;
|
||||
MACOSX_DEPLOYMENT_TARGET = 15.1;
|
||||
MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
|
||||
MTL_FAST_MATH = YES;
|
||||
ONLY_ACTIVE_ARCH = YES;
|
||||
SDKROOT = macosx;
|
||||
SWIFT_ACTIVE_COMPILATION_CONDITIONS = "DEBUG $(inherited)";
|
||||
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
E07D64DE2E36127F009BFB4D /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ALWAYS_SEARCH_USER_PATHS = NO;
|
||||
ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;
|
||||
CLANG_ANALYZER_NONNULL = YES;
|
||||
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
|
||||
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20";
|
||||
CLANG_ENABLE_MODULES = YES;
|
||||
CLANG_ENABLE_OBJC_ARC = YES;
|
||||
CLANG_ENABLE_OBJC_WEAK = YES;
|
||||
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
|
||||
CLANG_WARN_BOOL_CONVERSION = YES;
|
||||
CLANG_WARN_COMMA = YES;
|
||||
CLANG_WARN_CONSTANT_CONVERSION = YES;
|
||||
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
|
||||
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
|
||||
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
|
||||
CLANG_WARN_EMPTY_BODY = YES;
|
||||
CLANG_WARN_ENUM_CONVERSION = YES;
|
||||
CLANG_WARN_INFINITE_RECURSION = YES;
|
||||
CLANG_WARN_INT_CONVERSION = YES;
|
||||
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
|
||||
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
|
||||
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
|
||||
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
|
||||
CLANG_WARN_STRICT_PROTOTYPES = YES;
|
||||
CLANG_WARN_SUSPICIOUS_MOVE = YES;
|
||||
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
|
||||
CLANG_WARN_UNREACHABLE_CODE = YES;
|
||||
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
|
||||
COPY_PHASE_STRIP = NO;
|
||||
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
|
||||
ENABLE_NS_ASSERTIONS = NO;
|
||||
ENABLE_STRICT_OBJC_MSGSEND = YES;
|
||||
ENABLE_USER_SCRIPT_SANDBOXING = YES;
|
||||
GCC_C_LANGUAGE_STANDARD = gnu17;
|
||||
GCC_NO_COMMON_BLOCKS = YES;
|
||||
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
|
||||
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
|
||||
GCC_WARN_UNDECLARED_SELECTOR = YES;
|
||||
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
|
||||
GCC_WARN_UNUSED_FUNCTION = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
LOCALIZATION_PREFERS_STRING_CATALOGS = YES;
|
||||
MACOSX_DEPLOYMENT_TARGET = 15.1;
|
||||
MTL_ENABLE_DEBUG_INFO = NO;
|
||||
MTL_FAST_MATH = YES;
|
||||
SDKROOT = macosx;
|
||||
SWIFT_COMPILATION_MODE = wholemodule;
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
E07D64E02E36127F009BFB4D /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
|
||||
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
|
||||
CODE_SIGN_ENTITLEMENTS = exov2/exov2.entitlements;
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
COMBINE_HIDPI_IMAGES = YES;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
DEVELOPMENT_ASSET_PATHS = "\"exov2/Preview Content\"";
|
||||
ENABLE_PREVIEWS = YES;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
INFOPLIST_KEY_CFBundleDisplayName = EXO;
|
||||
INFOPLIST_KEY_LSUIElement = YES;
|
||||
INFOPLIST_KEY_NSHumanReadableCopyright = "";
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/../Frameworks",
|
||||
);
|
||||
MARKETING_VERSION = 1.0;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = exolabs.exov2;
|
||||
PRODUCT_NAME = EXO;
|
||||
SWIFT_EMIT_LOC_STRINGS = YES;
|
||||
SWIFT_VERSION = 5.0;
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
E07D64E12E36127F009BFB4D /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
|
||||
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
|
||||
CODE_SIGN_ENTITLEMENTS = exov2/exov2.entitlements;
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
COMBINE_HIDPI_IMAGES = YES;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
DEVELOPMENT_ASSET_PATHS = "\"exov2/Preview Content\"";
|
||||
ENABLE_PREVIEWS = YES;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
INFOPLIST_KEY_CFBundleDisplayName = EXO;
|
||||
INFOPLIST_KEY_LSUIElement = YES;
|
||||
INFOPLIST_KEY_NSHumanReadableCopyright = "";
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/../Frameworks",
|
||||
);
|
||||
MARKETING_VERSION = 1.0;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = exolabs.exov2;
|
||||
PRODUCT_NAME = EXO;
|
||||
SWIFT_EMIT_LOC_STRINGS = YES;
|
||||
SWIFT_VERSION = 5.0;
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
E07D64E32E36127F009BFB4D /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
BUNDLE_LOADER = "$(TEST_HOST)";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MACOSX_DEPLOYMENT_TARGET = 15.1;
|
||||
MARKETING_VERSION = 1.0;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = exolabs.exov2Tests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SWIFT_EMIT_LOC_STRINGS = NO;
|
||||
SWIFT_VERSION = 5.0;
|
||||
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/exov2.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/exov2";
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
E07D64E42E36127F009BFB4D /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
BUNDLE_LOADER = "$(TEST_HOST)";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MACOSX_DEPLOYMENT_TARGET = 15.1;
|
||||
MARKETING_VERSION = 1.0;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = exolabs.exov2Tests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SWIFT_EMIT_LOC_STRINGS = NO;
|
||||
SWIFT_VERSION = 5.0;
|
||||
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/exov2.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/exov2";
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
E07D64E62E36127F009BFB4D /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MARKETING_VERSION = 1.0;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = exolabs.exov2UITests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SWIFT_EMIT_LOC_STRINGS = NO;
|
||||
SWIFT_VERSION = 5.0;
|
||||
TEST_TARGET_NAME = exov2;
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
E07D64E72E36127F009BFB4D /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MARKETING_VERSION = 1.0;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = exolabs.exov2UITests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SWIFT_EMIT_LOC_STRINGS = NO;
|
||||
SWIFT_VERSION = 5.0;
|
||||
TEST_TARGET_NAME = exov2;
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
/* End XCBuildConfiguration section */
|
||||
|
||||
/* Begin XCConfigurationList section */
|
||||
E07D64B52E36127E009BFB4D /* Build configuration list for PBXProject "exov2" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
E07D64DD2E36127F009BFB4D /* Debug */,
|
||||
E07D64DE2E36127F009BFB4D /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Release;
|
||||
};
|
||||
E07D64DF2E36127F009BFB4D /* Build configuration list for PBXNativeTarget "exov2" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
E07D64E02E36127F009BFB4D /* Debug */,
|
||||
E07D64E12E36127F009BFB4D /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Release;
|
||||
};
|
||||
E07D64E22E36127F009BFB4D /* Build configuration list for PBXNativeTarget "exov2Tests" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
E07D64E32E36127F009BFB4D /* Debug */,
|
||||
E07D64E42E36127F009BFB4D /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Release;
|
||||
};
|
||||
E07D64E52E36127F009BFB4D /* Build configuration list for PBXNativeTarget "exov2UITests" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
E07D64E62E36127F009BFB4D /* Debug */,
|
||||
E07D64E72E36127F009BFB4D /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Release;
|
||||
};
|
||||
/* End XCConfigurationList section */
|
||||
};
|
||||
rootObject = E07D64B22E36127E009BFB4D /* Project object */;
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Workspace
|
||||
version = "1.0">
|
||||
<FileRef
|
||||
location = "self:">
|
||||
</FileRef>
|
||||
</Workspace>
|
||||
Binary file not shown.
@@ -1,109 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Scheme
|
||||
LastUpgradeVersion = "1610"
|
||||
version = "1.7">
|
||||
<BuildAction
|
||||
parallelizeBuildables = "YES"
|
||||
buildImplicitDependencies = "YES"
|
||||
buildArchitectures = "Automatic">
|
||||
<BuildActionEntries>
|
||||
<BuildActionEntry
|
||||
buildForTesting = "YES"
|
||||
buildForRunning = "YES"
|
||||
buildForProfiling = "YES"
|
||||
buildForArchiving = "YES"
|
||||
buildForAnalyzing = "YES">
|
||||
<BuildableReference
|
||||
BuildableIdentifier = "primary"
|
||||
BlueprintIdentifier = "E07D64B92E36127E009BFB4D"
|
||||
BuildableName = "EXO.app"
|
||||
BlueprintName = "exov2"
|
||||
ReferencedContainer = "container:exov2.xcodeproj">
|
||||
</BuildableReference>
|
||||
</BuildActionEntry>
|
||||
</BuildActionEntries>
|
||||
</BuildAction>
|
||||
<TestAction
|
||||
buildConfiguration = "Debug"
|
||||
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
|
||||
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
|
||||
shouldUseLaunchSchemeArgsEnv = "YES"
|
||||
shouldAutocreateTestPlan = "YES">
|
||||
<Testables>
|
||||
<TestableReference
|
||||
skipped = "NO"
|
||||
parallelizable = "YES">
|
||||
<BuildableReference
|
||||
BuildableIdentifier = "primary"
|
||||
BlueprintIdentifier = "E07D64CA2E36127F009BFB4D"
|
||||
BuildableName = "exov2Tests.xctest"
|
||||
BlueprintName = "exov2Tests"
|
||||
ReferencedContainer = "container:exov2.xcodeproj">
|
||||
</BuildableReference>
|
||||
</TestableReference>
|
||||
<TestableReference
|
||||
skipped = "NO"
|
||||
parallelizable = "YES">
|
||||
<BuildableReference
|
||||
BuildableIdentifier = "primary"
|
||||
BlueprintIdentifier = "E07D64D42E36127F009BFB4D"
|
||||
BuildableName = "exov2UITests.xctest"
|
||||
BlueprintName = "exov2UITests"
|
||||
ReferencedContainer = "container:exov2.xcodeproj">
|
||||
</BuildableReference>
|
||||
</TestableReference>
|
||||
</Testables>
|
||||
</TestAction>
|
||||
<LaunchAction
|
||||
buildConfiguration = "Debug"
|
||||
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
|
||||
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
|
||||
launchStyle = "0"
|
||||
useCustomWorkingDirectory = "NO"
|
||||
ignoresPersistentStateOnLaunch = "NO"
|
||||
debugDocumentVersioning = "YES"
|
||||
debugServiceExtension = "internal"
|
||||
allowLocationSimulation = "YES">
|
||||
<BuildableProductRunnable
|
||||
runnableDebuggingMode = "0">
|
||||
<BuildableReference
|
||||
BuildableIdentifier = "primary"
|
||||
BlueprintIdentifier = "E07D64B92E36127E009BFB4D"
|
||||
BuildableName = "EXO.app"
|
||||
BlueprintName = "exov2"
|
||||
ReferencedContainer = "container:exov2.xcodeproj">
|
||||
</BuildableReference>
|
||||
</BuildableProductRunnable>
|
||||
<EnvironmentVariables>
|
||||
<EnvironmentVariable
|
||||
key = "EXO_PROJECT_ROOT"
|
||||
value = "/Users/SamiKhan/exo"
|
||||
isEnabled = "YES">
|
||||
</EnvironmentVariable>
|
||||
</EnvironmentVariables>
|
||||
</LaunchAction>
|
||||
<ProfileAction
|
||||
buildConfiguration = "Release"
|
||||
shouldUseLaunchSchemeArgsEnv = "YES"
|
||||
savedToolIdentifier = ""
|
||||
useCustomWorkingDirectory = "NO"
|
||||
debugDocumentVersioning = "YES">
|
||||
<BuildableProductRunnable
|
||||
runnableDebuggingMode = "0">
|
||||
<BuildableReference
|
||||
BuildableIdentifier = "primary"
|
||||
BlueprintIdentifier = "E07D64B92E36127E009BFB4D"
|
||||
BuildableName = "EXO.app"
|
||||
BlueprintName = "exov2"
|
||||
ReferencedContainer = "container:exov2.xcodeproj">
|
||||
</BuildableReference>
|
||||
</BuildableProductRunnable>
|
||||
</ProfileAction>
|
||||
<AnalyzeAction
|
||||
buildConfiguration = "Debug">
|
||||
</AnalyzeAction>
|
||||
<ArchiveAction
|
||||
buildConfiguration = "Release"
|
||||
revealArchiveInOrganizer = "YES">
|
||||
</ArchiveAction>
|
||||
</Scheme>
|
||||
@@ -1,32 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>SchemeUserState</key>
|
||||
<dict>
|
||||
<key>exov2.xcscheme_^#shared#^_</key>
|
||||
<dict>
|
||||
<key>orderHint</key>
|
||||
<integer>0</integer>
|
||||
</dict>
|
||||
</dict>
|
||||
<key>SuppressBuildableAutocreation</key>
|
||||
<dict>
|
||||
<key>E07D64B92E36127E009BFB4D</key>
|
||||
<dict>
|
||||
<key>primary</key>
|
||||
<true/>
|
||||
</dict>
|
||||
<key>E07D64CA2E36127F009BFB4D</key>
|
||||
<dict>
|
||||
<key>primary</key>
|
||||
<true/>
|
||||
</dict>
|
||||
<key>E07D64D42E36127F009BFB4D</key>
|
||||
<dict>
|
||||
<key>primary</key>
|
||||
<true/>
|
||||
</dict>
|
||||
</dict>
|
||||
</dict>
|
||||
</plist>
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"info" : {
|
||||
"author" : "xcode",
|
||||
"version" : 1
|
||||
}
|
||||
}
|
||||
@@ -1,377 +0,0 @@
|
||||
import Foundation
|
||||
import OSLog
|
||||
import SwiftUI
|
||||
import AppKit
|
||||
import ServiceManagement
|
||||
|
||||
extension NSApplication {
|
||||
func addTerminationHandler(_ handler: @escaping () -> Void) {
|
||||
NSApp.setActivationPolicy(.accessory)
|
||||
NotificationCenter.default.addObserver(forName: NSApplication.willTerminateNotification,
|
||||
object: nil,
|
||||
queue: .main) { _ in
|
||||
handler()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class ProcessManager: ObservableObject {
|
||||
@Published var masterProcess: Process?
|
||||
@Published var workerProcess: Process?
|
||||
@Published var masterStatus: String = "Stopped"
|
||||
@Published var workerStatus: String = "Stopped"
|
||||
@Published var isLoginItemEnabled: Bool = false
|
||||
@Published var isMasterMode: Bool = false // Default to replica mode (false)
|
||||
|
||||
private var masterStdout: Pipe?
|
||||
private var workerStdout: Pipe?
|
||||
private let logger = Logger(subsystem: "exolabs.exov2", category: "ProcessManager")
|
||||
|
||||
// Add file handle properties to track them
|
||||
private var masterFileHandle: FileHandle?
|
||||
private var workerFileHandle: FileHandle?
|
||||
|
||||
private let loginService = SMAppService.mainApp
|
||||
|
||||
// Find uv executable in common installation paths
|
||||
private var uvPath: String? {
|
||||
let commonPaths = [
|
||||
"/usr/local/bin/uv",
|
||||
"/opt/homebrew/bin/uv",
|
||||
"/usr/bin/uv",
|
||||
"/bin/uv",
|
||||
"/Users/\(NSUserName())/.cargo/bin/uv",
|
||||
"/Users/\(NSUserName())/.local/bin/uv"
|
||||
]
|
||||
|
||||
for path in commonPaths {
|
||||
if FileManager.default.fileExists(atPath: path) {
|
||||
return path
|
||||
}
|
||||
}
|
||||
|
||||
// Try using 'which uv' command as fallback
|
||||
let process = Process()
|
||||
process.executableURL = URL(fileURLWithPath: "/usr/bin/which")
|
||||
process.arguments = ["uv"]
|
||||
|
||||
let pipe = Pipe()
|
||||
process.standardOutput = pipe
|
||||
process.standardError = Pipe()
|
||||
|
||||
do {
|
||||
try process.run()
|
||||
process.waitUntilExit()
|
||||
|
||||
if process.terminationStatus == 0 {
|
||||
let data = pipe.fileHandleForReading.readDataToEndOfFile()
|
||||
if let path = String(data: data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines),
|
||||
!path.isEmpty {
|
||||
return path
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
logger.error("Failed to run 'which uv': \(error.localizedDescription)")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Project root path - assuming the app bundle is in the project directory
|
||||
private var projectPath: URL? {
|
||||
// Get the app bundle path and navigate to the project root
|
||||
// This assumes the app is built/run from within the project directory
|
||||
guard let bundlePath = Bundle.main.bundleURL.path as String? else { return nil }
|
||||
|
||||
// Navigate up from the app bundle to find the project root
|
||||
// Look for pyproject.toml to identify the project root
|
||||
var currentPath = URL(fileURLWithPath: bundlePath)
|
||||
while currentPath.pathComponents.count > 1 {
|
||||
let pyprojectPath = currentPath.appendingPathComponent("pyproject.toml")
|
||||
if FileManager.default.fileExists(atPath: pyprojectPath.path) {
|
||||
return currentPath
|
||||
}
|
||||
currentPath = currentPath.deletingLastPathComponent()
|
||||
}
|
||||
|
||||
// Fallback: try to find project in common development locations
|
||||
let homeDir = FileManager.default.homeDirectoryForCurrentUser
|
||||
let commonPaths = [
|
||||
"exo",
|
||||
"Projects/exo",
|
||||
"Documents/exo",
|
||||
"Desktop/exo"
|
||||
]
|
||||
|
||||
for path in commonPaths {
|
||||
let projectDir = homeDir.appendingPathComponent(path)
|
||||
let pyprojectPath = projectDir.appendingPathComponent("pyproject.toml")
|
||||
if FileManager.default.fileExists(atPath: pyprojectPath.path) {
|
||||
return projectDir
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
init() {
|
||||
// Add termination handler
|
||||
NSApplication.shared.addTerminationHandler { [weak self] in
|
||||
self?.stopAll()
|
||||
}
|
||||
|
||||
// Check if login item is enabled
|
||||
isLoginItemEnabled = (loginService.status == .enabled)
|
||||
|
||||
// Start processes automatically
|
||||
startMaster()
|
||||
DispatchQueue.main.asyncAfter(deadline: .now() + 2) {
|
||||
self.startWorker()
|
||||
}
|
||||
}
|
||||
|
||||
private func handleProcessOutput(_ pipe: Pipe, processName: String) -> FileHandle {
|
||||
let fileHandle = pipe.fileHandleForReading
|
||||
fileHandle.readabilityHandler = { [weak self] handle in
|
||||
guard let data = try? handle.read(upToCount: 1024),
|
||||
let output = String(data: data, encoding: .utf8) else {
|
||||
return
|
||||
}
|
||||
|
||||
DispatchQueue.main.async {
|
||||
self?.logger.info("\(processName) output: \(output)")
|
||||
print("[\(processName)] \(output)")
|
||||
}
|
||||
}
|
||||
return fileHandle
|
||||
}
|
||||
|
||||
private func cleanupProcess(process: Process?, fileHandle: FileHandle?, pipe: Pipe?) {
|
||||
// Remove readability handler
|
||||
fileHandle?.readabilityHandler = nil
|
||||
|
||||
// Close file handles
|
||||
try? fileHandle?.close()
|
||||
try? pipe?.fileHandleForReading.close()
|
||||
try? pipe?.fileHandleForWriting.close()
|
||||
|
||||
// Terminate process if still running
|
||||
if process?.isRunning == true {
|
||||
process?.terminate()
|
||||
}
|
||||
}
|
||||
|
||||
func startMaster() {
|
||||
guard let projectPath = self.projectPath else {
|
||||
masterStatus = "Error: Project directory not found"
|
||||
logger.error("Could not find project directory with pyproject.toml")
|
||||
return
|
||||
}
|
||||
|
||||
guard let uvPath = self.uvPath else {
|
||||
masterStatus = "Error: uv not found"
|
||||
logger.error("Could not find uv executable in common paths")
|
||||
return
|
||||
}
|
||||
|
||||
// Cleanup any existing process
|
||||
cleanupProcess(process: masterProcess, fileHandle: masterFileHandle, pipe: masterStdout)
|
||||
|
||||
masterProcess = Process()
|
||||
masterStdout = Pipe()
|
||||
|
||||
// Use uv to run the master module
|
||||
masterProcess?.executableURL = URL(fileURLWithPath: uvPath)
|
||||
masterProcess?.arguments = ["run", "python", "-m", "master.main"]
|
||||
masterProcess?.standardOutput = masterStdout
|
||||
masterProcess?.standardError = masterStdout
|
||||
|
||||
// Set up environment
|
||||
var env = ProcessInfo.processInfo.environment
|
||||
env["PYTHONUNBUFFERED"] = "1"
|
||||
env["PYTHONPATH"] = projectPath.path
|
||||
|
||||
// Set replica mode if not in master mode
|
||||
if !self.isMasterMode {
|
||||
env["EXO_RUN_AS_REPLICA"] = "1"
|
||||
}
|
||||
|
||||
masterProcess?.environment = env
|
||||
|
||||
// Set working directory to project root
|
||||
masterProcess?.currentDirectoryURL = projectPath
|
||||
|
||||
// Store the file handle
|
||||
masterFileHandle = handleProcessOutput(masterStdout!, processName: "Master")
|
||||
|
||||
do {
|
||||
logger.info("Starting master process with \(uvPath) run python -m master.main at \(projectPath.path)")
|
||||
try masterProcess?.run()
|
||||
masterStatus = "Running"
|
||||
|
||||
masterProcess?.terminationHandler = { [weak self] process in
|
||||
DispatchQueue.main.async {
|
||||
let status = "Stopped (exit: \(process.terminationStatus))"
|
||||
self?.masterStatus = status
|
||||
self?.logger.error("Master process terminated: \(status)")
|
||||
// Cleanup on termination
|
||||
self?.cleanupProcess(process: self?.masterProcess,
|
||||
fileHandle: self?.masterFileHandle,
|
||||
pipe: self?.masterStdout)
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
masterStatus = "Error: \(error.localizedDescription)"
|
||||
logger.error("Failed to start master: \(error.localizedDescription)")
|
||||
cleanupProcess(process: masterProcess, fileHandle: masterFileHandle, pipe: masterStdout)
|
||||
}
|
||||
}
|
||||
|
||||
func startWorker() {
|
||||
guard let projectPath = self.projectPath else {
|
||||
workerStatus = "Error: Project directory not found"
|
||||
logger.error("Could not find project directory with pyproject.toml")
|
||||
return
|
||||
}
|
||||
|
||||
guard let uvPath = self.uvPath else {
|
||||
workerStatus = "Error: uv not found"
|
||||
logger.error("Could not find uv executable in common paths")
|
||||
return
|
||||
}
|
||||
|
||||
// Cleanup any existing process
|
||||
cleanupProcess(process: workerProcess, fileHandle: workerFileHandle, pipe: workerStdout)
|
||||
|
||||
workerProcess = Process()
|
||||
workerStdout = Pipe()
|
||||
|
||||
// Use uv to run the worker module
|
||||
workerProcess?.executableURL = URL(fileURLWithPath: uvPath)
|
||||
workerProcess?.arguments = ["run", "python", "-m", "worker.main"]
|
||||
workerProcess?.standardOutput = workerStdout
|
||||
workerProcess?.standardError = workerStdout
|
||||
|
||||
// Set up environment
|
||||
var env = ProcessInfo.processInfo.environment
|
||||
env["PYTHONUNBUFFERED"] = "1"
|
||||
env["PYTHONPATH"] = projectPath.path
|
||||
workerProcess?.environment = env
|
||||
|
||||
// Set working directory to project root
|
||||
workerProcess?.currentDirectoryURL = projectPath
|
||||
|
||||
// Store the file handle
|
||||
workerFileHandle = handleProcessOutput(workerStdout!, processName: "Worker")
|
||||
|
||||
do {
|
||||
logger.info("Starting worker process with \(uvPath) run python -m worker.main at \(projectPath.path)")
|
||||
try workerProcess?.run()
|
||||
workerStatus = "Running"
|
||||
|
||||
workerProcess?.terminationHandler = { [weak self] process in
|
||||
DispatchQueue.main.async {
|
||||
let status = "Stopped (exit: \(process.terminationStatus))"
|
||||
self?.workerStatus = status
|
||||
self?.logger.error("Worker process terminated: \(status)")
|
||||
// Cleanup on termination
|
||||
self?.cleanupProcess(process: self?.workerProcess,
|
||||
fileHandle: self?.workerFileHandle,
|
||||
pipe: self?.workerStdout)
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
workerStatus = "Error: \(error.localizedDescription)"
|
||||
logger.error("Failed to start worker: \(error.localizedDescription)")
|
||||
cleanupProcess(process: workerProcess, fileHandle: workerFileHandle, pipe: workerStdout)
|
||||
}
|
||||
}
|
||||
|
||||
func stopAll() {
|
||||
logger.info("Stopping all processes")
|
||||
|
||||
// Clean up master process
|
||||
cleanupProcess(process: masterProcess, fileHandle: masterFileHandle, pipe: masterStdout)
|
||||
masterProcess = nil
|
||||
masterStdout = nil
|
||||
masterFileHandle = nil
|
||||
masterStatus = "Stopped"
|
||||
|
||||
// Clean up worker process
|
||||
cleanupProcess(process: workerProcess, fileHandle: workerFileHandle, pipe: workerStdout)
|
||||
workerProcess = nil
|
||||
workerStdout = nil
|
||||
workerFileHandle = nil
|
||||
workerStatus = "Stopped"
|
||||
}
|
||||
|
||||
func checkBinaries() -> Bool {
|
||||
guard let projectPath = self.projectPath else {
|
||||
logger.error("Could not find project directory")
|
||||
return false
|
||||
}
|
||||
|
||||
guard let uvPath = self.uvPath else {
|
||||
logger.error("Could not find uv executable")
|
||||
return false
|
||||
}
|
||||
|
||||
let fileManager = FileManager.default
|
||||
let pyprojectPath = projectPath.appendingPathComponent("pyproject.toml").path
|
||||
let masterPath = projectPath.appendingPathComponent("master/main.py").path
|
||||
let workerPath = projectPath.appendingPathComponent("worker/main.py").path
|
||||
|
||||
let uvExists = fileManager.fileExists(atPath: uvPath)
|
||||
let pyprojectExists = fileManager.fileExists(atPath: pyprojectPath)
|
||||
let masterExists = fileManager.fileExists(atPath: masterPath)
|
||||
let workerExists = fileManager.fileExists(atPath: workerPath)
|
||||
|
||||
if !uvExists {
|
||||
logger.error("uv not found at \(uvPath)")
|
||||
}
|
||||
if !pyprojectExists {
|
||||
logger.error("pyproject.toml not found at \(pyprojectPath)")
|
||||
}
|
||||
if !masterExists {
|
||||
logger.error("master/main.py not found at \(masterPath)")
|
||||
}
|
||||
if !workerExists {
|
||||
logger.error("worker/main.py not found at \(workerPath)")
|
||||
}
|
||||
|
||||
return uvExists && pyprojectExists && masterExists && workerExists
|
||||
}
|
||||
|
||||
func toggleLoginItem() {
|
||||
do {
|
||||
if isLoginItemEnabled {
|
||||
try loginService.unregister()
|
||||
} else {
|
||||
try loginService.register()
|
||||
}
|
||||
isLoginItemEnabled = (loginService.status == .enabled)
|
||||
} catch {
|
||||
logger.error("Failed to toggle login item: \(error.localizedDescription)")
|
||||
}
|
||||
}
|
||||
|
||||
func toggleMasterMode() {
|
||||
isMasterMode.toggle()
|
||||
logger.info("Toggling master mode to: \(self.isMasterMode ? "Master" : "Replica")")
|
||||
|
||||
// Restart master process with new mode
|
||||
if masterProcess?.isRunning == true {
|
||||
// Clean up current master process
|
||||
cleanupProcess(process: masterProcess, fileHandle: masterFileHandle, pipe: masterStdout)
|
||||
masterProcess = nil
|
||||
masterStdout = nil
|
||||
masterFileHandle = nil
|
||||
masterStatus = "Stopped"
|
||||
|
||||
// Start master with new mode after a brief delay
|
||||
DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
|
||||
self.startMaster()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>com.apple.security.app-sandbox</key>
|
||||
<false/>
|
||||
<key>com.apple.security.cs.allow-unsigned-executable-memory</key>
|
||||
<true/>
|
||||
<key>com.apple.security.cs.disable-library-validation</key>
|
||||
<true/>
|
||||
<key>com.apple.security.automation.apple-events</key>
|
||||
<true/>
|
||||
</dict>
|
||||
</plist>
|
||||
@@ -1,115 +0,0 @@
|
||||
//
|
||||
// exov2App.swift
|
||||
// exov2
|
||||
//
|
||||
// Created by Sami Khan on 2025-07-27.
|
||||
//
|
||||
|
||||
import SwiftUI
|
||||
import AppKit
|
||||
import Foundation
|
||||
import OSLog
|
||||
import ServiceManagement
|
||||
|
||||
@main
|
||||
struct exov2App: App {
|
||||
@StateObject private var processManager = ProcessManager()
|
||||
|
||||
private func resizedMenuBarIcon(named: String, size: CGFloat = 18.0) -> NSImage? {
|
||||
guard let original = NSImage(named: named) else {
|
||||
print("Failed to load image named: \(named)")
|
||||
return nil
|
||||
}
|
||||
|
||||
let resized = NSImage(size: NSSize(width: size, height: size), flipped: false) { rect in
|
||||
NSGraphicsContext.current?.imageInterpolation = .high
|
||||
original.draw(in: rect)
|
||||
return true
|
||||
}
|
||||
|
||||
resized.isTemplate = false
|
||||
resized.size = NSSize(width: size, height: size)
|
||||
return resized
|
||||
}
|
||||
|
||||
var body: some Scene {
|
||||
MenuBarExtra {
|
||||
MenuBarView(processManager: processManager)
|
||||
} label: {
|
||||
if let resizedImage = resizedMenuBarIcon(named: "menubar-icon") {
|
||||
Image(nsImage: resizedImage)
|
||||
.opacity(processManager.masterStatus == "Running" ? 1.0 : 0.5)
|
||||
}
|
||||
}
|
||||
.menuBarExtraStyle(.window)
|
||||
}
|
||||
}
|
||||
|
||||
struct MenuBarView: View {
|
||||
@ObservedObject var processManager: ProcessManager
|
||||
|
||||
var body: some View {
|
||||
VStack(alignment: .leading, spacing: 8) {
|
||||
StatusSection(processManager: processManager)
|
||||
|
||||
Divider()
|
||||
|
||||
Toggle("Launch at Login", isOn: Binding(
|
||||
get: { processManager.isLoginItemEnabled },
|
||||
set: { _ in processManager.toggleLoginItem() }
|
||||
))
|
||||
.padding(.horizontal)
|
||||
|
||||
Toggle("Is Master?", isOn: Binding(
|
||||
get: { processManager.isMasterMode },
|
||||
set: { _ in processManager.toggleMasterMode() }
|
||||
))
|
||||
.padding(.horizontal)
|
||||
|
||||
Divider()
|
||||
|
||||
Button("Quit") {
|
||||
NSApplication.shared.terminate(nil)
|
||||
}
|
||||
}
|
||||
.padding()
|
||||
.frame(width: 250)
|
||||
.onAppear {
|
||||
if !processManager.checkBinaries() {
|
||||
showEnvironmentError()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func showEnvironmentError() {
|
||||
let alert = NSAlert()
|
||||
alert.messageText = "Python Environment Error"
|
||||
alert.informativeText = "Could not find the required Python environment, uv, or project files. Please ensure uv is installed and the project directory is accessible."
|
||||
alert.alertStyle = .critical
|
||||
alert.addButton(withTitle: "OK")
|
||||
alert.runModal()
|
||||
NSApplication.shared.terminate(nil)
|
||||
}
|
||||
}
|
||||
|
||||
struct StatusSection: View {
|
||||
@ObservedObject var processManager: ProcessManager
|
||||
|
||||
var body: some View {
|
||||
VStack(alignment: .leading, spacing: 4) {
|
||||
HStack {
|
||||
Text("Master:")
|
||||
.bold()
|
||||
Text(processManager.masterStatus)
|
||||
.foregroundColor(processManager.masterStatus == "Running" ? .green : .red)
|
||||
}
|
||||
|
||||
HStack {
|
||||
Text("Worker:")
|
||||
.bold()
|
||||
Text(processManager.workerStatus)
|
||||
.foregroundColor(processManager.workerStatus == "Running" ? .green : .red)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
//
|
||||
// exov2Tests.swift
|
||||
// exov2Tests
|
||||
//
|
||||
// Created by Sami Khan on 2025-07-27.
|
||||
//
|
||||
|
||||
import Testing
|
||||
@testable import exov2
|
||||
|
||||
struct exov2Tests {
|
||||
|
||||
@Test func example() async throws {
|
||||
// Write your test here and use APIs like `#expect(...)` to check expected conditions.
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
//
|
||||
// exov2UITests.swift
|
||||
// exov2UITests
|
||||
//
|
||||
// Created by Sami Khan on 2025-07-27.
|
||||
//
|
||||
|
||||
import XCTest
|
||||
|
||||
final class exov2UITests: XCTestCase {
|
||||
|
||||
override func setUpWithError() throws {
|
||||
// Put setup code here. This method is called before the invocation of each test method in the class.
|
||||
|
||||
// In UI tests it is usually best to stop immediately when a failure occurs.
|
||||
continueAfterFailure = false
|
||||
|
||||
// In UI tests it’s important to set the initial state - such as interface orientation - required for your tests before they run. The setUp method is a good place to do this.
|
||||
}
|
||||
|
||||
override func tearDownWithError() throws {
|
||||
// Put teardown code here. This method is called after the invocation of each test method in the class.
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func testExample() throws {
|
||||
// UI tests must launch the application that they test.
|
||||
let app = XCUIApplication()
|
||||
app.launch()
|
||||
|
||||
// Use XCTAssert and related functions to verify your tests produce the correct results.
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func testLaunchPerformance() throws {
|
||||
if #available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 7.0, *) {
|
||||
// This measures how long it takes to launch your application.
|
||||
measure(metrics: [XCTApplicationLaunchMetric()]) {
|
||||
XCUIApplication().launch()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
//
|
||||
// exov2UITestsLaunchTests.swift
|
||||
// exov2UITests
|
||||
//
|
||||
// Created by Sami Khan on 2025-07-27.
|
||||
//
|
||||
|
||||
import XCTest
|
||||
|
||||
final class exov2UITestsLaunchTests: XCTestCase {
|
||||
|
||||
override class var runsForEachTargetApplicationUIConfiguration: Bool {
|
||||
true
|
||||
}
|
||||
|
||||
override func setUpWithError() throws {
|
||||
continueAfterFailure = false
|
||||
}
|
||||
|
||||
@MainActor
|
||||
func testLaunch() throws {
|
||||
let app = XCUIApplication()
|
||||
app.launch()
|
||||
|
||||
// Insert steps here to perform after app launch but before taking a screenshot,
|
||||
// such as logging into a test account or navigating somewhere in the app
|
||||
|
||||
let attachment = XCTAttachment(screenshot: app.screenshot())
|
||||
attachment.name = "Launch Screen"
|
||||
attachment.lifetime = .keepAlways
|
||||
add(attachment)
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
["s17@169.254.17.227", "s18@169.254.27.237"]
|
||||
@@ -1,106 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
NUM_RECORDS="${1:-10000}"
|
||||
BATCH_SIZE="${2:-100}"
|
||||
|
||||
echo "Running burst benchmark with $NUM_RECORDS records in batches of $BATCH_SIZE..."
|
||||
|
||||
# Build the forwarder binary
|
||||
BIN_PATH="$(pwd)/forwarder_bin"
|
||||
BUILD_TMPDIR="$(mktemp -d 2>/dev/null || mktemp -d -t forwarder-build)"
|
||||
export TMPDIR="$BUILD_TMPDIR"
|
||||
|
||||
pushd . >/dev/null
|
||||
go build -o "$BIN_PATH" .
|
||||
popd >/dev/null
|
||||
|
||||
# Temporary workspace
|
||||
TMP_DIR="$(mktemp -d 2>/dev/null || mktemp -d -t forwarder-burst)"
|
||||
SRC_DB="$TMP_DIR/src.db"
|
||||
DST_DB="$TMP_DIR/dst.db"
|
||||
TABLE="records"
|
||||
TOPIC="burst_topic_$$"
|
||||
|
||||
# Cleanup function
|
||||
cleanup() {
|
||||
echo "Cleaning up…"
|
||||
kill "${PID1:-}" "${PID2:-}" 2>/dev/null || true
|
||||
wait "${PID1:-}" "${PID2:-}" 2>/dev/null || true
|
||||
rm -rf "$TMP_DIR" "$BIN_PATH" "$BUILD_TMPDIR"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create databases with WAL mode
|
||||
sqlite3 "$SRC_DB" <<SQL
|
||||
.timeout 5000
|
||||
PRAGMA journal_mode=WAL;
|
||||
SQL
|
||||
|
||||
sqlite3 "$DST_DB" <<SQL
|
||||
.timeout 5000
|
||||
PRAGMA journal_mode=WAL;
|
||||
SQL
|
||||
|
||||
# Start forwarder nodes
|
||||
"$BIN_PATH" -node-id node1 "sqlite:${SRC_DB}:${TABLE}|libp2p:${TOPIC}" >"$TMP_DIR/node1.log" 2>&1 &
|
||||
PID1=$!
|
||||
|
||||
"$BIN_PATH" -node-id node2 "libp2p:${TOPIC}|sqlite:${DST_DB}:${TABLE}" >"$TMP_DIR/node2.log" 2>&1 &
|
||||
PID2=$!
|
||||
|
||||
# Give nodes time to start
|
||||
sleep 3
|
||||
|
||||
echo "Inserting $NUM_RECORDS records in batches of $BATCH_SIZE..."
|
||||
START_NS=$(date +%s%N)
|
||||
|
||||
# Insert records in batches for high throughput
|
||||
for batch_start in $(seq 1 $BATCH_SIZE $NUM_RECORDS); do
|
||||
batch_end=$((batch_start + BATCH_SIZE - 1))
|
||||
if [ $batch_end -gt $NUM_RECORDS ]; then
|
||||
batch_end=$NUM_RECORDS
|
||||
fi
|
||||
|
||||
# Build values for batch insert
|
||||
values=""
|
||||
for i in $(seq $batch_start $batch_end); do
|
||||
if [ -n "$values" ]; then
|
||||
values="$values,"
|
||||
fi
|
||||
values="$values('seednode','seedpath',$i,datetime('now'),'{}')"
|
||||
done
|
||||
|
||||
# Insert batch
|
||||
sqlite3 -cmd ".timeout 5000" "$SRC_DB" \
|
||||
"INSERT INTO ${TABLE} (source_node_id, source_path, source_row_id, source_timestamp, data) VALUES $values;"
|
||||
|
||||
# Small delay to prevent overwhelming
|
||||
sleep 0.01
|
||||
done
|
||||
|
||||
echo "Waiting for destination to catch up..."
|
||||
|
||||
# Wait for completion
|
||||
while true; do
|
||||
dest_count=$(sqlite3 -cmd ".timeout 5000" "$DST_DB" "SELECT IFNULL(COUNT(*),0) FROM ${TABLE};" 2>/dev/null || echo 0)
|
||||
if [[ "$dest_count" -ge "$NUM_RECORDS" ]]; then
|
||||
break
|
||||
fi
|
||||
echo "Progress: $dest_count / $NUM_RECORDS"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
END_NS=$(date +%s%N)
|
||||
DURATION_NS=$((END_NS-START_NS))
|
||||
THROUGHPUT=$(echo "scale=2; $NUM_RECORDS*1000000000/$DURATION_NS" | bc)
|
||||
|
||||
echo "Forwarded $NUM_RECORDS records in $(printf '%.2f' "$(echo "$DURATION_NS/1000000000" | bc -l)") seconds — $THROUGHPUT records/s"
|
||||
|
||||
# Show some logs
|
||||
echo ""
|
||||
echo "=== Node1 Log (last 10 lines) ==="
|
||||
tail -10 "$TMP_DIR/node1.log"
|
||||
echo ""
|
||||
echo "=== Node2 Log (last 10 lines) ==="
|
||||
tail -10 "$TMP_DIR/node2.log"
|
||||
@@ -1,114 +0,0 @@
|
||||
module forwarder
|
||||
|
||||
go 1.24.5
|
||||
|
||||
replace lib => ./lib
|
||||
|
||||
replace forwarder/src => ./src
|
||||
|
||||
require (
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/libp2p/go-libp2p v0.43.0
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2
|
||||
github.com/mattn/go-sqlite3 v1.14.28
|
||||
github.com/multiformats/go-multiaddr v0.16.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/flynn/noise v1.1.0 // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/ipfs/go-cid v0.5.0 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.6.0 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/koron/go-ssdp v0.0.6 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
|
||||
github.com/libp2p/go-msgio v0.3.0 // indirect
|
||||
github.com/libp2p/go-netroute v0.2.2 // indirect
|
||||
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
|
||||
github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/miekg/dns v1.1.66 // indirect
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.1 // indirect
|
||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||
github.com/multiformats/go-multistream v0.6.1 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v2 v2.2.12 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/interceptor v0.1.40 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/rtp v1.8.19 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.6 // indirect
|
||||
github.com/pion/stun v0.6.1 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v2 v2.2.10 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.2 // indirect
|
||||
github.com/pion/webrtc/v4 v4.1.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.22.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.64.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/quic-go/quic-go v0.54.0 // indirect
|
||||
github.com/quic-go/webtransport-go v0.9.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
go.uber.org/dig v1.19.0 // indirect
|
||||
go.uber.org/fx v1.24.0 // indirect
|
||||
go.uber.org/mock v0.5.2 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
lukechampine.com/blake3 v1.4.1 // indirect
|
||||
)
|
||||
|
||||
// Remember to run `go mod tidy` after adding dependencies.
|
||||
@@ -1,472 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
||||
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
|
||||
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
|
||||
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
|
||||
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
|
||||
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
||||
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
||||
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
||||
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
|
||||
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
||||
github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
|
||||
github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
|
||||
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
||||
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
||||
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
||||
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
|
||||
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
|
||||
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
|
||||
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
|
||||
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
|
||||
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
|
||||
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
|
||||
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
|
||||
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
||||
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
|
||||
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
|
||||
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
|
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
|
||||
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
|
||||
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
|
||||
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
|
||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
|
||||
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
|
||||
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
|
||||
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
|
||||
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
|
||||
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
|
||||
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
|
||||
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
|
||||
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
|
||||
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
@@ -1,106 +0,0 @@
|
||||
module lib
|
||||
|
||||
go 1.24.5
|
||||
|
||||
require (
|
||||
github.com/ipfs/go-log/v2 v2.6.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
golang.org/x/sys v0.35.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/flynn/noise v1.1.0 // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/ipfs/go-cid v0.5.0 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/koron/go-ssdp v0.0.6 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
|
||||
github.com/libp2p/go-msgio v0.3.0 // indirect
|
||||
github.com/libp2p/go-netroute v0.2.2 // indirect
|
||||
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/miekg/dns v1.1.66 // indirect
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.16.0 // indirect
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.1 // indirect
|
||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||
github.com/multiformats/go-multistream v0.6.1 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v2 v2.2.12 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/interceptor v0.1.40 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/rtp v1.8.19 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.6 // indirect
|
||||
github.com/pion/stun v0.6.1 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v2 v2.2.10 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.2 // indirect
|
||||
github.com/pion/webrtc/v4 v4.1.2 // indirect
|
||||
github.com/prometheus/client_golang v1.22.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.64.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/quic-go/quic-go v0.54.0 // indirect
|
||||
github.com/quic-go/webtransport-go v0.9.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
go.uber.org/dig v1.19.0 // indirect
|
||||
go.uber.org/fx v1.24.0 // indirect
|
||||
go.uber.org/mock v0.5.2 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
lukechampine.com/blake3 v1.4.1 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/libp2p/go-libp2p v0.43.0
|
||||
github.com/pdgendt/cobs v1.1.0
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
golang.org/x/sync v0.16.0
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
@@ -1,443 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
||||
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
|
||||
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
|
||||
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
|
||||
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
|
||||
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
||||
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
||||
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
||||
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
|
||||
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
||||
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
||||
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
||||
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
|
||||
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pdgendt/cobs v1.1.0 h1:gGeI8VUIMCz5jAWoEi24UZv+vsQwiOSjoJuRY4jKnxg=
|
||||
github.com/pdgendt/cobs v1.1.0/go.mod h1:AdxrOLm724a1y0E1RQn6+PtMjLUXgBM4FQJ9lm+/h3E=
|
||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
|
||||
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
|
||||
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
|
||||
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
|
||||
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
|
||||
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
|
||||
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
|
||||
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
||||
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
|
||||
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
|
||||
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
|
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
|
||||
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
|
||||
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
|
||||
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
|
||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
|
||||
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
|
||||
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
|
||||
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
|
||||
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
|
||||
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
|
||||
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
|
||||
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
|
||||
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
|
||||
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
@@ -1,208 +0,0 @@
|
||||
//go:build unix
|
||||
|
||||
package ipc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileDescriptorAlreadyOpen = errors.New("file descriptor not open")
|
||||
ErrFileDescriptorNotOpen = errors.New("file descriptor not open")
|
||||
ErrLockAlreadyHeld = errors.New("lock already held")
|
||||
ErrLockNotHeld = errors.New("lock not held")
|
||||
)
|
||||
|
||||
const (
|
||||
// open in read-write mode, creates file if it doesn't exist already,
|
||||
// closes this file descriptor in any children processes (prevents FD leaking),
|
||||
// truncates this file on opening (lock-files shouldn't hold content FOR NOW!!!)
|
||||
//
|
||||
// SEE: https://man7.org/linux/man-pages/man2/openat.2.html
|
||||
flockMutexOpenFlags int = syscall.O_RDWR | syscall.O_CREAT | syscall.O_CLOEXEC | syscall.O_TRUNC
|
||||
|
||||
// 0x644 mode flags -> user has read-write permissions, others have read permission only
|
||||
// SEE: https://man7.org/linux/man-pages/man2/openat.2.html
|
||||
flockMutexModeFlags uint32 = syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IRGRP | syscall.S_IROTH
|
||||
|
||||
// default poll-interval for spin-blocking lock
|
||||
flockMutexPollInterval = 50 * time.Millisecond
|
||||
)
|
||||
|
||||
type LockType int
|
||||
|
||||
const (
|
||||
ReadLock LockType = syscall.LOCK_SH
|
||||
WriteLock LockType = syscall.LOCK_EX
|
||||
LockMissing LockType = -1
|
||||
)
|
||||
|
||||
type AcquireMode int
|
||||
|
||||
const (
|
||||
OsBlocking AcquireMode = iota
|
||||
SpinBlocking
|
||||
NonBlocking
|
||||
)
|
||||
|
||||
type FlockMutex struct {
|
||||
filePath string
|
||||
fd int
|
||||
lockHeld LockType
|
||||
}
|
||||
|
||||
func NewFlockMutex(filePath string) *FlockMutex {
|
||||
return &FlockMutex{
|
||||
filePath: filePath,
|
||||
fd: -1,
|
||||
lockHeld: LockMissing,
|
||||
}
|
||||
}
|
||||
|
||||
func (mu *FlockMutex) openFd() error {
|
||||
if mu.fd != -1 {
|
||||
return ErrFileDescriptorAlreadyOpen
|
||||
}
|
||||
// TODO: ensure_directory_exists(mu.filePath)
|
||||
|
||||
// open file & TRY to change permissions to `modeFlags` flags
|
||||
fd, err := unix.Open(mu.filePath, flockMutexOpenFlags, flockMutexModeFlags)
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
mu.fd = fd
|
||||
_ = unix.Fchmod(fd, flockMutexModeFlags) // This locked is not owned by this UID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mu *FlockMutex) closeFd() error {
|
||||
if mu.fd == -1 {
|
||||
return ErrFileDescriptorNotOpen
|
||||
}
|
||||
|
||||
if err := unix.Close(mu.fd); err != nil {
|
||||
mu.fd = -1
|
||||
return err
|
||||
}
|
||||
|
||||
mu.fd = -1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mu *FlockMutex) acquire(lockType LockType, blocking bool) (bool, error) {
|
||||
// enforce preconditions/sanity checks
|
||||
if mu.fd == -1 {
|
||||
return false, ErrFileDescriptorNotOpen
|
||||
}
|
||||
if mu.lockHeld != LockMissing {
|
||||
return false, ErrLockAlreadyHeld
|
||||
}
|
||||
|
||||
// create flags for acquiring lock
|
||||
var flags = int(lockType)
|
||||
if !blocking {
|
||||
flags |= syscall.LOCK_NB
|
||||
}
|
||||
|
||||
// continually try to acquire lock (since it may fail due to interrupts)
|
||||
for {
|
||||
if err := unix.Flock(mu.fd, flags); err != nil {
|
||||
if errno, ok := err.(unix.Errno); ok {
|
||||
// call interrupted by signal -> try again
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// file is locked & non-blocking is enabled -> return false to indicate
|
||||
if errno == unix.EWOULDBLOCK {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// unhandleable errors -> close FD & return error
|
||||
_ = mu.closeFd() // TODO: how to merge Go errors ???
|
||||
return false, err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// set lock-type held
|
||||
mu.lockHeld = lockType
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (mu *FlockMutex) release() error {
|
||||
// enforce preconditions/sanity checks
|
||||
if mu.fd == -1 {
|
||||
return ErrFileDescriptorNotOpen
|
||||
}
|
||||
if mu.lockHeld == LockMissing {
|
||||
return ErrLockNotHeld
|
||||
}
|
||||
|
||||
// continually try to release lock (since it may fail due to interrupts)
|
||||
for {
|
||||
if err := unix.Flock(mu.fd, syscall.LOCK_UN); err != nil {
|
||||
if errno, ok := err.(unix.Errno); ok {
|
||||
// call interrupted by signal -> try again
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// unhandleable errors -> close FD & return error
|
||||
mu.lockHeld = LockMissing
|
||||
_ = mu.closeFd() // TODO: how to merge Go errors ???
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
mu.lockHeld = LockMissing
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mu *FlockMutex) Acquire(lockType LockType, acquireMode AcquireMode) (bool, error) {
|
||||
// open file if missing
|
||||
if mu.fd == -1 {
|
||||
if err := mu.openFd(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// OS-blocking & non-blocking is direct passthrough to private function
|
||||
switch acquireMode {
|
||||
case OsBlocking:
|
||||
return mu.acquire(lockType, true)
|
||||
case NonBlocking:
|
||||
return mu.acquire(lockType, false)
|
||||
}
|
||||
|
||||
// spin-blocking works by trying to acquire the lock in non-blocking mode, and retrying until success
|
||||
for {
|
||||
locked, err := mu.acquire(lockType, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if locked {
|
||||
return true, err
|
||||
}
|
||||
time.Sleep(flockMutexPollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func (mu *FlockMutex) Release(lockType LockType, acquireMode AcquireMode) error {
|
||||
if err := mu.release(); err != nil {
|
||||
_ = mu.closeFd() // TODO: how to merge Go errors ???
|
||||
return err
|
||||
}
|
||||
if err := mu.closeFd(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
//go:build unix
|
||||
|
||||
package ipc
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func check(t *testing.T, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func makeTempPath(t *testing.T, pattern string) string {
|
||||
f, err := os.CreateTemp("", pattern)
|
||||
check(t, err)
|
||||
name := f.Name()
|
||||
defer os.Remove(name)
|
||||
return name
|
||||
}
|
||||
|
||||
func TestLockHeld(t *testing.T) {
|
||||
path := makeTempPath(t, "testing_flock.lock")
|
||||
defer os.Remove(path)
|
||||
mu := NewFlockMutex(path)
|
||||
|
||||
assert.Equal(t, LockMissing, mu.lockHeld)
|
||||
|
||||
acquired, err := mu.Acquire(WriteLock, SpinBlocking)
|
||||
check(t, err)
|
||||
assert.True(t, acquired)
|
||||
assert.Equal(t, WriteLock, mu.lockHeld)
|
||||
check(t, mu.release())
|
||||
|
||||
assert.Equal(t, LockMissing, mu.lockHeld)
|
||||
|
||||
acquired, err = mu.Acquire(ReadLock, SpinBlocking)
|
||||
check(t, err)
|
||||
assert.True(t, acquired)
|
||||
assert.Equal(t, ReadLock, mu.lockHeld)
|
||||
check(t, mu.release())
|
||||
|
||||
assert.Equal(t, LockMissing, mu.lockHeld)
|
||||
}
|
||||
|
||||
func TestNoReentrantLock(t *testing.T) {
|
||||
path := makeTempPath(t, "testing_flock.lock")
|
||||
defer os.Remove(path)
|
||||
mu := NewFlockMutex(path)
|
||||
|
||||
// no write-lock reentrancy
|
||||
acquired, err := mu.Acquire(WriteLock, SpinBlocking)
|
||||
check(t, err)
|
||||
assert.True(t, acquired)
|
||||
{
|
||||
acquired, err = mu.Acquire(WriteLock, SpinBlocking)
|
||||
assert.False(t, acquired)
|
||||
assert.Equal(t, ErrLockAlreadyHeld, err)
|
||||
}
|
||||
{
|
||||
acquired, err = mu.Acquire(ReadLock, SpinBlocking)
|
||||
assert.False(t, acquired)
|
||||
assert.Equal(t, ErrLockAlreadyHeld, err)
|
||||
}
|
||||
check(t, mu.release())
|
||||
|
||||
// no read-lock reentrancy
|
||||
acquired, err = mu.Acquire(ReadLock, SpinBlocking)
|
||||
check(t, err)
|
||||
assert.True(t, acquired)
|
||||
{
|
||||
acquired, err = mu.Acquire(WriteLock, SpinBlocking)
|
||||
assert.False(t, acquired)
|
||||
assert.Equal(t, ErrLockAlreadyHeld, err)
|
||||
}
|
||||
{
|
||||
acquired, err = mu.Acquire(ReadLock, SpinBlocking)
|
||||
assert.False(t, acquired)
|
||||
assert.Equal(t, ErrLockAlreadyHeld, err)
|
||||
}
|
||||
check(t, mu.release())
|
||||
}
|
||||
@@ -1,400 +0,0 @@
|
||||
//go:build unix
|
||||
|
||||
package ipc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"lib"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pdgendt/cobs"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInOutPipesAreSame = errors.New("the in-pipe and out-pipe are the same")
|
||||
ErrExistingFileNotFifo = errors.New("the existing file is not a FIFO")
|
||||
)
|
||||
|
||||
const (
|
||||
pipeDuplexOpenReaderFlags = syscall.O_RDONLY | syscall.O_NONBLOCK
|
||||
pipeDuplexOpenWriterFlags = syscall.O_WRONLY | syscall.O_NONBLOCK
|
||||
pipeDuplexModeFlags = syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IRGRP | syscall.S_IROTH
|
||||
pipeDuplexPollInterval = 50 * time.Millisecond
|
||||
pipeDuplex_PIPE_BUF = 4096
|
||||
)
|
||||
|
||||
// Signal messages range from 1 to 255 & indicate control flow for the bytestream of the pipe.
|
||||
type SignalMessage byte
|
||||
|
||||
const (
|
||||
// DISCARD_PREVIOUS tells the receiver to discard previous partial work.
|
||||
DiscardPrevious SignalMessage = 0x01
|
||||
)
|
||||
|
||||
type OnMessage = func(msg []byte) error
|
||||
|
||||
// Creates a named-pipe communication duplex. Creates a named-pipe communication duplex.
|
||||
// The reader end is responsible for creating the pipe.
|
||||
//
|
||||
// The layers are:
|
||||
// 1. Raw binary data over pipes
|
||||
// 2. Variable-length binary packets with COBS
|
||||
// 3. JSON-like values with Message Pack
|
||||
type PipeDuplex struct {
|
||||
inPath string
|
||||
outPath string
|
||||
|
||||
rawOutMu sync.Mutex
|
||||
rawOut chan []byte
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
errg *errgroup.Group
|
||||
}
|
||||
|
||||
func NewPipeDuplex(inPath, outPath string, onMessage OnMessage) (*PipeDuplex, error) {
|
||||
// they must be different files
|
||||
if inPath == outPath {
|
||||
return nil, ErrInOutPipesAreSame
|
||||
}
|
||||
// pipes should only ever be created, and only by the reader (one-way operations)
|
||||
if err := ensureFifoExists(inPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
errg, ctx := errgroup.WithContext(ctx)
|
||||
p := &PipeDuplex{
|
||||
inPath: inPath,
|
||||
outPath: outPath,
|
||||
|
||||
rawOut: make(chan []byte, 128), // TODO: decide on size of this w/ constant??
|
||||
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
errg: errg,
|
||||
}
|
||||
// Reader
|
||||
p.errg.Go(func() error {
|
||||
return p.pipeBufferReader(onMessage)
|
||||
})
|
||||
|
||||
// Writer
|
||||
p.errg.Go(func() error {
|
||||
return p.pipeBufferWriter()
|
||||
})
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Close stops all goroutines and waits for them to exit.
|
||||
func (p *PipeDuplex) Close() error {
|
||||
p.cancel()
|
||||
|
||||
// this channel is exclusively written to via methods on this object handle, so it is its owner;
|
||||
// owners must be the ones to close channels to avoid race conditions
|
||||
defer func() {
|
||||
// lock channel to avoid race conditions when closing
|
||||
p.rawOutMu.Lock()
|
||||
defer p.rawOutMu.Unlock()
|
||||
|
||||
close(p.rawOut)
|
||||
}()
|
||||
|
||||
return p.errg.Wait()
|
||||
}
|
||||
|
||||
// SendMessage MessagePack-encodes a "value" and enqueues it to the writer.
|
||||
func (p *PipeDuplex) SendMessage(msg []byte) error {
|
||||
// lock channel to avoid race conditions when closing
|
||||
p.rawOutMu.Lock()
|
||||
defer p.rawOutMu.Unlock()
|
||||
|
||||
// send message bytes over outRaw channel
|
||||
select {
|
||||
case p.rawOut <- msg:
|
||||
// TODO: could this trigger a race condition if calling Close() immediately after SendMessage()???
|
||||
// should I lock p.rawOut w/ a mutex??
|
||||
return nil
|
||||
case <-p.ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PipeDuplex) InPath() string { return p.inPath }
|
||||
func (p *PipeDuplex) OutPath() string { return p.outPath }
|
||||
|
||||
// ===== Private =====
|
||||
|
||||
func ensureFifoExists(path string) error {
|
||||
// try to make a file if one doesn't exist already
|
||||
// TODO: add equivalent of `ensure_parent_directory_exists(path)` here !!!!!! <- may cause bugs w/out it???
|
||||
if err := unix.Mkfifo(path, pipeDuplexModeFlags); err != nil {
|
||||
if errno, ok := err.(unix.Errno); ok {
|
||||
// misc error, do not handle
|
||||
if errno != unix.EEXIST {
|
||||
return err
|
||||
}
|
||||
|
||||
// ensure the file exists is FIFO
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return err // misc error, do not handle
|
||||
}
|
||||
if fi.Mode()&fs.ModeNamedPipe == 0 {
|
||||
return ErrExistingFileNotFifo
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return err // misc error, do not handle
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PipeDuplex) pipeBufferReader(onMessage OnMessage) error {
|
||||
// open reader in nonblocking mode -> should not fail & immediately open;
|
||||
// this marks when the writer process has "started"
|
||||
fd, err := unix.Open(p.inPath, pipeDuplexOpenReaderFlags, pipeDuplexModeFlags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer unix.Close(fd)
|
||||
|
||||
// continually pull from the pipe and interpret messages as such:
|
||||
// - all messages are separated/framed by NULL bytes (zero)
|
||||
// - messages with >=2 bytes are COBS-encoded messages, because
|
||||
// the smallest COBS-encoded message is 2 bytes
|
||||
// - 1-byte messages are therefore to be treated as control signals
|
||||
var buf []byte // accumulation buffer
|
||||
for {
|
||||
select { // check for kill-signal
|
||||
case <-p.ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// read available data (and try again if nothing)
|
||||
data := make([]byte, pipeDuplex_PIPE_BUF)
|
||||
n, err := unix.Read(fd, data)
|
||||
if err != nil {
|
||||
errno, ok := err.(unix.Errno)
|
||||
if !ok || errno != unix.EAGAIN {
|
||||
return err
|
||||
}
|
||||
|
||||
// if there is a writer connected & the buffer is empty, this would block
|
||||
// so we must consume this error gracefully and try again
|
||||
time.Sleep(pipeDuplexPollInterval)
|
||||
continue
|
||||
}
|
||||
if n == 0 {
|
||||
time.Sleep(pipeDuplexPollInterval)
|
||||
continue
|
||||
}
|
||||
|
||||
// extend buffer with new data
|
||||
buf = append(buf, data[:n]...)
|
||||
|
||||
// if there are no NULL bytes in the buffer, no new message has been formed
|
||||
chunks := bytes.Split(buf, []byte{0x00})
|
||||
if len(chunks) == 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
// last chunk is always an unfinished message, so that becomes our new buffer;
|
||||
// the rest should be decoded as either signals or COBS and put on queue
|
||||
buf = chunks[len(chunks)-1]
|
||||
for i := 0; i < len(chunks)-1; i++ {
|
||||
chunk := chunks[i]
|
||||
|
||||
// ignore empty messages (they mean nothing)
|
||||
if len(chunk) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// interpret 1-byte messages as signals (they indicate control-flow on messages)
|
||||
if len(chunk) == 1 {
|
||||
log.Printf("(reader): gotten control signal: %v", chunk[0])
|
||||
// TODO: do some kind of stuff here??
|
||||
continue
|
||||
}
|
||||
|
||||
// interpret >=2 byte messages as COBS-encoded data (decode them)
|
||||
decoded, err := cobs.Decode(chunk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// call the callback to handle message
|
||||
if err := onMessage(decoded); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PipeDuplex) pipeBufferWriter() error {
|
||||
log.Printf("(writer): started")
|
||||
|
||||
// continually attempt to open FIFO for reading in nonblocking mode -> will error that:
|
||||
// - ENOENT[2] No such file or directory: until a reader creates FIFO
|
||||
// - ENXIO[6] No such device or address: until a reader opens FIFO
|
||||
fd := -1
|
||||
for {
|
||||
select { // check for kill-signal
|
||||
case <-p.ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
tempFd, err := unix.Open(p.outPath, pipeDuplexOpenWriterFlags, pipeDuplexModeFlags)
|
||||
if err != nil {
|
||||
if errno, ok := err.(unix.Errno); ok {
|
||||
// misc error, do not handle
|
||||
if !(errno == unix.ENOENT || errno == unix.ENXIO) {
|
||||
return err
|
||||
}
|
||||
|
||||
// try again if waiting for FIFO creation or reader-end opening
|
||||
time.Sleep(pipeDuplexPollInterval)
|
||||
continue
|
||||
} else {
|
||||
return err // misc error, do not handle
|
||||
}
|
||||
}
|
||||
fd = tempFd
|
||||
defer unix.Close(fd)
|
||||
|
||||
// ensure the file exists is FIFO
|
||||
mode, err := lib.FstatGetMode(fd)
|
||||
if err != nil {
|
||||
return err // misc error, do not handle
|
||||
}
|
||||
if mode&fs.ModeNamedPipe == 0 {
|
||||
return ErrExistingFileNotFifo
|
||||
}
|
||||
|
||||
break // continue logic
|
||||
}
|
||||
|
||||
// read bytes from rawOut & write them to pipe
|
||||
for {
|
||||
select {
|
||||
case buf, ok := <-p.rawOut:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := p.writeData(fd, buf); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-p.ctx.Done():
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// writeData COBS-encodes buf, appends a NUL frame terminator, and writes the
// resulting frame to fd, looping until every byte is sent.
//
// Error handling:
//   - EAGAIN (non-blocking pipe full): sleep pipeDuplexPollInterval, retry.
//   - EPIPE (reader disconnected): signal DiscardPrevious so a future reader
//     drops the partial frame, then restart the frame from the beginning.
//     NOTE(review): restarting from sent=0 re-sends bytes a dead reader may
//     have already consumed; presumably the DiscardPrevious signal plus COBS
//     framing makes this safe — confirm against the reader side.
//   - anything else is returned unchanged.
//
// Returns nil once the frame is fully written or the context is cancelled
// (cancellation mid-frame abandons the remainder silently).
func (p *PipeDuplex) writeData(fd int, buf []byte) error {
	// COBS-encode the data & append NULL-byte to signify end-of-frame
	buf, err := cobs.Encode(buf)
	if err != nil {
		return err
	}
	buf = append(buf, 0x00)
	total := len(buf)
	sent := 0

	// begin transmission progress
	for sent < total {
		select { // check for kill-signal
		case <-p.ctx.Done():
			return nil
		default:
		}

		// write & progress on happy path
		written, err := unix.Write(fd, buf[sent:])
		if err == nil {
			sent += written
			continue
		}

		// cast to OS error for proper handling
		errno, ok := err.(unix.Errno)
		if !ok {
			return err // misc error, do not handle
		}

		// non-blocking pipe is full, wait a bit and retry
		if errno == syscall.EAGAIN {
			time.Sleep(pipeDuplexPollInterval)
			continue
		}

		// reader disconnected -> handle failure-recovery by doing:
		// 1. signal DISCARD_PREVIOUS to any reader
		// 2. re-setting the progress & trying again
		if errno == syscall.EPIPE {
			if err := p.writeSignal(fd, DiscardPrevious); err != nil {
				return err
			}
			sent = 0
			continue
		}

		return err // misc error, do not handle
	}
	return nil
}
|
||||
|
||||
func (p *PipeDuplex) writeSignal(fd int, sig SignalMessage) error {
|
||||
signalMessageLength := 2
|
||||
|
||||
// Turn signal-byte into message by terminating with NULL-byte
|
||||
buf := []byte{byte(sig), 0x00}
|
||||
lib.Assert(len(buf) == signalMessageLength, "this must never NOT be the case")
|
||||
|
||||
// attempt to write until successful
|
||||
for {
|
||||
select { // check for kill-signal
|
||||
case <-p.ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// small writes (e.g. 2 bytes) should be atomic as per Pipe semantics,
|
||||
// meaning IF SUCCESSFUL: the number of bytes written MUST be exactly 2
|
||||
written, err := unix.Write(fd, buf)
|
||||
if err == nil {
|
||||
lib.Assert(written == signalMessageLength, "this must never NOT be the case")
|
||||
break
|
||||
}
|
||||
|
||||
// cast to OS error for propper handling
|
||||
errno, ok := err.(unix.Errno)
|
||||
if !ok {
|
||||
return err // misc error, do not handle
|
||||
}
|
||||
|
||||
// wait a bit and retry if:
|
||||
// - non-blocking pipe is full
|
||||
// - the pipe is broken because of reader disconnection
|
||||
if errno == syscall.EAGAIN || errno == syscall.EPIPE {
|
||||
time.Sleep(pipeDuplexPollInterval)
|
||||
continue
|
||||
}
|
||||
|
||||
return err // misc error, do not handle
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,85 +0,0 @@
|
||||
//go:build unix
|
||||
|
||||
package ipc
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestOneTwoThree(t *testing.T) {
|
||||
// Avoid SIGPIPE killing the test if a writer outlives its reader.
|
||||
// signal.Ignore(syscall.SIGPIPE) TODO: shoudn't sigpipe be handled by the error-code deep inside the duplex??
|
||||
|
||||
// Clean slate before/after.
|
||||
onePath := "/tmp/one.pipe"
|
||||
twoPath := "/tmp/two.pipe"
|
||||
_ = os.Remove(onePath)
|
||||
_ = os.Remove(twoPath)
|
||||
defer os.Remove(onePath)
|
||||
defer os.Remove(twoPath)
|
||||
|
||||
owner, err := NewPipeDuplex(
|
||||
onePath, // in
|
||||
twoPath, // out
|
||||
func(m []byte) error { log.Printf("wow, owner got: [%v]%v", len(m), m); return nil },
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("owner New failed: %v", err)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
guest1, err := NewPipeDuplex(
|
||||
twoPath, // in
|
||||
onePath, // out
|
||||
func(m []byte) error { log.Printf("wow, guest1 got: [%v]%v", len(m), m); return nil },
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("guest1 New failed: %v", err)
|
||||
}
|
||||
|
||||
if err := owner.SendMessage(make([]byte, 10)); err != nil {
|
||||
t.Fatalf("owner SendMessage failed: %v", err)
|
||||
}
|
||||
|
||||
// batch send
|
||||
if err := guest1.SendMessage(make([]byte, 200)); err != nil {
|
||||
t.Fatalf("guest1 SendMessage failed: %v", err)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
if err := guest1.Close(); err != nil {
|
||||
t.Fatalf("guest1 Close failed: %v", err)
|
||||
}
|
||||
|
||||
if err := owner.SendMessage(make([]byte, 21)); err != nil {
|
||||
t.Fatalf("owner SendMessage failed: %v", err)
|
||||
}
|
||||
|
||||
guest2, err := NewPipeDuplex(
|
||||
twoPath, // in
|
||||
onePath, // out
|
||||
func(m []byte) error { log.Printf("wow, guest2 got: [%v]%v", len(m), m); return nil },
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("guest2 New failed: %v", err)
|
||||
}
|
||||
|
||||
if err := guest2.SendMessage(make([]byte, 12)); err != nil {
|
||||
t.Fatalf("guest2 SendMessage failed: %v", err)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
if err := guest2.Close(); err != nil {
|
||||
t.Fatalf("guest2 Close failed: %v", err)
|
||||
}
|
||||
if err := owner.Close(); err != nil {
|
||||
t.Fatalf("owner Close failed: %v", err)
|
||||
}
|
||||
t.Fail()
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package dm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
// Config collects everything needed to build a DirectMessenger.
type Config struct {
	Host           host.Host               // libp2p host the messenger attaches to
	Protocol       protocol.ID             // wire protocol ID (NewDirectMessenger defaults this to DmProtocol)
	MessageHandler MessageHandler          // required: callback invoked for every incoming message
	Logger         *logging.ZapEventLogger // event logger (NewDirectMessenger defaults this to the package logger)
}

// Option mutates a Config during NewDirectMessenger construction.
type Option func(c *Config) error // TODO: add more options ??
|
||||
|
||||
func WithHandler(h MessageHandler) Option {
|
||||
return func(c *Config) error {
|
||||
c.MessageHandler = h
|
||||
return nil
|
||||
}
|
||||
}
|
||||
func WithHandlerFunction(onMessage func(ctx context.Context, from peer.ID, msg []byte) error) Option {
|
||||
return func(c *Config) error {
|
||||
c.MessageHandler = &MessageHandlerBundle{OnMessageF: onMessage}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
func WithLogger(l *logging.ZapEventLogger) Option {
|
||||
return func(c *Config) error {
|
||||
c.Logger = l
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package dm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
const (
	// ServiceName identifies this extension to the libp2p resource manager
	// (used for scoped stream/memory accounting).
	ServiceName = "libp2p.ext.dm/v1"
	// DmProtocol is the wire protocol ID DM streams are negotiated under.
	DmProtocol = protocol.ID("/dm/1.0.0")
)

var (
	// ErrMissingHandler is returned by NewDirectMessenger when no
	// MessageHandler was configured via options.
	ErrMissingHandler = errors.New("the message handler is missing")
)
|
||||
|
||||
// MessageHandler receives messages delivered over DM streams.
type MessageHandler interface {
	// OnMessage is invoked once per fully-read incoming message.
	OnMessage(ctx context.Context, from peer.ID, msg []byte) error
}

// MessageHandlerBundle adapts a bare function to the MessageHandler interface.
type MessageHandlerBundle struct {
	// OnMessageF is the wrapped callback; must be non-nil before OnMessage is
	// called (OnMessage forwards to it unconditionally).
	OnMessageF func(ctx context.Context, from peer.ID, msg []byte) error
}

// OnMessage forwards to the wrapped callback.
func (m *MessageHandlerBundle) OnMessage(ctx context.Context, from peer.ID, msg []byte) error {
	return m.OnMessageF(ctx, from, msg)
}

// DirectMessenger sends point-to-point messages to connected peers.
type DirectMessenger interface {
	// Send writes msg to peer `to` over a fresh stream.
	Send(to peer.ID, msg []byte) error
	// Close tears down the stream handler and releases resources.
	Close() error
}
|
||||
|
||||
func NewDirectMessenger(h host.Host, opts ...Option) (DirectMessenger, error) {
|
||||
cfg := &Config{
|
||||
Host: h,
|
||||
Protocol: DmProtocol,
|
||||
Logger: logger,
|
||||
}
|
||||
|
||||
// apply all configs
|
||||
for _, o := range opts {
|
||||
if err := o(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if cfg.MessageHandler == nil {
|
||||
return nil, ErrMissingHandler
|
||||
}
|
||||
|
||||
// create DM from config
|
||||
return newDirectMessenger(cfg)
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
package dm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
||||
|
||||
"github.com/libp2p/go-libp2p"
|
||||
)
|
||||
|
||||
func genPriv(t *testing.T, seed [32]byte) crypto.PrivKey {
|
||||
priv, _, err := crypto.GenerateEd25519Key(bytes.NewReader(seed[:]))
|
||||
if err != nil {
|
||||
t.Fatalf("failed generating key from seed %v: %v", seed, err)
|
||||
}
|
||||
return priv
|
||||
}
|
||||
|
||||
func createTestHost(t *testing.T, name string, opts ...Option) (host.Host, DirectMessenger) {
|
||||
// generate key
|
||||
seed := sha256.Sum256([]byte(name))
|
||||
id := genPriv(t, seed)
|
||||
|
||||
// create host
|
||||
h, err := libp2p.New(
|
||||
libp2p.Identity(id),
|
||||
libp2p.Transport(libp2pquic.NewTransport),
|
||||
libp2p.ListenAddrStrings(
|
||||
"/ip4/0.0.0.0/udp/0/quic-v1",
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed creating test host '%v': %v", name, err)
|
||||
}
|
||||
|
||||
// configure direct messaging
|
||||
dmOpts := []Option{WithHandler(&MessageHandlerBundle{
|
||||
OnMessageF: func(ctx context.Context, from peer.ID, msg []byte) error {
|
||||
log.Printf("[%v]<-[%v]: [%v]%v", name, from, len(msg), msg)
|
||||
return nil
|
||||
},
|
||||
})}
|
||||
dmOpts = append(dmOpts, opts...)
|
||||
dm, err := NewDirectMessenger(h, dmOpts...)
|
||||
if err != nil {
|
||||
t.Fatalf("failed creating test DM manager for host '%v': %v", name, err)
|
||||
}
|
||||
|
||||
return h, dm
|
||||
}
|
||||
|
||||
func createConnection(t *testing.T, p1, p2 host.Host) {
|
||||
ctx := context.Background()
|
||||
if err := p1.Connect(ctx, p2.Peerstore().PeerInfo(p2.ID())); err != nil {
|
||||
t.Fatalf("failed connecting '%v' to '%v': %v", p1.ID(), p2.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonEncoder(t *testing.T) {
|
||||
peer1, dm1 := createTestHost(t, "peer 1")
|
||||
defer dm1.Close()
|
||||
defer peer1.Close()
|
||||
|
||||
peer2, dm2 := createTestHost(t, "peer 2")
|
||||
defer dm2.Close()
|
||||
defer peer2.Close()
|
||||
|
||||
createConnection(t, peer1, peer2)
|
||||
|
||||
if err := dm1.Send(peer2.ID(), make([]byte, 10)); err != nil {
|
||||
t.Fatalf("dm1 Send failed: %v", err)
|
||||
}
|
||||
|
||||
// big send
|
||||
if err := dm2.Send(peer1.ID(), make([]byte, 10_000)); err != nil {
|
||||
t.Fatalf("dm2 Send failed: %v", err)
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
t.Fail()
|
||||
}
|
||||
@@ -1,151 +0,0 @@
|
||||
package dm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"lib"
|
||||
"sync"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
|
||||
)
|
||||
|
||||
const (
	// uint64NumBytes is the size of the big-endian length prefix framing
	// every DM message on the wire.
	uint64NumBytes = 8
)

var (
	// logger is the package-default event logger, scoped to the DM service.
	logger = logging.Logger(ServiceName)
)
|
||||
|
||||
// directMessenger is the concrete DirectMessenger: it registers a stream
// handler on the host and frames messages with a uint64 length prefix.
type directMessenger struct {
	ctx    context.Context // governs outgoing streams and handler callbacks
	cancel func()          // cancels ctx; invoked by Close

	h       host.Host
	pid     protocol.ID
	handler MessageHandler
	log     *logging.ZapEventLogger

	scope    network.ResourceScopeSpan // service-level resource span, released on Close
	notifiee network.Notifiee          // registered with the network; currently a no-op bundle

	mx     sync.Mutex // guards closed
	closed bool       // set exactly once by Close
}
|
||||
|
||||
// newDirectMessenger wires a validated Config into a running messenger:
// it opens a resource-manager span for the service, installs the stream
// handler for cfg.Protocol, and registers a (currently no-op) network
// notifiee.
func newDirectMessenger(cfg *Config) (*directMessenger, error) {
	ctx, cancel := context.WithCancel(context.Background())
	dm := &directMessenger{
		ctx:    ctx,
		cancel: cancel,

		h:       cfg.Host,
		pid:     cfg.Protocol,
		handler: cfg.MessageHandler,
		log:     cfg.Logger,
	}

	// get a scope for memory reservations at service level
	err := dm.h.Network().ResourceManager().ViewService(ServiceName,
		func(s network.ServiceScope) error {
			var err error
			dm.scope, err = s.BeginSpan()
			return err
		})
	if err != nil {
		// NOTE(review): on this path ctx is never cancelled; presumably
		// harmless since dm is discarded — confirm.
		return nil, err
	}

	dm.h.SetStreamHandler(dm.pid, dm.handleStream)
	dm.notifiee = &network.NotifyBundle{} // TODO: add handler functions in the future if so needed??
	dm.h.Network().Notify(dm.notifiee)

	return dm, nil
}
|
||||
|
||||
func (dm *directMessenger) Close() error {
|
||||
dm.mx.Lock()
|
||||
if !dm.closed {
|
||||
dm.closed = true
|
||||
dm.mx.Unlock()
|
||||
|
||||
dm.h.RemoveStreamHandler(proto.ProtoIDv2Hop)
|
||||
dm.h.Network().StopNotify(dm.notifiee)
|
||||
defer dm.scope.Done()
|
||||
dm.cancel()
|
||||
return nil
|
||||
}
|
||||
dm.mx.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dm *directMessenger) Send(p peer.ID, msg []byte) error {
|
||||
dm.log.Infof("outgoing DM stream to: %s", p)
|
||||
|
||||
// create new stream
|
||||
s, err := dm.h.NewStream(dm.ctx, p, dm.pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
// grab length if byte-buffer and encode it as big-endian
|
||||
mLen := len(msg)
|
||||
buf := make([]byte, uint64NumBytes, uint64NumBytes+mLen) // allocate enough capacity
|
||||
binary.BigEndian.PutUint64(buf, uint64(mLen))
|
||||
buf = append(buf, msg...)
|
||||
lib.Assert(len(buf) == uint64NumBytes+mLen, "literally what????")
|
||||
|
||||
// write to stream & handle any potential errors
|
||||
if _, err := s.Write(buf); err != nil {
|
||||
dm.log.Debugf("error writing message to DM service stream: %s", err)
|
||||
s.Reset()
|
||||
return err
|
||||
}
|
||||
|
||||
_ = s.CloseWrite() // signal EOF to caller if half-close is supported
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dm *directMessenger) handleStream(s network.Stream) {
|
||||
dm.log.Infof("incoming DM stream from: %s", s.Conn().RemotePeer())
|
||||
|
||||
defer s.Close()
|
||||
|
||||
// attach scope to this service (for scoped capacity allocation reasons)
|
||||
if err := s.Scope().SetService(ServiceName); err != nil {
|
||||
dm.log.Debugf("error attaching stream to DM service: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
// read big-endian length bytes & decode
|
||||
buf := make([]byte, uint64NumBytes)
|
||||
if _, err := io.ReadFull(s, buf); err != nil {
|
||||
dm.log.Debugf("error reading message length from DM service stream: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
mLen := binary.BigEndian.Uint64(buf)
|
||||
|
||||
// read rest of message & call OnMessage callback
|
||||
buf = make([]byte, mLen)
|
||||
if _, err := io.ReadFull(s, buf); err != nil {
|
||||
dm.log.Debugf("error reading message body from DM service stream: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
if err := dm.handler.OnMessage(dm.ctx, s.Conn().RemotePeer(), buf); err != nil {
|
||||
dm.log.Debugf("error handling incoming message from DM service stream: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
_ = s.CloseWrite() // signal EOF to caller if half-close is supported
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Assert panics (via log.Panic) with msg when the invariant b is violated.
func Assert(b bool, msg string) {
	if b {
		return
	}
	log.Panic(msg)
}
|
||||
|
||||
func FstatGetMode(fd int) (os.FileMode, error) {
|
||||
// perform fstat syscall
|
||||
var sys unix.Stat_t = unix.Stat_t{}
|
||||
if err := unix.Fstat(fd, &sys); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// reconstruct FileMode from sys-struct; SEE: https://github.com/golang/go/blob/5a56d8848b4ffb79c5ccc11ec6fa01823a91aaf8/src/os/stat_linux.go#L17
|
||||
mode := os.FileMode(sys.Mode & 0777)
|
||||
switch sys.Mode & syscall.S_IFMT {
|
||||
case syscall.S_IFBLK:
|
||||
mode |= os.ModeDevice
|
||||
case syscall.S_IFCHR:
|
||||
mode |= os.ModeDevice | os.ModeCharDevice
|
||||
case syscall.S_IFDIR:
|
||||
mode |= os.ModeDir
|
||||
case syscall.S_IFIFO:
|
||||
mode |= os.ModeNamedPipe
|
||||
case syscall.S_IFLNK:
|
||||
mode |= os.ModeSymlink
|
||||
case syscall.S_IFREG:
|
||||
// nothing to do
|
||||
case syscall.S_IFSOCK:
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
if sys.Mode&syscall.S_ISGID != 0 {
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if sys.Mode&syscall.S_ISUID != 0 {
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if sys.Mode&syscall.S_ISVTX != 0 {
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
return mode, nil
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
forwarder "forwarder/src"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Command-line flags.
var nodeID = flag.String("node-id", "", "Node ID (defaults to FORWARDER_NODE_ID env var or a new UUID)")
var eventsDBPath = flag.String("events-db", "", "Path to the worker events SQLite database")

// SourceHash identifies this build; defaults to "dev" (presumably overridden
// at link time via -ldflags — confirm against the build scripts).
var SourceHash = "dev"
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
log.Printf("SourceHash: %s\n", SourceHash)
|
||||
|
||||
os.Setenv("SOURCE_HASH", SourceHash)
|
||||
|
||||
id := *nodeID
|
||||
if id != "" {
|
||||
forwarder.SetNodeId(id)
|
||||
} else {
|
||||
id = forwarder.GetNodeId()
|
||||
}
|
||||
log.Printf("Starting forwarder with node ID: %s", id)
|
||||
|
||||
// Set the events database path if provided
|
||||
if *eventsDBPath != "" {
|
||||
forwarder.SetEventsDBPath(*eventsDBPath)
|
||||
log.Printf("Using events database: %s", *eventsDBPath)
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) == 0 {
|
||||
log.Fatal("forwarding pairs argument is required as the first positional argument (of the form {source}|{sink}) where source and sink sqlite:db_file:table_name or libp2p:topic")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
forwardingPairs := args[0]
|
||||
connections, err := forwarder.ParseForwardingPairs(forwardingPairs, ctx, cancel)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse forwarding pairs: %v", err)
|
||||
}
|
||||
for _, conn := range connections {
|
||||
log.Printf("Forwarding Pair %v", conn)
|
||||
}
|
||||
|
||||
for _, conn := range connections {
|
||||
fwd, err := forwarder.NewForwarder(conn)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create forwarder: %v", err)
|
||||
}
|
||||
fwd.Start(ctx)
|
||||
}
|
||||
sig := make(chan os.Signal, 1)
|
||||
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-sig
|
||||
cancel()
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
log.Println("Forwarder is shutting down...")
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ParseForwardingPairs(pairsStr string, ctx context.Context, cancel context.CancelFunc) ([]ForwardingPair, error) {
|
||||
if pairsStr == "" {
|
||||
return nil, fmt.Errorf("forwarding pairs string is empty")
|
||||
}
|
||||
|
||||
pairStrs := strings.Split(pairsStr, ",")
|
||||
var connections []ForwardingPair
|
||||
|
||||
for _, pairStr := range pairStrs {
|
||||
pairStr = strings.TrimSpace(pairStr)
|
||||
if pairStr == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Split(pairStr, "|")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid forwarding pair format: %s", pairStr)
|
||||
}
|
||||
|
||||
sourceStr := strings.TrimSpace(parts[0])
|
||||
sinkStr := strings.TrimSpace(parts[1])
|
||||
|
||||
sourceType := strings.Split(sourceStr, ":")[0]
|
||||
sinkType := strings.Split(sinkStr, ":")[0]
|
||||
if sinkType == sourceType {
|
||||
return nil, fmt.Errorf("source and sink types cannot be the same: %s", pairStr)
|
||||
}
|
||||
|
||||
sourceConn, err := parseEndpoint(sourceStr, ctx, cancel)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid source endpoint '%s': %w", sourceStr, err)
|
||||
}
|
||||
|
||||
sinkConn, err := parseEndpoint(sinkStr, ctx, cancel)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid sink endpoint '%s': %w", sinkStr, err)
|
||||
}
|
||||
|
||||
conn := ForwardingPair{
|
||||
source: sourceConn,
|
||||
sink: sinkConn,
|
||||
}
|
||||
connections = append(connections, conn)
|
||||
}
|
||||
tables := make(map[string]bool)
|
||||
for _, conn := range connections {
|
||||
if conn.sink.getType() == "sqlite" {
|
||||
tableName := conn.sink.(*sqliteConnector).tableName
|
||||
if _, ok := tables[tableName]; ok {
|
||||
return nil, fmt.Errorf("sink table '%s' already used in another connection", tableName)
|
||||
}
|
||||
tables[tableName] = true
|
||||
}
|
||||
}
|
||||
|
||||
return connections, nil
|
||||
}
|
||||
|
||||
func parseEndpoint(endpointStr string, ctx context.Context, cancel context.CancelFunc) (connection, error) {
|
||||
parts := strings.SplitN(endpointStr, ":", 2)
|
||||
if len(parts) < 2 || parts[1] == "" {
|
||||
return nil, fmt.Errorf("invalid endpoint format: %s", endpointStr)
|
||||
}
|
||||
|
||||
endpointType := parts[0]
|
||||
endpointArgsStr := parts[1]
|
||||
|
||||
switch endpointType {
|
||||
case "sqlite":
|
||||
args := strings.SplitN(endpointArgsStr, ":", 2)
|
||||
if len(args) != 2 || args[0] == "" || args[1] == "" {
|
||||
return nil, fmt.Errorf("invalid sqlite endpoint format: %s. Expected 'sqlite:db_file:table'", endpointStr)
|
||||
}
|
||||
return newSQLiteConnector(args[0], args[1])
|
||||
case "libp2p":
|
||||
if strings.Contains(endpointArgsStr, ":") {
|
||||
return nil, fmt.Errorf("invalid libp2p topic format: %s. Topic should not contain ':'", endpointStr)
|
||||
}
|
||||
return newLibP2PConnector(endpointArgsStr, ctx, cancel), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown endpoint type: %s", endpointType)
|
||||
}
|
||||
}
|
||||
@@ -1,219 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var (
	// eventsDBPath is where the events SQLite database lives; empty disables
	// event recording entirely (initEventsDB becomes a no-op).
	eventsDBPath string
	// eventsDB is lazily opened by initEventsDB on first write.
	eventsDB *sql.DB
	// eventsDBMu guards eventsDBPath and the open/init of eventsDB.
	eventsDBMu sync.Mutex
)

// SetEventsDBPath configures the path of the events database. It does not
// reopen an already-open database, so call it before any event is written.
func SetEventsDBPath(path string) {
	eventsDBMu.Lock()
	defer eventsDBMu.Unlock()
	eventsDBPath = path
}
|
||||
|
||||
// Event type discriminators stored in the events table.
const (
	EventTypeTopologyEdgeCreated = "TopologyEdgeCreated"
	EventTypeTopologyEdgeDeleted = "TopologyEdgeDeleted"
)

// ConnectionProfile captures measured link characteristics of an edge.
// Units are not encoded here — confirm against the consumer of these events.
type ConnectionProfile struct {
	Throughput float64 `json:"throughput"`
	Latency    float64 `json:"latency"`
	Jitter     float64 `json:"jitter"`
}

// Multiaddr is a JSON-friendly snapshot of one side of a connection.
type Multiaddr struct {
	Address     string `json:"address"` // full multiaddr string, e.g. /ip4/x/tcp/7847
	IPv4Address string `json:"ipv4_address,omitempty"`
	IPv6Address string `json:"ipv6_address,omitempty"`
	Port        int    `json:"port,omitempty"`
	Transport   string `json:"transport,omitempty"` // tcp/quic/ws/etc
}

// Connection describes one directed topology edge between two nodes.
type Connection struct {
	LocalNodeID       string             `json:"local_node_id"`
	SendBackNodeID    string             `json:"send_back_node_id"`
	LocalMultiaddr    Multiaddr          `json:"local_multiaddr"`
	SendBackMultiaddr Multiaddr          `json:"send_back_multiaddr"`
	ConnectionProfile *ConnectionProfile `json:"connection_profile"` // nil when unmeasured
}

// TopologyEdgeCreated is emitted when a new edge appears.
type TopologyEdgeCreated struct {
	EventType string     `json:"event_type"`
	EventID   string     `json:"event_id"`
	Edge      Connection `json:"edge"`
}

// TopologyEdgeDeleted is emitted when an edge disappears.
type TopologyEdgeDeleted struct {
	EventType string     `json:"event_type"`
	EventID   string     `json:"event_id"`
	Edge      Connection `json:"edge"`
}
|
||||
|
||||
// initEventsDB lazily opens the events database and ensures its schema
// exists. It is a no-op when the DB is already open or when no path was
// configured (event recording is best-effort and silently disabled then).
func initEventsDB() error {
	eventsDBMu.Lock()
	defer eventsDBMu.Unlock()
	if eventsDB != nil {
		return nil
	}
	if eventsDBPath == "" {
		return nil
	}
	db, err := sql.Open("sqlite3", eventsDBPath)
	if err != nil {
		return fmt.Errorf("failed to open events database: %w", err)
	}
	eventsDB = db

	const schema = `
	CREATE TABLE IF NOT EXISTS events (
		rowid INTEGER PRIMARY KEY AUTOINCREMENT,
		origin TEXT NOT NULL,
		event_type TEXT NOT NULL,
		event_id TEXT NOT NULL,
		event_data TEXT NOT NULL,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
	);
	CREATE INDEX IF NOT EXISTS idx_events_origin ON events(origin);
	CREATE INDEX IF NOT EXISTS idx_events_event_type ON events(event_type);
	CREATE INDEX IF NOT EXISTS idx_events_created_at ON events(created_at);
	`
	// roll back to the "unopened" state if the schema cannot be created
	if _, err := eventsDB.Exec(schema); err != nil {
		eventsDB.Close()
		eventsDB = nil
		return fmt.Errorf("failed to create events table: %w", err)
	}
	return nil
}
|
||||
|
||||
// writeEvent serializes eventData to JSON and inserts it into the events
// table, tagged with this node's ID. Best-effort: returns nil without
// writing when no events database is configured.
//
// NOTE(review): eventsDB is read here without holding eventsDBMu; this is
// only safe if writers never race with SetEventsDBPath/initEventsDB —
// confirm the calling pattern.
func writeEvent(eventType string, eventData interface{}) error {
	if eventsDB == nil {
		if err := initEventsDB(); err != nil {
			return err
		}
		// still nil means recording is disabled (no path configured)
		if eventsDB == nil {
			return nil
		}
	}
	jsonData, err := json.Marshal(eventData)
	if err != nil {
		return fmt.Errorf("failed to marshal event data: %w", err)
	}
	// reuse the event's own ID when it carries one, else mint a fresh UUID
	var eventID string
	switch e := eventData.(type) {
	case *TopologyEdgeCreated:
		eventID = e.EventID
	case *TopologyEdgeDeleted:
		eventID = e.EventID
	default:
		eventID = uuid.New().String()
	}
	const insert = `INSERT INTO events (origin, event_type, event_id, event_data) VALUES (?, ?, ?, ?)`
	_, err = eventsDB.Exec(insert, GetNodeId(), eventType, eventID, string(jsonData))
	return err
}
|
||||
|
||||
var WriteEdgeCreatedEvent = func(localNodeID, remoteNodeID, localIP, remoteIP, proto string) {
|
||||
event := &TopologyEdgeCreated{
|
||||
EventType: EventTypeTopologyEdgeCreated,
|
||||
EventID: uuid.New().String(),
|
||||
Edge: Connection{
|
||||
LocalNodeID: localNodeID,
|
||||
SendBackNodeID: remoteNodeID,
|
||||
LocalMultiaddr: Multiaddr{
|
||||
Address: fmt.Sprintf("/ip4/%s/tcp/7847", localIP),
|
||||
IPv4Address: localIP,
|
||||
Port: 7847,
|
||||
Transport: proto,
|
||||
},
|
||||
SendBackMultiaddr: Multiaddr{
|
||||
Address: fmt.Sprintf("/ip4/%s/tcp/7847", remoteIP),
|
||||
IPv4Address: remoteIP,
|
||||
Port: 7847,
|
||||
Transport: proto,
|
||||
},
|
||||
ConnectionProfile: nil,
|
||||
},
|
||||
}
|
||||
if err := writeEvent(EventTypeTopologyEdgeCreated, event); err != nil {
|
||||
log.Printf("Failed to write edge created event: %v", err)
|
||||
} else {
|
||||
log.Printf("Wrote TCP edge created event: %s -> %s (%s:%s)", localNodeID, remoteNodeID, remoteIP, proto)
|
||||
}
|
||||
}
|
||||
|
||||
var WriteEdgeDeletedEvent = func(localNodeID, remoteNodeID, localIP, remoteIP, proto string) {
|
||||
event := &TopologyEdgeDeleted{
|
||||
EventType: EventTypeTopologyEdgeDeleted,
|
||||
EventID: uuid.New().String(),
|
||||
Edge: Connection{
|
||||
LocalNodeID: localNodeID,
|
||||
SendBackNodeID: remoteNodeID,
|
||||
LocalMultiaddr: Multiaddr{
|
||||
Address: fmt.Sprintf("/ip4/%s/tcp/7847", localIP),
|
||||
IPv4Address: localIP,
|
||||
Port: 7847,
|
||||
Transport: proto,
|
||||
},
|
||||
SendBackMultiaddr: Multiaddr{
|
||||
Address: fmt.Sprintf("/ip4/%s/tcp/7847", remoteIP),
|
||||
IPv4Address: remoteIP,
|
||||
Port: 7847,
|
||||
Transport: proto,
|
||||
},
|
||||
ConnectionProfile: nil,
|
||||
},
|
||||
}
|
||||
if err := writeEvent(EventTypeTopologyEdgeDeleted, event); err != nil {
|
||||
log.Printf("Failed to write edge deleted event: %v", err)
|
||||
} else {
|
||||
log.Printf("Wrote TCP edge deleted event: %s -> %s (%s:%s)", localNodeID, remoteNodeID, remoteIP, proto)
|
||||
}
|
||||
}
|
||||
|
||||
// NotifeeHandler reacts to libp2p network events: it protects multipath
// connections from connection-manager pruning and feeds observed peer IPv4
// addresses into the TCP agent's discovery set.
type NotifeeHandler struct{}

// Listen is a no-op; present to satisfy network.Notifiee.
func (n *NotifeeHandler) Listen(net network.Network, ma multiaddr.Multiaddr) {}

// ListenClose is a no-op; present to satisfy network.Notifiee.
func (n *NotifeeHandler) ListenClose(net network.Network, ma multiaddr.Multiaddr) {}

// Connected protects the new connection (tagged by its host/transport key)
// and records the remote peer's IPv4 address, when one is present.
func (n *NotifeeHandler) Connected(netw network.Network, conn network.Conn) {
	pid := conn.RemotePeer()
	rawR := conn.RemoteMultiaddr()

	if node != nil && node.ConnManager() != nil {
		node.ConnManager().Protect(pid, "multipath-"+hostTransportKey(rawR))
	}

	// harvest the observed IPv4 for discovery
	if ipStr, err := rawR.ValueForProtocol(multiaddr.P_IP4); err == nil && ipStr != "" {
		if ip := net.ParseIP(ipStr); ip != nil {
			GetTCPAgent().UpdateDiscoveredIPs(pid, []net.IP{ip})
		}
	}
}

// Disconnected removes the protection tag added in Connected for this
// peer/transport pair.
func (n *NotifeeHandler) Disconnected(net network.Network, conn network.Conn) {
	pid := conn.RemotePeer()
	rawR := conn.RemoteMultiaddr()

	if node != nil && node.ConnManager() != nil {
		tag := "multipath-" + hostTransportKey(rawR)
		node.ConnManager().Unprotect(pid, tag)
	}
}

// OpenedStream is a no-op; present to satisfy network.Notifiee.
func (n *NotifeeHandler) OpenedStream(net network.Network, str network.Stream) {}

// ClosedStream is a no-op; present to satisfy network.Notifiee.
func (n *NotifeeHandler) ClosedStream(net network.Network, str network.Stream) {}

// GetNotifee returns a fresh notifiee for registration with the network.
func GetNotifee() network.Notifiee { return &NotifeeHandler{} }
|
||||
@@ -1,133 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
// libP2PToSqliteForwarder streams records arriving on a libp2p topic into a
// SQLite table, buffering them in a stateStore so gaps can be detected and
// re-requested from the source.
type libP2PToSqliteForwarder struct {
	source      LibP2PConnection
	sink        SQLiteConnection
	recordStore stateStoreInterface // orders records and detects gaps
}

// newLibP2PToSqliteForwarder seeds the state store with the sink's current
// latest row IDs so forwarding resumes where the table left off.
func newLibP2PToSqliteForwarder(source LibP2PConnection, sink SQLiteConnection) (*libP2PToSqliteForwarder, error) {
	latestRowIds, err := sink.getLatestRowIds()
	if err != nil {
		return nil, fmt.Errorf("failed to get latest row IDs: %w", err)
	}
	return &libP2PToSqliteForwarder{
		source:      source,
		sink:        sink,
		recordStore: newStateStore(latestRowIds),
	}, nil
}
|
||||
|
||||
// Start begins forwarding: every record tailed from the libp2p source is
// buffered in the state store, a 10ms ticker goroutine drains writeable
// (in-order) records into SQLite, and a 500ms ticker goroutine issues
// resend requests for detected gaps. Both goroutines exit when ctx is
// cancelled. Always returns nil; per-record failures are only logged.
func (f *libP2PToSqliteForwarder) Start(ctx context.Context) error {
	f.source.tail(func(record RecordData) error {
		f.recordStore.onRecord(record)
		return nil
	})

	// fast path: flush in-order records to the sink every 10ms
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				msgs := f.recordStore.getWriteableMessages()
				for _, msg := range msgs {
					if err := f.sink.write(msg); err != nil {
						log.Printf("Error writing to sink: %v", err)
					}
				}
			}
		}
	}()

	// Resend handler with less frequent checks
	go func() {
		ticker := time.NewTicker(500 * time.Millisecond) // Less frequent than before
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				reqs := f.recordStore.getResendRequests()
				for _, req := range reqs {
					if err := f.source.writeResend(req); err != nil {
						log.Printf("Error writing resend request: %v", err)
					}
				}
			}
		}
	}()

	return nil
}
|
||||
|
||||
type sqliteToLibP2PForwarder struct {
|
||||
source SQLiteConnection
|
||||
sink LibP2PConnection
|
||||
}
|
||||
|
||||
func newSqliteToLibP2PForwarder(source SQLiteConnection, sink LibP2PConnection) (*sqliteToLibP2PForwarder, error) {
|
||||
return &sqliteToLibP2PForwarder{
|
||||
source: source,
|
||||
sink: sink,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start begins forwarding: resend requests addressed to this node/table are
// served from the source table in a background goroutine, and every newly
// tailed row is published to the libp2p sink. Always returns nil; per-record
// failures are only logged.
//
// NOTE(review): ctx is accepted but not used here; cancellation is
// presumably handled inside the connectors — confirm.
func (f *sqliteToLibP2PForwarder) Start(ctx context.Context) error {
	// Handle resend requests
	f.sink.tailResend(func(req ResendRequest) error {
		// ignore requests that target a different node or table
		if req.SourceNodeID != f.source.getNodeId() {
			return nil
		}
		if req.SourcePath != f.source.getTablePath() {
			return nil
		}

		// Process resends in a separate goroutine to not block
		go func() {
			for _, gap := range req.Gaps {
				records, err := f.source.readRange(gap.Start, gap.End)
				if err != nil {
					log.Printf("Error getting records for resend: %v", err)
					continue
				}
				// Send resend records - libp2p connector will handle batching
				for _, rec := range records {
					if err := f.sink.write(rec); err != nil {
						log.Printf("Error writing resend record: %v", err)
					}
				}
			}
		}()
		return nil
	})

	// Tail new records - libp2p connector handles async batching internally
	f.source.tail(func(record RecordData) error {
		if err := f.sink.write(record); err != nil {
			log.Printf("Error writing record: %v", err)
		}
		return nil
	})

	return nil
}
|
||||
|
||||
func NewForwarder(forwardingPair ForwardingPair) (Forwarder, error) {
|
||||
if forwardingPair.source.getType() == "libp2p" && forwardingPair.sink.getType() == "sqlite" {
|
||||
return newLibP2PToSqliteForwarder(forwardingPair.source.(*libP2PConnector), forwardingPair.sink.(*sqliteConnector))
|
||||
} else if forwardingPair.source.getType() == "sqlite" && forwardingPair.sink.getType() == "libp2p" {
|
||||
return newSqliteToLibP2PForwarder(forwardingPair.source.(*sqliteConnector), forwardingPair.sink.(*libP2PConnector))
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported forwarding pair: %v", forwardingPair)
|
||||
}
|
||||
@@ -1,474 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// mockLibP2PConnector is an in-memory LibP2PConnection test double.
// tail/tailResend capture the handlers the forwarder registers;
// SendRecord/SendResend invoke them from the test, simulating messages
// arriving off the network.
type mockLibP2PConnector struct {
	tailHandler       func(RecordData) error    // handler registered via tail
	tailResendHandler func(ResendRequest) error // handler registered via tailResend
	writtenRecords    []RecordData              // records published via write
	writeErr          error                     // forced return value for write
	resendRequests    []ResendRequest           // requests published via writeResend
	writeResendErr    error                     // forced return value for writeResend
}

// tail stores the record handler for later injection via SendRecord.
func (m *mockLibP2PConnector) tail(handler func(record RecordData) error) {
	m.tailHandler = handler
}

// tailResend stores the resend handler for later injection via SendResend.
func (m *mockLibP2PConnector) tailResend(handler func(req ResendRequest) error) {
	m.tailResendHandler = handler
}

// write records the published record and returns the configured error.
func (m *mockLibP2PConnector) write(record RecordData) error {
	m.writtenRecords = append(m.writtenRecords, record)
	return m.writeErr
}

// writeResend records the published resend request and returns the
// configured error.
func (m *mockLibP2PConnector) writeResend(req ResendRequest) error {
	m.resendRequests = append(m.resendRequests, req)
	return m.writeResendErr
}

// close is a no-op for the mock.
func (m *mockLibP2PConnector) close() error {
	return nil
}

// getType identifies this connector as the libp2p implementation.
func (m *mockLibP2PConnector) getType() string {
	return "libp2p"
}

// SendRecord simulates a record arriving from the network by invoking the
// handler registered with tail.
func (m *mockLibP2PConnector) SendRecord(record RecordData) error {
	if m.tailHandler == nil {
		return fmt.Errorf("no tail handler registered")
	}
	return m.tailHandler(record)
}

// SendResend simulates a resend request arriving from the network by
// invoking the handler registered with tailResend.
func (m *mockLibP2PConnector) SendResend(req ResendRequest) error {
	if m.tailResendHandler == nil {
		return fmt.Errorf("no tailResend handler registered")
	}
	return m.tailResendHandler(req)
}
|
||||
|
||||
// mockSqliteConnector is an in-memory SQLiteConnection test double.
// Return values are preconfigured via the *Ret/*Err fields; calls to
// write/readRange are recorded so tests can assert on them.
type mockSqliteConnector struct {
	getLatestRowIdsRet map[SourceKey]int64 // canned result for getLatestRowIds
	getLatestRowIdsErr error
	writtenRecords     []RecordData // records persisted via write
	writeErr           error        // forced return value for write
	readRangeCalls     []struct{ start, end int64 } // recorded readRange arguments
	readRangeRet       []RecordData // canned result for readRange
	readRangeErr       error
	nodeId             string // returned by getNodeId
	tablePath          string // returned by getTablePath
	tailHandler        func(RecordData) error // handler registered via tail
}

// getLatestRowIds returns the preconfigured per-source high-water marks.
func (m *mockSqliteConnector) getLatestRowIds() (map[SourceKey]int64, error) {
	return m.getLatestRowIdsRet, m.getLatestRowIdsErr
}

// write records the persisted record and returns the configured error.
func (m *mockSqliteConnector) write(record RecordData) error {
	m.writtenRecords = append(m.writtenRecords, record)
	return m.writeErr
}

// readRange records its arguments and returns the canned record slice.
func (m *mockSqliteConnector) readRange(start, end int64) ([]RecordData, error) {
	m.readRangeCalls = append(m.readRangeCalls, struct{ start, end int64 }{start, end})
	return m.readRangeRet, m.readRangeErr
}

// tail stores the record handler for later injection via SendRecord.
func (m *mockSqliteConnector) tail(handler func(record RecordData) error) {
	m.tailHandler = handler
}

// close is a no-op for the mock.
func (m *mockSqliteConnector) close() error {
	return nil
}

// getType identifies this connector as the sqlite implementation.
func (m *mockSqliteConnector) getType() string {
	return "sqlite"
}

// SendRecord simulates a new local row by invoking the handler registered
// with tail.
func (m *mockSqliteConnector) SendRecord(record RecordData) error {
	if m.tailHandler == nil {
		return fmt.Errorf("no tail handler registered")
	}
	return m.tailHandler(record)
}

// getNodeId returns the preconfigured node identity.
func (m *mockSqliteConnector) getNodeId() string {
	return m.nodeId
}

// getTablePath returns the preconfigured table path.
func (m *mockSqliteConnector) getTablePath() string {
	return m.tablePath
}
|
||||
|
||||
// TestNewLibP2PToSqliteForwarder checks that plain construction of the
// libp2p->sqlite forwarder succeeds with an empty initial row-ID map.
func TestNewLibP2PToSqliteForwarder(t *testing.T) {
	source := &mockLibP2PConnector{}
	sink := &mockSqliteConnector{
		getLatestRowIdsRet: map[SourceKey]int64{},
	}
	f, err := newLibP2PToSqliteForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if f == nil {
		t.Fatal("expected non-nil forwarder")
	}
}
|
||||
|
||||
// TestLibP2PToSqliteForwarder_Start_InOrderRecords verifies that records
// arriving in row-ID order (1 then 2) are written straight through to the
// sink, in order. Sleeps allow the forwarder's async pipeline to drain;
// NOTE(review): timing-based, so inherently flaky under heavy load.
func TestLibP2PToSqliteForwarder_Start_InOrderRecords(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	source := &mockLibP2PConnector{}
	sink := &mockSqliteConnector{
		getLatestRowIdsRet: map[SourceKey]int64{},
	}

	f, err := newLibP2PToSqliteForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	key := SourceKey{SourceNodeId: "node1", SourcePath: "path1"}

	rec1 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 1}}
	source.SendRecord(rec1)

	time.Sleep(500 * time.Millisecond)

	if len(sink.writtenRecords) != 1 {
		t.Fatalf("expected 1 written record, got %d", len(sink.writtenRecords))
	}
	if !reflect.DeepEqual(sink.writtenRecords[0], rec1) {
		t.Fatal("written record mismatch")
	}

	rec2 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 2}}
	source.SendRecord(rec2)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 2 {
		t.Fatalf("expected 2 written records, got %d", len(sink.writtenRecords))
	}
	if !reflect.DeepEqual(sink.writtenRecords[1], rec2) {
		t.Fatal("written record mismatch")
	}
}
|
||||
|
||||
// TestLibP2PToSqliteForwarder_Start_OutOfOrderRecords verifies gap
// handling: row 3 arriving after row 1 is buffered (not written), the
// periodic resend ticker requests the missing row 2 (a single [2,2] gap),
// and row 2's arrival flushes the buffer in order (1, 2, 3).
// NOTE(review): the 5.5s sleep assumes a ~5s resend interval in the
// forwarder — this test is deliberately slow.
func TestLibP2PToSqliteForwarder_Start_OutOfOrderRecords(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	source := &mockLibP2PConnector{}
	sink := &mockSqliteConnector{
		getLatestRowIdsRet: map[SourceKey]int64{},
	}

	f, err := newLibP2PToSqliteForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	key := SourceKey{SourceNodeId: "node1", SourcePath: "path1"}

	rec1 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 1}}
	source.SendRecord(rec1)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 1 {
		t.Fatalf("expected 1 written record, got %d", len(sink.writtenRecords))
	}

	// Skip row 2: row 3 must be held back.
	rec3 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 3}}
	source.SendRecord(rec3)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 1 {
		t.Fatalf("expected still 1 written record, got %d", len(sink.writtenRecords))
	}

	time.Sleep(5500 * time.Millisecond) // Wait for resend ticker

	if len(source.resendRequests) != 1 {
		t.Fatalf("expected 1 resend request, got %d", len(source.resendRequests))
	}

	req := source.resendRequests[0]
	if req.SourceNodeID != "node1" || req.SourcePath != "path1" {
		t.Fatal("resend request mismatch")
	}
	if len(req.Gaps) != 1 || req.Gaps[0].Start != 2 || req.Gaps[0].End != 2 {
		t.Fatal("gap mismatch")
	}

	// Filling the gap must release the buffered row 3 as well.
	rec2 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 2}}
	source.SendRecord(rec2)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 3 {
		t.Fatalf("expected 3 written records, got %d", len(sink.writtenRecords))
	}
	// Check order: rec1, rec2, rec3
	if !reflect.DeepEqual(sink.writtenRecords[1], rec2) || !reflect.DeepEqual(sink.writtenRecords[2], rec3) {
		t.Fatal("written records order mismatch")
	}
}
|
||||
|
||||
// TestLibP2PToSqliteForwarder_Start_MultipleSources verifies that gap
// tracking is per (node, table) source: a gap on node1 must not block
// node2's records, and only node1 should receive a resend request.
func TestLibP2PToSqliteForwarder_Start_MultipleSources(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	source := &mockLibP2PConnector{}
	sink := &mockSqliteConnector{
		getLatestRowIdsRet: map[SourceKey]int64{},
	}

	f, err := newLibP2PToSqliteForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	key1 := SourceKey{SourceNodeId: "node1", SourcePath: "path1"}
	key2 := SourceKey{SourceNodeId: "node2", SourcePath: "path2"}

	rec1_1 := RecordData{TrackingData: TrackingData{SourceKey: key1, SourceRowID: 1}}
	source.SendRecord(rec1_1)

	rec2_1 := RecordData{TrackingData: TrackingData{SourceKey: key2, SourceRowID: 1}}
	source.SendRecord(rec2_1)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 2 {
		t.Fatalf("expected 2 written records, got %d", len(sink.writtenRecords))
	}

	// Introduce a gap only on node1 (row 2 missing).
	rec1_3 := RecordData{TrackingData: TrackingData{SourceKey: key1, SourceRowID: 3}}
	source.SendRecord(rec1_3)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 2 {
		t.Fatalf("expected still 2 written records, got %d", len(sink.writtenRecords))
	}

	time.Sleep(5500 * time.Millisecond)

	if len(source.resendRequests) != 1 {
		t.Fatalf("expected 1 resend request, got %d", len(source.resendRequests))
	}
	if source.resendRequests[0].SourceNodeID != "node1" {
		t.Fatal("resend for wrong source")
	}
}
|
||||
|
||||
// TestLibP2PToSqliteForwarder_Start_WithInitialLatest verifies that the
// forwarder seeds its per-source cursor from the sink's existing data:
// with row 5 already persisted, rows 6 and 7 flow straight through with
// no gap detected.
func TestLibP2PToSqliteForwarder_Start_WithInitialLatest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	key := SourceKey{SourceNodeId: "node1", SourcePath: "path1"}

	source := &mockLibP2PConnector{}
	sink := &mockSqliteConnector{
		getLatestRowIdsRet: map[SourceKey]int64{key: 5},
	}

	f, err := newLibP2PToSqliteForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	rec6 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 6}}
	source.SendRecord(rec6)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 1 {
		t.Fatalf("expected 1 written record, got %d", len(sink.writtenRecords))
	}

	rec7 := RecordData{TrackingData: TrackingData{SourceKey: key, SourceRowID: 7}}
	source.SendRecord(rec7)

	time.Sleep(200 * time.Millisecond)

	if len(sink.writtenRecords) != 2 {
		t.Fatalf("expected 2 written records, got %d", len(sink.writtenRecords))
	}
}
|
||||
|
||||
// TestNewSqliteToLibP2PForwarder checks that plain construction of the
// sqlite->libp2p forwarder succeeds.
func TestNewSqliteToLibP2PForwarder(t *testing.T) {
	source := &mockSqliteConnector{}
	sink := &mockLibP2PConnector{}
	f, err := newSqliteToLibP2PForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if f == nil {
		t.Fatal("expected non-nil forwarder")
	}
}
|
||||
|
||||
// TestSqliteToLibP2PForwarder_Start_TailRecords verifies that records
// tailed from the SQLite source are published to the libp2p sink, in
// order.
func TestSqliteToLibP2PForwarder_Start_TailRecords(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	source := &mockSqliteConnector{
		nodeId:    "node1",
		tablePath: "path1",
	}
	sink := &mockLibP2PConnector{}

	f, err := newSqliteToLibP2PForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	rec1 := RecordData{TrackingData: TrackingData{SourceRowID: 1}}
	source.SendRecord(rec1)

	time.Sleep(100 * time.Millisecond)

	if len(sink.writtenRecords) != 1 {
		t.Fatalf("expected 1 written record, got %d", len(sink.writtenRecords))
	}
	if !reflect.DeepEqual(sink.writtenRecords[0], rec1) {
		t.Fatal("written record mismatch")
	}

	rec2 := RecordData{TrackingData: TrackingData{SourceRowID: 2}}
	source.SendRecord(rec2)

	time.Sleep(100 * time.Millisecond)

	if len(sink.writtenRecords) != 2 {
		t.Fatalf("expected 2 written records, got %d", len(sink.writtenRecords))
	}
	if !reflect.DeepEqual(sink.writtenRecords[1], rec2) {
		t.Fatal("written record mismatch")
	}
}
|
||||
|
||||
// TestSqliteToLibP2PForwarder_Start_ResendRequest_Matching verifies that a
// resend request addressed to this node/table is served: the requested gap
// is read from SQLite via readRange and the records are republished.
func TestSqliteToLibP2PForwarder_Start_ResendRequest_Matching(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	source := &mockSqliteConnector{
		nodeId:    "node1",
		tablePath: "path1",
		readRangeRet: []RecordData{
			{TrackingData: TrackingData{SourceRowID: 5}},
		},
	}
	sink := &mockLibP2PConnector{}

	f, err := newSqliteToLibP2PForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	req := ResendRequest{
		SourceNodeID: "node1",
		SourcePath:   "path1",
		Gaps:         []GapRange{{Start: 5, End: 6}},
	}
	sink.SendResend(req)

	// The forwarder serves resends on a background goroutine.
	time.Sleep(100 * time.Millisecond)

	if len(source.readRangeCalls) != 1 {
		t.Fatalf("expected 1 readRange call, got %d", len(source.readRangeCalls))
	}
	if source.readRangeCalls[0].start != 5 || source.readRangeCalls[0].end != 6 {
		t.Fatal("readRange args mismatch")
	}

	if len(sink.writtenRecords) != 1 {
		t.Fatalf("expected 1 written record from resend, got %d", len(sink.writtenRecords))
	}
	if sink.writtenRecords[0].SourceRowID != 5 {
		t.Fatal("resend record mismatch")
	}
}
|
||||
|
||||
// TestSqliteToLibP2PForwarder_Start_ResendRequest_NotMatching verifies
// that resend requests targeting a different node/table are ignored
// entirely: no reads, no writes.
func TestSqliteToLibP2PForwarder_Start_ResendRequest_NotMatching(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	source := &mockSqliteConnector{
		nodeId:    "node1",
		tablePath: "path1",
	}
	sink := &mockLibP2PConnector{}

	f, err := newSqliteToLibP2PForwarder(source, sink)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	err = f.Start(ctx)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	req := ResendRequest{
		SourceNodeID: "node2",
		SourcePath:   "path2",
		Gaps:         []GapRange{{Start: 5, End: 5}},
	}
	sink.SendResend(req)

	time.Sleep(100 * time.Millisecond)

	if len(source.readRangeCalls) != 0 {
		t.Fatalf("expected 0 readRange calls, got %d", len(source.readRangeCalls))
	}

	if len(sink.writtenRecords) != 0 {
		t.Fatalf("expected 0 written records, got %d", len(sink.writtenRecords))
	}
}
|
||||
@@ -1,29 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
var (
	generatedNodeID string    // lazily generated fallback identity (see GetNodeId)
	nodeIDOnce      sync.Once // ensures the fallback is generated at most once
)
|
||||
|
||||
func GetNodeId() string {
|
||||
if id := os.Getenv("FORWARDER_NODE_ID"); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
nodeIDOnce.Do(func() {
|
||||
generatedNodeID = uuid.New().String()
|
||||
})
|
||||
|
||||
return generatedNodeID
|
||||
}
|
||||
|
||||
// SetNodeId overrides the node identity for this process (and any child
// processes) by exporting FORWARDER_NODE_ID, which GetNodeId consults
// first. The Setenv error is deliberately ignored.
func SetNodeId(id string) {
	os.Setenv("FORWARDER_NODE_ID", id)
}
|
||||
@@ -1,819 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/pnet"
|
||||
mdns "github.com/libp2p/go-libp2p/p2p/discovery/mdns"
|
||||
connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr"
|
||||
"github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// Process-wide libp2p singletons, initialized once by getNode.
var node host.Host       // the shared libp2p host
var ps *pubsub.PubSub    // gossipsub router bound to node
var mdnsSer mdns.Service // current mDNS service (swapped on restart)

var once sync.Once // guards the one-time host/pubsub/mdns setup in getNode
var mu sync.Mutex  // protects topicsMap, refCount, and mdnsSer swaps
var refCount int   // live connectors sharing the host (incremented in newLibP2PConnector)
var topicsMap = make(map[string]*pubsub.Topic) // joined topics, keyed by name
|
||||
|
||||
// peerConnState tracks exponential-backoff dialing state for one
// (peer, address-path) pair.
type peerConnState struct {
	retryCount  int       // consecutive failed dial attempts
	lastAttempt time.Time // when the most recent dial started
}

// peerAddrKey identifies one dial target: a peer plus one concrete
// network path to it.
type peerAddrKey struct {
	id   peer.ID
	addr string // host+transport key (IP|transport)
}
|
||||
|
||||
var (
	// Per-path dial backoff state, guarded by retryMu.
	peerRetryState = make(map[peerAddrKey]*peerConnState)
	retryMu        sync.Mutex

	// Paths with a dial currently in flight, guarded by connMu.
	connecting = make(map[peerAddrKey]bool)
	connMu     sync.Mutex

	// mDNS restart throttling state, guarded by mdnsRestartMu.
	mdnsRestartMu     sync.Mutex
	lastMdnsRestart   time.Time
	restartPending    bool
	minRestartSpacing = 2 * time.Second
)
|
||||
|
||||
const (
	connectTimeout   = 25 * time.Second // per-dial timeout in connectWithRetryToAddr
	mdnsFastInterval = 1 * time.Second  // mDNS poll interval while peerless
	mdnsSlowInterval = 30 * time.Second // mDNS poll interval once peers exist
)
|
||||
|
||||
// rendezvousTag caches the computed mDNS rendezvous string (see
// getRendezvousTag).
var rendezvousTag string

// computeRendezvousTag derives the mDNS rendezvous name from SOURCE_HASH,
// so only nodes configured for the same source discover each other.
func computeRendezvousTag() string {
	sum := sha256.Sum256([]byte("forwarder_network/" + os.Getenv("SOURCE_HASH")))
	return fmt.Sprintf("forwarder_network-%x", sum[:8])
}
|
||||
|
||||
// getRendezvousTag returns the cached rendezvous tag, computing it on
// first use.
// NOTE(review): the lazy init is unsynchronized; concurrent first calls
// could race on rendezvousTag. The value is deterministic so the race is
// benign in practice, but confirm callers are effectively serialized.
func getRendezvousTag() string {
	if rendezvousTag == "" {
		rendezvousTag = computeRendezvousTag()
	}
	return rendezvousTag
}
|
||||
|
||||
func ipString(a multiaddr.Multiaddr) string {
|
||||
if v, err := a.ValueForProtocol(multiaddr.P_IP4); err == nil {
|
||||
return v
|
||||
}
|
||||
if v, err := a.ValueForProtocol(multiaddr.P_IP6); err == nil {
|
||||
return v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func hostTransportKey(a multiaddr.Multiaddr) string {
|
||||
ip := ipString(a)
|
||||
t := "tcp"
|
||||
if _, err := a.ValueForProtocol(multiaddr.P_QUIC_V1); err == nil {
|
||||
t = "quic"
|
||||
}
|
||||
if _, err := a.ValueForProtocol(multiaddr.P_WS); err == nil {
|
||||
t = "ws"
|
||||
}
|
||||
return ip + "|" + t
|
||||
}
|
||||
|
||||
func isAddressValid(addr multiaddr.Multiaddr) bool {
|
||||
allowLoopback := os.Getenv("FORWARDER_ALLOW_LOOPBACK") == "true"
|
||||
|
||||
if ipStr, err := addr.ValueForProtocol(multiaddr.P_IP4); err == nil && ipStr != "" {
|
||||
ip := net.ParseIP(ipStr)
|
||||
if ip == nil {
|
||||
return false
|
||||
}
|
||||
if !allowLoopback && (ip.IsLoopback() || ip.IsUnspecified()) {
|
||||
return false
|
||||
}
|
||||
if ip.IsUnspecified() {
|
||||
return false
|
||||
}
|
||||
if b := ip.To4(); b != nil && b[0] == 100 && b[1] >= 64 && b[1] <= 127 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if ipStr, err := addr.ValueForProtocol(multiaddr.P_IP6); err == nil && ipStr != "" {
|
||||
ip := net.ParseIP(ipStr)
|
||||
if ip == nil {
|
||||
return false
|
||||
}
|
||||
if !allowLoopback && (ip.IsLoopback() || ip.IsUnspecified()) {
|
||||
return false
|
||||
}
|
||||
if ip.IsUnspecified() {
|
||||
return false
|
||||
}
|
||||
if strings.HasPrefix(strings.ToLower(ipStr), "fd7a:115c:a1e0:") {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// customInterfaceAddresses collects every IP assigned to every interface
// that is administratively up. Interface enumeration or address lookup
// failures abort with the error.
func customInterfaceAddresses() ([]net.IP, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}

	var collected []net.IP
	for _, iface := range ifaces {
		if iface.Flags&net.FlagUp == 0 {
			continue // skip interfaces that are down
		}
		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP == nil {
				continue
			}
			collected = append(collected, ipnet.IP)
		}
	}
	return collected, nil
}
|
||||
|
||||
// customAddrsFactory converts the host's listen addresses into the set to
// advertise: wildcard listeners (0.0.0.0 / ::) are expanded into one
// address per concrete interface IP, concrete listeners are kept as-is,
// and everything is filtered through isAddressValid. Returns nil if
// interface enumeration fails.
func customAddrsFactory(listenAddrs []multiaddr.Multiaddr) []multiaddr.Multiaddr {
	ips, err := customInterfaceAddresses()
	if err != nil {
		log.Printf("Error getting interface IPs: %v", err)
		return nil
	}

	var advAddrs []multiaddr.Multiaddr
	for _, la := range listenAddrs {
		comps := multiaddr.Split(la)
		if len(comps) == 0 {
			continue
		}
		// Inspect only the first component: it carries the IP protocol.
		first := comps[0]
		protos := first.Protocols()
		if len(protos) == 0 {
			continue
		}
		code := protos[0].Code
		val, err := first.ValueForProtocol(code)
		isWildcard := (err == nil &&
			((code == multiaddr.P_IP4 && val == "0.0.0.0") ||
				(code == multiaddr.P_IP6 && val == "::")))

		if isWildcard {
			// Substitute each interface IP for the wildcard, keeping the
			// rest of the multiaddr (port/transport) intact.
			for _, ip := range ips {
				var pcode string
				if ip.To4() != nil {
					pcode = "4"
				} else {
					pcode = "6"
				}
				newIPMA, err := multiaddr.NewMultiaddr("/ip" + pcode + "/" + ip.String())
				if err != nil {
					continue
				}
				var newComps []multiaddr.Multiaddrer
				newComps = append(newComps, newIPMA)
				for _, c := range comps[1:] {
					newComps = append(newComps, c.Multiaddr())
				}
				newAddr := multiaddr.Join(newComps...)
				if isAddressValid(newAddr) {
					advAddrs = append(advAddrs, newAddr)
				}
			}
		} else if isAddressValid(la) {
			advAddrs = append(advAddrs, la)
		}
	}
	return advAddrs
}
|
||||
|
||||
// discoveryNotifee receives mDNS peer-discovery callbacks for host h.
type discoveryNotifee struct{ h host.Host }
|
||||
|
||||
// HandlePeerFound is the mDNS discovery callback. It filters the peer's
// advertised addresses through isAddressValid, stores them in the
// peerstore, feeds the raw IPs to the TCP agent, and dials every
// host+transport path that is not already connected — one goroutine per
// missing path, so a slow dial cannot stall the rest.
func (n *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
	log.Printf("mDNS discovered peer %s with %d addresses", pi.ID, len(pi.Addrs))

	// Log the plain IPs for debuggability.
	var ipList []string
	for _, a := range pi.Addrs {
		if v := ipString(a); v != "" {
			ipList = append(ipList, v)
		}
	}
	if len(ipList) > 0 {
		log.Printf("mDNS %s IPs: %s", pi.ID, strings.Join(ipList, ", "))
	}

	// Keep only dialable addresses, collecting their parsed IPs as we go.
	var filtered []multiaddr.Multiaddr
	var ips []net.IP
	for _, a := range pi.Addrs {
		if isAddressValid(a) {
			filtered = append(filtered, a)

			if ipStr := ipString(a); ipStr != "" {
				if ip := net.ParseIP(ipStr); ip != nil {
					ips = append(ips, ip)
				}
			}
		}
	}
	if len(filtered) == 0 {
		log.Printf("No valid addrs for %s", pi.ID)
		return
	}

	// NOTE: local ps (the peerstore) shadows the package-level pubsub var.
	ps := n.h.Peerstore()
	ps.AddAddrs(pi.ID, filtered, peerstore.TempAddrTTL)

	tcpAgent := GetTCPAgent()
	if len(ips) > 0 {
		tcpAgent.UpdateDiscoveredIPs(pi.ID, ips)
	}

	// Index the host+transport paths we already have connections on.
	existing := make(map[string]struct{})
	for _, c := range n.h.Network().ConnsToPeer(pi.ID) {
		if cm, ok := c.(network.ConnMultiaddrs); ok {
			existing[hostTransportKey(cm.RemoteMultiaddr())] = struct{}{}
		}
	}

	// Dial each missing path; connectWithRetryToAddr dedupes and backs off.
	for _, a := range filtered {
		if _, seen := existing[hostTransportKey(a)]; seen {
			continue
		}
		go n.connectWithRetryToAddr(pi.ID, a)
	}
}
|
||||
|
||||
// connectWithRetryToAddr dials one specific (peer, host+transport) path
// with exponential backoff. Concurrent dials to the same path are
// coalesced through the connecting map; a failed dial schedules a retry
// via time.AfterFunc, which first re-checks whether the path came up in
// the meantime. Backoff state is discarded on success.
// NOTE(review): initialBackoff and maxBackoff are defined elsewhere in
// this package (not in this excerpt) — confirm their values before tuning.
func (n *discoveryNotifee) connectWithRetryToAddr(pid peer.ID, addr multiaddr.Multiaddr) {
	key := peerAddrKey{pid, hostTransportKey(addr)}

	// Claim this path; bail if another goroutine is already dialing it.
	connMu.Lock()
	if connecting[key] {
		connMu.Unlock()
		return
	}
	connecting[key] = true
	connMu.Unlock()
	defer func() {
		connMu.Lock()
		delete(connecting, key)
		connMu.Unlock()
	}()

	// Enforce the per-path backoff window before attempting a dial.
	retryMu.Lock()
	state, ok := peerRetryState[key]
	if !ok {
		state = &peerConnState{}
		peerRetryState[key] = state
	}
	backoff := time.Duration(1<<uint(state.retryCount)) * initialBackoff
	if backoff > maxBackoff {
		backoff = maxBackoff
	}
	if state.retryCount > 0 && time.Since(state.lastAttempt) < backoff {
		retryMu.Unlock()
		return
	}
	state.lastAttempt = time.Now()
	retryMu.Unlock()

	ai := peer.AddrInfo{ID: pid, Addrs: []multiaddr.Multiaddr{addr}}

	// Force a direct dial even if a (relayed) connection already exists,
	// so every physical path gets its own connection.
	ctx, cancel := context.WithTimeout(network.WithForceDirectDial(context.Background(), "ensure-multipath"), connectTimeout)
	defer cancel()

	n.h.Peerstore().AddAddrs(pid, []multiaddr.Multiaddr{addr}, peerstore.TempAddrTTL)

	if err := n.h.Connect(ctx, ai); err != nil {
		log.Printf("Dial %s@%s failed (attempt %d): %v", pid, addr, state.retryCount+1, err)
		retryMu.Lock()
		state.retryCount++
		retryMu.Unlock()

		// Schedule a retry, but only if the path is still absent by then.
		time.AfterFunc(backoff, func() {
			pathStillMissing := true
			for _, c := range n.h.Network().ConnsToPeer(pid) {
				if cm, ok := c.(network.ConnMultiaddrs); ok &&
					hostTransportKey(cm.RemoteMultiaddr()) == key.addr {
					pathStillMissing = false
					break
				}
			}
			if pathStillMissing {
				n.connectWithRetryToAddr(pid, addr)
			}
		})
		return
	}

	log.Printf("Connected to %s via %s", pid, addr)
	// Success: forget the backoff history for this path.
	retryMu.Lock()
	delete(peerRetryState, key)
	retryMu.Unlock()
}
|
||||
|
||||
// getPrivKey derives a deterministic Ed25519 private key by seeding key
// generation with sha256(nodeId), so the same node ID always maps to the
// same libp2p peer identity across restarts.
func getPrivKey(nodeId string) (crypto.PrivKey, error) {
	seed := sha256.Sum256([]byte(nodeId))
	priv, _, err := crypto.GenerateEd25519Key(bytes.NewReader(seed[:]))
	if err != nil {
		return nil, err
	}
	return priv, nil
}
|
||||
|
||||
// getNode lazily builds the process-wide libp2p stack exactly once: a host
// with a deterministic identity, a pre-shared-key private network,
// gossipsub, mDNS discovery, connection notifiers, the TCP agent, and the
// background mDNS maintenance goroutines. Any setup failure is fatal —
// the forwarder cannot operate without its network stack.
func getNode(ctx context.Context) {
	once.Do(func() {
		nodeId := GetNodeId()

		var opts []libp2p.Option

		// Deterministic identity: same node ID -> same peer ID.
		priv, err := getPrivKey(nodeId)
		if err != nil {
			log.Fatalf("failed to generate key: %v", err)
		}
		opts = append(opts, libp2p.Identity(priv))
		opts = append(opts, libp2p.Security(noise.ID, noise.New))

		// Private network: only nodes sharing SOURCE_HASH can connect.
		pskHash := sha256.Sum256([]byte("forwarder_network/" + os.Getenv("SOURCE_HASH")))
		psk := pnet.PSK(pskHash[:])
		opts = append(opts, libp2p.PrivateNetwork(psk))

		opts = append(opts, libp2p.EnableHolePunching())
		opts = append(opts, libp2p.EnableRelay())

		// Advertise concrete interface IPs instead of wildcard listeners.
		opts = append(opts, libp2p.AddrsFactory(customAddrsFactory))

		cm, _ := connmgr.NewConnManager(100, 1000, connmgr.WithGracePeriod(2*time.Minute))
		opts = append(opts, libp2p.ConnectionManager(cm))

		var errNode error
		node, errNode = libp2p.New(opts...)
		if errNode != nil {
			log.Fatalf("failed to create host: %v", errNode)
		}

		// Gossipsub with signing disabled (the PSK already gates
		// membership) and enlarged queues/messages for throughput.
		gossipOpts := []pubsub.Option{
			pubsub.WithMessageSigning(false),
			pubsub.WithStrictSignatureVerification(false),
			pubsub.WithMaxMessageSize(1024 * 1024),
			pubsub.WithValidateQueueSize(1000),
			pubsub.WithPeerOutboundQueueSize(1000),
		}
		ps, err = pubsub.NewGossipSub(ctx, node, gossipOpts...)
		if err != nil {
			_ = node.Close()
			log.Fatalf("failed to create pubsub: %v", err)
		}

		// LAN discovery under a SOURCE_HASH-scoped rendezvous tag.
		rendezvous := getRendezvousTag()
		notifee := &discoveryNotifee{h: node}
		mdnsSer = mdns.NewMdnsService(node, rendezvous, notifee)
		if err := mdnsSer.Start(); err != nil {
			_ = node.Close()
			log.Fatalf("failed to start mdns service: %v", err)
		}

		node.Network().Notify(&disconnectNotifee{})
		node.Network().Notify(GetNotifee())

		tcpAgent := GetTCPAgent()
		if err := tcpAgent.Start(ctx, node.ID()); err != nil {
			log.Printf("Failed to start TCP agent: %v", err)
		}

		go periodicMDNSDiscovery()
		go watchInterfacesAndKickMDNS()
	})
}
|
||||
|
||||
// periodicMDNSDiscovery adapts mDNS maintenance to connectivity: while the
// node has zero peers it ticks at mdnsFastInterval and force-restarts mDNS
// at most every 5s; once any peer is connected it relaxes back to
// mdnsSlowInterval. The goroutine exits if the host or mDNS service is
// gone.
func periodicMDNSDiscovery() {
	current := mdnsSlowInterval
	t := time.NewTicker(current)
	defer t.Stop()

	lastNoPeerRestart := time.Time{}

	for range t.C {
		if mdnsSer == nil || node == nil {
			return
		}
		n := len(node.Network().Peers())
		if n == 0 {
			// Peerless: tighten the poll loop and nudge mDNS periodically.
			if current != mdnsFastInterval {
				current = mdnsFastInterval
				t.Reset(current)
			}
			if time.Since(lastNoPeerRestart) > 5*time.Second {
				forceRestartMDNS("no-peers")
				lastNoPeerRestart = time.Now()
			}
		} else {
			// Connected: relax back to the slow interval.
			if current != mdnsSlowInterval {
				current = mdnsSlowInterval
				t.Reset(current)
			}
		}
	}
}
|
||||
|
||||
func watchInterfacesAndKickMDNS() {
|
||||
snap := interfacesSignature()
|
||||
t := time.NewTicker(1 * time.Second)
|
||||
defer t.Stop()
|
||||
|
||||
for range t.C {
|
||||
next := interfacesSignature()
|
||||
if next != snap {
|
||||
snap = next
|
||||
kickMDNSBurst("iface-change")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func kickMDNSBurst(reason string) {
|
||||
forceRestartMDNS(reason)
|
||||
time.AfterFunc(2*time.Second, func() { forceRestartMDNS(reason + "-stabilize-2s") })
|
||||
time.AfterFunc(6*time.Second, func() { forceRestartMDNS(reason + "-stabilize-6s") })
|
||||
}
|
||||
|
||||
// interfacesSignature produces a cheap fingerprint of the up interfaces
// (name, flags, and addresses); a change in the string signals a network
// configuration change. Enumeration errors are ignored — a failed poll
// just yields a (possibly empty) signature.
func interfacesSignature() string {
	ifaces, _ := net.Interfaces()

	var sig strings.Builder
	for _, iface := range ifaces {
		if iface.Flags&net.FlagUp == 0 {
			continue
		}
		addrs, _ := iface.Addrs()

		parts := []string{iface.Name, iface.Flags.String()}
		for _, a := range addrs {
			parts = append(parts, a.String())
		}
		sig.WriteString(strings.Join(parts, "|"))
		sig.WriteByte(';')
	}
	return sig.String()
}
|
||||
|
||||
// forceRestartMDNS tears down and recreates the mDNS service so a fresh
// round of announcements/queries goes out. Restarts are rate-limited to
// one per minRestartSpacing; calls arriving inside that window are
// coalesced into a single deferred restart. The new service is started
// before the old one is closed, so discovery never goes dark.
func forceRestartMDNS(reason string) {
	mdnsRestartMu.Lock()
	defer mdnsRestartMu.Unlock()

	now := time.Now()
	if restartPending || now.Sub(lastMdnsRestart) < minRestartSpacing {
		// Too soon: schedule exactly one deferred restart and return.
		if !restartPending {
			restartPending = true
			wait := minRestartSpacing - now.Sub(lastMdnsRestart)
			if wait < 0 {
				wait = minRestartSpacing
			}
			time.AfterFunc(wait, func() {
				forceRestartMDNS("coalesced")
			})
		}
		return
	}
	restartPending = false
	lastMdnsRestart = now

	// mu guards mdnsSer against concurrent swaps.
	mu.Lock()
	defer mu.Unlock()

	if mdnsSer != nil && node != nil {
		log.Printf("Restarting mDNS (%s)", reason)
		old := mdnsSer
		rendezvous := getRendezvousTag()
		notifee := &discoveryNotifee{h: node}
		newMdns := mdns.NewMdnsService(node, rendezvous, notifee)
		if err := newMdns.Start(); err != nil {
			log.Printf("Failed to restart mDNS: %v", err)
			return
		}
		_ = old.Close()
		mdnsSer = newMdns
		GetTCPAgent().OnInterfaceChange()

		// Fresh discovery round: clear dial backoff so rediscovered
		// paths are retried immediately.
		retryMu.Lock()
		peerRetryState = make(map[peerAddrKey]*peerConnState)
		retryMu.Unlock()
	}
}
|
||||
|
||||
// disconnectNotifee reacts to lost connections by restarting mDNS shortly
// afterwards, prompting rediscovery of the departed peer. All other
// network events are ignored.
type disconnectNotifee struct{}

func (d *disconnectNotifee) Connected(network.Network, network.Conn) {}

// Disconnected schedules an mDNS restart after a short grace period so a
// transient blip can settle first; forceRestartMDNS rate-limits the rest.
func (d *disconnectNotifee) Disconnected(n network.Network, c network.Conn) {
	go func() {
		time.Sleep(400 * time.Millisecond)
		forceRestartMDNS("disconnect")
	}()
}
func (d *disconnectNotifee) OpenedStream(network.Network, network.Stream)      {}
func (d *disconnectNotifee) ClosedStream(network.Network, network.Stream)      {}
func (d *disconnectNotifee) Listen(network.Network, multiaddr.Multiaddr)       {}
func (d *disconnectNotifee) ListenClose(network.Network, multiaddr.Multiaddr)  {}
|
||||
|
||||
// libP2PConnector is a pubsub-backed connection: records are published to
// `topic` (batched asynchronously by a worker pool) and resend requests to
// the companion `topic + "/resend"` topic.
type libP2PConnector struct {
	topic     string
	sub       *pubsub.Subscription // set by tail()
	subResend *pubsub.Subscription // set by tailResend()
	top       *pubsub.Topic
	topResend *pubsub.Topic
	ctx       context.Context
	cancel    context.CancelFunc

	// Async publish pipeline: write() enqueues onto writeChan; workerPool
	// goroutines drain it, batching up to batchSize records or batchTimeout.
	writeChan    chan RecordData
	batchSize    int
	batchTimeout time.Duration
	workerPool   int
}
|
||||
|
||||
func newLibP2PConnector(topic string, ctx context.Context, cancel context.CancelFunc) *libP2PConnector {
|
||||
getNode(ctx)
|
||||
mu.Lock()
|
||||
var err error
|
||||
t, ok := topicsMap[topic]
|
||||
if !ok {
|
||||
t, err = ps.Join(topic)
|
||||
if err != nil {
|
||||
mu.Unlock()
|
||||
log.Fatalf("failed to join topic %s: %v", topic, err)
|
||||
}
|
||||
topicsMap[topic] = t
|
||||
}
|
||||
t2, okResend := topicsMap[topic+"/resend"]
|
||||
if !okResend {
|
||||
t2, err = ps.Join(topic + "/resend")
|
||||
if err != nil {
|
||||
mu.Unlock()
|
||||
log.Fatalf("failed to join topic %s: %v", topic+"/resend", err)
|
||||
}
|
||||
topicsMap[topic+"/resend"] = t2
|
||||
}
|
||||
refCount++
|
||||
mu.Unlock()
|
||||
|
||||
conn := &libP2PConnector{
|
||||
topic: topic,
|
||||
top: t,
|
||||
topResend: t2,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
writeChan: make(chan RecordData, 2000),
|
||||
batchSize: 100,
|
||||
batchTimeout: 10 * time.Millisecond,
|
||||
workerPool: 5,
|
||||
}
|
||||
conn.startAsyncPublishers()
|
||||
return conn
|
||||
}
|
||||
|
||||
func (c *libP2PConnector) tail(handler func(record RecordData) error) {
|
||||
sub, err := c.top.Subscribe()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to subscribe to topic %s: %v", c.topic, err)
|
||||
}
|
||||
c.sub = sub
|
||||
go handleRecordSub(c.sub, c.ctx, handler)
|
||||
}
|
||||
|
||||
func (c *libP2PConnector) tailResend(handler func(data ResendRequest) error) {
|
||||
sub, err := c.topResend.Subscribe()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to subscribe to topic %s: %v", c.topic, err)
|
||||
}
|
||||
c.subResend = sub
|
||||
go handleSub(c.subResend, c.ctx, handler)
|
||||
}
|
||||
|
||||
func handleSub[T any](sub *pubsub.Subscription, ctx context.Context, handler func(data T) error) {
|
||||
for {
|
||||
msg, err := sub.Next(ctx)
|
||||
if err != nil {
|
||||
if err == context.Canceled {
|
||||
return
|
||||
}
|
||||
log.Printf("subscription error for topic %s: %v", sub.Topic(), err)
|
||||
return
|
||||
}
|
||||
var rec T
|
||||
if err := json.Unmarshal(msg.Data, &rec); err != nil {
|
||||
log.Printf("unmarshal error for topic %s: %v", sub.Topic(), err)
|
||||
continue
|
||||
}
|
||||
if handler != nil {
|
||||
if err := handler(rec); err != nil {
|
||||
log.Printf("handler error for topic %s: %v", sub.Topic(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleRecordSub(sub *pubsub.Subscription, ctx context.Context, handler func(record RecordData) error) {
|
||||
for {
|
||||
msg, err := sub.Next(ctx)
|
||||
if err != nil {
|
||||
if err == context.Canceled {
|
||||
return
|
||||
}
|
||||
log.Printf("subscription error for topic %s: %v", sub.Topic(), err)
|
||||
return
|
||||
}
|
||||
var batch BatchRecord
|
||||
if err := json.Unmarshal(msg.Data, &batch); err == nil && len(batch.Records) > 0 {
|
||||
for _, r := range batch.Records {
|
||||
if handler != nil {
|
||||
if err := handler(r); err != nil {
|
||||
log.Printf("handler error for batched record: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
var single RecordData
|
||||
if err := json.Unmarshal(msg.Data, &single); err == nil {
|
||||
if handler != nil {
|
||||
if err := handler(single); err != nil {
|
||||
log.Printf("handler error for single record: %v", err)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.Printf("failed to unmarshal message for topic %s", sub.Topic())
|
||||
}
|
||||
}
|
||||
|
||||
func (c *libP2PConnector) startAsyncPublishers() {
|
||||
for i := 0; i < c.workerPool; i++ {
|
||||
go c.publishWorker()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *libP2PConnector) publishWorker() {
|
||||
batch := make([]RecordData, 0, c.batchSize)
|
||||
timer := time.NewTimer(c.batchTimeout)
|
||||
timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
if len(batch) > 0 {
|
||||
if err := c.publishBatch(batch); err != nil {
|
||||
log.Printf("Error publishing batch: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
case r := <-c.writeChan:
|
||||
batch = append(batch, r)
|
||||
if len(batch) >= c.batchSize {
|
||||
if err := c.publishBatch(batch); err != nil {
|
||||
log.Printf("Error publishing batch: %v", err)
|
||||
}
|
||||
batch = batch[:0]
|
||||
timer.Stop()
|
||||
} else if len(batch) == 1 {
|
||||
timer.Reset(c.batchTimeout)
|
||||
}
|
||||
|
||||
case <-timer.C:
|
||||
if len(batch) > 0 {
|
||||
if err := c.publishBatch(batch); err != nil {
|
||||
log.Printf("Error publishing batch: %v", err)
|
||||
}
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *libP2PConnector) publishBatch(records []RecordData) error {
|
||||
if len(records) == 0 {
|
||||
return nil
|
||||
}
|
||||
data, err := json.Marshal(BatchRecord{Records: records})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
pubCtx, cancel := context.WithTimeout(c.ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
if err := c.top.Publish(pubCtx, data); err != nil && err != context.DeadlineExceeded {
|
||||
log.Printf("Error publishing batch of %d: %v", len(records), err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// write enqueues a record for batched async publishing. If the queue is full
// it falls back to a synchronous single-record publish rather than blocking
// the caller; a cancelled context aborts with ctx.Err().
func (c *libP2PConnector) write(record RecordData) error {
	select {
	case c.writeChan <- record:
		return nil
	case <-c.ctx.Done():
		return c.ctx.Err()
	default:
		// Queue full: publish inline so the record is not dropped.
		return c.publishSingle(record)
	}
}
|
||||
|
||||
func (c *libP2PConnector) publishSingle(record RecordData) error {
|
||||
if c.top == nil {
|
||||
return context.Canceled
|
||||
}
|
||||
data, err := json.Marshal(record)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.top.Publish(c.ctx, data)
|
||||
}
|
||||
|
||||
func (c *libP2PConnector) writeResend(req ResendRequest) error {
|
||||
if c.topResend == nil {
|
||||
return context.Canceled
|
||||
}
|
||||
data, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.topResend.Publish(c.ctx, data)
|
||||
}
|
||||
|
||||
// close releases this connector's subscriptions and, when the last connector
// goes away (shared refCount reaches zero), tears down the whole libp2p
// stack: cached topics, mDNS, the TCP agent, and the host. The package-level
// sync.Once is reset so a later connector can rebuild everything.
func (c *libP2PConnector) close() error {
	mu.Lock()
	refCount--
	closeHost := refCount == 0
	mu.Unlock()

	if c.cancel != nil {
		c.cancel()
	}
	if c.sub != nil {
		c.sub.Cancel()
	}
	if c.subResend != nil {
		c.subResend.Cancel()
	}

	if closeHost {
		// Last user: close every cached topic before dropping the host.
		for _, top := range topicsMap {
			_ = top.Close()
		}
		topicsMap = make(map[string]*pubsub.Topic)
	}

	c.top = nil

	if !closeHost {
		return nil
	}

	if mdnsSer != nil {
		_ = mdnsSer.Close()
		mdnsSer = nil
	}

	tcpAgent := GetTCPAgent()
	if err := tcpAgent.Stop(); err != nil {
		log.Printf("Error stopping TCP agent: %v", err)
	}

	var err error
	if node != nil {
		err = node.Close()
	}
	node = nil
	ps = nil
	refCount = 0
	// Reset the Once so getNode() can re-initialize the host later.
	once = sync.Once{}
	return err
}
|
||||
|
||||
// getType identifies this connection implementation.
func (c *libP2PConnector) getType() string {
	return "libp2p"
}
|
||||
@@ -1,175 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLibP2PConnectorCreation(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
conn := newLibP2PConnector("test_topic", ctx, cancel)
|
||||
assert.NotNil(t, conn)
|
||||
assert.Equal(t, "test_topic", conn.topic)
|
||||
assert.NotNil(t, conn.top)
|
||||
assert.Nil(t, conn.sub)
|
||||
err := conn.close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLibP2PConnectorGetType(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
conn := newLibP2PConnector("test_topic", ctx, cancel)
|
||||
assert.Equal(t, "libp2p", conn.getType())
|
||||
err := conn.close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLibP2PConnectorTailAndWriteSameTopic(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
conn := newLibP2PConnector("test_topic_tail_and_write", ctx, cancel)
|
||||
|
||||
received := make(chan RecordData, 1)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
conn.tail(func(rec RecordData) error {
|
||||
received <- rec
|
||||
return nil
|
||||
})
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
rec := RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{
|
||||
SourceNodeId: "test_node_id",
|
||||
SourcePath: "test_path",
|
||||
},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: map[string]interface{}{"test_key": "test_value"},
|
||||
}
|
||||
err := conn.write(rec)
|
||||
require.NoError(t, err)
|
||||
|
||||
select {
|
||||
case got := <-received:
|
||||
assert.Equal(t, rec.SourceKey.SourceNodeId, got.SourceKey.SourceNodeId)
|
||||
assert.Equal(t, rec.SourceKey.SourcePath, got.SourceKey.SourcePath)
|
||||
assert.Equal(t, rec.SourceRowID, got.SourceRowID)
|
||||
assert.Equal(t, rec.Data, got.Data)
|
||||
assert.WithinDuration(t, rec.SourceTimestamp, got.SourceTimestamp, time.Second)
|
||||
case err := <-errChan:
|
||||
t.Fatalf("handler error: %v", err)
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("timeout waiting for message")
|
||||
}
|
||||
|
||||
err = conn.close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLibP2PConnectorTailAndWriteDifferentTopic(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
conn1 := newLibP2PConnector("test_topic_tail_and_write1", ctx, cancel)
|
||||
conn2 := newLibP2PConnector("test_topic_tail_and_write2", ctx, cancel)
|
||||
|
||||
received := make(chan RecordData, 1)
|
||||
|
||||
conn1.tail(func(rec RecordData) error {
|
||||
received <- rec
|
||||
return nil
|
||||
})
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
rec := RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{
|
||||
SourceNodeId: "test_node_id",
|
||||
SourcePath: "test_path",
|
||||
},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: map[string]interface{}{"test_key": "test_value"},
|
||||
}
|
||||
err := conn2.write(rec)
|
||||
require.NoError(t, err)
|
||||
|
||||
select {
|
||||
case <-received:
|
||||
t.Fatal("should not receive message from different topic")
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
}
|
||||
|
||||
err = conn1.close()
|
||||
assert.NoError(t, err)
|
||||
err = conn2.close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLibP2PConnectorMultipleSubscriptionsSameTopic(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
conn1 := newLibP2PConnector("test_topic_multiple_subscriptions", ctx, cancel)
|
||||
conn2 := newLibP2PConnector("test_topic_multiple_subscriptions", ctx, cancel)
|
||||
|
||||
received1 := make(chan RecordData, 1)
|
||||
received2 := make(chan RecordData, 1)
|
||||
|
||||
conn1.tail(func(rec RecordData) error {
|
||||
received1 <- rec
|
||||
return nil
|
||||
})
|
||||
conn2.tail(func(rec RecordData) error {
|
||||
received2 <- rec
|
||||
return nil
|
||||
})
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
rec := RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{
|
||||
SourceNodeId: "test_node_id",
|
||||
SourcePath: "test_path",
|
||||
},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: map[string]interface{}{"test_key": "test_value"},
|
||||
}
|
||||
err := conn1.write(rec)
|
||||
require.NoError(t, err)
|
||||
|
||||
select {
|
||||
case got := <-received1:
|
||||
assert.Equal(t, rec.SourceKey.SourceNodeId, got.SourceKey.SourceNodeId)
|
||||
assert.Equal(t, rec.SourceKey.SourcePath, got.SourceKey.SourcePath)
|
||||
assert.Equal(t, rec.SourceRowID, got.SourceRowID)
|
||||
assert.Equal(t, rec.Data, got.Data)
|
||||
assert.WithinDuration(t, rec.SourceTimestamp, got.SourceTimestamp, time.Second)
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("timeout waiting for message on conn1")
|
||||
}
|
||||
|
||||
select {
|
||||
case got := <-received2:
|
||||
assert.Equal(t, rec.SourceKey.SourceNodeId, got.SourceKey.SourceNodeId)
|
||||
assert.Equal(t, rec.SourceKey.SourcePath, got.SourceKey.SourcePath)
|
||||
assert.Equal(t, rec.SourceRowID, got.SourceRowID)
|
||||
assert.Equal(t, rec.Data, got.Data)
|
||||
assert.WithinDuration(t, rec.SourceTimestamp, got.SourceTimestamp, time.Second)
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("timeout waiting for message on conn2")
|
||||
}
|
||||
|
||||
err = conn1.close()
|
||||
assert.NoError(t, err)
|
||||
err = conn2.close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SourceKey uniquely identifies a record's origin: the node it came from and
// the "db:table" path on that node.
type SourceKey struct {
	SourceNodeId string `json:"source_node_id"`
	SourcePath   string `json:"source_path"` // db:table
}

// TrackingData is the replication metadata attached to every forwarded row.
type TrackingData struct {
	SourceKey
	SourceRowID     int64     `json:"source_row_id"` // rowid in the source table
	SourceTimestamp time.Time `json:"source_timestamp"`
}

// RecordData is one forwarded row: tracking metadata plus the row's
// column-name → value payload.
type RecordData struct {
	TrackingData
	Data map[string]interface{} `json:"data"`
}

// BatchRecord is the wire format used when publishers coalesce multiple
// records into a single pubsub message.
type BatchRecord struct {
	Records []RecordData `json:"records"`
}
|
||||
|
||||
// ForwardingPair wires one source connection to one sink.
type ForwardingPair struct {
	source connection
	sink   connection
}

// connection is the minimal record transport: tail records in, write records
// out, plus lifecycle and identification.
type connection interface {
	tail(handler func(record RecordData) error)
	write(record RecordData) error
	close() error
	getType() string
}

// LibP2PConnection extends connection with the resend-request side channel.
type LibP2PConnection interface {
	connection
	tailResend(handler func(record ResendRequest) error)
	writeResend(record ResendRequest) error
}

// SQLiteConnection extends connection with table-level introspection used
// for gap detection and backfill.
type SQLiteConnection interface {
	connection
	getLatestRowIds() (map[SourceKey]int64, error)
	readRange(start, end int64) ([]RecordData, error)
	getNodeId() string
	getTablePath() string
}
|
||||
|
||||
// GapRange is an inclusive range of missing source row ids.
type GapRange struct {
	Start int64 `json:"start"`
	End   int64 `json:"end"`
}

// ResendRequest asks the origin node to re-publish the rows in Gaps for one
// source table.
type ResendRequest struct {
	SourceNodeID string     `json:"source_node_id"`
	SourcePath   string     `json:"source_path"`
	Gaps         []GapRange `json:"gaps"`
}
|
||||
|
||||
// stateStoreInterface tracks received records and derives what still needs
// to be written locally or re-requested from peers.
type stateStoreInterface interface {
	onRecord(record RecordData)
	getWriteableMessages() []RecordData
	getResendRequests() []ResendRequest
	getCurrentGaps() map[SourceKey][]gap
}

// Forwarder runs a forwarding pipeline until its context is cancelled.
type Forwarder interface {
	Start(ctx context.Context) error
}
|
||||
@@ -1,649 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// sqliteConnector is a connection backed by one SQLite table. Reads poll the
// table's rowid; writes are buffered in pendingWrites and flushed in batches
// by a background writer goroutine.
type sqliteConnector struct {
	db            *sql.DB
	tableName     string
	stop          chan struct{}  // closed by close() to stop background goroutines
	wg            sync.WaitGroup // tracks the writer and tail goroutines
	pendingWrites []RecordData   // guarded by mu
	mu            sync.Mutex
	nodeId        string
	tablePath     string // "dbPath:tableName", used as the records' SourcePath
	// Cache the original columns (non-tracking columns)
	originalColumns []string
	columnTypes     map[string]string
}
|
||||
|
||||
// newSQLiteConnector opens (or creates) the SQLite table used as a record
// source/sink, ensures the tracking columns exist, caches the schema, and
// starts the background batch-writer goroutine. The DB handle is closed on
// every failure path.
func newSQLiteConnector(dbPath, tableName string) (*sqliteConnector, error) {
	if tableName == "" {
		return nil, errors.New("table name cannot be empty")
	}
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return nil, err
	}
	// WAL + relaxed sync + busy timeout: tuned for concurrent readers/writers.
	_, err = db.Exec("PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA busy_timeout = 500; PRAGMA cache_size = -64000;")
	if err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to apply PRAGMA settings: %w", err)
	}

	// Increase connection pool for better concurrency
	db.SetMaxOpenConns(25)
	db.SetMaxIdleConns(10)
	db.SetConnMaxLifetime(5 * time.Minute)

	c := &sqliteConnector{
		db:            db,
		tableName:     tableName,
		stop:          make(chan struct{}),
		pendingWrites: []RecordData{},
		nodeId:        GetNodeId(),
		tablePath:     dbPath + ":" + tableName,
		columnTypes:   make(map[string]string),
	}

	// Get the table schema before adding tracking columns.
	// A missing table is tolerated here — ensureTrackingColumns creates it.
	err = c.loadTableSchema()
	if err != nil && !strings.Contains(err.Error(), "no such table") {
		db.Close()
		return nil, err
	}

	err = c.ensureTrackingColumns()
	if err != nil {
		db.Close()
		return nil, err
	}

	// Reload schema after ensuring tracking columns
	err = c.loadTableSchema()
	if err != nil {
		db.Close()
		return nil, err
	}

	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		c.writerLoop()
	}()
	return c, nil
}
|
||||
|
||||
func (c *sqliteConnector) loadTableSchema() error {
|
||||
rows, err := c.db.Query(fmt.Sprintf(`PRAGMA table_info("%s")`, c.tableName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
trackingCols := make(map[string]bool)
|
||||
for _, col := range []string{"source_node_id", "source_path", "source_row_id", "source_timestamp"} {
|
||||
trackingCols[col] = true
|
||||
}
|
||||
|
||||
c.originalColumns = []string{}
|
||||
c.columnTypes = make(map[string]string)
|
||||
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name string
|
||||
var typ string
|
||||
var notnull int
|
||||
var dflt interface{}
|
||||
var pk int
|
||||
if err := rows.Scan(&cid, &name, &typ, ¬null, &dflt, &pk); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.columnTypes[name] = typ
|
||||
|
||||
// Only include non-tracking columns in originalColumns
|
||||
if !trackingCols[name] {
|
||||
c.originalColumns = append(c.originalColumns, name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *sqliteConnector) getNodeId() string {
|
||||
return c.nodeId
|
||||
}
|
||||
|
||||
func (c *sqliteConnector) getTablePath() string {
|
||||
return c.tablePath
|
||||
}
|
||||
|
||||
func (c *sqliteConnector) writerLoop() {
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
c.mu.Lock()
|
||||
batch := c.pendingWrites
|
||||
c.pendingWrites = nil
|
||||
c.mu.Unlock()
|
||||
if len(batch) > 0 {
|
||||
if err := c.writeBatch(batch); err != nil {
|
||||
log.Printf("Error writing batch: %v", err)
|
||||
}
|
||||
}
|
||||
case <-c.stop:
|
||||
c.mu.Lock()
|
||||
batch := c.pendingWrites
|
||||
c.pendingWrites = nil
|
||||
c.mu.Unlock()
|
||||
if len(batch) > 0 {
|
||||
if err := c.writeBatch(batch); err != nil {
|
||||
log.Printf("Error writing final batch: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *sqliteConnector) writeBatch(records []RecordData) error {
|
||||
if len(records) == 0 {
|
||||
return nil
|
||||
}
|
||||
tx, err := c.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Build column list: tracking columns + original columns
|
||||
trackingCols := []string{"source_node_id", "source_path", "source_row_id", "source_timestamp"}
|
||||
cols := append(trackingCols, c.originalColumns...)
|
||||
colStr := strings.Join(cols, `", "`)
|
||||
|
||||
places := make([]string, len(cols))
|
||||
for i := range places {
|
||||
places[i] = "?"
|
||||
}
|
||||
singlePlace := "(" + strings.Join(places, ", ") + ")"
|
||||
rowPlaces := make([]string, len(records))
|
||||
for i := range rowPlaces {
|
||||
rowPlaces[i] = singlePlace
|
||||
}
|
||||
valuesStr := strings.Join(rowPlaces, ", ")
|
||||
|
||||
query := fmt.Sprintf(`INSERT INTO "%s" ("%s") VALUES %s`, c.tableName, colStr, valuesStr)
|
||||
|
||||
vals := make([]interface{}, 0, len(records)*len(cols))
|
||||
for _, rec := range records {
|
||||
// Add tracking columns
|
||||
vals = append(vals, rec.SourceNodeId, rec.SourcePath, rec.SourceRowID, rec.SourceTimestamp)
|
||||
|
||||
// Add original column values from Data map
|
||||
for _, col := range c.originalColumns {
|
||||
if val, ok := rec.Data[col]; ok {
|
||||
vals = append(vals, val)
|
||||
} else {
|
||||
vals = append(vals, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = tx.Exec(query, vals...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (c *sqliteConnector) ensureTrackingColumns() error {
|
||||
// Wrap table creation and alterations in a transaction for atomicity
|
||||
tx, err := c.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Check if table exists
|
||||
var count int
|
||||
err = tx.QueryRow(`SELECT count(*) FROM sqlite_master WHERE type = 'table' AND name = ?`, c.tableName).Scan(&count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if count == 0 {
|
||||
// Create table with only tracking columns initially
|
||||
// The original schema should be defined by the first records written
|
||||
typePairs := getJsonTagsWithSqliteTypes(reflect.TypeOf(TrackingData{}))
|
||||
colDefs := make([]string, 0, len(typePairs))
|
||||
for _, pair := range typePairs {
|
||||
colDefs = append(colDefs, fmt.Sprintf("%s %s", pair.name, pair.typeStr))
|
||||
}
|
||||
createQuery := fmt.Sprintf(`CREATE TABLE "%s" (%s)`, c.tableName, strings.Join(colDefs, ", "))
|
||||
_, err := tx.Exec(createQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Table exists, ensure tracking columns
|
||||
existing := make(map[string]bool)
|
||||
rows, err := tx.Query(fmt.Sprintf(`PRAGMA table_info("%s")`, c.tableName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name string
|
||||
var typ string
|
||||
var notnull int
|
||||
var dflt interface{}
|
||||
var pk int
|
||||
if err := rows.Scan(&cid, &name, &typ, ¬null, &dflt, &pk); err != nil {
|
||||
return err
|
||||
}
|
||||
existing[name] = true
|
||||
}
|
||||
|
||||
typePairs := getJsonTagsWithSqliteTypes(reflect.TypeOf(TrackingData{}))
|
||||
for _, pair := range typePairs {
|
||||
if !existing[pair.name] {
|
||||
if _, err := tx.Exec(fmt.Sprintf(`ALTER TABLE "%s" ADD COLUMN %s %s`, c.tableName, pair.name, pair.typeStr)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (c *sqliteConnector) getLatestRowIds() (map[SourceKey]int64, error) {
|
||||
keyCols := getJsonTagNames(reflect.TypeOf(SourceKey{}))
|
||||
rowIdField := "SourceRowID"
|
||||
rowIDCol := getFieldJsonTag(reflect.TypeOf(TrackingData{}), rowIdField)
|
||||
if rowIDCol == "" {
|
||||
return nil, fmt.Errorf("could not find field %s in TrackingData struct", rowIdField)
|
||||
}
|
||||
|
||||
selectCols := strings.Join(keyCols, ", ")
|
||||
query := fmt.Sprintf(`SELECT %s, MAX(%s) FROM "%s" WHERE %s IS NOT NULL GROUP BY %s`, selectCols, rowIDCol, c.tableName, rowIDCol, selectCols)
|
||||
|
||||
rows, err := c.db.Query(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
m := make(map[SourceKey]int64)
|
||||
for rows.Next() {
|
||||
strPtrs := make([]*string, len(keyCols))
|
||||
scanArgs := make([]interface{}, 0, len(keyCols)+1)
|
||||
for i := range keyCols {
|
||||
var s string
|
||||
strPtrs[i] = &s
|
||||
scanArgs = append(scanArgs, &s)
|
||||
}
|
||||
var maxPtr int64
|
||||
scanArgs = append(scanArgs, &maxPtr)
|
||||
if err := rows.Scan(scanArgs...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var key SourceKey
|
||||
val := reflect.ValueOf(&key).Elem()
|
||||
keyType := reflect.TypeOf(key)
|
||||
for i, colName := range keyCols {
|
||||
// find field with json tag = colName
|
||||
for f := 0; f < keyType.NumField(); f++ {
|
||||
field := keyType.Field(f)
|
||||
tag := strings.Split(field.Tag.Get("json"), ",")[0]
|
||||
if tag == "" {
|
||||
tag = strings.ToLower(field.Name)
|
||||
}
|
||||
if tag == colName {
|
||||
if strPtrs[i] != nil {
|
||||
val.FieldByName(field.Name).SetString(*strPtrs[i])
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
m[key] = maxPtr
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// scanToRecord converts the current row of a `SELECT *, rowid` result set
// into a RecordData plus the row's rowid. Tracking columns populate the
// embedded TrackingData; every other column goes into the Data map. NULL
// columns are skipped entirely (they do not appear in Data).
func (c *sqliteConnector) scanToRecord(rows *sql.Rows) (RecordData, int64, error) {
	// Get column names from the result set
	columns, err := rows.Columns()
	if err != nil {
		return RecordData{}, 0, err
	}

	// Create scan destinations
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}

	err = rows.Scan(scanArgs...)
	if err != nil {
		return RecordData{}, 0, err
	}

	var rec RecordData
	rec.Data = make(map[string]interface{})
	var rowID int64

	// Process each column
	for i, col := range columns {
		val := values[i]

		// Handle NULL values
		if val == nil {
			continue
		}

		// Convert []byte to appropriate type
		if b, ok := val.([]byte); ok {
			val = string(b)
		}

		switch col {
		case "source_node_id":
			if s, ok := val.(string); ok {
				rec.SourceNodeId = s
			}
		case "source_path":
			if s, ok := val.(string); ok {
				rec.SourcePath = s
			}
		case "source_row_id":
			// The driver may return INTEGER columns as int64, int, or text.
			switch v := val.(type) {
			case int64:
				rec.SourceRowID = v
			case int:
				rec.SourceRowID = int64(v)
			case string:
				if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
					rec.SourceRowID = parsed
				}
			}
		case "source_timestamp":
			// Accept native time.Time or either common textual encoding;
			// unparseable values leave the zero timestamp.
			switch v := val.(type) {
			case time.Time:
				rec.SourceTimestamp = v
			case string:
				if parsed, err := time.Parse(time.RFC3339Nano, v); err == nil {
					rec.SourceTimestamp = parsed
				} else if parsed, err := time.Parse("2006-01-02 15:04:05", v); err == nil {
					rec.SourceTimestamp = parsed
				}
			}
		case "rowid":
			switch v := val.(type) {
			case int64:
				rowID = v
			case int:
				rowID = int64(v)
			}
		default:
			// All other columns go into the Data map
			rec.Data[col] = val
		}
	}

	return rec, rowID, nil
}
|
||||
|
||||
func (c *sqliteConnector) readRange(start, end int64) ([]RecordData, error) {
|
||||
// Select all columns plus rowid
|
||||
query := fmt.Sprintf(`SELECT *, rowid FROM "%s" WHERE rowid >= ? AND rowid <= ? ORDER BY rowid`, c.tableName)
|
||||
rows, err := c.db.Query(query, start, end)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var records []RecordData
|
||||
for rows.Next() {
|
||||
rec, rowID, err := c.scanToRecord(rows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Override tracking data so that this table is treated as the new source
|
||||
rec.SourceNodeId = c.nodeId
|
||||
rec.SourcePath = c.tablePath
|
||||
rec.SourceRowID = rowID
|
||||
rec.SourceTimestamp = time.Now()
|
||||
records = append(records, rec)
|
||||
}
|
||||
return records, nil
|
||||
}
|
||||
|
||||
// tail polls the table for rows with rowid greater than the last-seen value
// and feeds each one (with tracking metadata rewritten to this node/table)
// to handler. Polling is adaptive: 1ms while records flow, backing off ×1.5
// up to 50ms when idle. Runs until close() closes c.stop.
func (c *sqliteConnector) tail(handler func(record RecordData) error) {
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		// Start tailing from the current end of the table (existing rows are
		// not replayed; a query failure starts from 0).
		var last int64
		err := c.db.QueryRow(fmt.Sprintf(`SELECT IFNULL(MAX(rowid), 0) FROM "%s"`, c.tableName)).Scan(&last)
		if err != nil {
			last = 0
		}
		// Prepare the statement outside the loop for efficiency
		query := fmt.Sprintf(`SELECT *, rowid FROM "%s" WHERE rowid > ? ORDER BY rowid LIMIT ?`, c.tableName)
		stmt, err := c.db.Prepare(query)
		if err != nil {
			log.Printf("Error preparing tail statement: %v", err)
			return
		}
		defer stmt.Close()

		// Adaptive polling: start fast, slow down when idle
		minPollInterval := 1 * time.Millisecond
		maxPollInterval := 50 * time.Millisecond
		currentInterval := minPollInterval
		batchSize := 500 // Process records in larger batches for better throughput

		for {
			select {
			case <-c.stop:
				return
			default:
			}
			rows, err := stmt.Query(last, batchSize)
			if err != nil {
				time.Sleep(currentInterval)
				continue
			}
			hadNew := false
			recordCount := 0
			for rows.Next() {
				rec, rowID, err := c.scanToRecord(rows)
				if err != nil {
					log.Printf("Error scanning record: %v", err)
					break
				}
				// Override tracking data so that this table is treated as the new source
				rec.SourceNodeId = c.nodeId
				rec.SourcePath = c.tablePath
				rec.SourceRowID = rowID
				rec.SourceTimestamp = time.Now()
				last = rowID
				err = handler(rec)
				if err != nil {
					log.Printf("Error handling record: %v", err)
				}
				hadNew = true
				recordCount++
			}
			rows.Close()

			// Adaptive interval adjustment
			if hadNew {
				// Had records, speed up polling
				currentInterval = minPollInterval
				if recordCount == batchSize {
					// Full batch, poll immediately
					continue
				}
			} else {
				// No records, slow down gradually
				currentInterval = time.Duration(float64(currentInterval) * 1.5)
				if currentInterval > maxPollInterval {
					currentInterval = maxPollInterval
				}
			}
			time.Sleep(currentInterval)
		}
	}()
}
|
||||
|
||||
func (c *sqliteConnector) write(record RecordData) error {
|
||||
// If we don't know the schema yet, try to infer it from the first record
|
||||
if len(c.originalColumns) == 0 && len(record.Data) > 0 {
|
||||
c.mu.Lock()
|
||||
if len(c.originalColumns) == 0 {
|
||||
// Infer columns from the data
|
||||
for col := range record.Data {
|
||||
c.originalColumns = append(c.originalColumns, col)
|
||||
}
|
||||
// Sort for consistency
|
||||
sort.Strings(c.originalColumns)
|
||||
|
||||
// Add columns to table if they don't exist
|
||||
tx, err := c.db.Begin()
|
||||
if err == nil {
|
||||
defer tx.Rollback()
|
||||
for col := range record.Data {
|
||||
// Infer SQL type from Go type
|
||||
sqlType := "TEXT" // default
|
||||
switch record.Data[col].(type) {
|
||||
case int, int32, int64:
|
||||
sqlType = "INTEGER"
|
||||
case float32, float64:
|
||||
sqlType = "REAL"
|
||||
case bool:
|
||||
sqlType = "INTEGER"
|
||||
}
|
||||
|
||||
// Try to add column (will fail silently if it exists)
|
||||
tx.Exec(fmt.Sprintf(`ALTER TABLE "%s" ADD COLUMN "%s" %s`, c.tableName, col, sqlType))
|
||||
}
|
||||
tx.Commit()
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
c.pendingWrites = append(c.pendingWrites, record)
|
||||
c.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// close stops the background goroutines, waits for the final pending-write
// flush, then closes the database handle.
func (c *sqliteConnector) close() error {
	close(c.stop)
	c.wg.Wait()
	return c.db.Close()
}
|
||||
|
||||
func (c *sqliteConnector) getType() string {
|
||||
return "sqlite"
|
||||
}
|
||||
|
||||
// typedPair associates a JSON column name with its SQLite column type.
type typedPair struct {
	name    string // JSON tag (used as the column name)
	typeStr string // SQLite type, e.g. "TEXT", "INTEGER"
}
|
||||
|
||||
func getJsonTagsWithSqliteTypes(t reflect.Type) []typedPair {
|
||||
typePairs := []typedPair{}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Anonymous {
|
||||
typePairs = append(typePairs, getJsonTagsWithSqliteTypes(f.Type)...)
|
||||
continue
|
||||
}
|
||||
tag := f.Tag.Get("json")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
if tag != "" {
|
||||
tag = strings.Split(tag, ",")[0]
|
||||
}
|
||||
if tag == "" {
|
||||
tag = strings.ToLower(f.Name)
|
||||
}
|
||||
var sqlType string
|
||||
switch f.Type.Kind() {
|
||||
case reflect.String:
|
||||
sqlType = "TEXT"
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
sqlType = "INTEGER"
|
||||
default:
|
||||
if f.Type == reflect.TypeOf(time.Time{}) {
|
||||
sqlType = "DATETIME"
|
||||
} else {
|
||||
sqlType = "BLOB"
|
||||
}
|
||||
}
|
||||
typePairs = append(typePairs, typedPair{tag, sqlType})
|
||||
}
|
||||
return typePairs
|
||||
}
|
||||
|
||||
func getJsonTagNames(t reflect.Type) []string {
|
||||
cols := []string{}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Anonymous {
|
||||
cols = append(cols, getJsonTagNames(f.Type)...)
|
||||
continue
|
||||
}
|
||||
tag := strings.Split(f.Tag.Get("json"), ",")[0]
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
if tag == "" {
|
||||
tag = strings.ToLower(f.Name)
|
||||
}
|
||||
cols = append(cols, tag)
|
||||
}
|
||||
return cols
|
||||
}
|
||||
|
||||
func getFieldJsonTag(t reflect.Type, fieldName string) string {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Anonymous {
|
||||
if tag := getFieldJsonTag(f.Type, fieldName); tag != "" {
|
||||
return tag
|
||||
}
|
||||
continue
|
||||
}
|
||||
if f.Name == fieldName {
|
||||
tag := strings.Split(f.Tag.Get("json"), ",")[0]
|
||||
if tag == "" {
|
||||
return strings.ToLower(f.Name)
|
||||
}
|
||||
return tag
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -1,236 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"database/sql"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
func TestNewSQLiteConnectorCreatesTable(t *testing.T) {
|
||||
c, err := newSQLiteConnector(":memory:", "test_table")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create connector: %v", err)
|
||||
}
|
||||
defer c.close()
|
||||
|
||||
rows, err := c.db.Query(`PRAGMA table_info("test_table")`)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to query table info: %v", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
expectedCols := map[string]string{
|
||||
"source_node_id": "TEXT",
|
||||
"source_path": "TEXT",
|
||||
"source_row_id": "INTEGER",
|
||||
"source_timestamp": "DATETIME",
|
||||
}
|
||||
foundCols := make(map[string]string)
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name, typ string
|
||||
var notnull int
|
||||
var dflt interface{}
|
||||
var pk int
|
||||
if err := rows.Scan(&cid, &name, &typ, ¬null, &dflt, &pk); err != nil {
|
||||
t.Fatalf("failed to scan: %v", err)
|
||||
}
|
||||
foundCols[name] = typ
|
||||
}
|
||||
if !reflect.DeepEqual(expectedCols, foundCols) {
|
||||
t.Errorf("expected columns %v, got %v", expectedCols, foundCols)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureTrackingColumnsAddsMissing(t *testing.T) {
|
||||
db, err := sql.Open("sqlite3", ":memory:")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open db: %v", err)
|
||||
}
|
||||
_, err = db.Exec(`CREATE TABLE test_table (source_node_id TEXT, data TEXT)`)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create partial table: %v", err)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
tempDB := t.TempDir() + "/test.db"
|
||||
db, err = sql.Open("sqlite3", tempDB)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open db: %v", err)
|
||||
}
|
||||
_, err = db.Exec(`CREATE TABLE test_table (source_node_id TEXT, data TEXT)`)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create partial table: %v", err)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
c, err := newSQLiteConnector(tempDB, "test_table")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create connector: %v", err)
|
||||
}
|
||||
defer c.close()
|
||||
|
||||
rows, err := c.db.Query(`PRAGMA table_info("test_table")`)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to query table info: %v", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
expectedCols := []string{"source_node_id", "data", "source_path", "source_row_id", "source_timestamp"}
|
||||
foundCols := []string{}
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name string
|
||||
var typ string
|
||||
var notnull int
|
||||
var dflt interface{}
|
||||
var pk int
|
||||
if err := rows.Scan(&cid, &name, &typ, ¬null, &dflt, &pk); err != nil {
|
||||
t.Fatalf("failed to scan: %v", err)
|
||||
}
|
||||
foundCols = append(foundCols, name)
|
||||
}
|
||||
if len(foundCols) != len(expectedCols) {
|
||||
t.Errorf("expected %d columns, got %d: %v", len(expectedCols), len(foundCols), foundCols)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteAndReadRecord(t *testing.T) {
|
||||
SetNodeId("node1")
|
||||
c, err := newSQLiteConnector("test_write_and_read_db1", "table")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create connector: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
c.close()
|
||||
os.Remove("test_write_and_read_db1")
|
||||
}()
|
||||
|
||||
rec := RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{
|
||||
SourceNodeId: "node1",
|
||||
SourcePath: "test_write_and_read_db1:table",
|
||||
},
|
||||
SourceRowID: 42,
|
||||
SourceTimestamp: time.Now().UTC(),
|
||||
},
|
||||
Data: map[string]interface{}{
|
||||
"key": "value",
|
||||
"num": 123.45,
|
||||
},
|
||||
}
|
||||
if err := c.write(rec); err != nil {
|
||||
t.Fatalf("failed to write: %v", err)
|
||||
}
|
||||
time.Sleep(200 * time.Millisecond) // Wait for flush
|
||||
|
||||
records, err := c.readRange(1, 999)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read: %v", err)
|
||||
}
|
||||
if len(records) != 1 {
|
||||
t.Fatalf("expected 1 record, got %d", len(records))
|
||||
}
|
||||
got := records[0]
|
||||
if got.SourceNodeId != rec.SourceNodeId || got.SourcePath != rec.SourcePath || got.SourceRowID != 1 {
|
||||
t.Errorf("tracking data mismatch: got %+v, want %+v", got.TrackingData, rec.TrackingData)
|
||||
}
|
||||
if !reflect.DeepEqual(got.Data, rec.Data) {
|
||||
t.Errorf("data mismatch: got %v, want %v", got.Data, rec.Data)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTailDetectsWrites(t *testing.T) {
|
||||
SetNodeId("node2")
|
||||
db, errDb := sql.Open("sqlite3", "tail_detects_writes_db2")
|
||||
if errDb != nil {
|
||||
t.Fatalf("failed to open db for alter: %v", errDb)
|
||||
}
|
||||
|
||||
_, errExec := db.Exec("CREATE TABLE table2 (test BOOLEAN)")
|
||||
if errExec != nil {
|
||||
t.Fatalf("failed to create table: %v", errExec)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
c, err := newSQLiteConnector("tail_detects_writes_db2", "table2")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create connector: %v", err)
|
||||
}
|
||||
defer c.close()
|
||||
|
||||
ch := make(chan RecordData, 1)
|
||||
c.tail(func(r RecordData) error {
|
||||
ch <- r
|
||||
return nil
|
||||
})
|
||||
time.Sleep(100 * time.Millisecond) // Let tail start
|
||||
|
||||
rec := RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node2", SourcePath: "tail_detects_writes_db2:table2"},
|
||||
SourceRowID: 100,
|
||||
SourceTimestamp: time.Now().UTC(),
|
||||
},
|
||||
Data: map[string]interface{}{"test": true},
|
||||
}
|
||||
if err := c.write(rec); err != nil {
|
||||
t.Fatalf("failed to write: %v", err)
|
||||
}
|
||||
time.Sleep(200 * time.Millisecond) // Wait for flush and tail poll
|
||||
|
||||
select {
|
||||
case got := <-ch:
|
||||
if !reflect.DeepEqual(got.Data, rec.Data) {
|
||||
t.Errorf("got %v, want %v", got, rec)
|
||||
}
|
||||
if got.SourceNodeId != rec.SourceNodeId || got.SourcePath != rec.SourcePath || got.SourceRowID != 1 {
|
||||
t.Errorf("tracking data mismatch: got %+v, want %+v", got.TrackingData, rec.TrackingData)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("timeout waiting for tail handler")
|
||||
}
|
||||
os.Remove("tail_detects_writes_db2")
|
||||
os.Remove("tail_detects_writes_db2-wal")
|
||||
os.Remove("tail_detects_writes_db2-shm")
|
||||
|
||||
}
|
||||
|
||||
func TestBatchWriteMultipleEdge(t *testing.T) {
|
||||
c, err := newSQLiteConnector(":memory:", "test_table")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create connector: %v", err)
|
||||
}
|
||||
defer c.close()
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
rec := RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: fmt.Sprintf("node%d", i), SourcePath: ""},
|
||||
SourceRowID: int64(i),
|
||||
SourceTimestamp: time.Time{},
|
||||
},
|
||||
Data: nil, // Edge: nil Data
|
||||
}
|
||||
if err := c.write(rec); err != nil {
|
||||
t.Fatalf("failed to write: %v", err)
|
||||
}
|
||||
}
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
var count int
|
||||
err = c.db.QueryRow(`SELECT COUNT(*) FROM "test_table"`).Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to count: %v", err)
|
||||
}
|
||||
if count != 3 {
|
||||
t.Errorf("expected 3 rows, got %d", count)
|
||||
}
|
||||
}
|
||||
@@ -1,240 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const gracePeriod = 5 * time.Second
|
||||
|
||||
type gap struct {
|
||||
GapRange
|
||||
firstSeen time.Time
|
||||
lastRequestSent time.Time
|
||||
timesRequestSent int
|
||||
}
|
||||
|
||||
type pendingRecordsRange struct {
|
||||
start int64
|
||||
end int64
|
||||
records map[int64]RecordData
|
||||
}
|
||||
|
||||
func (g gap) isResendable() bool {
|
||||
currentTime := time.Now()
|
||||
if currentTime.Before(g.firstSeen.Add(gracePeriod)) {
|
||||
return false
|
||||
}
|
||||
backoff := gracePeriod * (1 << g.timesRequestSent)
|
||||
return currentTime.After(g.lastRequestSent.Add(backoff))
|
||||
}
|
||||
|
||||
type stateStore struct {
|
||||
mu sync.RWMutex
|
||||
sourceKeyMu map[SourceKey]*sync.Mutex
|
||||
lastContiguousRowId map[SourceKey]int64
|
||||
recordsToWrite []RecordData
|
||||
gaps map[SourceKey][]gap
|
||||
pending map[SourceKey][]pendingRecordsRange
|
||||
}
|
||||
|
||||
func newStateStore(lastWrittenRowId map[SourceKey]int64) *stateStore {
|
||||
return &stateStore{
|
||||
lastContiguousRowId: lastWrittenRowId,
|
||||
recordsToWrite: []RecordData{},
|
||||
gaps: make(map[SourceKey][]gap),
|
||||
pending: make(map[SourceKey][]pendingRecordsRange),
|
||||
sourceKeyMu: make(map[SourceKey]*sync.Mutex),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stateStore) onRecord(record RecordData) {
|
||||
sk := SourceKey{SourceNodeId: record.SourceNodeId, SourcePath: record.SourcePath}
|
||||
|
||||
s.mu.Lock()
|
||||
if _, ok := s.sourceKeyMu[sk]; !ok {
|
||||
s.sourceKeyMu[sk] = &sync.Mutex{}
|
||||
if _, ok := s.lastContiguousRowId[sk]; !ok {
|
||||
s.lastContiguousRowId[sk] = 0
|
||||
}
|
||||
s.gaps[sk] = []gap{}
|
||||
s.pending[sk] = []pendingRecordsRange{}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.sourceKeyMu[sk].Lock()
|
||||
defer s.sourceKeyMu[sk].Unlock()
|
||||
l := s.lastContiguousRowId[sk]
|
||||
r := record.SourceRowID
|
||||
if r <= l {
|
||||
return
|
||||
}
|
||||
|
||||
for _, ru := range s.pending[sk] {
|
||||
if _, has := ru.records[r]; has {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
currentHighest := l
|
||||
for _, ru := range s.pending[sk] {
|
||||
if ru.end > currentHighest {
|
||||
currentHighest = ru.end
|
||||
}
|
||||
}
|
||||
|
||||
gaps := s.gaps[sk]
|
||||
newGaps := []gap{}
|
||||
filled := false
|
||||
for _, g := range gaps {
|
||||
if g.Start <= r && r <= g.End {
|
||||
filled = true
|
||||
if g.Start < r {
|
||||
newGaps = append(newGaps, gap{GapRange: GapRange{Start: g.Start, End: r - 1}, firstSeen: g.firstSeen, lastRequestSent: g.lastRequestSent, timesRequestSent: g.timesRequestSent})
|
||||
}
|
||||
if r < g.End {
|
||||
newGaps = append(newGaps, gap{GapRange: GapRange{Start: r + 1, End: g.End}, firstSeen: g.firstSeen, lastRequestSent: g.lastRequestSent, timesRequestSent: g.timesRequestSent})
|
||||
}
|
||||
} else {
|
||||
newGaps = append(newGaps, g)
|
||||
}
|
||||
}
|
||||
s.gaps[sk] = mergeGaps(newGaps)
|
||||
|
||||
if !filled && r > currentHighest+1 {
|
||||
gr := GapRange{Start: currentHighest + 1, End: r - 1}
|
||||
if gr.Start <= gr.End {
|
||||
newG := gap{GapRange: gr, firstSeen: time.Now(), lastRequestSent: time.Time{}, timesRequestSent: 0}
|
||||
s.gaps[sk] = append(s.gaps[sk], newG)
|
||||
s.gaps[sk] = mergeGaps(s.gaps[sk])
|
||||
}
|
||||
}
|
||||
newRun := pendingRecordsRange{start: r, end: r, records: map[int64]RecordData{r: record}}
|
||||
s.pending[sk] = addPending(s.pending[sk], newRun)
|
||||
|
||||
var toWrite []RecordData
|
||||
runs := s.pending[sk]
|
||||
for len(runs) > 0 && runs[0].start == s.lastContiguousRowId[sk]+1 {
|
||||
ru := runs[0]
|
||||
for id := ru.start; id <= ru.end; id++ {
|
||||
toWrite = append(toWrite, ru.records[id])
|
||||
}
|
||||
s.lastContiguousRowId[sk] = ru.end
|
||||
s.pending[sk] = runs[1:]
|
||||
runs = s.pending[sk]
|
||||
}
|
||||
|
||||
if len(toWrite) > 0 {
|
||||
s.mu.Lock()
|
||||
s.recordsToWrite = append(s.recordsToWrite, toWrite...)
|
||||
s.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stateStore) getWriteableMessages() []RecordData {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
records := s.recordsToWrite[:]
|
||||
s.recordsToWrite = []RecordData{}
|
||||
return records
|
||||
}
|
||||
|
||||
func (s *stateStore) getResendRequests() []ResendRequest {
|
||||
s.mu.RLock()
|
||||
keys := make([]SourceKey, 0, len(s.gaps))
|
||||
for k := range s.gaps {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
s.mu.RUnlock()
|
||||
|
||||
resendRequests := []ResendRequest{}
|
||||
for _, sk := range keys {
|
||||
if _, ok := s.sourceKeyMu[sk]; !ok {
|
||||
continue
|
||||
}
|
||||
s.sourceKeyMu[sk].Lock()
|
||||
gaps, ok := s.gaps[sk]
|
||||
if !ok {
|
||||
s.sourceKeyMu[sk].Unlock()
|
||||
continue
|
||||
}
|
||||
gapRanges := []GapRange{}
|
||||
for i := range gaps {
|
||||
if gaps[i].isResendable() {
|
||||
gapRanges = append(gapRanges, gaps[i].GapRange)
|
||||
gaps[i].lastRequestSent = time.Now()
|
||||
gaps[i].timesRequestSent++
|
||||
}
|
||||
}
|
||||
if len(gapRanges) > 0 {
|
||||
resendRequests = append(resendRequests, ResendRequest{
|
||||
SourceNodeID: sk.SourceNodeId,
|
||||
SourcePath: sk.SourcePath,
|
||||
Gaps: gapRanges,
|
||||
})
|
||||
}
|
||||
s.sourceKeyMu[sk].Unlock()
|
||||
}
|
||||
return resendRequests
|
||||
}
|
||||
|
||||
func (s *stateStore) getCurrentGaps() map[SourceKey][]gap {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
copied := make(map[SourceKey][]gap, len(s.gaps))
|
||||
for k, v := range s.gaps {
|
||||
gapCopy := make([]gap, len(v))
|
||||
copy(gapCopy, v)
|
||||
copied[k] = gapCopy
|
||||
}
|
||||
return copied
|
||||
}
|
||||
|
||||
func addPending(pending []pendingRecordsRange, newPending pendingRecordsRange) []pendingRecordsRange {
|
||||
temp := append(append([]pendingRecordsRange{}, pending...), newPending)
|
||||
sort.Slice(temp, func(i, j int) bool { return temp[i].start < temp[j].start })
|
||||
merged := []pendingRecordsRange{}
|
||||
for _, p := range temp {
|
||||
if len(merged) == 0 || merged[len(merged)-1].end+1 < p.start {
|
||||
merged = append(merged, p)
|
||||
continue
|
||||
}
|
||||
lastIdx := len(merged) - 1
|
||||
if merged[lastIdx].end < p.end {
|
||||
merged[lastIdx].end = p.end
|
||||
}
|
||||
for k, v := range p.records {
|
||||
merged[lastIdx].records[k] = v
|
||||
}
|
||||
}
|
||||
return merged
|
||||
}
|
||||
|
||||
func mergeGaps(gaps []gap) []gap {
|
||||
if len(gaps) == 0 {
|
||||
return gaps
|
||||
}
|
||||
sort.Slice(gaps, func(i, j int) bool { return gaps[i].Start < gaps[j].Start })
|
||||
merged := []gap{gaps[0]}
|
||||
for _, g := range gaps[1:] {
|
||||
lastIdx := len(merged) - 1
|
||||
last := merged[lastIdx]
|
||||
if last.End+1 >= g.Start {
|
||||
if last.End < g.End {
|
||||
merged[lastIdx].End = g.End
|
||||
}
|
||||
if g.firstSeen.Before(last.firstSeen) {
|
||||
merged[lastIdx].firstSeen = g.firstSeen
|
||||
}
|
||||
if g.lastRequestSent.After(last.lastRequestSent) {
|
||||
merged[lastIdx].lastRequestSent = g.lastRequestSent
|
||||
}
|
||||
if g.timesRequestSent > last.timesRequestSent {
|
||||
merged[lastIdx].timesRequestSent = g.timesRequestSent
|
||||
}
|
||||
} else {
|
||||
merged = append(merged, g)
|
||||
}
|
||||
}
|
||||
return merged
|
||||
}
|
||||
@@ -1,283 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestInOrderMessages_SingleSource(t *testing.T) {
|
||||
store := newStateStore(make(map[SourceKey]int64))
|
||||
sk := SourceKey{"node1", "path1"}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 2,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 3,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
writeable := store.getWriteableMessages()
|
||||
if len(writeable) != 3 || writeable[0].SourceRowID != 1 || writeable[1].SourceRowID != 2 || writeable[2].SourceRowID != 3 {
|
||||
t.Errorf("Expected 3 contiguous messages, got %v", writeable)
|
||||
}
|
||||
|
||||
gaps := store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 0 {
|
||||
t.Errorf("Expected no gaps, got %v", gaps)
|
||||
}
|
||||
|
||||
if store.lastContiguousRowId[sk] != 3 {
|
||||
t.Errorf("Expected lastContiguous=3, got %d", store.lastContiguousRowId[sk])
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutOfOrder_CreateGapThenFill(t *testing.T) {
|
||||
store := newStateStore(make(map[SourceKey]int64))
|
||||
sk := SourceKey{"node1", "path1"}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 3,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
gaps := store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 1 || gaps[0].Start != 2 || gaps[0].End != 2 {
|
||||
t.Errorf("Expected gap [2,2], got %v", gaps)
|
||||
}
|
||||
|
||||
writeable := store.getWriteableMessages()
|
||||
if len(writeable) != 1 || writeable[0].SourceRowID != 1 {
|
||||
t.Errorf("Expected only 1 written, got %v", writeable)
|
||||
}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 2,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
writeable = store.getWriteableMessages()
|
||||
if len(writeable) != 2 || writeable[0].SourceRowID != 2 || writeable[1].SourceRowID != 3 {
|
||||
t.Errorf("Expected 1 and 2 written, got %v", writeable)
|
||||
}
|
||||
|
||||
gaps = store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 0 {
|
||||
t.Errorf("Expected no gaps after fill, got %v", gaps)
|
||||
}
|
||||
|
||||
if store.lastContiguousRowId[sk] != 3 {
|
||||
t.Errorf("Expected lastContiguous=3, got %d", store.lastContiguousRowId[sk])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillMiddleOfGap_Split(t *testing.T) {
|
||||
store := newStateStore(make(map[SourceKey]int64))
|
||||
sk := SourceKey{"node1", "path1"}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 5,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
gaps := store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 1 || gaps[0].Start != 2 || gaps[0].End != 4 {
|
||||
t.Errorf("Expected gap [1,4], got %v", gaps)
|
||||
}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 3,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
gaps = store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 2 || gaps[0].Start != 2 || gaps[0].End != 2 || gaps[1].Start != 4 || gaps[1].End != 4 {
|
||||
t.Errorf("Expected gaps [1,1] and [3,4], got %v", gaps)
|
||||
}
|
||||
|
||||
writeable := store.getWriteableMessages()
|
||||
if len(writeable) != 1 || writeable[0].SourceRowID != 1 {
|
||||
t.Errorf("Expected only 0 written, got %v", writeable)
|
||||
}
|
||||
|
||||
if len(store.pending[sk]) != 2 {
|
||||
t.Errorf("Expected 2 pending runs, got %d", len(store.pending[sk]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultipleRuns_FillConnectingGap_MergeAndPartialAdvance(t *testing.T) {
|
||||
store := newStateStore(make(map[SourceKey]int64))
|
||||
sk := SourceKey{"node1", "path1"}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 2,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 4,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 5,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 7,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
gaps := store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 2 || gaps[0].Start != 3 || gaps[0].End != 3 || gaps[1].Start != 6 || gaps[1].End != 6 {
|
||||
t.Errorf("Expected gaps [3,3],[6,6], got %v", gaps)
|
||||
}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 3,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
writeable := store.getWriteableMessages()
|
||||
if len(writeable) != 5 || writeable[4].SourceRowID != 5 {
|
||||
t.Errorf("Expected 1-5 written, got %v", writeable)
|
||||
}
|
||||
|
||||
gaps = store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 1 || gaps[0].Start != 6 || gaps[0].End != 6 {
|
||||
t.Errorf("Expected gap [6,6], got %v", gaps)
|
||||
}
|
||||
|
||||
if store.lastContiguousRowId[sk] != 5 {
|
||||
t.Errorf("Expected lastContiguous=5, got %d", store.lastContiguousRowId[sk])
|
||||
}
|
||||
|
||||
if len(store.pending[sk]) != 1 || store.pending[sk][0].start != 7 {
|
||||
t.Errorf("Expected pending [7,7], got %v", store.pending[sk])
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitialHighRowID_CreateGap_IgnoreDuplicateAndOld(t *testing.T) {
|
||||
store := newStateStore(make(map[SourceKey]int64))
|
||||
sk := SourceKey{"node1", "path1"}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 3,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
gaps := store.getCurrentGaps()[sk]
|
||||
if len(gaps) != 1 || gaps[0].Start != 1 || gaps[0].End != 2 {
|
||||
t.Errorf("Expected gap [1,2], got %v", gaps)
|
||||
}
|
||||
|
||||
writeable := store.getWriteableMessages()
|
||||
if len(writeable) != 0 {
|
||||
t.Errorf("Expected no writeable, got %v", writeable)
|
||||
}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: 3,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
if len(store.pending[sk]) != 1 || len(store.pending[sk][0].records) != 1 {
|
||||
t.Errorf("Duplicate added unexpectedly")
|
||||
}
|
||||
|
||||
store.onRecord(RecordData{
|
||||
TrackingData: TrackingData{
|
||||
SourceKey: SourceKey{SourceNodeId: "node1", SourcePath: "path1"},
|
||||
SourceRowID: -1,
|
||||
SourceTimestamp: time.Now(),
|
||||
},
|
||||
Data: nil,
|
||||
})
|
||||
|
||||
if store.lastContiguousRowId[sk] != 0 {
|
||||
t.Errorf("Old message affected lastContiguous")
|
||||
}
|
||||
}
|
||||
@@ -1,678 +0,0 @@
|
||||
package forwarder
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// Tunables for the TCP path agent.
const (
	AgentPort = 7847 // fixed port every agent listens on

	HandshakeTimeout    = 5 * time.Second
	HeartbeatInterval   = 1 * time.Second
	HeartbeatReadGrace  = 4 * time.Second
	HeartbeatWriteGrace = 3 * time.Second

	// Relaxation window after an interface change (see OnInterfaceChange).
	tbGraceWindow = 90 * time.Second

	dialTimeoutDefault   = 6 * time.Second
	dialTimeoutLinkLocal = 12 * time.Second
	initialBackoff       = 500 * time.Millisecond
	maxBackoff           = 60 * time.Second

	scheduleTick       = 300 * time.Millisecond // dial-scheduler poll period
	maxConcurrentDials = 32                     // global cap on in-flight dials

	// TTLs for candidate IPs in the IP database; checked against
	// ipStamp.seenAt by the dial scheduler.
	ttlDiscovered = 5 * time.Minute
	ttlObserved   = 20 * time.Minute
)
|
||||
|
||||
// HandshakeMessage is the JSON payload agents exchange when a connection is
// established.
type HandshakeMessage struct {
	NodeID    string   `json:"node_id"`
	AgentVer  string   `json:"agent_version"`
	PeerID    string   `json:"peer_id"`
	IPv4s     []string `json:"ipv4s,omitempty"`
	Timestamp int64    `json:"timestamp"`
}
|
||||
|
||||
// Edge describes one TCP path between two nodes, including the concrete
// local/remote addresses it runs over.
type Edge struct {
	LocalNodeID  string
	RemoteNodeID string
	LocalIP      string
	RemoteIP     string
	Proto        string
}
|
||||
|
||||
func (e Edge) Key() string {
|
||||
return fmt.Sprintf("%s|%s|%s|%s|%s", e.LocalNodeID, e.RemoteNodeID, e.LocalIP, e.RemoteIP, e.Proto)
|
||||
}
|
||||
|
||||
type connTrack struct {
|
||||
tc *net.TCPConn
|
||||
edge Edge
|
||||
dialer bool
|
||||
closed chan struct{}
|
||||
closeMx sync.Once
|
||||
}
|
||||
|
||||
// ipStamp records when a candidate IP was last seen and how long it remains
// trustworthy (the scheduler skips entries with seenAt older than ttl).
type ipStamp struct {
	seenAt time.Time
	ttl    time.Duration
}
|
||||
|
||||
// dialState tracks retry bookkeeping for one (peer, ip) dial target.
type dialState struct {
	backoff     time.Duration // current backoff between attempts
	nextAttempt time.Time     // earliest time the next dial may start
	connecting  bool          // a dial for this target is in flight
}
|
||||
|
||||
type TCPAgent struct {
|
||||
nodeID string
|
||||
myPeerID peer.ID
|
||||
|
||||
listener *net.TCPListener
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
edgesMu sync.RWMutex
|
||||
activeEdges map[string]*connTrack
|
||||
|
||||
activeByRemoteIPMu sync.RWMutex
|
||||
activeByRemoteIP map[string]bool
|
||||
|
||||
ipDBMu sync.RWMutex
|
||||
ipDB map[peer.ID]map[string]ipStamp
|
||||
|
||||
dialStatesMu sync.Mutex
|
||||
dialStates map[string]*dialState
|
||||
stopScheduler chan struct{}
|
||||
schedulerOnce sync.Once
|
||||
schedulerWG sync.WaitGroup
|
||||
|
||||
dialSem chan struct{}
|
||||
|
||||
ifaceGraceUntilMu sync.RWMutex
|
||||
ifaceGraceUntil time.Time
|
||||
}
|
||||
|
||||
var (
|
||||
TCPAgentInstance *TCPAgent
|
||||
TCPAgentOnce sync.Once
|
||||
)
|
||||
|
||||
func GetTCPAgent() *TCPAgent {
|
||||
TCPAgentOnce.Do(func() {
|
||||
TCPAgentInstance = &TCPAgent{
|
||||
nodeID: GetNodeId(),
|
||||
activeEdges: make(map[string]*connTrack),
|
||||
activeByRemoteIP: make(map[string]bool),
|
||||
ipDB: make(map[peer.ID]map[string]ipStamp),
|
||||
dialStates: make(map[string]*dialState),
|
||||
stopScheduler: make(chan struct{}),
|
||||
dialSem: make(chan struct{}, maxConcurrentDials),
|
||||
}
|
||||
})
|
||||
return TCPAgentInstance
|
||||
}
|
||||
|
||||
func (a *TCPAgent) Start(ctx context.Context, myPeerID peer.ID) error {
|
||||
a.nodeID = GetNodeId()
|
||||
a.myPeerID = myPeerID
|
||||
|
||||
ctx2, cancel := context.WithCancel(ctx)
|
||||
a.ctx, a.cancel = ctx2, cancel
|
||||
|
||||
ln, err := net.ListenTCP("tcp", &net.TCPAddr{Port: AgentPort})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start TCP agent listener: %w", err)
|
||||
}
|
||||
a.listener = ln
|
||||
log.Printf("TCP path agent listening on :%d", AgentPort)
|
||||
|
||||
a.schedulerOnce.Do(func() {
|
||||
a.schedulerWG.Add(1)
|
||||
go a.dialSchedulerLoop()
|
||||
})
|
||||
|
||||
go a.acceptLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *TCPAgent) Stop() error {
|
||||
if a.cancel != nil {
|
||||
a.cancel()
|
||||
}
|
||||
close(a.stopScheduler)
|
||||
a.schedulerWG.Wait()
|
||||
|
||||
if a.listener != nil {
|
||||
_ = a.listener.Close()
|
||||
}
|
||||
|
||||
a.edgesMu.Lock()
|
||||
for _, ct := range a.activeEdges {
|
||||
a.closeConn(ct, "agent_stop")
|
||||
}
|
||||
a.activeEdges = make(map[string]*connTrack)
|
||||
a.edgesMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *TCPAgent) UpdateDiscoveredIPs(peerID peer.ID, ips []net.IP) {
|
||||
now := time.Now()
|
||||
add := make(map[string]ipStamp)
|
||||
for _, ip := range ips {
|
||||
if ip == nil {
|
||||
continue
|
||||
}
|
||||
if v4 := ip.To4(); v4 != nil {
|
||||
add[v4.String()] = ipStamp{seenAt: now, ttl: ttlDiscovered}
|
||||
}
|
||||
}
|
||||
if len(add) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
a.ipDBMu.Lock()
|
||||
a.ipDB[peerID] = mergeStamps(a.ipDB[peerID], add)
|
||||
a.ipDBMu.Unlock()
|
||||
|
||||
a.dialStatesMu.Lock()
|
||||
for ipStr := range add {
|
||||
key := peerID.String() + "|" + ipStr
|
||||
if _, ok := a.dialStates[key]; !ok {
|
||||
a.dialStates[key] = &dialState{backoff: 0, nextAttempt: time.Now()}
|
||||
}
|
||||
}
|
||||
a.dialStatesMu.Unlock()
|
||||
}
|
||||
|
||||
func (a *TCPAgent) OnInterfaceChange() {
|
||||
now := time.Now()
|
||||
a.ifaceGraceUntilMu.Lock()
|
||||
a.ifaceGraceUntil = now.Add(tbGraceWindow)
|
||||
a.ifaceGraceUntilMu.Unlock()
|
||||
|
||||
a.dialStatesMu.Lock()
|
||||
for _, ds := range a.dialStates {
|
||||
ds.backoff = 0
|
||||
ds.nextAttempt = now
|
||||
}
|
||||
a.dialStatesMu.Unlock()
|
||||
}
|
||||
|
||||
func (a *TCPAgent) acceptLoop() {
|
||||
for {
|
||||
conn, err := a.listener.AcceptTCP()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-a.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
log.Printf("TCP accept error: %v", err)
|
||||
continue
|
||||
}
|
||||
a.setTCPOptions(conn)
|
||||
go a.handleIncoming(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// dialSchedulerLoop periodically scans the IP database and launches outbound
// dials. Only the node with the lexicographically larger peer ID initiates a
// connection (the "a.myPeerID.String() <= pid.String()" check), so the two
// ends of a pair never dial each other simultaneously. Dials honour the
// per-(peer, ip) backoff schedule and are bounded by the dialSem semaphore.
func (a *TCPAgent) dialSchedulerLoop() {
	defer a.schedulerWG.Done()
	t := time.NewTicker(scheduleTick)
	defer t.Stop()

	for {
		select {
		case <-a.stopScheduler:
			return
		case <-a.ctx.Done():
			return
		case <-t.C:
			a.expireIPs(false)

			// Collect the (peer, ip) pairs worth dialing this tick.
			type want struct {
				pid peer.ID
				ip  string
			}
			var wants []want

			a.ipDBMu.RLock()
			for pid, set := range a.ipDB {
				// Tie-break: only dial peers whose ID sorts below ours.
				if a.myPeerID.String() <= pid.String() {
					continue
				}
				for ipStr, stamp := range set {
					if time.Since(stamp.seenAt) > stamp.ttl {
						continue // stale; expireIPs will reap it
					}
					if a.hasActiveToRemoteIP(ipStr) {
						continue // already connected to this address
					}
					wants = append(wants, want{pid: pid, ip: ipStr})
				}
			}
			a.ipDBMu.RUnlock()

			// Deterministic dial order across ticks (map iteration is random).
			sort.Slice(wants, func(i, j int) bool {
				if wants[i].pid == wants[j].pid {
					return wants[i].ip < wants[j].ip
				}
				return wants[i].pid.String() < wants[j].pid.String()
			})

			now := time.Now()
			for _, w := range wants {
				key := w.pid.String() + "|" + w.ip
				a.dialStatesMu.Lock()
				ds, ok := a.dialStates[key]
				if !ok {
					ds = &dialState{}
					a.dialStates[key] = ds
				}
				// Skip pairs already mid-dial or still in backoff.
				if ds.connecting || now.Before(ds.nextAttempt) {
					a.dialStatesMu.Unlock()
					continue
				}
				ds.connecting = true
				a.dialStatesMu.Unlock()

				// Bound concurrent dials; abandon the tick if the agent
				// shuts down while waiting for a slot.
				select {
				case a.dialSem <- struct{}{}:
				case <-a.ctx.Done():
					return
				}

				go func(pid peer.ID, ip string) {
					defer func() {
						// Release the semaphore slot and clear the
						// connecting flag whatever dialAndMaintain did.
						<-a.dialSem
						a.dialStatesMu.Lock()
						if ds := a.dialStates[pid.String()+"|"+ip]; ds != nil {
							ds.connecting = false
						}
						a.dialStatesMu.Unlock()
					}()
					a.dialAndMaintain(pid, ip)
				}(w.pid, w.ip)
			}
		}
	}
}
|
||||
|
||||
// dialAndMaintain performs one outbound connection attempt to
// remoteIP:AgentPort, runs the dialer side of the handshake, registers the
// resulting edge and then blocks in the heartbeat loops until the connection
// dies. Any failure along the way bumps the (peer, ip) dial backoff.
func (a *TCPAgent) dialAndMaintain(pid peer.ID, remoteIP string) {
	remoteAddr := fmt.Sprintf("%s:%d", remoteIP, AgentPort)
	d := net.Dialer{Timeout: dialTimeoutForIP(remoteIP)}
	rawConn, err := d.DialContext(a.ctx, "tcp", remoteAddr)
	if err != nil {
		a.bumpDialBackoff(pid, remoteIP, err)
		return
	}
	tc := rawConn.(*net.TCPConn)
	a.setTCPOptions(tc)

	remoteNodeID, remotePeerID, observedIPv4s, err := a.performHandshake(tc, true)
	if err != nil {
		_ = tc.Close()
		a.bumpDialBackoff(pid, remoteIP, err)
		return
	}

	// Prefer the peer ID the remote reported in the handshake over the one
	// we dialed with, when it parses — discovery data may be stale.
	finalPID := pid
	if remotePeerID != "" {
		if parsed, perr := peer.Decode(remotePeerID); perr == nil {
			finalPID = parsed
		}
	}

	a.updateObservedIPv4s(finalPID, observedIPv4s)

	localIP := tc.LocalAddr().(*net.TCPAddr).IP.String()
	ct := &connTrack{
		tc:     tc,
		dialer: true,
		edge: Edge{
			LocalNodeID:  a.nodeID,
			RemoteNodeID: remoteNodeID,
			LocalIP:      localIP,
			RemoteIP:     remoteIP,
			Proto:        "tcp",
		},
		closed: make(chan struct{}),
	}

	// registerConn fails if an identical edge already exists (e.g. the
	// remote dialed us first); treat as a soft failure with backoff.
	if !a.registerConn(ct) {
		_ = tc.Close()
		a.bumpDialBackoff(finalPID, remoteIP, errors.New("duplicate edge"))
		return
	}

	// Successful dial: clear the backoff and push the next scheduled
	// attempt out by one heartbeat interval.
	a.dialStatesMu.Lock()
	if ds := a.dialStates[finalPID.String()+"|"+remoteIP]; ds != nil {
		ds.backoff = 0
		ds.nextAttempt = time.Now().Add(HeartbeatInterval)
	}
	a.dialStatesMu.Unlock()

	a.runHeartbeatLoops(ct)
}
|
||||
|
||||
// handleIncoming services one inbound connection: it completes the responder
// side of the handshake, records the peer's advertised IPv4s, registers the
// edge and runs the heartbeat loops. On any failure the connection is simply
// closed — retry/backoff is owned by the remote dialer.
func (a *TCPAgent) handleIncoming(tc *net.TCPConn) {
	remoteNodeID, remotePeerID, observedIPv4s, err := a.performHandshake(tc, false)
	if err != nil {
		_ = tc.Close()
		return
	}
	// Only record observed addresses if the remote sent a parseable peer ID.
	if remotePeerID != "" {
		if pid, perr := peer.Decode(remotePeerID); perr == nil {
			a.updateObservedIPv4s(pid, observedIPv4s)
		}
	}

	localIP := tc.LocalAddr().(*net.TCPAddr).IP.String()
	remoteIP := tc.RemoteAddr().(*net.TCPAddr).IP.String()

	ct := &connTrack{
		tc:     tc,
		dialer: false,
		edge: Edge{
			LocalNodeID:  a.nodeID,
			RemoteNodeID: remoteNodeID,
			LocalIP:      localIP,
			RemoteIP:     remoteIP,
			Proto:        "tcp",
		},
		closed: make(chan struct{}),
	}

	// Duplicate edge (we likely dialed them already): drop this one.
	if !a.registerConn(ct) {
		_ = tc.Close()
		return
	}
	a.runHeartbeatLoops(ct)
}
|
||||
|
||||
// setTCPOptions applies the agent's standard socket tuning: disable Nagle's
// algorithm and enable 5-second TCP keep-alives. Errors are deliberately
// ignored — these are best-effort optimizations.
func (a *TCPAgent) setTCPOptions(tc *net.TCPConn) {
	_ = tc.SetNoDelay(true)
	_ = tc.SetKeepAlive(true)
	_ = tc.SetKeepAlivePeriod(5 * time.Second)
}
|
||||
|
||||
// performHandshake exchanges one JSON HandshakeMessage in each direction over
// tc. The dialer sends first and then reads; the acceptor does the reverse,
// so the fixed ordering prevents both sides blocking on a read. A single
// HandshakeTimeout deadline covers the whole exchange and is cleared on
// return. Returns the remote's node ID, peer ID, and its advertised IPv4
// addresses (entries containing ':' — IPv6 literals — are filtered out).
func (a *TCPAgent) performHandshake(tc *net.TCPConn, isDialer bool) (remoteNodeID, remotePeerID string, observedIPv4s []string, err error) {
	_ = tc.SetDeadline(time.Now().Add(HandshakeTimeout))
	defer tc.SetDeadline(time.Time{}) // clear the deadline for the heartbeat phase

	self := HandshakeMessage{
		NodeID:    a.nodeID,
		AgentVer:  "2.2.0",
		PeerID:    a.myPeerID.String(),
		IPv4s:     currentLocalIPv4s(),
		Timestamp: time.Now().UnixNano(),
	}
	var remote HandshakeMessage

	if isDialer {
		if err = json.NewEncoder(tc).Encode(&self); err != nil {
			return "", "", nil, fmt.Errorf("send handshake: %w", err)
		}
		if err = json.NewDecoder(tc).Decode(&remote); err != nil {
			return "", "", nil, fmt.Errorf("read handshake: %w", err)
		}
	} else {
		if err = json.NewDecoder(tc).Decode(&remote); err != nil {
			return "", "", nil, fmt.Errorf("read handshake: %w", err)
		}
		if err = json.NewEncoder(tc).Encode(&self); err != nil {
			return "", "", nil, fmt.Errorf("send handshake: %w", err)
		}
	}

	if remote.NodeID == "" {
		return "", "", nil, errors.New("empty remote node id")
	}
	// Keep only IPv4-looking entries: non-empty and containing no ':'.
	for _, ip := range remote.IPv4s {
		if ip != "" && strings.Count(ip, ":") == 0 {
			observedIPv4s = append(observedIPv4s, ip)
		}
	}
	return remote.NodeID, remote.PeerID, observedIPv4s, nil
}
|
||||
|
||||
// registerConn atomically claims ct's edge key. It returns false if an
// identical edge is already active (duplicate connection) — in that case the
// caller must close ct. On success the remote-IP index is updated (while
// still holding edgesMu, so the two maps stay consistent) and an
// edge-created event is emitted.
func (a *TCPAgent) registerConn(ct *connTrack) bool {
	key := ct.edge.Key()

	a.edgesMu.Lock()
	if _, exists := a.activeEdges[key]; exists {
		a.edgesMu.Unlock()
		return false
	}
	a.activeEdges[key] = ct

	a.activeByRemoteIPMu.Lock()
	a.activeByRemoteIP[ct.edge.RemoteIP] = true
	a.activeByRemoteIPMu.Unlock()
	a.edgesMu.Unlock()

	WriteEdgeCreatedEvent(ct.edge.LocalNodeID, ct.edge.RemoteNodeID, ct.edge.LocalIP, ct.edge.RemoteIP, ct.edge.Proto)
	return true
}
|
||||
|
||||
func (a *TCPAgent) hasActiveToRemoteIP(remoteIP string) bool {
|
||||
a.activeByRemoteIPMu.RLock()
|
||||
ok := a.activeByRemoteIP[remoteIP]
|
||||
a.activeByRemoteIPMu.RUnlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// recalcRemoteIPActive recomputes whether any active edge still targets
// remoteIP and updates the activeByRemoteIP index accordingly. Needed after
// an edge is removed because several edges may share one remote address.
func (a *TCPAgent) recalcRemoteIPActive(remoteIP string) {
	a.edgesMu.RLock()
	active := false
	for _, ct := range a.activeEdges {
		if ct.edge.RemoteIP == remoteIP {
			active = true
			break
		}
	}
	a.edgesMu.RUnlock()

	a.activeByRemoteIPMu.Lock()
	if active {
		a.activeByRemoteIP[remoteIP] = true
	} else {
		delete(a.activeByRemoteIP, remoteIP)
	}
	a.activeByRemoteIPMu.Unlock()
}
|
||||
|
||||
// closeConn tears ct down at most once (guarded by ct.closeMx.Do): it closes
// the socket, removes the edge from activeEdges, refreshes the remote-IP
// index and emits an edge-deleted event. The second parameter is a
// human-readable close reason, currently unused.
func (a *TCPAgent) closeConn(ct *connTrack, _ string) {
	ct.closeMx.Do(func() {
		_ = ct.tc.Close()
		key := ct.edge.Key()

		a.edgesMu.Lock()
		delete(a.activeEdges, key)
		a.edgesMu.Unlock()

		a.recalcRemoteIPActive(ct.edge.RemoteIP)

		WriteEdgeDeletedEvent(ct.edge.LocalNodeID, ct.edge.RemoteNodeID, ct.edge.LocalIP, ct.edge.RemoteIP, ct.edge.Proto)
	})
}
|
||||
|
||||
// runHeartbeatLoops starts ct's two per-connection goroutines: a reader that
// drains heartbeat bytes (a read error — including a HeartbeatReadGrace
// deadline expiry — kills the connection) and a writer that sends one
// heartbeat byte every HeartbeatInterval until a write fails or the agent
// shuts down. Both paths funnel into closeConn, which is idempotent.
func (a *TCPAgent) runHeartbeatLoops(ct *connTrack) {
	go func() {
		r := bufio.NewReader(ct.tc)
		for {
			// Refresh the read deadline before each byte: silence longer
			// than HeartbeatReadGrace counts as a dead peer.
			_ = ct.tc.SetReadDeadline(time.Now().Add(HeartbeatReadGrace))
			if _, err := r.ReadByte(); err != nil {
				a.closeConn(ct, "read_error")
				return
			}
		}
	}()

	go func() {
		t := time.NewTicker(HeartbeatInterval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				_ = ct.tc.SetWriteDeadline(time.Now().Add(HeartbeatWriteGrace))
				if _, err := ct.tc.Write([]byte{0x01}); err != nil {
					a.closeConn(ct, "write_error")
					return
				}
			case <-a.ctx.Done():
				a.closeConn(ct, "agent_ctx_done")
				return
			}
		}
	}()
}
|
||||
|
||||
func (a *TCPAgent) bumpDialBackoff(pid peer.ID, ip string, err error) {
|
||||
key := pid.String() + "|" + ip
|
||||
a.dialStatesMu.Lock()
|
||||
ds, ok := a.dialStates[key]
|
||||
if !ok {
|
||||
ds = &dialState{}
|
||||
a.dialStates[key] = ds
|
||||
}
|
||||
if ds.backoff == 0 {
|
||||
ds.backoff = initialBackoff
|
||||
} else {
|
||||
ds.backoff *= 2
|
||||
if ds.backoff > maxBackoff {
|
||||
ds.backoff = maxBackoff
|
||||
}
|
||||
}
|
||||
ds.nextAttempt = time.Now().Add(ds.backoff)
|
||||
a.dialStatesMu.Unlock()
|
||||
|
||||
log.Printf("dial %s@%s failed: %v; next in %s", pid, ip, err, ds.backoff)
|
||||
}
|
||||
|
||||
func mergeStamps(dst map[string]ipStamp, src map[string]ipStamp) map[string]ipStamp {
|
||||
if dst == nil {
|
||||
dst = make(map[string]ipStamp, len(src))
|
||||
}
|
||||
for ip, s := range src {
|
||||
prev, ok := dst[ip]
|
||||
if !ok || s.seenAt.After(prev.seenAt) {
|
||||
dst[ip] = s
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// updateObservedIPv4s merges the IPv4 addresses a peer advertised during the
// handshake into the IP database (with the "observed" TTL) and seeds dial
// state for previously unseen pairs. Handshake-derived counterpart of
// UpdateDiscoveredIPs.
func (a *TCPAgent) updateObservedIPv4s(pid peer.ID, ipv4s []string) {
	if len(ipv4s) == 0 {
		return
	}
	now := time.Now()
	add := make(map[string]ipStamp, len(ipv4s))
	for _, ip := range ipv4s {
		// Defensive re-filter: drop empties and IPv6-looking strings, even
		// though performHandshake already filtered them.
		if ip != "" && strings.Count(ip, ":") == 0 {
			add[ip] = ipStamp{seenAt: now, ttl: ttlObserved}
		}
	}

	a.ipDBMu.Lock()
	a.ipDB[pid] = mergeStamps(a.ipDB[pid], add)
	a.ipDBMu.Unlock()

	// New pairs become dialable immediately; existing backoffs are kept.
	a.dialStatesMu.Lock()
	for ip := range add {
		key := pid.String() + "|" + ip
		if _, ok := a.dialStates[key]; !ok {
			a.dialStates[key] = &dialState{backoff: 0, nextAttempt: time.Now()}
		}
	}
	a.dialStatesMu.Unlock()
}
|
||||
|
||||
// expireIPs drops IP-database entries older than their TTL, along with the
// matching dial state, and removes peers whose address set becomes empty.
// Expiry is suppressed while inside the interface-change grace window so a
// flapping interface does not wipe the database. The boolean parameter is
// currently unused.
func (a *TCPAgent) expireIPs(_ bool) {
	a.ifaceGraceUntilMu.RLock()
	graceUntil := a.ifaceGraceUntil
	a.ifaceGraceUntilMu.RUnlock()
	if time.Now().Before(graceUntil) {
		return
	}

	now := time.Now()
	a.ipDBMu.Lock()
	for pid, set := range a.ipDB {
		for ip, stamp := range set {
			if now.Sub(stamp.seenAt) > stamp.ttl {
				// Deleting from a map during range is safe in Go.
				delete(set, ip)

				a.dialStatesMu.Lock()
				delete(a.dialStates, pid.String()+"|"+ip)
				a.dialStatesMu.Unlock()

				log.Printf("TCP agent: expired ip %s for %s", ip, pid)
			}
		}
		if len(set) == 0 {
			delete(a.ipDB, pid)
		}
	}
	a.ipDBMu.Unlock()
}
|
||||
|
||||
func currentLocalIPv4s() []string {
|
||||
var out []string
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return out
|
||||
}
|
||||
for _, ifi := range ifaces {
|
||||
if ifi.Flags&net.FlagUp == 0 {
|
||||
continue
|
||||
}
|
||||
addrs, _ := ifi.Addrs()
|
||||
for _, a := range addrs {
|
||||
if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP != nil {
|
||||
if v4 := ipnet.IP.To4(); v4 != nil && !v4.IsLoopback() && !v4.IsUnspecified() {
|
||||
out = append(out, v4.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Strings(out)
|
||||
return dedupeStrings(out)
|
||||
}
|
||||
|
||||
// dedupeStrings compacts consecutive duplicate entries of xs in place and
// returns the shortened slice. Callers are expected to sort first so equal
// strings are adjacent (see currentLocalIPv4s).
// Note: a leading empty string is dropped because the "previous" tracker
// starts as "" — callers here only pass non-empty IP strings.
func dedupeStrings(xs []string) []string {
	if len(xs) < 2 {
		return xs
	}
	compacted := xs[:0]
	prev := ""
	for _, item := range xs {
		if item == prev {
			continue
		}
		compacted = append(compacted, item)
		prev = item
	}
	return compacted
}
|
||||
|
||||
func dialTimeoutForIP(ip string) time.Duration {
|
||||
if strings.HasPrefix(ip, "169.254.") {
|
||||
return dialTimeoutLinkLocal
|
||||
}
|
||||
return dialTimeoutDefault
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
["9gG9JZ5YY1zLE5xVYA2L8DoCTxkYKfxrGi33stPqq1cb", "F4p3DefvhUk9fGfToJXteT7GL9JuF4qMbUCvUKeB7VPZ", "J7AAM7DiMfnvxNvA1AXUFfencsSfwp4Qi851Y7v9hP1M", "7BbDVE6oN35avU6xY7e75m3r3EjADNBTm2ZMZB83EsLf"]
|
||||
@@ -1,194 +0,0 @@
|
||||
import asyncio
|
||||
import contextlib
|
||||
import os
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from exo.shared.constants import (
|
||||
EXO_GLOBAL_EVENT_DB,
|
||||
EXO_WORKER_EVENT_DB,
|
||||
LIBP2P_GLOBAL_EVENTS_TOPIC,
|
||||
LIBP2P_LOCAL_EVENTS_TOPIC,
|
||||
)
|
||||
from exo.shared.types.common import NodeId
|
||||
|
||||
|
||||
class ForwarderRole(str, Enum):
    """Forwarder operating mode; selects which forwarding pairs are active."""

    MASTER = "master"
    REPLICA = "replica"
|
||||
|
||||
|
||||
class ForwarderSupervisor:
    """
    Manages the forwarder subprocess for SQLite ↔ libp2p event forwarding.
    The forwarder is a single process that handles multiple forwarding pairs.

    Master mode forwards:
    - sqlite:worker_events.db:events → libp2p:worker_events (share local worker events)
    - libp2p:worker_events → sqlite:global_events.db:events (collect network worker events)
    - sqlite:global_events.db:events → libp2p:global_events (broadcast merged global log)

    Replica mode forwards:
    - sqlite:worker_events.db:events → libp2p:worker_events (share local worker events)
    - libp2p:global_events → sqlite:global_events.db:events (receive global log from master)
    """

    def __init__(
        self,
        node_id: NodeId,
        forwarder_binary_path: Path,
        health_check_interval: float = 5.0,
    ):
        """
        Args:
            node_id: Identity exported to the subprocess as FORWARDER_NODE_ID.
            forwarder_binary_path: Path to the forwarder executable.
            health_check_interval: Seconds between subprocess liveness checks.
        """
        self.node_id = node_id
        self._binary_path = forwarder_binary_path
        self._health_check_interval = health_check_interval
        self._current_role: ForwarderRole | None = None
        self._process: asyncio.subprocess.Process | None = None
        self._health_check_task: asyncio.Task[None] | None = None

    async def notify_role_change(self, new_role: ForwarderRole) -> None:
        """
        Called by external systems (e.g., election handler) when role changes.
        This is the main public interface. No-op when the role is unchanged.
        """
        if self._current_role == new_role:
            logger.debug(f"Role unchanged: {new_role}")
            return
        logger.info(f"Node changing from {self._current_role} to {new_role}")
        self._current_role = new_role
        await self._restart_with_role(new_role)

    async def start_as_replica(self) -> None:
        """Convenience method to start in replica mode"""
        await self.notify_role_change(ForwarderRole.REPLICA)

    async def stop(self) -> None:
        """Stop forwarder and cleanup"""
        await self._stop_process()
        self._current_role = None

    def _get_forwarding_pairs(self, role: ForwarderRole) -> str:
        """
        Generate forwarding pairs based on role.
        Returns a single comma-joined string of "source|sink" pairs, as the
        forwarder CLI expects (the original docstring claimed a list).
        """
        pairs: list[str] = []

        # Both master and replica forward local worker events to network
        pairs.append(
            f"sqlite:{EXO_WORKER_EVENT_DB}:events|libp2p:{LIBP2P_LOCAL_EVENTS_TOPIC}"
        )

        if role == ForwarderRole.MASTER:
            # Master: collect worker events from network into global log
            pairs.append(
                f"libp2p:{LIBP2P_LOCAL_EVENTS_TOPIC}|sqlite:{EXO_GLOBAL_EVENT_DB}:events"
            )
            # Master: broadcast global events to network
            pairs.append(
                f"sqlite:{EXO_GLOBAL_EVENT_DB}:events|libp2p:{LIBP2P_GLOBAL_EVENTS_TOPIC}"
            )
        else:  # REPLICA
            # Replica: receive global events from master
            pairs.append(
                f"libp2p:{LIBP2P_GLOBAL_EVENTS_TOPIC}|sqlite:{EXO_GLOBAL_EVENT_DB}:events"
            )

        return ",".join(pairs)

    async def _restart_with_role(self, role: ForwarderRole) -> None:
        """Internal method to restart forwarder with new role"""
        await self._stop_process()

        pairs: str = self._get_forwarding_pairs(role)
        env_vars = os.environ.copy()
        env_vars["FORWARDER_NODE_ID"] = str(self.node_id)
        self._process = await asyncio.create_subprocess_exec(
            str(self._binary_path),
            "--events-db",
            str(EXO_WORKER_EVENT_DB),
            # pair arguments
            f"{pairs}",
            stdout=None,  # inherit the parent's stdout/stderr
            stderr=None,
            env=env_vars,
        )
        logger.info(f"Starting forwarder with forwarding pairs: {pairs}")

        # Start health monitoring
        self._health_check_task = asyncio.create_task(self._monitor_health())

    async def _stop_process(self) -> None:
        """Stop the forwarder process gracefully"""
        health_task = self._health_check_task
        # BUG FIX: when a crash-restart is triggered from _monitor_health,
        # this coroutine runs *on* the health-check task itself. The previous
        # code cancelled and then awaited that task unconditionally; awaiting
        # the currently running task raises RuntimeError ("Task cannot await
        # on itself"), which broke auto-restart. Only cancel/await when the
        # health task is a different task than the one running us.
        if health_task is not None and health_task is not asyncio.current_task():
            health_task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await health_task
        self._health_check_task = None

        if self._process:
            # Check if process is already dead
            if self._process.returncode is None:
                # Process is still alive, terminate it
                try:
                    self._process.terminate()
                    await asyncio.wait_for(self._process.wait(), timeout=5.0)
                except asyncio.TimeoutError:
                    logger.bind(user_facing=True).warning(
                        "Forwarder didn't terminate, killing"
                    )
                    self._process.kill()
                    await self._process.wait()
                except ProcessLookupError:
                    # Process already dead
                    pass
            self._process = None

    async def _monitor_health(self) -> None:
        """Monitor process health and restart if it crashes"""
        while self._process and self._current_role:
            try:
                # wait_for times out while the process is still alive, which
                # is the normal "healthy" path handled below.
                retcode = await asyncio.wait_for(
                    self._process.wait(), timeout=self._health_check_interval
                )
                # Process exited
                logger.bind(user_facing=True).error(
                    f"Forwarder died with code {retcode}"
                )

                # Auto-restart
                await asyncio.sleep(0.2)  # Brief delay before restart
                if self._current_role:  # Still have a role
                    await self._restart_with_role(self._current_role)
                break  # a fresh monitor task now owns the new process

            except asyncio.TimeoutError:
                # Process still running, continue monitoring
                continue
            except asyncio.CancelledError:
                break

    @property
    def is_running(self) -> bool:
        """Check if forwarder process is running"""
        return self._process is not None and self._process.returncode is None

    @property
    def current_role(self) -> ForwarderRole | None:
        """Get current forwarder role (for testing)"""
        return self._current_role

    @property
    def process_pid(self) -> int | None:
        """Get current process PID (for testing)"""
        return self._process.pid if self._process else None

    @property
    def process(self) -> asyncio.subprocess.Process | None:
        """Get current process (for testing)"""
        return self._process
|
||||
@@ -1,397 +0,0 @@
|
||||
"""
|
||||
Comprehensive unit tests for ForwarderSupervisor.
|
||||
Tests basic functionality, process management, and edge cases.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import AsyncGenerator, Callable, Generator
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
|
||||
from exo.master.election_callback import ElectionCallbacks
|
||||
from exo.master.forwarder_supervisor import (
|
||||
ForwarderRole,
|
||||
ForwarderSupervisor,
|
||||
)
|
||||
from exo.shared.constants import (
|
||||
EXO_GLOBAL_EVENT_DB,
|
||||
EXO_WORKER_EVENT_DB,
|
||||
LIBP2P_GLOBAL_EVENTS_TOPIC,
|
||||
LIBP2P_LOCAL_EVENTS_TOPIC,
|
||||
)
|
||||
from exo.shared.types.common import NodeId
|
||||
|
||||
# Mock forwarder script content
|
||||
MOCK_FORWARDER_SCRIPT = '''#!/usr/bin/env python3
|
||||
"""Mock forwarder for testing."""
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import signal
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def log(message: str) -> None:
|
||||
"""Write to both stdout and a log file for test verification"""
|
||||
print(message, flush=True)
|
||||
|
||||
# Also write to a file for test verification
|
||||
log_file = os.environ.get("MOCK_LOG_FILE")
|
||||
if log_file:
|
||||
with open(log_file, "a") as f:
|
||||
f.write(f"{time.time()}: {message}\\n")
|
||||
|
||||
|
||||
def handle_signal(signum: int, frame: object) -> None:
|
||||
"""Handle termination signals gracefully"""
|
||||
log(f"Received signal {signum}")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
# Register signal handlers
|
||||
signal.signal(signal.SIGTERM, handle_signal)
|
||||
signal.signal(signal.SIGINT, handle_signal)
|
||||
|
||||
# Log startup with arguments
|
||||
args = sys.argv[1:] if len(sys.argv) > 1 else []
|
||||
log(f"Mock forwarder started with args: {args}")
|
||||
|
||||
# Write PID file if requested (for testing process management)
|
||||
pid_file = os.environ.get("MOCK_PID_FILE")
|
||||
if pid_file:
|
||||
Path(pid_file).write_text(str(os.getpid()))
|
||||
|
||||
# Check for test control environment variables
|
||||
exit_after = os.environ.get("MOCK_EXIT_AFTER")
|
||||
exit_code = int(os.environ.get("MOCK_EXIT_CODE", "0"))
|
||||
hang_mode = os.environ.get("MOCK_HANG_MODE", "false").lower() == "true"
|
||||
ignore_signals = os.environ.get("MOCK_IGNORE_SIGNALS", "false").lower() == "true"
|
||||
|
||||
if ignore_signals:
|
||||
# Ignore SIGTERM for testing force kill scenarios
|
||||
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||
log("Ignoring SIGTERM signal")
|
||||
|
||||
# Simulate work
|
||||
start_time = time.time()
|
||||
while True:
|
||||
if exit_after and (time.time() - start_time) >= float(exit_after):
|
||||
log(f"Exiting after {exit_after} seconds with code {exit_code}")
|
||||
sys.exit(exit_code)
|
||||
|
||||
if hang_mode:
|
||||
# Simulate a hanging process (no CPU usage but not responding)
|
||||
time.sleep(3600) # Sleep for an hour
|
||||
else:
|
||||
# Normal operation - small sleep to not consume CPU
|
||||
time.sleep(0.1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
'''
|
||||
|
||||
|
||||
@pytest.fixture
def temp_dir() -> Generator[Path, None, None]:
    """Yield a fresh temporary directory, removed after the test finishes."""
    import shutil

    created = Path(tempfile.mkdtemp(prefix="exo_test_"))
    yield created
    shutil.rmtree(created, ignore_errors=True)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_forwarder_script(temp_dir: Path) -> Path:
    """Write the mock forwarder to disk and mark it executable."""
    script_path = temp_dir / "mock_forwarder.py"
    script_path.write_text(MOCK_FORWARDER_SCRIPT)
    script_path.chmod(0o755)
    return script_path
|
||||
|
||||
|
||||
@pytest.fixture
def test_logger() -> logging.Logger:
    """Return a DEBUG-level logger with a single console handler attached."""
    lg = logging.getLogger("test_forwarder")
    lg.setLevel(logging.DEBUG)

    # Attach a console handler only once, even across repeated fixture use.
    if not lg.handlers:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(
            logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            )
        )
        lg.addHandler(console)

    return lg
|
||||
|
||||
|
||||
@pytest.fixture
def mock_env_vars(temp_dir: Path) -> dict[str, str]:
    """Environment variables that direct the mock forwarder's log/pid output."""
    log_path = temp_dir / "mock_forwarder.log"
    pid_path = temp_dir / "mock_forwarder.pid"
    return {
        "MOCK_LOG_FILE": str(log_path),
        "MOCK_PID_FILE": str(pid_path),
    }
|
||||
|
||||
|
||||
@pytest_asyncio.fixture
async def cleanup_processes() -> AsyncGenerator[set[int], None]:
    """Yield a PID set; best-effort kill everything still in it on teardown."""
    tracked: set[int] = set()

    yield tracked

    # Simplified cleanup that avoids a psutil dependency.
    import contextlib
    import subprocess

    for pid in tracked:
        with contextlib.suppress(Exception):
            subprocess.run(["kill", str(pid)], check=False, timeout=1)
|
||||
|
||||
|
||||
@pytest.fixture
def track_subprocess(
    cleanup_processes: set[int],
) -> Callable[[asyncio.subprocess.Process], asyncio.subprocess.Process]:
    """Return a helper that records a subprocess's PID for later cleanup."""

    def _record(process: asyncio.subprocess.Process) -> asyncio.subprocess.Process:
        if process.pid:
            cleanup_processes.add(process.pid)
        return process

    return _record
|
||||
|
||||
|
||||
class TestForwardersupervisorBasic:
|
||||
"""Basic functionality tests for Forwardersupervisor."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_start_as_replica(
|
||||
self,
|
||||
mock_forwarder_script: Path,
|
||||
mock_env_vars: dict[str, str],
|
||||
test_logger: logging.Logger,
|
||||
track_subprocess: Callable[
|
||||
[asyncio.subprocess.Process], asyncio.subprocess.Process
|
||||
],
|
||||
) -> None:
|
||||
"""Test starting forwarder in replica mode."""
|
||||
# Set environment
|
||||
os.environ.update(mock_env_vars)
|
||||
|
||||
supervisor = ForwarderSupervisor(NodeId(), mock_forwarder_script)
|
||||
await supervisor.start_as_replica()
|
||||
|
||||
# Track the process for cleanup
|
||||
if supervisor.process:
|
||||
track_subprocess(supervisor.process)
|
||||
|
||||
try:
|
||||
# Verify process is running
|
||||
assert supervisor.is_running
|
||||
assert supervisor.current_role == ForwarderRole.REPLICA
|
||||
|
||||
# Wait a bit for log file to be written
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
# Verify forwarding pairs in log
|
||||
log_content = Path(mock_env_vars["MOCK_LOG_FILE"]).read_text()
|
||||
|
||||
# Expected replica forwarding pairs
|
||||
expected_pairs = [
|
||||
f"sqlite:{EXO_WORKER_EVENT_DB}:events|libp2p:{LIBP2P_LOCAL_EVENTS_TOPIC}",
|
||||
f"libp2p:{LIBP2P_GLOBAL_EVENTS_TOPIC}|sqlite:{EXO_GLOBAL_EVENT_DB}:events",
|
||||
]
|
||||
|
||||
# Check that the forwarder received the correct arguments
|
||||
assert all(pair in log_content for pair in expected_pairs)
|
||||
|
||||
finally:
|
||||
await supervisor.stop()
|
||||
assert not supervisor.is_running
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_role_change_replica_to_master(
|
||||
self,
|
||||
mock_forwarder_script: Path,
|
||||
mock_env_vars: dict[str, str],
|
||||
test_logger: logging.Logger,
|
||||
track_subprocess: Callable[
|
||||
[asyncio.subprocess.Process], asyncio.subprocess.Process
|
||||
],
|
||||
) -> None:
|
||||
"""Test changing role from replica to master."""
|
||||
os.environ.update(mock_env_vars)
|
||||
|
||||
supervisor = ForwarderSupervisor(NodeId(), mock_forwarder_script)
|
||||
await supervisor.start_as_replica()
|
||||
|
||||
if supervisor.process:
|
||||
track_subprocess(supervisor.process)
|
||||
|
||||
try:
|
||||
# Change to master
|
||||
await supervisor.notify_role_change(ForwarderRole.MASTER)
|
||||
|
||||
if supervisor.process:
|
||||
track_subprocess(supervisor.process)
|
||||
|
||||
# Wait for restart
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
assert supervisor.is_running
|
||||
assert supervisor.current_role == ForwarderRole.MASTER
|
||||
|
||||
# Verify new forwarding pairs
|
||||
log_content = Path(mock_env_vars["MOCK_LOG_FILE"]).read_text()
|
||||
|
||||
# Expected master forwarding pairs
|
||||
master_pairs = [
|
||||
f"libp2p:{LIBP2P_LOCAL_EVENTS_TOPIC}|sqlite:{EXO_GLOBAL_EVENT_DB}:events",
|
||||
f"sqlite:{EXO_GLOBAL_EVENT_DB}:events|libp2p:{LIBP2P_GLOBAL_EVENTS_TOPIC}",
|
||||
]
|
||||
|
||||
assert all(pair in log_content for pair in master_pairs)
|
||||
|
||||
finally:
|
||||
await supervisor.stop()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_idempotent_role_change(
|
||||
self,
|
||||
mock_forwarder_script: Path,
|
||||
mock_env_vars: dict[str, str],
|
||||
test_logger: logging.Logger,
|
||||
track_subprocess: Callable[
|
||||
[asyncio.subprocess.Process], asyncio.subprocess.Process
|
||||
],
|
||||
) -> None:
|
||||
"""Test that setting the same role twice doesn't restart the process."""
|
||||
os.environ.update(mock_env_vars)
|
||||
|
||||
supervisor = ForwarderSupervisor(NodeId(), mock_forwarder_script)
|
||||
await supervisor.start_as_replica()
|
||||
|
||||
original_pid = supervisor.process_pid
|
||||
if supervisor.process:
|
||||
track_subprocess(supervisor.process)
|
||||
|
||||
try:
|
||||
# Try to change to the same role
|
||||
await supervisor.notify_role_change(ForwarderRole.REPLICA)
|
||||
|
||||
# Should not restart (same PID)
|
||||
assert supervisor.process_pid == original_pid
|
||||
|
||||
finally:
|
||||
await supervisor.stop()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_process_crash_and_restart(
|
||||
self,
|
||||
mock_forwarder_script: Path,
|
||||
mock_env_vars: dict[str, str],
|
||||
test_logger: logging.Logger,
|
||||
track_subprocess: Callable[
|
||||
[asyncio.subprocess.Process], asyncio.subprocess.Process
|
||||
],
|
||||
) -> None:
|
||||
"""Test that Forwardersupervisor restarts the process if it crashes."""
|
||||
# Configure mock to exit after 1 second
|
||||
mock_env_vars["MOCK_EXIT_AFTER"] = "1"
|
||||
mock_env_vars["MOCK_EXIT_CODE"] = "1"
|
||||
os.environ.update(mock_env_vars)
|
||||
|
||||
supervisor = ForwarderSupervisor(
|
||||
NodeId(),
|
||||
mock_forwarder_script,
|
||||
health_check_interval=0.5, # Faster health checks for testing
|
||||
)
|
||||
await supervisor.start_as_replica()
|
||||
|
||||
original_pid = supervisor.process_pid
|
||||
if supervisor.process:
|
||||
track_subprocess(supervisor.process)
|
||||
|
||||
try:
|
||||
# Wait for first crash
|
||||
await asyncio.sleep(1.5)
|
||||
|
||||
# Process should have crashed
|
||||
assert not supervisor.is_running or supervisor.process_pid != original_pid
|
||||
|
||||
# Clear the crash-inducing environment variables so restart works
|
||||
if "MOCK_EXIT_AFTER" in os.environ:
|
||||
del os.environ["MOCK_EXIT_AFTER"]
|
||||
if "MOCK_EXIT_CODE" in os.environ:
|
||||
del os.environ["MOCK_EXIT_CODE"]
|
||||
|
||||
# Wait for restart
|
||||
await asyncio.sleep(1.0)
|
||||
|
||||
# Process should have restarted with new PID
|
||||
assert supervisor.is_running
|
||||
assert supervisor.process_pid != original_pid
|
||||
|
||||
# Track new process
|
||||
if supervisor.process:
|
||||
track_subprocess(supervisor.process)
|
||||
|
||||
finally:
|
||||
await supervisor.stop()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_nonexistent_binary(
|
||||
self, test_logger: logging.Logger, temp_dir: Path
|
||||
) -> None:
|
||||
"""Test behavior when forwarder binary doesn't exist."""
|
||||
nonexistent_path = temp_dir / "nonexistent_forwarder"
|
||||
|
||||
supervisor = ForwarderSupervisor(NodeId(), nonexistent_path)
|
||||
|
||||
# Should raise FileNotFoundError
|
||||
with pytest.raises(FileNotFoundError):
|
||||
await supervisor.start_as_replica()
|
||||
|
||||
|
||||
class TestElectionCallbacks:
    """Test suite for ElectionCallbacks."""

    @pytest.mark.asyncio
    async def test_on_became_master(self, test_logger: logging.Logger) -> None:
        """Test callback when becoming master."""
        supervisor_mock = MagicMock(spec=ForwarderSupervisor)
        supervisor_mock.notify_role_change = AsyncMock()

        handler = ElectionCallbacks(supervisor_mock)
        await handler.on_became_master()

        supervisor_mock.notify_role_change.assert_called_once_with(ForwarderRole.MASTER)  # type: ignore

    @pytest.mark.asyncio
    async def test_on_became_replica(self, test_logger: logging.Logger) -> None:
        """Test callback when becoming replica."""
        supervisor_mock = MagicMock(spec=ForwarderSupervisor)
        supervisor_mock.notify_role_change = AsyncMock()

        handler = ElectionCallbacks(supervisor_mock)
        await handler.on_became_replica()

        supervisor_mock.notify_role_change.assert_called_once_with(  # type: ignore
            ForwarderRole.REPLICA
        )
|
||||
@@ -1,19 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from exo.shared.constants import EXO_GLOBAL_EVENT_DB
|
||||
|
||||
|
||||
class EventLogConfig(BaseModel):
|
||||
"""Configuration for the event log system"""
|
||||
|
||||
# Batch processing settings
|
||||
batch_size: int = 100
|
||||
batch_timeout_ms: int = 100
|
||||
debounce_ms: int = 10
|
||||
max_age_ms: int = 100
|
||||
|
||||
def get_db_path(self) -> Path:
|
||||
"""Get the full path for a specific event log type"""
|
||||
return EXO_GLOBAL_EVENT_DB
|
||||
@@ -1,418 +0,0 @@
|
||||
import asyncio
|
||||
import contextlib
|
||||
import json
|
||||
import random
|
||||
from asyncio import Queue, Task
|
||||
from collections.abc import Sequence
|
||||
from pathlib import Path
|
||||
from typing import Any, cast
|
||||
|
||||
from loguru import logger
|
||||
from pydantic import TypeAdapter
|
||||
from sqlalchemy import text
|
||||
from sqlalchemy.exc import OperationalError
|
||||
from sqlalchemy.ext.asyncio import AsyncConnection, AsyncSession, create_async_engine
|
||||
|
||||
from exo.shared.types.common import NodeId
|
||||
from exo.shared.types.events import Event, IndexedEvent, event_tag
|
||||
|
||||
from .types import StoredEvent
|
||||
|
||||
|
||||
class AsyncSQLiteEventStorage:
|
||||
"""High-performance SQLite event storage with async batching.
|
||||
|
||||
Features:
|
||||
- Non-blocking writes via adaptive async batching with debouncing
|
||||
- Automatic sequence numbering using SQLite rowid
|
||||
- Type-safe event serialization/deserialization
|
||||
- Efficient indexing for common query patterns
|
||||
|
||||
Batching behavior:
|
||||
- Low load: Minimal latency via short debounce windows
|
||||
- High load: Efficient batching up to batch_size limit
|
||||
- Max age constraint prevents indefinite delays
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
db_path: str | Path,
|
||||
batch_size: int,
|
||||
batch_timeout_ms: int,
|
||||
debounce_ms: int,
|
||||
max_age_ms: int,
|
||||
):
|
||||
self._db_path = Path(db_path)
|
||||
self._batch_size = batch_size
|
||||
self._batch_timeout_s = batch_timeout_ms / 1000.0
|
||||
self._debounce_s = debounce_ms / 1000.0
|
||||
self._max_age_s = max_age_ms / 1000.0
|
||||
self._write_queue: Queue[tuple[Event, NodeId]] = Queue()
|
||||
self._batch_writer_task: Task[None] | None = None
|
||||
self._engine = None
|
||||
self._closed = False
|
||||
|
||||
async def start(self) -> None:
|
||||
"""Initialize the storage and start the batch writer."""
|
||||
if self._batch_writer_task is not None:
|
||||
raise RuntimeError("Storage already started")
|
||||
|
||||
# Create database and tables
|
||||
await self._initialize_database()
|
||||
|
||||
# Start batch writer
|
||||
self._batch_writer_task = asyncio.create_task(self._batch_writer())
|
||||
logger.info(f"Started SQLite event storage: {self._db_path}")
|
||||
|
||||
async def append_events(self, events: Sequence[Event], origin: NodeId) -> None:
|
||||
"""Append events to the log (fire-and-forget). The writes are batched and committed
|
||||
in the background so readers don't have a guarantee of seeing events immediately."""
|
||||
if self._closed:
|
||||
raise RuntimeError("Storage is closed")
|
||||
|
||||
for event in events:
|
||||
await self._write_queue.put((event, origin))
|
||||
|
||||
async def get_events_since(self, last_idx: int) -> Sequence[IndexedEvent]:
|
||||
"""Retrieve events after a specific index."""
|
||||
if self._closed:
|
||||
raise RuntimeError("Storage is closed")
|
||||
|
||||
assert self._engine is not None
|
||||
|
||||
async with AsyncSession(self._engine) as session:
|
||||
# Use raw SQL to get rowid along with the stored event data
|
||||
result = await session.execute(
|
||||
text(
|
||||
"SELECT rowid, origin, event_data FROM events WHERE rowid > :last_idx ORDER BY rowid"
|
||||
),
|
||||
{"last_idx": last_idx},
|
||||
)
|
||||
rows = result.fetchall()
|
||||
|
||||
events: list[IndexedEvent] = []
|
||||
for row in rows:
|
||||
rowid: int = cast(int, row[0])
|
||||
# origin: str = cast(str, row[1])
|
||||
# Parse JSON string to dict
|
||||
raw_event_data = row[2] # type: ignore[reportAny] - SQLAlchemy result is Any
|
||||
if isinstance(raw_event_data, str):
|
||||
event_data: dict[str, Any] = cast(
|
||||
dict[str, Any], json.loads(raw_event_data)
|
||||
)
|
||||
else:
|
||||
event_data = cast(dict[str, Any], raw_event_data)
|
||||
event: Event = TypeAdapter(Event).validate_python(event_data) # type: ignore
|
||||
events.append(
|
||||
IndexedEvent(
|
||||
event=event, # type: ignore
|
||||
# origin=NodeId(origin),
|
||||
idx=rowid, # rowid becomes idx_in_log
|
||||
)
|
||||
)
|
||||
|
||||
return events
|
||||
|
||||
async def get_last_idx(self) -> int:
|
||||
if self._closed:
|
||||
raise RuntimeError("Storaged is closed")
|
||||
|
||||
assert self._engine is not None
|
||||
|
||||
async with AsyncSession(self._engine) as session:
|
||||
result = await session.execute(
|
||||
text(
|
||||
"SELECT rowid, origin, event_data FROM events ORDER BY rowid DESC LIMIT 1"
|
||||
),
|
||||
{},
|
||||
)
|
||||
rows = result.fetchall()
|
||||
|
||||
if len(rows) == 0:
|
||||
return 0
|
||||
if len(rows) == 1:
|
||||
row = rows[0]
|
||||
return cast(int, row[0])
|
||||
else:
|
||||
raise AssertionError(
|
||||
"There should have been at most 1 row returned from this SQL query."
|
||||
)
|
||||
|
||||
async def close(self) -> None:
|
||||
"""Close the storage connection and cleanup resources."""
|
||||
if self._closed:
|
||||
return
|
||||
|
||||
self._closed = True
|
||||
|
||||
# Stop batch writer
|
||||
if self._batch_writer_task is not None:
|
||||
self._batch_writer_task.cancel()
|
||||
with contextlib.suppress(asyncio.CancelledError):
|
||||
await self._batch_writer_task
|
||||
|
||||
# Close database
|
||||
if self._engine is not None:
|
||||
await self._engine.dispose()
|
||||
|
||||
logger.info("Closed SQLite event storage")
|
||||
|
||||
async def delete_all_events(self) -> None:
|
||||
"""Delete all events from the database."""
|
||||
assert self._engine is not None
|
||||
async with AsyncSession(self._engine) as session:
|
||||
await session.execute(text("DELETE FROM events"))
|
||||
await session.commit()
|
||||
|
||||
async def _initialize_database(self) -> None:
|
||||
"""Initialize database connection and create tables."""
|
||||
self._engine = create_async_engine(
|
||||
f"sqlite+aiosqlite:///{self._db_path}",
|
||||
echo=False,
|
||||
connect_args={
|
||||
"check_same_thread": False,
|
||||
"timeout": 30.0, # Connection timeout in seconds
|
||||
},
|
||||
pool_pre_ping=True, # Test connections before using them
|
||||
pool_size=5,
|
||||
max_overflow=10,
|
||||
)
|
||||
|
||||
# Create tables with proper race condition handling
|
||||
async with self._engine.begin() as conn:
|
||||
# First check if the table exists using SQLite's master table
|
||||
result = await conn.execute(
|
||||
text(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='events'"
|
||||
)
|
||||
)
|
||||
table_exists = result.fetchone() is not None
|
||||
|
||||
if not table_exists:
|
||||
try:
|
||||
# Use CREATE TABLE IF NOT EXISTS as a more atomic operation
|
||||
# This avoids race conditions between check and create
|
||||
await conn.execute(
|
||||
text("""
|
||||
CREATE TABLE IF NOT EXISTS events (
|
||||
rowid INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
origin TEXT NOT NULL,
|
||||
event_type TEXT NOT NULL,
|
||||
event_id TEXT NOT NULL,
|
||||
event_data TEXT NOT NULL,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
""")
|
||||
)
|
||||
|
||||
# Create indexes if they don't exist
|
||||
await conn.execute(
|
||||
text(
|
||||
"CREATE INDEX IF NOT EXISTS idx_events_origin ON events(origin)"
|
||||
)
|
||||
)
|
||||
await conn.execute(
|
||||
text(
|
||||
"CREATE INDEX IF NOT EXISTS idx_events_event_type ON events(event_type)"
|
||||
)
|
||||
)
|
||||
await conn.execute(
|
||||
text(
|
||||
"CREATE INDEX IF NOT EXISTS idx_events_event_id ON events(event_id)"
|
||||
)
|
||||
)
|
||||
await conn.execute(
|
||||
text(
|
||||
"CREATE INDEX IF NOT EXISTS idx_events_created_at ON events(created_at)"
|
||||
)
|
||||
)
|
||||
await conn.execute(
|
||||
text(
|
||||
"CREATE INDEX IF NOT EXISTS idx_events_origin_created ON events(origin, created_at)"
|
||||
)
|
||||
)
|
||||
|
||||
logger.info("Events table and indexes created successfully")
|
||||
except OperationalError as e:
|
||||
# Even with IF NOT EXISTS, log any unexpected errors
|
||||
logger.error(f"Error creating table: {e}")
|
||||
# Re-check if table exists now
|
||||
result = await conn.execute(
|
||||
text(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='events'"
|
||||
)
|
||||
)
|
||||
if result.fetchone() is None:
|
||||
raise RuntimeError(f"Failed to create events table: {e}") from e
|
||||
else:
|
||||
logger.info(
|
||||
"Events table exists (likely created by another process)"
|
||||
)
|
||||
else:
|
||||
logger.debug("Events table already exists")
|
||||
|
||||
# Enable WAL mode and other optimizations with retry logic
|
||||
await self._execute_pragma_with_retry(
|
||||
conn,
|
||||
[
|
||||
"PRAGMA journal_mode=WAL",
|
||||
"PRAGMA synchronous=NORMAL",
|
||||
"PRAGMA cache_size=10000",
|
||||
"PRAGMA busy_timeout=30000", # 30 seconds busy timeout
|
||||
],
|
||||
)
|
||||
|
||||
async def _batch_writer(self) -> None:
|
||||
"""Background task that drains the queue and commits batches.
|
||||
|
||||
Uses adaptive batching with debouncing:
|
||||
- Blocks waiting for first item (no CPU waste when idle)
|
||||
- Opens debounce window to collect more items
|
||||
- Respects max age to prevent stale batches
|
||||
- Resets debounce timer with each new item
|
||||
"""
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
while not self._closed:
|
||||
batch: list[tuple[Event, NodeId]] = []
|
||||
|
||||
try:
|
||||
# Block waiting for first item
|
||||
event, origin = await self._write_queue.get()
|
||||
batch.append((event, origin))
|
||||
first_ts = loop.time() # monotonic seconds
|
||||
|
||||
# Open debounce window
|
||||
while True:
|
||||
# How much longer can we wait?
|
||||
age_left = self._max_age_s - (loop.time() - first_ts)
|
||||
if age_left <= 0:
|
||||
break # max age reached → flush
|
||||
|
||||
# Shrink the wait to honour both debounce and max-age
|
||||
try:
|
||||
event, origin = await asyncio.wait_for(
|
||||
self._write_queue.get(),
|
||||
timeout=min(self._debounce_s, age_left),
|
||||
)
|
||||
batch.append((event, origin))
|
||||
|
||||
if len(batch) >= self._batch_size:
|
||||
break # size cap reached → flush
|
||||
# else: loop again, resetting debounce timer
|
||||
except asyncio.TimeoutError:
|
||||
break # debounce window closed → flush
|
||||
|
||||
except asyncio.CancelledError:
|
||||
# Drain any remaining items before exiting
|
||||
if batch:
|
||||
await self._commit_batch(batch)
|
||||
raise
|
||||
|
||||
if batch:
|
||||
await self._commit_batch(batch)
|
||||
|
||||
async def _commit_batch(self, batch: list[tuple[Event, NodeId]]) -> None:
|
||||
"""Commit a batch of events to SQLite."""
|
||||
assert self._engine is not None
|
||||
|
||||
try:
|
||||
async with AsyncSession(self._engine) as session:
|
||||
for event, origin in batch:
|
||||
stored_event = StoredEvent(
|
||||
origin=origin,
|
||||
event_type=event_tag(event),
|
||||
event_id=str(event.event_id),
|
||||
event_data=event.model_dump(
|
||||
mode="json"
|
||||
), # Serialize UUIDs and other objects to JSON-compatible strings
|
||||
)
|
||||
session.add(stored_event)
|
||||
|
||||
await session.commit()
|
||||
logger.debug(f"Committed batch of {len(batch)} events")
|
||||
|
||||
except OperationalError as e:
|
||||
if "database is locked" in str(e):
|
||||
logger.warning(f"Database locked during batch commit, will retry: {e}")
|
||||
# Retry with exponential backoff
|
||||
await self._commit_batch_with_retry(batch)
|
||||
else:
|
||||
logger.error(f"Failed to commit batch: {e}")
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to commit batch: {e}")
|
||||
raise
|
||||
|
||||
async def _execute_pragma_with_retry(
|
||||
self, conn: AsyncConnection, pragmas: list[str], max_retries: int = 5
|
||||
) -> None:
|
||||
"""Execute PRAGMA statements with retry logic for database lock errors."""
|
||||
for pragma in pragmas:
|
||||
retry_count = 0
|
||||
base_delay: float = 0.1 # 100ms
|
||||
|
||||
while retry_count < max_retries:
|
||||
try:
|
||||
await conn.execute(text(pragma))
|
||||
break
|
||||
except OperationalError as e:
|
||||
if "database is locked" in str(e) and retry_count < max_retries - 1:
|
||||
delay = cast(
|
||||
float,
|
||||
base_delay * (2**retry_count) + random.uniform(0, 0.1),
|
||||
)
|
||||
logger.warning(
|
||||
f"Database locked on '{pragma}', retry {retry_count + 1}/{max_retries} after {delay:.2f}s"
|
||||
)
|
||||
await asyncio.sleep(delay)
|
||||
retry_count += 1
|
||||
else:
|
||||
logger.error(
|
||||
f"Failed to execute '{pragma}' after {retry_count + 1} attempts: {e}"
|
||||
)
|
||||
raise
|
||||
|
||||
async def _commit_batch_with_retry(
|
||||
self, batch: list[tuple[Event, NodeId]], max_retries: int = 5
|
||||
) -> None:
|
||||
"""Commit a batch with retry logic for database lock errors."""
|
||||
retry_count = 0
|
||||
base_delay: float = 0.1 # 100ms
|
||||
|
||||
while retry_count < max_retries:
|
||||
try:
|
||||
assert self._engine is not None
|
||||
|
||||
async with AsyncSession(self._engine) as session:
|
||||
for event, origin in batch:
|
||||
stored_event = StoredEvent(
|
||||
origin=origin,
|
||||
event_type=event_tag(event),
|
||||
event_id=str(event.event_id),
|
||||
event_data=event.model_dump(mode="json"),
|
||||
)
|
||||
session.add(stored_event)
|
||||
|
||||
await session.commit()
|
||||
|
||||
logger.debug(
|
||||
f"Committed batch of {len(batch)} events after {retry_count} retries"
|
||||
)
|
||||
return
|
||||
|
||||
except OperationalError as e:
|
||||
if "database is locked" in str(e) and retry_count < max_retries - 1:
|
||||
delay = cast(
|
||||
float, base_delay * (2**retry_count) + random.uniform(0, 0.1)
|
||||
)
|
||||
logger.warning(
|
||||
f"Database locked on batch commit, retry {retry_count + 1}/{max_retries} after {delay:.2f}s"
|
||||
)
|
||||
await asyncio.sleep(delay)
|
||||
retry_count += 1
|
||||
else:
|
||||
logger.error(
|
||||
f"Failed to commit batch after {retry_count + 1} attempts: {e}"
|
||||
)
|
||||
raise
|
||||
@@ -1,110 +0,0 @@
|
||||
import asyncio
|
||||
from typing import cast
|
||||
|
||||
from loguru import logger
|
||||
from sqlalchemy.exc import OperationalError
|
||||
|
||||
from exo.shared.constants import EXO_HOME
|
||||
from exo.shared.db.config import EventLogConfig
|
||||
from exo.shared.db.connector import AsyncSQLiteEventStorage
|
||||
from exo.utils.fs import ensure_directory_exists
|
||||
|
||||
|
||||
class EventLogManager:
|
||||
"""
|
||||
Manages both worker and global event log connectors.
|
||||
Used by both master and worker processes with different access patterns:
|
||||
|
||||
- Worker: writes to worker_events, tails global_events
|
||||
- Master (elected): writes to global_events, tails global_events
|
||||
- Master (replica): writes to worker_events, tails global_events
|
||||
"""
|
||||
|
||||
def __init__(self, config: EventLogConfig):
|
||||
self._config = config
|
||||
self._connector: AsyncSQLiteEventStorage | None = None
|
||||
|
||||
# Ensure base directory exists
|
||||
ensure_directory_exists(EXO_HOME)
|
||||
|
||||
# TODO: This seems like it's a pattern to avoid an async __init__ function. But as we know, there's a better pattern for this - using a create() function, like in runner_supervisor.
|
||||
async def initialize(self, max_retries: int = 3) -> None:
|
||||
"""Initialize both connectors with retry logic - call this during startup"""
|
||||
# Both master and worker need both connectors
|
||||
retry_count: int = 0
|
||||
last_error: Exception | None = None
|
||||
|
||||
while retry_count < max_retries:
|
||||
try:
|
||||
await self.get_connector()
|
||||
break
|
||||
except OperationalError as e:
|
||||
last_error = e
|
||||
if "database is locked" in str(e) and retry_count < max_retries - 1:
|
||||
retry_count += 1
|
||||
delay = cast(float, 0.5 * (2**retry_count))
|
||||
logger.warning(
|
||||
f"Database locked while initializing db, retry {retry_count}/{max_retries} after {delay}s"
|
||||
)
|
||||
await asyncio.sleep(delay)
|
||||
else:
|
||||
logger.opt(exception=e).error(
|
||||
f"Failed to initialize db after {retry_count + 1} attempts"
|
||||
)
|
||||
raise RuntimeError(
|
||||
f"Could not initialize db after {retry_count + 1} attempts"
|
||||
) from e
|
||||
except Exception as e:
|
||||
logger.opt(exception=e).error("Unexpected error initializing db")
|
||||
raise
|
||||
|
||||
if retry_count >= max_retries and last_error:
|
||||
raise RuntimeError(
|
||||
f"Could not initialize db after {max_retries} attempts"
|
||||
) from last_error
|
||||
logger.bind(user_facing=True).info("Initialized all event log connectors")
|
||||
|
||||
async def get_connector(self) -> AsyncSQLiteEventStorage:
|
||||
"""Get or create a connector for the specified log type"""
|
||||
if not self._connector:
|
||||
db_path = self._config.get_db_path()
|
||||
|
||||
try:
|
||||
connector = AsyncSQLiteEventStorage(
|
||||
db_path=db_path,
|
||||
batch_size=self._config.batch_size,
|
||||
batch_timeout_ms=self._config.batch_timeout_ms,
|
||||
debounce_ms=self._config.debounce_ms,
|
||||
max_age_ms=self._config.max_age_ms,
|
||||
)
|
||||
|
||||
# Start the connector (creates tables if needed)
|
||||
await connector.start()
|
||||
|
||||
self._connector = connector
|
||||
logger.bind(user_facing=True).info(
|
||||
f"Initialized db connector at {db_path}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.bind(user_facing=True).opt(exception=e).error(
|
||||
"Failed to create db connector"
|
||||
)
|
||||
raise
|
||||
|
||||
return self._connector
|
||||
|
||||
@property
|
||||
def events(self) -> AsyncSQLiteEventStorage:
|
||||
"""Access event log (must call initialize() first)"""
|
||||
if not self._connector:
|
||||
raise RuntimeError(
|
||||
"Event log manager not initialized. Call initialize() first."
|
||||
)
|
||||
return self._connector
|
||||
|
||||
async def close(self) -> None:
|
||||
"""Close all open connectors"""
|
||||
assert self._connector is not None
|
||||
await self._connector.close()
|
||||
logger.bind(user_facing=True).info("Closed db connector")
|
||||
self._connector = None
|
||||
@@ -1,27 +0,0 @@
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any
|
||||
|
||||
from sqlalchemy import DateTime, Index
|
||||
from sqlmodel import JSON, Column, Field, SQLModel
|
||||
|
||||
|
||||
class StoredEvent(SQLModel, table=True):
|
||||
"""SQLite representation of an event in the event log.
|
||||
|
||||
The rowid serves as the global sequence number (idx_in_log) for ordering.
|
||||
"""
|
||||
|
||||
__tablename__ = "events" # type: ignore[assignment]
|
||||
|
||||
# SQLite's rowid as primary key - we alias it but don't actually use it in queries
|
||||
rowid: int | None = Field(default=None, primary_key=True, alias="rowid")
|
||||
origin: str = Field(index=True)
|
||||
event_type: str = Field(index=True)
|
||||
event_id: str = Field(index=True)
|
||||
event_data: dict[str, Any] = Field(sa_column=Column(JSON))
|
||||
created_at: datetime = Field(
|
||||
default_factory=lambda: datetime.now(timezone.utc),
|
||||
sa_column=Column(DateTime, index=True),
|
||||
)
|
||||
|
||||
__table_args__ = (Index("idx_events_origin_created", "origin", "created_at"),)
|
||||
@@ -1,14 +0,0 @@
|
||||
"""
|
||||
A set of IPC primitives intended for cross-language use.
|
||||
Includes things like file-locks, named-pipe duplexes, and so on.
|
||||
|
||||
TODO: implement System V IPC primitives??
|
||||
1. semaphores w/ SEM_UNDO flag ???
|
||||
2. Message Queues => as a replacement for pipe duplexes???
|
||||
see: https://www.softprayog.in/programming/system-v-semaphores
|
||||
https://tldp.org/LDP/lpg/node21.html
|
||||
https://tldp.org/LDP/tlk/ipc/ipc.html
|
||||
https://docs.oracle.com/cd/E19683-01/816-5042/auto32/index.html
|
||||
https://www.softprayog.in/programming/posix-semaphores
|
||||
|
||||
"""
|
||||
@@ -1,4 +0,0 @@
|
||||
"""
|
||||
A file-lock based IPC mutex primitives.
|
||||
|
||||
"""
|
||||
@@ -1,147 +0,0 @@
|
||||
"""
|
||||
File-based mutex primitive implemented using UNIX-based `flock` syscall.
|
||||
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import stat
|
||||
import time
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
from exo.utils.fs import StrPath, ensure_parent_directory_exists
|
||||
|
||||
# open in read-write mode, creates file if it doesn't exist already,
|
||||
# closes this file descriptor in any children processes (prevents FD leaking),
|
||||
# truncates this file on opening (lock-files shouldn't hold content FOR NOW!!!)
|
||||
# SEE: https://man7.org/linux/man-pages/man2/openat.2.html
|
||||
OPEN_FLAGS = os.O_RDWR | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC
|
||||
|
||||
# 0x644 mode flags -> user has read-write permissions, others have read permission only
|
||||
# SEE: https://man7.org/linux/man-pages/man2/openat.2.html
|
||||
MODE_FLAGS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
|
||||
|
||||
# default poll-interval for spin-blocking lock
|
||||
POLL_INTERVAL = 0.05
|
||||
|
||||
|
||||
class LockType(Enum):
|
||||
READ = fcntl.LOCK_SH
|
||||
WRITE = fcntl.LOCK_EX
|
||||
|
||||
|
||||
class AcquireMode(Enum):
|
||||
OS_BLOCKING = 0
|
||||
SPIN_BLOCKING = 1
|
||||
NON_BLOCKING = 2
|
||||
|
||||
|
||||
class FlockMutex:
|
||||
def __init__(self, file_path: StrPath):
|
||||
self._file_path = file_path
|
||||
self._fd: Optional[int] = None
|
||||
self.lock_held: Optional[LockType] = None
|
||||
|
||||
def _open_fd(self):
|
||||
assert self._fd is None
|
||||
ensure_parent_directory_exists(self._file_path)
|
||||
|
||||
# open file & TRY to change permissions to `MODE_FLAGS` flags
|
||||
self._fd = os.open(self._file_path, OPEN_FLAGS, MODE_FLAGS)
|
||||
with contextlib.suppress(
|
||||
PermissionError
|
||||
): # This locked is not owned by this UID
|
||||
os.chmod(self._fd, MODE_FLAGS)
|
||||
|
||||
def _close_fd(self):
|
||||
assert self._fd is not None
|
||||
os.close(self._fd)
|
||||
self._fd = None
|
||||
|
||||
def _acquire(self, lock_type: LockType, blocking: bool) -> bool:
|
||||
assert (self._fd is not None) and (self.lock_held is None)
|
||||
|
||||
# create flags for acquiring lock
|
||||
flags = lock_type.value
|
||||
if not blocking:
|
||||
flags |= fcntl.LOCK_NB
|
||||
|
||||
# continually try to acquire lock (since it may fail due to interrupts)
|
||||
while True:
|
||||
try:
|
||||
fcntl.flock(self._fd, flags)
|
||||
break
|
||||
except OSError as e:
|
||||
if e.errno == errno.EINTR: # call interrupted by signal -> try again
|
||||
continue
|
||||
elif (
|
||||
e.errno == errno.EWOULDBLOCK
|
||||
): # file is locked & non-blocking is enabled -> return false to indicate
|
||||
return False
|
||||
|
||||
# unhandleable errors -> close FD & raise
|
||||
self._close_fd()
|
||||
if e.errno == errno.ENOSYS: # NotImplemented error
|
||||
raise NotImplementedError(
|
||||
"This system doesn't support flock"
|
||||
) from e
|
||||
else:
|
||||
raise
|
||||
|
||||
# set lock-type held
|
||||
self.lock_held = lock_type
|
||||
return True
|
||||
|
||||
def _release(self):
|
||||
assert (self._fd is not None) and (self.lock_held is not None)
|
||||
|
||||
# continually try to release lock (since it may fail due to interrupts)
|
||||
while True:
|
||||
try:
|
||||
fcntl.flock(self._fd, fcntl.LOCK_UN)
|
||||
break
|
||||
except OSError as e:
|
||||
if e.errno == errno.EINTR: # call interrupted by signal -> try again
|
||||
continue
|
||||
|
||||
# unhandleable errors -> close FD & raise
|
||||
self._close_fd()
|
||||
if e.errno == errno.ENOSYS: # NotImplemented error
|
||||
raise NotImplementedError(
|
||||
"This system doesn't support flock"
|
||||
) from e
|
||||
else:
|
||||
raise
|
||||
|
||||
self.lock_held = None
|
||||
|
||||
def acquire(
|
||||
self,
|
||||
lock_type: LockType = LockType.WRITE,
|
||||
acquire_mode: AcquireMode = AcquireMode.SPIN_BLOCKING,
|
||||
) -> bool:
|
||||
if self._fd is None:
|
||||
self._open_fd()
|
||||
|
||||
# OS-blocking & non-blocking is direct passthrough to private function
|
||||
match acquire_mode:
|
||||
case AcquireMode.OS_BLOCKING:
|
||||
return self._acquire(lock_type, blocking=True)
|
||||
case AcquireMode.NON_BLOCKING:
|
||||
return self._acquire(lock_type, blocking=False)
|
||||
case _:
|
||||
pass
|
||||
|
||||
# spin-blocking works by trying to acquire the lock in non-blocking mode, and retrying until success
|
||||
while True:
|
||||
locked = self._acquire(lock_type, blocking=False)
|
||||
if locked:
|
||||
return True
|
||||
time.sleep(POLL_INTERVAL)
|
||||
|
||||
def release(self):
|
||||
self._release()
|
||||
self._close_fd()
|
||||
@@ -1,415 +0,0 @@
|
||||
"""
|
||||
SEE:
|
||||
- https://pubs.opengroup.org/onlinepubs/007904875/functions/open.html
|
||||
- https://man7.org/linux/man-pages/man2/openat.2.html
|
||||
- https://man7.org/linux/man-pages/man3/mkfifo.3.html
|
||||
- https://man7.org/linux/man-pages/man7/pipe.7.html
|
||||
|
||||
TODO: add locking on reader/writer ends to prevent multiwriters??
|
||||
TODO: use signal bytes to ensure proper packet consistency
|
||||
+stretch: implement packet IDs, retries, dual-stream confirmations, RPCs & so on
|
||||
|
||||
TODO: for more hardening -> check if any of the syscalls used return signal interrupt errors (like in the locking case)
|
||||
and interrupt on that happening -> this may not be an issue PER SE but might potentially create insanely bizzare bugs
|
||||
if it happens that this behavior DOES occasionally happen for no apparent reason
|
||||
|
||||
TODO: maybe consider padding all messages with 0s on both ends ?? so as to prevent ANY ambiguous boundaries ever!!
|
||||
"""
|
||||
|
||||
import errno
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import queue
|
||||
import stat
|
||||
import threading
|
||||
import time
|
||||
from enum import Enum
|
||||
from multiprocessing.queues import Queue as MQueueT
|
||||
from multiprocessing.synchronize import Event as MEventT
|
||||
from threading import Event as TEventT
|
||||
from typing import Callable
|
||||
|
||||
from cobs import cobs # pyright: ignore[reportMissingTypeStubs]
|
||||
from pytest import LogCaptureFixture
|
||||
|
||||
from exo.utils.fs import (
|
||||
StrPath,
|
||||
delete_if_exists,
|
||||
ensure_parent_directory_exists,
|
||||
)
|
||||
|
||||
OPEN_READER_FLAGS = os.O_RDONLY | os.O_NONBLOCK
|
||||
OPEN_WRITER_FLAGS = os.O_WRONLY | os.O_NONBLOCK
|
||||
|
||||
# 0x644 mode flags -> user has read-write permissions, others have read permission only
|
||||
MODE_FLAGS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
|
||||
|
||||
POLL_INTERVAL = 0.05 # TODO: maybe parametrize this in classes??
|
||||
PIPE_BUF = 4096 # size of atomic writes on (most) UNIX pipes
|
||||
|
||||
|
||||
class SignalMessage(Enum):
|
||||
"""
|
||||
Signal messages range from 1 to 255 & indicate control flow for the bytestream of the pipe.
|
||||
|
||||
"""
|
||||
|
||||
DISCARD_PREVIOUS = b"\x01"
|
||||
|
||||
|
||||
class PipeDuplex:
|
||||
"""
|
||||
Creates a named-pipe communication duplex. The reader end is responsible for creating the pipe.
|
||||
|
||||
The layers are:
|
||||
1. Raw binary data over pipes
|
||||
2. Variable-length binary packets with COBS
|
||||
3. JSON-like values with Message Pack
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
in_pipe: StrPath,
|
||||
out_pipe: StrPath,
|
||||
in_callback: Callable[[bytes], None],
|
||||
):
|
||||
assert in_pipe != out_pipe # they must be different files
|
||||
|
||||
# pipes should only ever be created, and only by the reader (one-way operations)
|
||||
_ensure_fifo_exists(in_pipe) # ensures reader pipe exists
|
||||
|
||||
# create readonly properties (useful for inspection)
|
||||
self._in_pipe = in_pipe
|
||||
self._out_pipe = out_pipe
|
||||
|
||||
# init synchronisation variables
|
||||
self._mkill = multiprocessing.Event()
|
||||
self._tkill = threading.Event()
|
||||
in_mq: MQueueT[bytes] = multiprocessing.Queue()
|
||||
self._out_mq: MQueueT[bytes] = multiprocessing.Queue()
|
||||
in_mstarted = multiprocessing.Event()
|
||||
|
||||
# process for reading in binary messages from pipe
|
||||
self._p_in = multiprocessing.Process(
|
||||
target=_pipe_buffer_reader,
|
||||
args=(in_pipe, in_mq, in_mstarted, self._mkill),
|
||||
daemon=True,
|
||||
)
|
||||
self._p_in.start()
|
||||
|
||||
# thread for pulling down binary messages from message queue & calling the callback
|
||||
self._t_in = threading.Thread(
|
||||
target=_binary_object_dispatcher,
|
||||
args=(in_mq, in_callback, self._tkill),
|
||||
daemon=True,
|
||||
)
|
||||
self._t_in.start()
|
||||
|
||||
# process to write binary messages to pipe
|
||||
out_mstarted = multiprocessing.Event()
|
||||
self._p_out = multiprocessing.Process(
|
||||
target=_pipe_buffer_writer,
|
||||
args=(out_pipe, self._out_mq, out_mstarted, self._mkill),
|
||||
daemon=True,
|
||||
)
|
||||
self._p_out.start()
|
||||
|
||||
# wait for processes to start properly
|
||||
in_mstarted.wait()
|
||||
out_mstarted.wait()
|
||||
|
||||
def __del__(self):
|
||||
# signal to these processes to die (if they haven't already)
|
||||
self._mkill.set()
|
||||
self._tkill.set()
|
||||
|
||||
def send_message(self, msg: bytes):
|
||||
self._out_mq.put_nowait(msg)
|
||||
|
||||
@property
|
||||
def in_pipe(self):
|
||||
return self._in_pipe
|
||||
|
||||
@property
|
||||
def out_pipe(self):
|
||||
return self._out_pipe
|
||||
|
||||
|
||||
def _ensure_fifo_exists(path: StrPath):
|
||||
# try to make a file if one doesn't exist already
|
||||
ensure_parent_directory_exists(path)
|
||||
try:
|
||||
os.mkfifo(path, mode=MODE_FLAGS)
|
||||
except OSError as e:
|
||||
# misc error, do not handle
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
# ensure the file exists is FIFO
|
||||
st = os.stat(path)
|
||||
if stat.S_ISFIFO(st.st_mode):
|
||||
return
|
||||
|
||||
# this file is not FIFO
|
||||
raise FileExistsError(f"The file '{path}' isn't a FIFO") from e
|
||||
|
||||
|
||||
def _pipe_buffer_reader(
    path: StrPath, mq: MQueueT[bytes], started: MEventT, kill: MEventT
):
    """Process target: read NULL-framed messages from the FIFO at *path*.

    Framing (frames are separated by NULL bytes):
      - empty frames carry no information and are dropped,
      - 1-byte frames are control signals (currently only logged),
      - frames of >= 2 bytes are COBS-encoded payloads — the smallest
        COBS-encoded message is 2 bytes — decoded and put on *mq*.

    *started* is set once the FIFO is open for reading; *kill* asks the loop
    to exit. The descriptor is closed on both normal exit and error.
    """
    # TODO: right now the `kill` control flow is somewhat haphazard -> ensure
    # every loop-y or blocking part always checks kill.is_set() and
    # returns/cleans up early if so.

    # Open the reader in nonblocking mode: this returns immediately (it never
    # waits for a writer), which is what marks this process as "started".
    fd = os.open(path, OPEN_READER_FLAGS)
    started.set()
    print("(reader):", "started")

    # TODO: the framing scheme is intentionally extensible — signal bytes could
    # later drive a state machine (header/body/checksum frames, retransmission
    # requests, ...). For now only the basic framing above is implemented and
    # signal frames are ignored.
    buffer = bytearray()
    while not kill.is_set():
        try:
            # read available data (and try again if nothing)
            try:
                data = os.read(fd, PIPE_BUF)
                if data == b"":
                    # no writer connected right now -> poll again
                    time.sleep(POLL_INTERVAL)
                    continue
            except OSError as e:
                if e.errno != errno.EAGAIN:
                    raise

                # a connected writer with an empty pipe yields EAGAIN in
                # nonblocking mode -> consume this gracefully and try again
                time.sleep(POLL_INTERVAL)
                continue

            # extend buffer with new data
            buffer.extend(data)

            # no NULL byte in the buffer means no complete frame has formed yet
            chunks = buffer.split(sep=b"\x00")
            if len(chunks) == 1:
                continue

            # the last chunk is always an unfinished frame, so it becomes the
            # new buffer; the rest are complete frames to decode/dispatch
            buffer = chunks.pop()
            for chunk in chunks:
                chunk = bytes(chunk)

                # ignore empty frames (they mean nothing)
                if chunk == b"":
                    continue

                # 1-byte frames are control signals
                if len(chunk) == 1:
                    print("(reader):", f"gotten control signal: {chunk[0]}")
                    continue  # TODO: signals are ignored for now

                # >= 2 byte frames are COBS-encoded payloads
                decoded = cobs.decode(chunk)  # pyright: ignore[reportUnknownMemberType]
                mq.put(decoded)
        except BaseException as e:
            # perform cleanup & log before re-raising
            os.close(fd)
            logging.error(msg=f"Error when reading from named pipe at '{path}': {e}")
            raise
    os.close(fd)
|
||||
def _binary_object_dispatcher(
    mq: MQueueT[bytes], callback: Callable[[bytes], None], kill: TEventT
):
    """Thread target: drain *mq*, handing each binary message to *callback*.

    Runs until *kill* is set; uses a bounded-timeout get so the kill flag is
    re-checked periodically instead of blocking forever.
    """
    while True:
        if kill.is_set():
            return

        try:
            payload = mq.get(block=True, timeout=POLL_INTERVAL)
        except queue.Empty:
            continue

        # hand the binary object to the consumer
        callback(payload)
|
||||
def _pipe_buffer_writer(
    path: StrPath, mq: MQueueT[bytes], started: MEventT, kill: MEventT
):
    """Process target: drain *mq* and write each message to the FIFO at *path*.

    Phase 1 polls until a reader has created and opened the FIFO; phase 2
    writes each queued message as a COBS-encoded, NULL-terminated frame via
    _write_data. *kill* asks both loops to exit; the descriptor is closed on
    both normal exit and error.
    """
    # TODO: right now the `kill` control flow is somewhat haphazard -> ensure
    # every loop-y or blocking part always checks kill.is_set() and
    # returns/cleans up early if so.

    # for now, started events for the writer are rather vacuous
    # TODO: remove or make more useful??
    started.set()
    print("(writer):", "started")

    # Continually attempt to open the FIFO for writing in nonblocking mode;
    # this raises until the reader side is ready:
    #   - ENOENT[2] No such file or directory: until a reader creates the FIFO
    #   - ENXIO[6]  No such device or address: until a reader opens the FIFO
    fd = None
    while not kill.is_set():
        try:
            fd = os.open(path, os.O_WRONLY | os.O_NONBLOCK)

            # ensure the file that exists is a FIFO
            st = os.fstat(fd)
            print("mode:", st.st_mode & 0o170000)  # NOTE(review): leftover debug print — consider removing
            if stat.S_ISFIFO(st.st_mode):
                break

            # cleanup on error
            os.close(fd)
            raise FileExistsError(f"The file '{path}' isn't a FIFO")
        except FileExistsError:
            raise  # propagate error
        except OSError as e:
            # misc error, do not handle
            if not (e.errno == errno.ENOENT or e.errno == errno.ENXIO):
                raise

            # try again if waiting for FIFO creation or reader-end opening
            time.sleep(POLL_INTERVAL)
            continue
    # NOTE(review): if `kill` is set before a reader ever connects, fd is still
    # None here and this assert fires — consider an early return instead.
    assert fd is not None

    while not kill.is_set():
        try:
            # bounded get so the kill flag is re-checked periodically
            try:
                data = mq.get(block=True, timeout=POLL_INTERVAL)
            except queue.Empty:
                continue

            # write all data (by continually re-trying until it is done)
            _write_data(fd, data)
        except BaseException as e:
            # perform cleanup & log before re-raising
            os.close(fd)
            logging.error(msg=f"Error when writing to named pipe at '{path}': {e}")
            raise

    os.close(fd)
|
||||
def _write_data(fd: int, buf: bytes):
    """COBS-encode *buf*, append the NULL frame terminator, and write the whole
    frame to the nonblocking pipe *fd*, retrying until every byte is sent.

    - EAGAIN (pipe full): back off briefly and retry the remaining slice.
    - EPIPE (reader gone): emit a DISCARD_PREVIOUS signal so any future reader
      drops the partial frame, then retransmit from the beginning.
    - anything else propagates.
    """
    frame = cobs.encode(buf) + b"\x00"  # pyright: ignore[reportUnknownMemberType]
    frame_len = len(frame)
    offset = 0

    while offset < frame_len:
        try:
            # push whatever remains of the frame
            offset += os.write(fd, frame[offset:])
        except OSError as err:
            if err.errno == errno.EAGAIN:
                # non-blocking pipe is full: wait a bit and retry
                time.sleep(POLL_INTERVAL)
            elif err.errno == errno.EPIPE:
                # reader disconnected: signal DISCARD_PREVIOUS and start over
                _write_signal(fd, SignalMessage.DISCARD_PREVIOUS)
                offset = 0
            else:
                raise  # misc error, do not handle
|
||||
def _write_signal(fd: int, signal: SignalMessage):
    """Write a 1-byte control signal (plus the NULL terminator) to *fd*,
    polling until the 2-byte frame has been delivered.

    Retries on EAGAIN (pipe full) and EPIPE (no reader connected yet);
    any other OSError propagates.
    """
    SIGNAL_FRAME_LEN = 2

    # frame the signal byte by terminating it with a NULL byte
    frame = signal.value + b"\x00"
    assert len(frame) == SIGNAL_FRAME_LEN

    while True:
        try:
            written = os.write(fd, frame)
            # small writes (e.g. 2 bytes) should be atomic per pipe semantics,
            # so a successful write must have moved exactly the whole frame
            assert written == SIGNAL_FRAME_LEN
            return
        except OSError as err:
            # retry after a short sleep when the pipe is full (EAGAIN) or
            # broken because the reader disconnected (EPIPE)
            if err.errno not in (errno.EAGAIN, errno.EPIPE):
                raise  # misc error, do not handle
            time.sleep(POLL_INTERVAL)
|
||||
def _test_one_two_three():
    """Smoke-test two PipeDuplex endpoints talking over a pair of FIFOs,
    including replacing one endpoint mid-conversation."""
    one_path = "/tmp/one.pipe"
    two_path = "/tmp/two.pipe"
    for stale in (one_path, two_path):
        delete_if_exists(stale)

    # endpoint A: reads from `one`, writes to `two`
    owner = PipeDuplex(
        in_pipe=one_path,
        out_pipe=two_path,
        in_callback=lambda x: print(f"wow, owner got: [{len(x)}]{x}"),
    )

    # endpoint B: the mirror image of A
    guest = PipeDuplex(
        in_pipe=two_path,
        out_pipe=one_path,
        in_callback=lambda x: print(f"wow, guest1 got: [{len(x)}]{x}"),
    )

    # exchange a pair of zero-filled payloads in both directions
    owner.send_message(bytes(10))
    guest.send_message(bytes(200))
    time.sleep(1)

    # tear down the guest and bring up a replacement on the same FIFOs
    del guest
    guest = PipeDuplex(
        in_pipe=two_path,
        out_pipe=one_path,
        in_callback=lambda x: print(f"wow, guest2 got: [{len(x)}]{x}"),
    )

    guest.send_message(bytes(21))
    owner.send_message(bytes(12))
    time.sleep(1)

    for stale in (one_path, two_path):
        delete_if_exists(stale)
|
||||
def test_running_pipe_duplex(caplog: LogCaptureFixture):
    """End-to-end smoke test: run the one/two FIFO exchange and give the
    daemon workers a moment to drain before pytest tears down."""
    caplog.set_level(logging.INFO)

    _test_one_two_three()
    time.sleep(1)
|
||||
@@ -1,48 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from exo.shared.ipc.file_mutex.flock_mutex import FlockMutex, LockType
|
||||
from exo.utils.fs import delete_if_exists, make_temp_path
|
||||
|
||||
|
||||
def test_lock_held():
    """lock_held reflects acquire/release cycles for both lock types."""
    path = make_temp_path("testing_flock.lock")
    mutex = FlockMutex(path)

    # nothing held initially
    assert mutex.lock_held is None

    # a full acquire/release cycle in each mode, checking state at every step
    for mode in (LockType.WRITE, LockType.READ):
        assert mutex.acquire(lock_type=mode)
        assert mutex.lock_held == mode
        mutex.release()
        assert mutex.lock_held is None

    delete_if_exists(path)
|
||||
def test_no_reentrant_lock():
    """While a lock is held (in either mode), re-acquiring in any mode fails."""
    path = make_temp_path("testing_flock.lock")
    mutex = FlockMutex(path)

    for held_mode in (LockType.WRITE, LockType.READ):
        mutex.acquire(lock_type=held_mode)

        # reacquisition must be rejected regardless of the requested mode
        for requested_mode in (LockType.WRITE, LockType.READ):
            with pytest.raises(AssertionError):
                mutex.acquire(lock_type=requested_mode)

        mutex.release()

    delete_if_exists(path)
|
||||
Reference in New Issue
Block a user