From 16f50940d06615cc530cc5e7ab3e3f56f952026a Mon Sep 17 00:00:00 2001 From: Alexander Renz Date: Fri, 13 Jun 2025 04:24:11 +0200 Subject: [PATCH] release: hmac-file-server 3.2 --- .gitignore | 4 - CHANGELOG.MD | 154 + INSTALL.MD | 302 ++ MONITOR_OPTIMIZATION_SUMMARY.md | 0 PROTOCOL_SPECIFICATIONS.MD | 295 ++ README.MD | 592 ++- WIKI.MD | 1208 ++++++ build-multi-arch-fixed.sh | 0 build-multi-arch.sh | 196 + builddebian.sh | 407 ++ builddocker.sh | 15 + buildgo.sh | 80 + cmd/monitor/monitor.go | 1050 +++++ cmd/server/config.toml | 67 - cmd/server/config_test_scenarios.go | 294 ++ cmd/server/config_validator.go | 1131 ++++++ cmd/server/helpers.go | 713 ++++ cmd/server/main.go | 3373 +++++++++-------- config-example-xmpp.toml | 0 dashboard/dashboard.json | 723 ++-- dashboard/hmac_icon.png | Bin 0 -> 61498 bytes dockerenv/config/config.toml | 83 + .../duplicates/all-deduplications-here.txt | 0 dockerenv/data/logs/all-logs-here.txt | 0 dockerenv/data/uploads/all-uploads-here.txt | 0 dockerenv/docker-compose.yml | 17 + dockerenv/dockerbuild/Dockerfile | 27 + go.mod | 35 +- go.sum | 126 +- installer.sh | 1273 +++++++ test/hmac_test.go | 2 +- test/server_flags_test.go | 39 + test/test_installer_config.sh | 173 + verify_installation.sh | 230 ++ 34 files changed, 10354 insertions(+), 2255 deletions(-) delete mode 100644 .gitignore create mode 100644 CHANGELOG.MD create mode 100644 INSTALL.MD create mode 100644 MONITOR_OPTIMIZATION_SUMMARY.md create mode 100644 PROTOCOL_SPECIFICATIONS.MD create mode 100644 WIKI.MD create mode 100644 build-multi-arch-fixed.sh create mode 100755 build-multi-arch.sh create mode 100755 builddebian.sh create mode 100755 builddocker.sh create mode 100755 buildgo.sh create mode 100644 cmd/monitor/monitor.go delete mode 100644 cmd/server/config.toml create mode 100644 cmd/server/config_test_scenarios.go create mode 100644 cmd/server/config_validator.go create mode 100644 cmd/server/helpers.go create mode 100644 config-example-xmpp.toml create mode 
100644 dashboard/hmac_icon.png create mode 100644 dockerenv/config/config.toml create mode 100644 dockerenv/data/duplicates/all-deduplications-here.txt create mode 100644 dockerenv/data/logs/all-logs-here.txt create mode 100644 dockerenv/data/uploads/all-uploads-here.txt create mode 100644 dockerenv/docker-compose.yml create mode 100644 dockerenv/dockerbuild/Dockerfile create mode 100755 installer.sh create mode 100644 test/server_flags_test.go create mode 100755 test/test_installer_config.sh create mode 100755 verify_installation.sh diff --git a/.gitignore b/.gitignore deleted file mode 100644 index d996008..0000000 --- a/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json diff --git a/CHANGELOG.MD b/CHANGELOG.MD new file mode 100644 index 0000000..4ec4cbb --- /dev/null +++ b/CHANGELOG.MD @@ -0,0 +1,154 @@ +# Changelog + +> **Note:** This file is a technical changelog for developers and maintainers. For user-focused highlights, migration notes, and upgrade instructions, see [README.MD](./README.MD). + +All notable changes to this project will be documented in this file. 
+ +## [3.2] - Stable Release + +### Added (3.2) +- **Enhanced Documentation**: Comprehensive update of all documentation files to match current codebase +- **Protocol Specification Updates**: Detailed protocol documentation with implementation examples +- **Configuration Validation**: Improved configuration structure validation and error reporting +- **Developer Resources**: Updated build instructions and development setup guides + +### Changed (3.2) +- **Documentation Structure**: Reorganized documentation for better clarity and maintenance +- **Configuration Examples**: Updated all configuration examples to reflect current options +- **API Documentation**: Enhanced API endpoint documentation with comprehensive examples + +### Completed (3.2) +- **Feature Development**: Active development of new features and improvements +- **Testing Enhancements**: Expanded test coverage and validation +- **Performance Optimizations**: Ongoing performance improvements and monitoring + +--- + +## [3.1-Stable] - 2025-06-08 + +### Added (3.1) +- **v3 (mod_http_upload_external) Support**: Implemented secure file uploads using HMAC-SHA256 validation and expiration checks, specifically designed for Prosody's mod_http_upload_external compatibility. +- **JWT Authentication**: Complete JWT token authentication system with configurable algorithms and expiration times. +- **Multiple Authentication Protocols**: Support for legacy v1, enhanced v2, token-based, and v3 HMAC protocols alongside JWT authentication. +- **File Naming Strategy**: Configurable file naming options including HMAC-based, original filename preservation, or no specific naming convention. +- **Advanced Configuration Structure**: Comprehensive configuration sections including server, security, uploads, downloads, logging, deduplication, ISO, timeouts, versioning, ClamAV, Redis, and workers. 
+ +### Changed (3.1) +- **Enhanced HMAC Validation**: Improved validation logic to support multiple protocol versions (v1, v2, token, v3) with proper fallback mechanisms. +- **Authentication Priority**: Implemented authentication priority system with JWT taking precedence when enabled, falling back to HMAC protocols. +- **Network Protocol Support**: Enhanced IPv4/IPv6 dual-stack support with protocol forcing options (ipv4, ipv6, auto). +- **Configuration Hot-Reloading**: Added support for reloading logging configuration via SIGHUP signal without full server restart. + +### Fixed (3.1) +- **Protocol Compatibility**: Addressed compatibility issues with different HMAC protocol versions and mod_http_upload_external clients. +- **Error Handling**: Improved error handling for invalid or expired signatures during file uploads. +- **Configuration Validation**: Enhanced configuration validation to prevent common misconfigurations. + +--- + +## [3.0-Stable] - 2025-06-07 + +### Added (3.0) +- Official Docker Compose support and example (`dockerenv/docker-compose.yml`). +- Multi-stage Dockerfile for minimal images (`dockerenv/dockerbuild/Dockerfile`). +- Extended documentation for Docker, Compose, and deployment paths. +- Quickstart and configuration examples for containerized environments. +- Monitoring and Prometheus metrics documentation improvements. +- **Seamless IPv4 and IPv6 support:** The server now automatically supports both IPv4 and IPv6 connections out of the box, with improved dual-stack handling and configuration via `forceprotocol`. + +### Changed (3.0) +- Minimum Go version is now **1.24** (was 1.20). +- Updated all documentation and config examples to reflect new version and Docker usage. +- Improved configuration normalization and environment variable overrides for containers. +- Enhanced worker pool and resource auto-scaling logic. + +### Fixed (3.0) +- Minor bugfixes for config parsing and Docker path handling. 
+- Improved error messages for missing or invalid configuration in container environments. + +--- + +## [2.8-Stable] - 2026-05-01 + +### Added (2.8) +- Version check history for improved tracking. +- Enhanced ClamAV scanning with concurrent workers. + +### Changed (2.8) +- Improved ISO-based storage for specialized use cases. +- Auto-scaling workers for optimized performance. + +### Fixed (2.8) +- Minor issues in worker thread adjustments under high load. + +--- + +## [2.7] - 2026-02-10 + +### Added (2.7) +- Concurrency improvements and auto-scaling worker enhancements +- Cleanup and removal of unused parameters in sorting functions + +### Changed (2.7) +- Additional logging for file scanning operations + +### Fixed (2.7) +- Minor stability issues related to ISO container mounting +- Fixed dual stack for upload (IPv4/IPv6) + +--- + +## [2.6-Stable] - 2025-12-01 + +### Added (2.6) +- Deduplication support (removes duplicate files). +- ISO Container management. +- Dynamic worker scaling based on CPU & memory. +- PreCaching feature for faster file access. + +### Changed (2.6) +- Worker pool scaling strategies for better performance. +- Enhanced logging with rotating logs using lumberjack. + +### Fixed (2.6) +- Temporary file handling issues causing "Unsupported file type" warnings. +- MIME type checks for file extension mismatches. + +--- + +## [2.5] - 2025-09-15 + +### Added (2.5) +- Redis caching integration for file metadata. +- ClamAV scanning for virus detection before finalizing uploads. + +### Changed (2.5) +- Extended the default chunk size for chunked uploads. +- Updated official documentation links. + +### Fixed (2.5) +- Edge case with versioning causing file rename conflicts. + +--- + +## [2.0] - 2025-06-01 + +### Added (2.0) +- Chunked file uploads and downloads. +- Resumable upload support with partial file retention. + +### Changed (2.0) +- Moved configuration management to Viper. +- Default Prometheus metrics for tracking memory & CPU usage. 
+ +### Fixed (2.0) +- Race conditions in file locking under heavy concurrency. + +--- + +## [1.0] - 2025-01-01 + +### Added (1.0) +- Initial release with HMAC-based authentication. +- Basic file upload/download endpoints. +- Logging and fundamental configuration using .toml files. diff --git a/INSTALL.MD b/INSTALL.MD new file mode 100644 index 0000000..a5f9040 --- /dev/null +++ b/INSTALL.MD @@ -0,0 +1,302 @@ +# HMAC File Server 3.2 Installation Guide + +## Quick Installation for XMPP Operators + +The HMAC File Server includes an automated installer script designed specifically for XMPP operators who want to quickly deploy a file sharing service for their chat servers. + +### Prerequisites + +- Linux system with systemd (Ubuntu 18.04+, CentOS 7+, Debian 9+, etc.) +- Root or sudo access +- At least 1GB free disk space +- Internet connection for downloading dependencies + +### Installation + +1. **Download or clone the repository:** + ```bash + git clone https://github.com/PlusOne/hmac-file-server.git + cd hmac-file-server + ``` + +2. **Run the installer:** + ```bash + sudo ./installer.sh + ``` + + **Alternative: Pre-set secrets via environment variables:** + ```bash + # For automation or if interactive input doesn't work + HMAC_SECRET='your-super-secret-hmac-key-here-minimum-32-characters' sudo -E ./installer.sh + + # With both HMAC and JWT secrets + HMAC_SECRET='your-hmac-secret-32-chars-minimum' \ + JWT_SECRET='your-jwt-secret-also-32-chars-minimum' \ + sudo -E ./installer.sh + ``` + +3. 
**Follow the interactive prompts:** + - System user (default: `hmac-server`) + - Installation directories + - Server ports + - **HMAC secret**: Choose automatic generation (recommended) or enter manually + - **Optional features** (JWT, Redis, ClamAV, SSL/TLS) + - **JWT secret**: Also supports automatic generation if enabled + +### Configuration Options + +#### Core Settings +- **Server Port**: Default 8080 (HTTP file server) +- **Metrics Port**: Default 9090 (Prometheus metrics) +- **HMAC Secret**: Strong secret for authentication + - **Automatic generation** (recommended): Creates 48-character secure random key + - **Manual entry**: Minimum 32 characters required + - **Environment variable**: `HMAC_SECRET='your-secret'` + +#### Optional Features +- **JWT Authentication**: Token-based auth for enhanced security + - **Automatic generation** available for JWT secrets + - Configurable expiration and algorithms +- **Redis Integration**: For session management and caching +- **ClamAV Scanning**: Real-time virus scanning of uploaded files +- **SSL/TLS**: Direct HTTPS support (or use reverse proxy) + +### XMPP Server Integration + +#### Prosody Configuration +Add to your Prosody configuration (uses the community module `mod_http_upload_external`, which this server's v3 protocol is designed for; the secret must match the server's HMAC secret): +```lua +Component "upload.yourdomain.com" "http_upload_external" + http_upload_external_base_url = "https://upload.yourdomain.com/" + http_upload_external_secret = "your-hmac-secret" +``` + +#### Ejabberd Configuration +Add to your Ejabberd configuration (`mod_http_upload` with an external secret): +```yaml +mod_http_upload: + external_secret: "your-hmac-secret" + put_url: "http://localhost:8080" +``` + +### Post-Installation + +1. **Start the service:** + ```bash + sudo systemctl start hmac-file-server + ``` + +2. **Check status:** + ```bash + sudo systemctl status hmac-file-server + ``` + +3. **View logs:** + ```bash + sudo journalctl -u hmac-file-server -f + ``` + +4.
**Configure firewall (required):** + ```bash + # Example for ufw (Ubuntu/Debian) + sudo ufw allow 8080/tcp comment "HMAC File Server" + sudo ufw allow 9090/tcp comment "HMAC File Server Metrics" + + # Example for firewalld (CentOS/RHEL/Fedora) + sudo firewall-cmd --permanent --add-port=8080/tcp + sudo firewall-cmd --permanent --add-port=9090/tcp + sudo firewall-cmd --reload + + # Example for iptables (manual) + sudo iptables -A INPUT -p tcp --dport 8080 -j ACCEPT + sudo iptables -A INPUT -p tcp --dport 9090 -j ACCEPT + ``` + +5. **Configure reverse proxy (recommended):** + ```nginx + server { + listen 443 ssl http2; + server_name upload.yourdomain.com; + + ssl_certificate /path/to/cert.pem; + ssl_certificate_key /path/to/key.pem; + + location / { + proxy_pass http://localhost:8080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # File upload settings + client_max_body_size 100M; + proxy_request_buffering off; + } + } + ``` + +### File Locations + +After installation: +- **Binary**: `/opt/hmac-file-server/hmac-file-server` +- **Configuration**: `/etc/hmac-file-server/config.toml` +- **Uploads**: `/var/lib/hmac-file-server/uploads/` +- **Logs**: `/var/log/hmac-file-server/hmac-file-server.log` + +### Management Commands + +```bash +# Service management +sudo systemctl start hmac-file-server +sudo systemctl stop hmac-file-server +sudo systemctl restart hmac-file-server +sudo systemctl reload hmac-file-server + +# View logs +sudo journalctl -u hmac-file-server -f +sudo tail -f /var/log/hmac-file-server/hmac-file-server.log + +# Edit configuration +sudo nano /etc/hmac-file-server/config.toml +sudo systemctl reload hmac-file-server # Apply changes +``` + +### Uninstallation + +The HMAC File Server installer includes a comprehensive uninstallation system with advanced data preservation options: + +```bash +sudo ./installer.sh 
--uninstall +``` + +#### Safe Uninstallation Features + +๐Ÿ”’ **Interactive Confirmation System** +- Multiple confirmation steps prevent accidental data loss +- Automatic detection of data directories from configuration +- Smart backup system with timestamped backups in `/var/backups/hmac-file-server-*` +- Detailed reporting showing file counts and directory sizes + +#### Five Data Handling Options + +**1. ๐Ÿ—‘๏ธ Complete Removal** +- Deletes all data including uploads, deduplication files, and logs +- Requires typing "DELETE" for final confirmation +- Provides comprehensive warning about permanent data loss + +**2. ๐Ÿ’พ Preserve Uploads and Deduplication** +- Preserves critical user files and deduplication data +- Removes logs (typically not needed for data recovery) +- Ideal for system migration or reinstallation + +**3. ๐Ÿ“‹ Preserve All Data** +- Keeps uploads, deduplication data, and logs +- Comprehensive data preservation option +- Best for troubleshooting or temporary removal + +**4. ๐ŸŽฏ Custom Selection** +- Interactive selection of which directories to preserve +- Shows detailed information for each directory before decision +- Allows granular control over data preservation + +**5. โŒ Cancel Operation** +- Safely exits without making any changes +- No system modifications performed + +#### What Gets Removed (Service Components) +- โœ“ Systemd service (stopped and disabled) +- โœ“ Installation directory (`/opt/hmac-file-server/`) +- โœ“ Configuration files (`/etc/hmac-file-server/`) +- โœ“ System user (`hmac-server`) +- โœ“ Any remaining binaries + +#### Data Backup Location +When data preservation is selected, files are moved to: +- `/var/backups/hmac-file-server-TIMESTAMP/` +- Timestamped directories for multiple backup versions +- Preserves original directory structure + +**โš ๏ธ Important**: The uninstaller provides multiple safety checks and data preservation options. Choose wisely based on your needs! + +### Security Considerations + +1. 
**Configure firewall properly** - Only allow necessary ports (8080, 9090) to authorized networks +2. **Use strong HMAC secrets** (minimum 32 characters, use random generators) +3. **Enable JWT authentication** for enhanced security +4. **Set up SSL/TLS** either directly or via reverse proxy +5. **Enable ClamAV** for virus scanning if handling untrusted files +6. **Regular backups** of configuration and uploaded files +7. **Monitor logs** for suspicious activity +8. **Restrict network access** - Consider limiting access to internal networks only + +### Monitoring + +The server provides Prometheus metrics at `/metrics` endpoint: +```bash +curl http://localhost:9090/metrics +``` + +Key metrics to monitor: +- `hmac_requests_total` - Total requests +- `hmac_upload_size_bytes` - Upload sizes +- `hmac_errors_total` - Error counts +- `hmac_active_connections` - Active connections + +### Troubleshooting + +#### Service won't start +1. Check logs: `sudo journalctl -u hmac-file-server -f` +2. Verify configuration: `sudo nano /etc/hmac-file-server/config.toml` +3. Check permissions on data directories +4. Ensure ports are not in use: `sudo netstat -tlnp | grep :8080` + +#### High memory usage +1. Adjust worker settings in configuration +2. Enable Redis for session management +3. Check for large file uploads in progress + +#### Files not uploading +1. Verify HMAC secret matches between XMPP server and file server +2. Check file size limits in configuration +3. Ensure sufficient disk space +4. 
Review ClamAV logs if virus scanning enabled + +### Support + +- **Documentation**: See `README.MD` and `WIKI.MD` +- **Protocol Details**: See `PROTOCOL_SPECIFICATIONS.MD` +- **Issues**: GitHub issue tracker +- **Configuration**: All options documented in `WIKI.MD` + +### Example Production Setup + +For a production XMPP server with 1000+ users: + +```toml +[server] +listenport = "8080" +metricsenabled = true +deduplicationenabled = true + +[security] +enablejwt = true +# Strong secrets here + +[uploads] +maxfilesize = "50MB" +ttlenabled = true +ttl = "720h" # 30 days + +[workers] +max = 200 +autoscaling = true + +[redis] +enabled = true +host = "localhost" +port = 6379 + +[clamav] +enabled = true +``` + +This setup provides robust file sharing with deduplication, automatic cleanup, virus scanning, and scalable worker management. diff --git a/MONITOR_OPTIMIZATION_SUMMARY.md b/MONITOR_OPTIMIZATION_SUMMARY.md new file mode 100644 index 0000000..e69de29 diff --git a/PROTOCOL_SPECIFICATIONS.MD b/PROTOCOL_SPECIFICATIONS.MD new file mode 100644 index 0000000..7bb9ddf --- /dev/null +++ b/PROTOCOL_SPECIFICATIONS.MD @@ -0,0 +1,295 @@ +# HMAC File Server Authentication Protocol Specifications + +This document outlines the different authentication protocols supported by the HMAC File Server for secure file uploads and downloads. The server supports multiple authentication methods to ensure backward compatibility while providing enhanced security features. + +## Overview + +The HMAC File Server supports two primary authentication mechanisms: +1. **HMAC-based Authentication** (Multiple versions: v1, v2, token, v3) +2. **JWT Authentication** (Bearer tokens) + +All protocols use SHA256 hashing and require a shared secret key configured on the server. 
+ +--- + +## HMAC Authentication Protocols + +### Common Elements +- **Algorithm**: HMAC-SHA256 +- **Secret**: Shared secret key configured in `[security]` section +- **Transport**: URL query parameters for HMAC, headers for signatures +- **Encoding**: Hexadecimal encoding for HMAC values + +--- + +### Legacy v1 Protocol (`v` parameter) + +**Overview**: The original HMAC authentication protocol. + +**URL Format**: +``` +PUT /filename.ext?v=HMAC_SIGNATURE +``` + +**Message Construction**: +``` +fileStorePath + "\x20" + contentLength +``` + +**Example**: +```bash +# For file "test.txt" with 1024 bytes +# Message: "test.txt\x201024" +curl -X PUT "http://localhost:8080/test.txt?v=a1b2c3d4..." --data-binary @test.txt +``` + +**Implementation Notes**: +- Uses space character (`\x20`) as separator +- Content-Length header must be accurate +- Simplest protocol, minimal metadata validation + +--- + +### Enhanced v2 Protocol (`v2` parameter) + +**Overview**: Enhanced version including content type validation. + +**URL Format**: +``` +PUT /filename.ext?v2=HMAC_SIGNATURE +``` + +**Message Construction**: +``` +fileStorePath + "\x00" + contentLength + "\x00" + contentType +``` + +**Example**: +```bash +# For file "document.pdf" with 2048 bytes +# Message: "document.pdf\x002048\x00application/pdf" +curl -X PUT "http://localhost:8080/document.pdf?v2=e5f6g7h8..." --data-binary @document.pdf +``` + +**Implementation Notes**: +- Uses null characters (`\x00`) as separators +- Content-Type automatically detected from file extension +- Fallback to "application/octet-stream" for unknown extensions + +--- + +### Token Protocol (`token` parameter) + +**Overview**: Alternative parameter name for v2-style authentication. 
+ +**URL Format**: +``` +PUT /filename.ext?token=HMAC_SIGNATURE +``` + +**Message Construction**: Same as v2 protocol +``` +fileStorePath + "\x00" + contentLength + "\x00" + contentType +``` + +**Example**: +```bash +curl -X PUT "http://localhost:8080/image.jpg?token=i9j0k1l2..." --data-binary @image.jpg +``` + +**Implementation Notes**: +- Identical to v2 protocol but uses `token` parameter +- Useful for clients that prefer different parameter naming + +--- + +### v3 Protocol - mod_http_upload_external Compatible (`v3` parameter) + +**Overview**: Specifically designed for Prosody's `mod_http_upload_external` compatibility with expiration support. + +**URL Format**: +``` +PUT /path/to/file.ext?v3=HMAC_SIGNATURE&expires=UNIX_TIMESTAMP +``` + +**Message Construction**: +``` +METHOD + "\n" + expires + "\n" + requestPath +``` + +**Example**: +```bash +# Current timestamp: 1717804800 +# Message: "PUT\n1717804800\n/upload/myfile.txt" +curl -X PUT "http://localhost:8080/upload/myfile.txt?v3=m3n4o5p6...&expires=1717804800" --data-binary @myfile.txt +``` + +**Verification Process**: +1. Extract `v3` signature and `expires` timestamp +2. Validate `expires` is in the future +3. Construct message: `"{METHOD}\n{expires}\n{path}"` +4. Calculate HMAC-SHA256 of message +5. Compare with provided signature + +**Implementation Notes**: +- Includes expiration timestamp validation +- Prevents replay attacks through time-based validation +- Path-only signing (no query parameters in signed message) +- HTTP method is part of the signed message + +--- + +## JWT Authentication + +**Overview**: Token-based authentication using JSON Web Tokens. 
+ +**Configuration**: +```toml +[security] +enablejwt = true +jwtsecret = "your-256-bit-secret" +jwtalgorithm = "HS256" +jwtexpiration = "24h" +``` + +**Header Format**: +``` +Authorization: Bearer JWT_TOKEN +``` + +**Fallback Query Parameter**: +``` +GET /file.txt?token=JWT_TOKEN +``` + +**Example Usage**: +```bash +# Header-based JWT +curl -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIs..." http://localhost:8080/file.txt + +# Query parameter fallback +curl "http://localhost:8080/file.txt?token=eyJhbGciOiJIUzI1NiIs..." +``` + +**JWT Claims**: Standard JWT claims (exp, iat, iss, etc.) as configured. + +--- + +## POST Upload Authentication + +### X-Signature Header Method + +**Overview**: For multipart form uploads via POST requests. + +**Header Format**: +``` +X-Signature: HMAC_OF_REQUEST_PATH +``` + +**Message Construction**: +``` +requestPath (e.g., "/upload") +``` + +**Example**: +```bash +# HMAC of "/upload" +curl -X POST \ + -H "X-Signature: CALCULATED_HMAC" \ + -F 'file=@myfile.txt' \ + http://localhost:8080/upload +``` + +--- + +## Authentication Priority and Fallbacks + +The server checks authentication in the following order: + +1. **JWT Authentication** (if `enablejwt = true`) + - Authorization header (Bearer token) + - Query parameter `token` + +2. 
**HMAC Authentication** (if JWT disabled or not found) + - X-Signature header (for POST uploads) + - v3 protocol (with expires validation) + - v2 protocol + - token protocol + - v1 protocol (legacy) + +--- + +## Security Considerations + +### HMAC Protocols +- **Secret Management**: Use strong, randomly generated secrets +- **Time Validation**: v3 protocol includes expiration to prevent replay attacks +- **Content Validation**: v2/token protocols include content-type validation +- **Path Sanitization**: All protocols validate and sanitize file paths + +### JWT Authentication +- **Token Expiration**: Configure appropriate expiration times +- **Secret Rotation**: Regularly rotate JWT signing keys +- **Algorithm Security**: Default HS256 is secure for most use cases +- **Transport Security**: Always use HTTPS in production + +### General Security +- **HTTPS Only**: Use TLS encryption for all production deployments +- **Rate Limiting**: Implement reverse proxy rate limiting +- **File Validation**: Configure allowed file extensions +- **Virus Scanning**: Enable ClamAV integration for malware detection +- **Access Logs**: Monitor authentication failures and suspicious activity + +--- + +## Migration Guide + +### From v1 to v2 +- Update HMAC calculation to include content type +- Change separator from `\x20` to `\x00` +- No breaking changes in URL structure + +### From HMAC to JWT +- Set `enablejwt = true` in configuration +- Generate JWT tokens server-side or use external auth provider +- HMAC authentication remains available as fallback + +### Adding v3 Support +- Implement expiration timestamp generation +- Update HMAC calculation to include HTTP method and expiration +- Useful for mod_http_upload_external compatibility + +--- + +## Example Implementations + +### Client-Side HMAC Generation (Python) +```python +import hmac +import hashlib +import time + +def generate_v3_signature(secret, method, expires, path): + message = f"{method}\n{expires}\n{path}" + signature = 
hmac.new( + secret.encode(), + message.encode(), + hashlib.sha256 + ).hexdigest() + return signature + +# Example usage +secret = "your-hmac-secret" +expires = int(time.time()) + 3600 # 1 hour from now +signature = generate_v3_signature(secret, "PUT", expires, "/upload/file.txt") +``` + +### Server-Side Validation (Reference) +See the main.go file for complete implementation details of all validation functions: +- `validateHMAC()`: Legacy v1, v2, and token protocols +- `validateV3HMAC()`: v3 protocol with expiration +- `validateJWTFromRequest()`: JWT validation + +--- + +This specification ensures consistent implementation across clients and provides multiple authentication options for different use cases and security requirements. diff --git a/README.MD b/README.MD index e169cb6..1b2b5f0 100644 --- a/README.MD +++ b/README.MD @@ -1,216 +1,454 @@ -# HMAC File Server Release Notes +# HMAC File Server 3.2 -**HMAC File Server** is a secure, scalable, and feature-rich file server with advanced capabilities like HMAC authentication, resumable uploads, chunked uploads, file versioning, and optional ClamAV scanning for file integrity and security. This server is built with extensibility and operational monitoring in mind, including Prometheus metrics support and Redis integration. +## Overview +The **HMAC File Server** ensures secure file uploads and downloads using HMAC authentication and JWT tokens. It incorporates comprehensive security features, file versioning, deduplication, ISO container support, virus scanning, and Unix socket support for enhanced flexibility. Redis integration provides efficient caching and session management. Prometheus metrics and graceful shutdown mechanisms ensure reliable and efficient file handling. + +Special thanks to **Thomas Leister** for inspiration drawn from [prosody-filer](https://github.com/ThomasLeister/prosody-filer). 
## Features +- **Multiple Authentication Methods**: HMAC-based authentication and JWT token support +- **Multiple Protocol Support**: v1 (legacy), v2, v3 (mod_http_upload_external), and token-based uploads +- **File Management**: Deduplication, configurable TTL for automatic file cleanup +- **Upload Methods**: POST multipart uploads, PUT uploads for legacy protocols, v3 protocol support +- **Security**: Virus scanning via ClamAV, configurable file extensions validation +- **Performance**: Chunked uploads and downloads, worker pool management with auto-scaling +- **Storage Options**: Local storage, ISO container mounting for specialized needs +- **Monitoring**: Prometheus metrics integration with detailed system and operation metrics +- **Network Support**: IPv4/IPv6 dual-stack support with protocol forcing options +- **Configuration**: Hot-reloading of logging settings via SIGHUP signal -- **HMAC Authentication:** Secure file uploads and downloads with HMAC tokens. -- **File Versioning:** Enable versioning for uploaded files with configurable retention. -- **Chunked and Resumable Uploads:** Handle large files efficiently with support for resumable and chunked uploads. -- **ClamAV Scanning:** Optional virus scanning for uploaded files. -- **Prometheus Metrics:** Monitor system and application-level metrics. -- **Redis Integration:** Use Redis for caching or storing application states. -- **File Expiration:** Automatically delete files after a specified TTL. -- **Graceful Shutdown:** Handles signals and ensures proper cleanup. -- **Deduplication:** Remove duplicate files based on hashing for storage efficiency. +## Table of Contents +1. [Installation](#installation) +2. [Configuration](#configuration) +3. [Authentication](#authentication) +4. [API Endpoints](#api-endpoints) +5. [Usage Examples](#usage-examples) +6. [Setup](#setup) + - [Reverse Proxy](#reverse-proxy) + - [Systemd Service](#systemd-service) +7. [Building](#building) +8. 
[Docker Support](#docker-support) +9. [Changelog](#changelog) +10. [License](#license) --- ## Installation -### Prerequisites +### Quick Installation for XMPP Operators -- Go 1.20+ -- Redis (optional, if Redis integration is enabled) -- ClamAV (optional, if file scanning is enabled) - -### Clone and Build +The easiest way to install HMAC File Server is using the automated installer: ```bash -git clone https://github.com/your-repo/hmac-file-server.git +git clone https://github.com/PlusOne/hmac-file-server.git cd hmac-file-server -go build -o hmac-file-server main.go +sudo ./installer.sh ``` +The installer will: +- Install Go 1.24 (if needed) +- Create system user and directories +- Build and configure the server +- Set up systemd service +- Optionally install Redis and ClamAV + +For detailed installation instructions, see [INSTALL.MD](INSTALL.MD). + +### Manual Installation + +> **Tip:** You can also run HMAC File Server using Docker Compose for easy deployment. See the Wiki for Docker setup instructions. The official image is available at `ghcr.io/plusone/hmac-file-server:latest`. + +#### Prerequisites +- Go **1.24** or higher +- Redis server (optional, for caching) +- ClamAV (optional, for virus scanning) + +#### Steps +1. Clone the repository: + ```bash + git clone https://github.com/PlusOne/hmac-file-server.git + cd hmac-file-server + ``` + +2. Build the server (build the whole package directory — the server spans multiple files, so building `main.go` alone fails): + ```bash + go build -o hmac-file-server ./cmd/server + ``` + +3. Generate example configuration: + ```bash + ./hmac-file-server -genconfig + # or write to file: + ./hmac-file-server -genconfig-path config.toml + ``` + +4. Create necessary directories: + ```bash + mkdir -p /path/to/hmac-file-server/data/ + mkdir -p /path/to/hmac-file-server/deduplication/ + ``` + +5. Edit your `config.toml` file with appropriate settings. + +6.
Start the server: + ```bash + ./hmac-file-server -config config.toml + ``` + +--- + +## Uninstallation + +The installer script provides comprehensive uninstallation options with data preservation: + +```bash +sudo ./installer.sh --uninstall +``` + +### Uninstall Options + +The uninstaller offers five data handling options: + +1. **๐Ÿ—‘๏ธ Delete all data** - Complete removal (requires typing "DELETE" to confirm) +2. **๐Ÿ’พ Preserve uploads and deduplication data** - Keeps important files, removes logs +3. **๐Ÿ“‹ Preserve all data** - Keeps uploads, deduplication data, and logs +4. **๐ŸŽฏ Custom selection** - Choose exactly what to preserve +5. **โŒ Cancel** - Exit without making changes + +### Data Preservation + +When preserving data, the uninstaller: +- Creates timestamped backups in `/var/backups/hmac-file-server-YYYYMMDD-HHMMSS/` +- Shows file counts and sizes before deletion decisions +- Safely moves data to backup locations +- Provides clear feedback on what was preserved or removed + +### What Gets Removed + +The uninstaller always removes: +- โœ“ Systemd service and service file +- โœ“ Installation directory (`/opt/hmac-file-server`) +- โœ“ Configuration directory (`/etc/hmac-file-server`) +- โœ“ System user (`hmac-server`) +- โœ“ Binary files in common locations + +Data directories are handled according to your selection. + --- ## Configuration -The server configuration is managed through a `config.toml` file. 
Below are the supported configuration options: +The server uses a comprehensive `config.toml` file with the following main sections: -### **Server Configuration** +### Key Configuration Sections -| Key | Description | Example | -|------------------------|-----------------------------------------------------|---------------------------------| -| `ListenPort` | Port or Unix socket to listen on | `":8080"` | -| `UnixSocket` | Use a Unix socket (`true`/`false`) | `false` | -| `Secret` | Secret key for HMAC authentication | `"your-secret-key"` | -| `StoragePath` | Directory to store uploaded files | `"/mnt/storage/hmac-file-server"` | -| `LogLevel` | Logging level (`info`, `debug`, etc.) | `"info"` | -| `LogFile` | Log file path (optional) | `"/var/log/hmac-file-server.log"` | -| `MetricsEnabled` | Enable Prometheus metrics (`true`/`false`) | `true` | -| `MetricsPort` | Prometheus metrics server port | `"9090"` | -| `FileTTL` | File Time-to-Live duration | `"168h0m0s"` | -| `DeduplicationEnabled` | Enable file deduplication based on hashing | `true` | -| `MinFreeBytes` | Minimum free space required on storage path (in bytes) | `104857600` | +- **[server]**: Basic server settings (port, storage, metrics) +- **[security]**: HMAC secrets, JWT configuration +- **[uploads/downloads]**: File handling settings, allowed extensions +- **[logging]**: Log levels, file rotation settings +- **[clamav]**: Virus scanning configuration +- **[redis]**: Cache and session management +- **[workers]**: Thread pool and performance tuning +- **[iso]**: ISO container mounting (specialized storage) +- **[timeouts]**: HTTP timeout configurations -### **Uploads** - -| Key | Description | Example | -|----------------------------|-----------------------------------------------|-------------| -| `ResumableUploadsEnabled` | Enable resumable uploads | `true` | -| `ChunkedUploadsEnabled` | Enable chunked uploads | `true` | -| `ChunkSize` | Chunk size for chunked uploads (bytes) | `1048576` | -| 
`AllowedExtensions` | Allowed file extensions for uploads | `[".png", ".jpg"]` | - -### **Time Settings** - -| Key | Description | Example | -|------------------|--------------------------------|----------| -| `ReadTimeout` | HTTP server read timeout | `"2h"` | -| `WriteTimeout` | HTTP server write timeout | `"2h"` | -| `IdleTimeout` | HTTP server idle timeout | `"2h"` | - -### **ClamAV Configuration** - -| Key | Description | Example | -|--------------------|-------------------------------------------|----------------------------------| -| `ClamAVEnabled` | Enable ClamAV virus scanning (`true`) | `true` | -| `ClamAVSocket` | Path to ClamAV Unix socket | `"/var/run/clamav/clamd.ctl"` | -| `NumScanWorkers` | Number of workers for file scanning | `2` | - -### **Redis Configuration** - -| Key | Description | Example | -|----------------------------|----------------------------------|-------------------| -| `RedisEnabled` | Enable Redis integration | `true` | -| `RedisDBIndex` | Redis database index | `0` | -| `RedisAddr` | Redis server address | `"localhost:6379"`| -| `RedisPassword` | Password for Redis authentication| `""` | -| `RedisHealthCheckInterval` | Health check interval for Redis | `"30s"` | - -### **Workers and Connections** - -| Key | Description | Example | -|------------------------|------------------------------------|-------------------| -| `NumWorkers` | Number of upload workers | `2` | -| `UploadQueueSize` | Size of the upload queue | `50` | - ---- - -## Running the Server - -### Basic Usage - -Run the server with a configuration file: - -```bash -./hmac-file-server -config ./config.toml -``` - -### Metrics Server - -If `MetricsEnabled` is `true`, the Prometheus metrics server will run on the port specified in `MetricsPort` (default: `9090`). - ---- - -## Development Notes - -- **Versioning:** Enabled via `EnableVersioning`. Ensure `MaxVersions` is set appropriately to prevent storage issues. 
-- **File Cleaner:** The file cleaner runs hourly and deletes files older than the configured `FileTTL`. -- **Redis Health Check:** Automatically monitors Redis connectivity and logs warnings on failure. - ---- - -## Testing - -To run the server locally for development: - -```bash -go run main.go -config ./config.toml -``` - -Use tools like **cURL** or **Postman** to test file uploads and downloads. - -### Example File Upload with HMAC Token - -```bash -curl -X PUT -H "Authorization: Bearer " -F "file=@example.txt" http://localhost:8080/uploads/example.txt -``` - -Replace `` with a valid HMAC signature generated using the configured `Secret`. - ---- - -## Monitoring - -Prometheus metrics include: -- File upload/download durations -- Memory usage -- CPU usage -- Active connections -- HTTP requests metrics (total, method, path) - ---- - -## Example `config.toml` +### Example Configuration ```toml [server] +bind_ip = "0.0.0.0" listenport = "8080" unixsocket = false -storagepath = "/mnt/storage/" -loglevel = "info" -logfile = "/var/log/file-server.log" +storagepath = "./uploads" metricsenabled = true metricsport = "9090" -DeduplicationEnabled = true -filettl = "336h" # 14 days -minfreebytes = 104857600 # 100 MB in bytes - -[timeouts] -readtimeout = "4800s" -writetimeout = "4800s" -idletimeout = "24h" +deduplicationenabled = true +filenaming = "HMAC" # Options: "HMAC", "original", "None" +forceprotocol = "auto" # Options: "ipv4", "ipv6", "auto" [security] -secret = "example-secret-key" - -[versioning] -enableversioning = false -maxversions = 1 +secret = "your-secure-hmac-secret" +enablejwt = false +jwtsecret = "your-jwt-secret" +jwtalgorithm = "HS256" +jwtexpiration = "24h" [uploads] -resumableuploadsenabled = true +allowedextensions = [".txt", ".pdf", ".jpg", ".png", ".zip"] chunkeduploadsenabled = true -chunksize = 8192 -allowedextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp", ".wav", ".mp4", ".avi", ".mkv", ".mov", 
".wmv", ".flv", ".webm", ".mpeg", ".mpg", ".m4v", ".3gp", ".3g2", ".mp3", ".ogg"] - -[clamav] -clamavenabled = true -clamavsocket = "/var/run/clamav/clamd.ctl" -numscanworkers = 2 - -[redis] -redisenabled = true -redisdbindex = 0 -redisaddr = "localhost:6379" -redispassword = "" -redishealthcheckinterval = "120s" - -[workers] -numworkers = 2 -uploadqueuesize = 50 +chunksize = "10MB" ``` -This configuration file is set up with essential features like Prometheus integration, ClamAV scanning, and file handling with deduplication and versioning options. Adjust the settings according to your infrastructure needs. +For complete configuration details, see the [Wiki](./WIKI.MD). -### Additional Features +--- -- **Deduplication**: Automatically remove duplicate files based on hashing. -- **Versioning**: Store multiple versions of files and keep a maximum of `MaxVersions` versions. -- **ClamAV Integration**: Scan uploaded files for viruses using ClamAV. -- **Redis Caching**: Utilize Redis for caching file metadata for faster access. +## Authentication -This release ensures an efficient and secure file management system, suited for environments requiring high levels of data security and availability. -``` \ No newline at end of file +The server supports multiple authentication methods: + +### 1. HMAC Authentication (Default) +- **Legacy v1**: Basic HMAC with path + content length +- **v2**: Enhanced HMAC with content type validation +- **Token**: Alternative HMAC parameter name +- **v3**: mod_http_upload_external compatible with expiration + +### 2. 
JWT Authentication +When `enablejwt = true`: +- Bearer tokens in Authorization header +- Query parameter `token` as fallback +- Configurable expiration and algorithm + +### Authentication Examples + +```bash +# HMAC v2 upload +curl -X PUT "http://localhost:8080/myfile.txt?v2=HMAC_SIGNATURE" -d @file.txt + +# JWT upload +curl -X POST -H "Authorization: Bearer JWT_TOKEN" -F 'file=@myfile.txt' http://localhost:8080/upload + +# v3 protocol (mod_http_upload_external) +curl -X PUT "http://localhost:8080/upload/file.txt?v3=SIGNATURE&expires=TIMESTAMP" -d @file.txt +``` + +--- + +## API Endpoints + +### Upload Endpoints +- **POST /upload**: Multipart form uploads (modern clients) +- **PUT /{filename}**: Direct uploads with HMAC or JWT authentication +- **PUT with v3 protocol**: mod_http_upload_external compatible uploads + +### Download Endpoints +- **GET /{filename}**: Direct file downloads +- **HEAD /{filename}**: File metadata (size, type) +- **GET /download/{filename}**: Alternative download endpoint + +### Management Endpoints +- **GET /health**: Health check endpoint for monitoring +- **GET /metrics**: Prometheus metrics (if enabled) +- **Various helper endpoints**: Defined in router setup + +--- + +## Usage Examples + +### Upload Examples + +#### Multipart POST Upload +```bash +curl -X POST -F 'file=@example.jpg' \ + -H "X-Signature: HMAC_OF_PATH" \ + http://localhost:8080/upload +``` + +#### Legacy PUT Upload (v2) +```bash +# Calculate HMAC of: filename + "\x00" + content_length + "\x00" + content_type +curl -X PUT "http://localhost:8080/example.jpg?v2=CALCULATED_HMAC" \ + --data-binary @example.jpg +``` + +#### v3 Protocol Upload (mod_http_upload_external) +```bash +# HMAC of: "PUT\n{timestamp}\n/path/to/file" +curl -X PUT "http://localhost:8080/upload/file.txt?v3=SIGNATURE&expires=1234567890" \ + --data-binary @file.txt +``` + +#### JWT Upload +```bash +curl -X POST \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -F 'file=@example.jpg' \ + 
http://localhost:8080/upload +``` + +### Download Examples + +#### Direct Download +```bash +curl http://localhost:8080/example.jpg -o downloaded_file.jpg +``` + +#### Get File Info +```bash +curl -I http://localhost:8080/example.jpg +``` + +#### Health Check +```bash +curl http://localhost:8080/health +``` + +--- + +## Setup + +### Reverse Proxy + +#### Nginx Configuration +```nginx +server { + listen 80; + server_name your-domain.com; + client_max_body_size 10G; # Important for large uploads + + location / { + proxy_pass http://localhost:8080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Timeout settings for large uploads + proxy_read_timeout 300; + proxy_connect_timeout 60; + proxy_send_timeout 300; + } +} +``` + +#### Apache2 Configuration +```apache +<VirtualHost *:80> + ServerName your-domain.com + + ProxyPreserveHost On + ProxyPass / http://localhost:8080/ + ProxyPassReverse / http://localhost:8080/ + + # Large upload support + LimitRequestBody 10737418240 # 10GB + ProxyTimeout 300 +</VirtualHost> +``` + +### Systemd Service + +```ini +[Unit] +Description=HMAC File Server +After=network.target redis.service + +[Service] +Type=simple +ExecStart=/path/to/hmac-file-server -config /path/to/config.toml +ExecReload=/bin/kill -SIGHUP $MAINPID +WorkingDirectory=/path/to/hmac-file-server +Restart=always +RestartSec=10 +User=hmac-server +Group=hmac-server + +# Security settings +NoNewPrivileges=true +PrivateTmp=true +ProtectSystem=strict +ReadWritePaths=/path/to/uploads /path/to/logs + +[Install] +WantedBy=multi-user.target +``` + +Enable and start: +```bash +sudo systemctl daemon-reload +sudo systemctl enable hmac-file-server +sudo systemctl start hmac-file-server + +# Reload configuration (logging settings) +sudo systemctl reload hmac-file-server +``` + +--- + +## Building + +### Local Build +```bash +go build -o hmac-file-server ./cmd/server/main.go +``` 
+ +### Cross-Platform Builds +```bash +# Linux amd64 +GOOS=linux GOARCH=amd64 go build -o hmac-file-server-linux-amd64 ./cmd/server/main.go + +# Linux arm64 +GOOS=linux GOARCH=arm64 go build -o hmac-file-server-linux-arm64 ./cmd/server/main.go + +# Windows +GOOS=windows GOARCH=amd64 go build -o hmac-file-server-windows-amd64.exe ./cmd/server/main.go +``` + +--- + +## Docker Support + +### Quick Start with Docker Compose +```yaml +version: '3.8' +services: + hmac-file-server: + image: ghcr.io/plusone/hmac-file-server:latest + ports: + - "8080:8080" + - "9090:9090" # Metrics + volumes: + - ./config:/etc/hmac-file-server + - ./uploads:/opt/hmac-file-server/data/uploads + environment: + - CONFIG_PATH=/etc/hmac-file-server/config.toml + restart: unless-stopped +``` + +### Docker Build +```bash +docker build -t hmac-file-server . +docker run -p 8080:8080 -v $(pwd)/config.toml:/etc/hmac-file-server/config.toml hmac-file-server +``` + +See the Wiki for detailed Docker setup instructions. + +--- + +## Changelog + +### Version 3.2 (Stable) +- **Development Version**: Active development branch with latest features +- **Enhanced Documentation**: Updated comprehensive documentation and protocol specifications +- **Configuration Improvements**: Better configuration validation and structure +- **Authentication System**: Full JWT and multi-protocol HMAC support + +### Version 3.1-Stable (2025-06-08) +- **v3 Protocol Support**: Added mod_http_upload_external compatibility +- **Enhanced Authentication**: Improved HMAC validation with multiple protocol support +- **JWT Integration**: Complete JWT authentication system +- **File Naming Options**: HMAC-based or original filename preservation +- **Network Improvements**: IPv4/IPv6 dual-stack with protocol forcing + +### Version 3.0-Stable (2025-06-07) +- **Docker Support**: Official Docker images and compose files +- **Go 1.24 Requirement**: Updated minimum Go version +- **Configuration Improvements**: Better validation and 
hot-reloading +- **Performance Enhancements**: Worker auto-scaling and memory optimization + +### Previous Versions +See [CHANGELOG.MD](./CHANGELOG.MD) for complete version history. + +--- + +## License + +MIT License + +Copyright (c) 2025 Alexander Renz + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/WIKI.MD b/WIKI.MD new file mode 100644 index 0000000..bfbebd7 --- /dev/null +++ b/WIKI.MD @@ -0,0 +1,1208 @@ +This documentation provides detailed information on configuring, setting up, and maintaining the HMAC File Server. Whether you're a developer, system administrator, or an enthusiast, this guide will help you navigate through the server's features and configurations effectively. + +--- + +## Table of Contents + +1. [Introduction](#introduction) +2. 
[Configuration](#configuration) + - [Server Configuration](#server-configuration) + - [Deduplication Settings](#deduplication-settings) + - [ISO Settings](#iso-settings) + - [Timeout Settings](#timeout-settings) + - [Security Settings](#security-settings) + - [Versioning Settings](#versioning-settings) + - [Uploads Settings](#uploads-settings) + - [Downloads Settings](#downloads-settings) + - [ClamAV Settings](#clamav-settings) + - [Redis Settings](#redis-settings) + - [Worker Settings](#worker-settings) +3. [Example Configuration](#example-configuration) +4. [Setup Instructions](#setup-instructions) + - [1. HMAC File Server Installation](#1-hmac-file-server-installation) + - [2. Reverse Proxy Configuration](#2-reverse-proxy-configuration) + - [Apache2 Reverse Proxy](#apache2-reverse-proxy) + - [Nginx Reverse Proxy](#nginx-reverse-proxy) + - [3. ejabberd Configuration](#3-ejabberd-configuration) + - [4. Systemd Service Setup](#4-systemd-service-setup) +5. [Running with Docker & Docker Compose](#running-with-docker--docker-compose) +6. [Building for Different Architectures](#building-for-different-architectures) +7. [Additional Recommendations](#additional-recommendations) +8. [Notes](#notes) +9. [Using HMAC File Server for CI/CD Build Artifacts](#using-hmac-file-server-for-ci-cd-build-artifacts) +10. [Monitoring](#monitoring) + +--- + +## Introduction + +The **HMAC File Server** is a secure and efficient file management solution designed to handle file uploads, downloads, deduplication, and more. Built with a focus on security, scalability, and performance, it integrates seamlessly with various tools and services to provide a comprehensive file handling experience. + +--- + +## Configuration + +The HMAC File Server is configured using a `config.toml` file. Below are the detailed explanations of each configuration section and their respective options. 
+ +### Server Configuration + +```toml +# Server configuration +listenport = "8080" # TCP port for incoming requests +unixsocket = false # Use Unix domain socket instead of TCP +storagepath = "/path/to/hmac-file-server/data/" # Directory to store uploaded files +loglevel = "debug" # Logging level: "debug", "info", "warn", "error" +logfile = "/path/to/hmac-file-server.log" # Path to log file; leave empty to use stdout +metricsenabled = true # Enable Prometheus metrics +metricsport = "9090" # Port for Prometheus metrics +deduplicationenabled = true +minfreebytes = "5GB" # Minimum free disk space required +filettl = "2Y" # Time-to-live for files +filettlenabled = false # Enable TTL checks and cleanup +autoadjustworkers = true # Automatically adjust worker threads based on load +networkevents = false # Enable detailed network event logging +pidfilepath = "./hmac-file-server.pid" # Path to PID file +precaching = true # Pre-cache file structures on startup + +# New option to force network protocol +forceprotocol = "auto" # Options: "ipv4", "ipv6", "auto" +``` + +#### Configuration Options + +- **listenport**: + - *Type*: `String` + - *Description*: Specifies the TCP port on which the server listens for incoming requests. + - *Default*: `"8080"` + +- **unixsocket**: + - *Type*: `Boolean` + - *Description*: Determines whether to use a Unix domain socket instead of a TCP port for communication. + - *Default*: `false` + +- **storagepath**: + - *Type*: `String` + - *Description*: Defines the directory path where uploaded files are stored. Ensure this path exists and has appropriate permissions. + - *Default*: `"/path/to/hmac-file-server/data/"` + +- **loglevel**: + - *Type*: `String` + - *Description*: Sets the verbosity level of logs. + - *Options*: `"debug"`, `"info"`, `"warn"`, `"error"` + - *Default*: `"debug"` + +- **logfile**: + - *Type*: `String` + - *Description*: Specifies the file path for logging. If left empty, logs are output to `stdout`. 
+ - *Default*: `"/path/to/hmac-file-server.log"` + +- **metricsenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables the Prometheus metrics endpoint. + - *Default*: `true` + +- **metricsport**: + - *Type*: `String` + - *Description*: Defines the port on which Prometheus metrics are exposed. + - *Default*: `"9090"` + +- **deduplicationenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables file deduplication to optimize storage usage. + - *Default*: `true` + +- **minfreebytes**: + - *Type*: `String` + - *Description*: Specifies the minimum free disk space required for the server to operate effectively. + - *Default*: `"5GB"` + +- **filettl**: + - *Type*: `String` + - *Description*: Sets the default Time-to-Live (TTL) for files, determining how long files are retained before deletion. + - *Format*: Duration (e.g., `"2Y"` for two years) + - *Default*: `"2Y"` + +- **filettlenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables TTL checks and automatic file cleanup based on the `filettl` value. + - *Default*: `false` + +- **autoadjustworkers**: + - *Type*: `Boolean` + - *Description*: Automatically adjusts the number of worker threads based on server load and system resources. + - *Default*: `true` + +- **networkevents**: + - *Type*: `Boolean` + - *Description*: Enables detailed logging of network events, which can be useful for debugging but may increase log verbosity. + - *Default*: `false` + +- **pidfilepath**: + - *Type*: `String` + - *Description*: Specifies the file path where the server writes its Process ID (PID) file. This is useful for managing the server process. + - *Default*: `"./hmac-file-server.pid"` + +- **precaching**: + - *Type*: `Boolean` + - *Description*: Enables pre-caching of file structures on startup to improve access speed and performance. + - *Default*: `true` + +- **forceprotocol**: + - *Type*: `String` + - *Description*: Specifies the network protocol to use for server communication. 
+ - `"ipv4"`: Forces the server to use IPv4. + - `"ipv6"`: Forces the server to use IPv6. + - `"auto"`: Uses the system's default behavior (dual-stack). + - *Default*: `"auto"` + +--- + +### Deduplication Settings + +```toml +# Deduplication settings +[deduplication] +enabled = true +directory = "/path/to/hmac-file-server/deduplication/" # Path to deduplication metadata store +``` + +#### Configuration Options + +- **enabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables the deduplication feature, which helps in eliminating duplicate files to save storage space. + - *Default*: `true` + +- **directory**: + - *Type*: `String` + - *Description*: Specifies the directory path where deduplication metadata is stored. Ensure this directory exists and has appropriate permissions. + - *Default*: `"/path/to/hmac-file-server/deduplication/"` + +--- + +### ISO Settings + +```toml +# ISO settings +[iso] +enabled = false +size = "1TB" # Maximum ISO size +mountpoint = "/path/to/hmac-file-server/iso/" # ISO mount point +charset = "utf-8" # Filesystem character set encoding +``` + +#### Configuration Options + +- **enabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables the mounting of an ISO-based filesystem for specialized storage needs. + - *Default*: `false` + +- **size**: + - *Type*: `String` + - *Description*: Defines the maximum allowed size for the ISO container. + - *Default*: `"1TB"` + +- **mountpoint**: + - *Type*: `String` + - *Description*: Specifies the directory path where the ISO is mounted. Ensure this path exists and has appropriate permissions. + - *Default*: `"/path/to/hmac-file-server/iso/"` + +- **charset**: + - *Type*: `String` + - *Description*: Sets the filesystem character set encoding for the ISO. + - *Default*: `"utf-8"` + +> **Note**: Ensure only one `[iso]` block is active in your `config.toml` to avoid configuration conflicts. 
+ +--- + +### Timeout Settings + +```toml +# Timeout settings +[timeouts] +readtimeout = "3600s" # Maximum time to read a request +writetimeout = "3600s" # Maximum time to write a response +idletimeout = "3600s" # Maximum keep-alive time for idle connections +``` + +#### Configuration Options + +- **readtimeout**: + - *Type*: `String` + - *Description*: Sets the maximum duration for reading the entire request, including the body. + - *Format*: Duration (e.g., `"3600s"` for one hour) + - *Default*: `"3600s"` + +- **writetimeout**: + - *Type*: `String` + - *Description*: Defines the maximum duration before timing out writes of the response. + - *Format*: Duration (e.g., `"3600s"` for one hour) + - *Default*: `"3600s"` + +- **idletimeout**: + - *Type*: `String` + - *Description*: Specifies the maximum amount of time to wait for the next request when keep-alives are enabled. + - *Format*: Duration (e.g., `"3600s"` for one hour) + - *Default*: `"3600s"` + +--- + +### Security Configuration + +```toml +# Security settings +[security] +secret = "your-secure-secret-key" # HMAC shared secret key (change to a secure value) +enablejwt = false # Enable JWT authentication +jwtsecret = "your-jwt-secret" # JWT signing secret +jwtalgorithm = "HS256" # JWT algorithm +jwtexpiration = "24h" # JWT token expiration +``` + +#### Configuration Options + +- **secret**: + - *Type*: `String` + - *Description*: The HMAC shared secret key used for signing requests and operations. + - *Default*: `"your-secure-secret-key"` + - *Warning*: **Change this immediately** to a unique, strong string in production environments to ensure the security of HMAC operations. + +- **enablejwt**: + - *Type*: `Boolean` + - *Description*: Enables or disables JWT token authentication. When enabled, the server will accept JWT tokens for authentication. + - *Default*: `false` + +- **jwtsecret**: + - *Type*: `String` + - *Description*: The secret key used for signing and validating JWT tokens. 
Must be strong and secure. + - *Default*: `"your-jwt-secret"` + +- **jwtalgorithm**: + - *Type*: `String` + - *Description*: The algorithm used for JWT token signing. + - *Options*: `"HS256"`, `"HS384"`, `"HS512"` + - *Default*: `"HS256"` + +- **jwtexpiration**: + - *Type*: `String` + - *Description*: The expiration time for JWT tokens. + - *Format*: Duration (e.g., `"24h"` for 24 hours, `"30m"` for 30 minutes) + - *Default*: `"24h"` + +--- + +### Versioning Settings + +```toml +# Versioning settings +[versioning] +enableversioning = false +maxversions = 1 # Number of file versions to retain +``` + +#### Configuration Options + +- **enableversioning**: + - *Type*: `Boolean` + - *Description*: Enables or disables the versioning feature, which maintains multiple versions of the same file. + - *Default*: `false` + +- **maxversions**: + - *Type*: `Integer` + - *Description*: Specifies the maximum number of versions to retain for each file. + - *Default*: `1` + +--- + +### Logging Configuration + +```toml +# Logging settings +[logging] +level = "debug" +file = "/path/to/hmac-file-server.log" +max_size = 100 # Maximum log file size in MB +max_backups = 7 # Number of backup log files to keep +max_age = 30 # Maximum age of log files in days +compress = true # Compress old log files +``` + +#### Configuration Options + +- **level**: + - *Type*: `String` + - *Description*: Sets the verbosity level of logs. + - *Options*: `"debug"`, `"info"`, `"warn"`, `"error"` + - *Default*: `"debug"` + +- **file**: + - *Type*: `String` + - *Description*: Specifies the file path for logging. If left empty, logs are output to `stdout`. + - *Default*: `"/path/to/hmac-file-server.log"` + +- **max_size**: + - *Type*: `Integer` + - *Description*: Maximum size of log files before rotation (in MB). + - *Default*: `100` + +- **max_backups**: + - *Type*: `Integer` + - *Description*: Number of backup log files to retain after rotation. 
+ - *Default*: `7` + +- **max_age**: + - *Type*: `Integer` + - *Description*: Maximum age of log files in days before deletion. + - *Default*: `30` + +- **compress**: + - *Type*: `Boolean` + - *Description*: Whether to compress old log files with gzip. + - *Default*: `true` + +--- + +### Uploads Configuration + +```toml +# Upload settings +[uploads] +resumableuploadsenabled = false +chunkeduploadsenabled = true +chunksize = "32MB" # Chunk size for uploads +allowedextensions = [ + ".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", + ".mpeg", ".mpg", ".m4v", ".3gp", ".3g2", ".mp3", ".ogg" +] +``` + +#### Configuration Options + +- **resumableuploadsenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables support for resumable (chunked) file uploads. + - *Default*: `false` + +- **chunkeduploadsenabled**: + - *Type*: `Boolean` + - *Description*: Specifically enables or disables chunked uploads. + - *Default*: `true` + +- **chunksize**: + - *Type*: `String` + - *Description*: Defines the size of each chunk in chunked uploads. + - *Format*: Size (e.g., `"32MB"`) + - *Default*: `"32MB"` + +- **allowedextensions**: + - *Type*: `Array of Strings` + - *Description*: Lists the file extensions permitted for upload. + - *Default*: + ```toml + allowedextensions = [ + ".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", + ".mpeg", ".mpg", ".m4v", ".3gp", ".3g2", ".mp3", ".ogg" + ] + ``` + +--- + +### Downloads Configuration + +```toml +# Downloads settings +[downloads] +resumabledownloadsenabled = false +chunkeddownloadsenabled = true +chunksize = "32MB" +``` + +#### Configuration Options + +- **resumabledownloadsenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables support for resumable (chunked) downloads. + - *Default*: `false` + +- **chunkeddownloadsenabled**: + - *Type*: `Boolean` + - *Description*: Specifically enables or disables chunked downloads. 
+ - *Default*: `true` + +- **chunksize**: + - *Type*: `String` + - *Description*: Defines the size of each chunk in chunked downloads. + - *Format*: Size (e.g., `"32MB"`) + - *Default*: `"32MB"` + +> **Note**: Downloads inherit allowed extensions from the uploads configuration. There is no separate `allowedextensions` setting for downloads. + +--- + +### ClamAV Settings + +```toml +# ClamAV settings +[clamav] +clamavenabled = true +clamavsocket = "/path/to/clamav/clamd.ctl" # Path to ClamAV socket +numscanworkers = 4 # Number of concurrent scan workers +scanfileextensions = [ + ".exe", ".dll", ".bin", ".com", ".bat", + ".sh", ".php", ".js" +] +``` + +#### Configuration Options + +- **clamavenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables ClamAV integration for virus scanning of uploaded files. + - *Default*: `true` + +- **clamavsocket**: + - *Type*: `String` + - *Description*: Specifies the file path to the ClamAV socket (`.ctl` file). Ensure ClamAV is installed and the socket path is correct. + - *Default*: `"/path/to/clamav/clamd.ctl"` + +- **numscanworkers**: + - *Type*: `Integer` + - *Description*: Sets the number of concurrent workers dedicated to scanning files with ClamAV. + - *Default*: `4` + +- **scanfileextensions**: + - *Type*: `Array of Strings` + - *Description*: Lists the file extensions that should be scanned for viruses. + - *Default*: + ```toml + scanfileextensions = [ + ".exe", ".dll", ".bin", ".com", ".bat", + ".sh", ".php", ".js" + ] + ``` + +--- + +### Redis Settings + +```toml +# Redis settings +[redis] +redisenabled = true +redisdbindex = 0 +redisaddr = "localhost:6379" # Redis server address +redispassword = "" # Redis password if required +redishealthcheckinterval = "120s" # Interval for Redis health checks +``` + +#### Configuration Options + +- **redisenabled**: + - *Type*: `Boolean` + - *Description*: Enables or disables Redis integration for caching or session tracking. 
+ - *Default*: `true` + +- **redisaddr**: + - *Type*: `String` + - *Description*: Specifies the address of the Redis server (e.g., `"localhost:6379"`). + - *Default*: `"localhost:6379"` + +- **redispassword**: + - *Type*: `String` + - *Description*: Sets the Redis authentication password, if required. + - *Default*: `""` + +- **redisdbindex**: + - *Type*: `Integer` + - *Description*: Specifies the Redis database index to use. + - *Default*: `0` + +- **redishealthcheckinterval**: + - *Type*: `String` + - *Description*: Defines the interval for performing health checks on the Redis connection. + - *Format*: Duration (e.g., `"120s"` for two minutes) + - *Default*: `"120s"` + +--- + +### Workers Configuration + +```toml +# Workers settings +[workers] +numworkers = 10 # Number of worker threads +uploadqueuesize = 5000 # Size of upload queue +``` + +#### Configuration Options + +- **numworkers**: + - *Type*: `Integer` + - *Description*: Specifies the number of worker threads to handle file operations. + - *Default*: `10` + +- **uploadqueuesize**: + - *Type*: `Integer` + - *Description*: Sets the size of the upload queue buffer. + - *Default*: `5000` + +--- + +#### Configuration Options + +- **maxfilesize**: + - *Type*: `String` + - *Description*: Defines the maximum allowed file size for uploads. + - *Format*: Size (e.g., `"10GB"`) + - *Default*: `"10GB"` + +--- + +## Configuration Validation + +The HMAC File Server v3.2 includes a comprehensive configuration validation system with specialized command-line flags for different validation scenarios. 
+ +### Available Validation Flags + +#### Core Validation Commands + +**`--validate-config`** +- **Purpose**: Full comprehensive validation of all configuration sections +- **Usage**: `./hmac-file-server --validate-config` +- **Output**: Complete validation report with all errors and warnings + +```bash +# Example +./hmac-file-server -config config.toml --validate-config +``` + +**`--test-config`** +- **Purpose**: Run predefined configuration test scenarios +- **Usage**: `./hmac-file-server --test-config` +- **Output**: Test scenario results for configuration validation + +#### Specialized Validation Modes + +**`--check-security`** +- **Purpose**: Security-focused validation only +- **Checks**: Secret strength, default values, JWT algorithms, network exposure, file permissions +- **Example**: `./hmac-file-server -config config.toml --check-security` + +**`--check-performance`** +- **Purpose**: Performance-focused validation only +- **Checks**: Worker optimization, memory usage, timeout balance, large file handling +- **Example**: `./hmac-file-server -config config.toml --check-performance` + +**`--check-connectivity`** +- **Purpose**: Network connectivity validation only +- **Checks**: Redis connections, ClamAV sockets, address validation, DNS resolution +- **Example**: `./hmac-file-server -config config.toml --check-connectivity` + +#### Output Control Flags + +**`--validate-quiet`** +- **Purpose**: Minimal output, returns only exit codes +- **Usage**: Perfect for automation and scripts + +**`--validate-verbose`** +- **Purpose**: Detailed output with comprehensive analysis +- **Usage**: Best for troubleshooting and development + +**`--check-fixable`** +- **Purpose**: Show only issues that can be automatically fixed +- **Usage**: Helps prioritize configuration improvements + +### Validation Categories + +#### Security Checks (6 categories) +- Secret strength analysis +- Default value detection +- Algorithm recommendations +- Network exposure warnings +- File 
permission analysis +- Debug logging security + +#### Performance Checks (5 categories) +- Resource optimization +- Memory usage analysis +- Timeout balancing +- Large file preparation +- Configuration efficiency + +#### Connectivity Checks (4 categories) +- Service connectivity +- Socket accessibility +- Address validation +- DNS resolution + +#### System Checks (5 categories) +- CPU availability +- Memory monitoring +- Disk space validation +- Permission testing +- Resource constraints + +### Integration Examples + +#### Shell Script Integration +```bash +#!/bin/bash +CONFIG_FILE="/etc/hmac-file-server/config.toml" + +echo "🔍 Validating HMAC File Server configuration..." + +# Run validation +if ./hmac-file-server -config "$CONFIG_FILE" --validate-config; then + echo "✅ Configuration validation passed" + + # Additional specific checks + echo "🔍 Running security audit..." + ./hmac-file-server -config "$CONFIG_FILE" --check-security + + echo "⚡ Checking performance settings..." + ./hmac-file-server -config "$CONFIG_FILE" --check-performance +else + echo "❌ Configuration validation failed" + echo "💡 Try: ./hmac-file-server -config $CONFIG_FILE --check-fixable" + exit 1 +fi +``` + +#### Docker Integration +```dockerfile +# Add validation step to Dockerfile +RUN ./hmac-file-server -config /etc/config.toml --validate-config && \ + ./hmac-file-server -config /etc/config.toml --check-security +``` + +#### Kubernetes Health Check +```yaml +livenessProbe: + exec: + command: + - /usr/local/bin/hmac-file-server + - -config + - /etc/config/config.toml + - --validate-quiet + initialDelaySeconds: 30 + periodSeconds: 60 +``` + +The enhanced command-line validation system provides comprehensive coverage with 50+ validation checks across all configuration areas, making HMAC File Server v3.2 production-ready with enterprise-grade configuration management. 
+ +--- + +## Example Configuration + +Below is an example `config.toml` file with default settings: + +```toml +# Example HMAC File Server configuration + +# Server configuration +listenport = "8080" +bind_ip = "0.0.0.0" +unixsocket = false +storagepath = "/path/to/hmac-file-server/data/" +metricsenabled = true +metricsport = "9090" +deduplicationenabled = true +minfreebytes = "5GB" +filettl = "2Y" +filettlenabled = false +autoadjustworkers = true +networkevents = false +pidfilepath = "./hmac-file-server.pid" +precaching = true +filenaming = "HMAC" +forceprotocol = "auto" + +# Logging settings +[logging] +level = "debug" +file = "/path/to/hmac-file-server.log" +max_size = 100 +max_backups = 7 +max_age = 30 +compress = true + +# Deduplication settings +[deduplication] +enabled = true +directory = "/path/to/hmac-file-server/deduplication/" + +# ISO settings +[iso] +enabled = false +size = "1TB" +mountpoint = "/path/to/hmac-file-server/iso/" +charset = "utf-8" + +# Timeout settings +[timeouts] +readtimeout = "3600s" +writetimeout = "3600s" +idletimeout = "3600s" + +# Security settings +[security] +secret = "your-secure-secret-key" +enablejwt = false +jwtsecret = "your-jwt-secret" +jwtalgorithm = "HS256" +jwtexpiration = "24h" + +# Versioning settings +[versioning] +enableversioning = false +maxversions = 1 + +# Upload settings +[uploads] +resumableuploadsenabled = false +chunkeduploadsenabled = true +chunksize = "32MB" +allowedextensions = [ + ".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", + ".bmp", ".tiff", ".svg", ".webp", ".wav", ".mp4", + ".avi", ".mkv", ".mov", ".wmv", ".flv", ".webm", + ".mpeg", ".mpg", ".m4v", ".3gp", ".3g2", ".mp3", ".ogg" +] + +# Download settings +[downloads] +resumabledownloadsenabled = false +chunkeddownloadsenabled = true +chunksize = "32MB" + +# ClamAV settings +[clamav] +clamavenabled = true +clamavsocket = "/path/to/clamav/clamd.ctl" +numscanworkers = 4 +scanfileextensions = [ + ".exe", ".dll", ".bin", ".com", ".bat", + ".sh", 
".php", ".js" +] + +# Redis settings +[redis] +redisenabled = true +redisdbindex = 0 +redisaddr = "localhost:6379" +redispassword = "" +redishealthcheckinterval = "120s" + +# Workers settings +[workers] +numworkers = 10 +uploadqueuesize = 5000 +``` + +--- + +## Setup Instructions + +### 1. HMAC File Server Installation + +To install the HMAC File Server, follow these steps: + +1. Clone the repository: + ```sh + git clone https://github.com/PlusOne/hmac-file-server.git + cd hmac-file-server + ``` + +2. Build the server: + ```sh + go build -o hmac-file-server ./cmd/server/main.go + ``` + +3. Create the necessary directories: + ```sh + mkdir -p /path/to/hmac-file-server/data/ + mkdir -p /path/to/hmac-file-server/deduplication/ + mkdir -p /path/to/hmac-file-server/iso/ + ``` + +4. Copy the example configuration file: + ```sh + cp config.example.toml config.toml + ``` + +5. Edit the `config.toml` file to match your environment and preferences. + +6. Start the server: + ```sh + ./hmac-file-server -config config.toml + ``` + +### 2. Reverse Proxy Configuration + +To set up a reverse proxy for the HMAC File Server, you can use either Apache2 or Nginx. Below are the configuration examples for both. + +#### Apache2 Reverse Proxy + +1. Enable the necessary Apache2 modules: + ```sh + sudo a2enmod proxy + sudo a2enmod proxy_http + sudo a2enmod headers + sudo a2enmod rewrite + ``` + +2. Create a new virtual host configuration file: + ```sh + sudo nano /etc/apache2/sites-available/hmac-file-server.conf + ``` + +3. Add the following configuration to the file: + ```apache + + ServerName your-domain.com + + ProxyPreserveHost On + ProxyPass / http://localhost:8080/ + ProxyPassReverse / http://localhost:8080/ + + + Require all granted + Header always set X-Content-Type-Options "nosniff" + Header always set X-Frame-Options "DENY" + Header always set X-XSS-Protection "1; mode=block" + + + ``` + +4. 
Enable the new site and restart Apache2: + ```sh + sudo a2ensite hmac-file-server.conf + sudo systemctl restart apache2 + ``` + +#### Nginx Reverse Proxy + +1. Install Nginx if not already installed: + ```sh + sudo apt-get update + sudo apt-get install nginx + ``` + +2. Create a new server block configuration file: + ```sh + sudo nano /etc/nginx/sites-available/hmac-file-server + ``` + +3. Add the following configuration to the file: + ```nginx + server { + listen 80; + server_name your-domain.com; + + location / { + proxy_pass http://localhost:8080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Content-Type-Options "nosniff"; + proxy_set_header X-Frame-Options "DENY"; + proxy_set_header X-XSS-Protection "1; mode=block"; + } + } + ``` + +4. Enable the new site and restart Nginx: + ```sh + sudo ln -s /etc/nginx/sites-available/hmac-file-server /etc/nginx/sites-enabled/ + sudo systemctl restart nginx + ``` + +--- + +### Proxy Best Practices & Recommendations + +For production deployments, consider the following reverse proxy best practices: + +- **Timeouts**: Set reasonable timeouts (e.g., `proxy_read_timeout 300;` in Nginx) to avoid hanging connections. +- **Buffer Sizes**: Increase buffer sizes for large file uploads/downloads if needed (e.g., `client_max_body_size 2G;` in Nginx). +- **Headers**: Always set security headers (`X-Content-Type-Options`, `X-Frame-Options`, `X-XSS-Protection`). +- **Forwarded Headers**: Ensure `X-Forwarded-For` and `X-Forwarded-Proto` are set for correct client IP and protocol logging. +- **HTTP/2**: Enable HTTP/2 for better performance if supported by your proxy and clients. +- **SSL/TLS**: Terminate SSL at the proxy and use strong ciphers. Redirect HTTP to HTTPS. +- **Health Checks**: Configure health checks for the backend server to enable automatic failover or alerting. 
+- **Access Controls**: Restrict access to the management endpoints (e.g., `/metrics`) to trusted IPs only. + +See the official Nginx and Apache documentation for more advanced tuning options. + +--- + +#### 3. ejabberd Configuration + +```yaml +hosts: + - "your-domain.com" + +listen: + - + port: 5222 + module: ejabberd_c2s + certfile: "/etc/ejabberd/ejabberd.pem" + starttls: true + starttls_required: true + protocol_options: + - "no_sslv3" + - "no_tlsv1" + - "no_tlsv1_1" + ciphers: "HIGH:!aNULL:!eNULL:!3DES:@STRENGTH" + dhfile: "/etc/ejabberd/dhparams.pem" + max_stanza_size: 65536 + shaper: c2s_shaper + access: c2s + + - + port: 5269 + module: ejabberd_s2s_in + certfile: "/etc/ejabberd/ejabberd.pem" + starttls: true + starttls_required: true + protocol_options: + - "no_sslv3" + - "no_tlsv1" + - "no_tlsv1_1" + ciphers: "HIGH:!aNULL:!eNULL:!3DES:@STRENGTH" + dhfile: "/etc/ejabberd/dhparams.pem" + max_stanza_size: 131072 + shaper: s2s_shaper + access: s2s + +acl: + local: + user_regexp: "" + +access_rules: + local: + allow: local + +mod_http_upload: + max_size: 1073741824 + thumbnail: true + put_url: https://share.uuxo.net + get_url: https://share.uuxo.net + external_secret: "changeme" + custom_headers: + "Access-Control-Allow-Origin": "*" + "Access-Control-Allow-Methods": "GET,HEAD,PUT,OPTIONS" + "Access-Control-Allow-Headers": "Content-Type" +``` + +4. Restart ejabberd: + ```sh + sudo systemctl restart ejabberd + ``` + +### 4. Systemd Service Setup + +To set up the HMAC File Server as a systemd service, follow these steps: + +1. Create a new systemd service file: + ```sh + sudo nano /etc/systemd/system/hmac-file-server.service + ``` + +2. 
Add the following configuration to the file: + ```ini + [Unit] + Description=HMAC File Server + After=network.target + + [Service] + ExecStart=/path/to/hmac-file-server -config /path/to/config.toml + WorkingDirectory=/path/to/hmac-file-server + Restart=always + User=www-data + Group=www-data + + [Install] + WantedBy=multi-user.target + ``` + +3. Reload systemd and enable the service: + ```sh + sudo systemctl daemon-reload + sudo systemctl enable hmac-file-server + sudo systemctl start hmac-file-server + ``` + +--- + +## Running with Docker & Docker Compose + +You can run the HMAC File Server using Docker and Docker Compose for easy deployment and environment management. + +### Docker Compose Example + +```yaml +version: '3.8' + +services: + hmac-file-server: + image: ghcr.io/plusone/hmac-file-server:latest + ports: + - "8080:8080" + volumes: + - ./config:/etc/hmac-file-server + - ./data/uploads:/opt/hmac-file-server/data/uploads + - ./data/duplicates:/opt/hmac-file-server/data/duplicates + - ./data/temp:/opt/hmac-file-server/data/temp + - ./data/logs:/opt/hmac-file-server/data/logs + environment: + - CONFIG_PATH=/etc/hmac-file-server/config.toml + restart: unless-stopped +``` + +**Key paths:** +- `/etc/hmac-file-server/config.toml`: Main config file (mount your config here) +- `/opt/hmac-file-server/data/uploads`: Upload storage +- `/opt/hmac-file-server/data/duplicates`: Deduplication data +- `/opt/hmac-file-server/data/temp`: Temporary files +- `/opt/hmac-file-server/data/logs`: Log files + +### Docker Build + +The official Dockerfile supports multi-stage builds for minimal images: + +```dockerfile +# Stage 1: Build +FROM golang:1.24-alpine AS builder + +WORKDIR /build +RUN apk add --no-cache git +COPY go.mod go.sum ./ +RUN go mod download +COPY . . 
+RUN CGO_ENABLED=0 go build -o hmac-file-server ./cmd/server/main.go + +# Stage 2: Runtime +FROM alpine:latest + +RUN apk --no-cache add ca-certificates + +RUN mkdir -p /opt/hmac-file-server/data/uploads \ + && mkdir -p /opt/hmac-file-server/data/duplicates \ + && mkdir -p /opt/hmac-file-server/data/temp \ + && mkdir -p /opt/hmac-file-server/data/logs + +WORKDIR /opt/hmac-file-server + +COPY --from=builder /build/hmac-file-server . + +EXPOSE 8080 + +CMD ["./hmac-file-server", "--config", "/etc/hmac-file-server/config.toml"] +``` + +### Example Docker Config + +A sample `config.toml` for Docker deployments: + +```toml +[server] +listenport = "8080" +unixsocket = false +storagepath = "/opt/hmac-file-server/data/uploads" +metricsenabled = true +metricsport = "9090" +deduplicationenabled = true +minfreebytes = "5GB" +filettl = "2y" +filettlenabled = false +autoadjustworkers = true +networkevents = false +pidfilepath = "./hmac-file-server.pid" +precaching = false + +[deduplication] +enabled = true +directory = "/opt/hmac-file-server/data/duplicates" + +[logging] +level = "debug" +file = "./hmac-file-server.log" +max_size = 100 +max_backups = 7 +max_age = 30 +compress = true + +[iso] +enabled = false +size = "1TB" +mountpoint = "/mnt/nfs_vol01/hmac-file-server/iso/" +charset = "utf-8" + +[timeouts] +readtimeout = "3600s" +writetimeout = "3600s" +idletimeout = "3600s" + +[security] +secret = "hmac-file-server-is-the-win" + +[versioning] +enableversioning = false +maxversions = 1 + +[uploads] +resumableuploadsenabled = false +chunkeduploadsenabled = true +chunksize = "32MB" +allowedextensions = [ + ".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp", + ".wav", ".mp4", ".avi", ".mkv", ".mov", ".wmv", ".flv", ".webm", ".mpeg", ".mpg", + ".m4v", ".3gp", ".3g2", ".mp3", ".ogg" +] + +[downloads] +chunkeddownloadsenabled = false +chunksize = "32MB" +allowedextensions = [ + ".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", 
".webp", + ".wav", ".mp4", ".avi", ".mkv", ".mov", ".wmv", ".flv", ".webm", ".mpeg", ".mpg", + ".m4v", ".3gp", ".3g2", ".mp3", ".ogg" +] + +[clamav] +clamavenabled = false +clamavsocket = "/var/run/clamav/clamd.ctl" +numscanworkers = 4 +scanfileextensions = [".exe", ".dll", ".bin", ".com", ".bat", ".sh", ".php", ".js"] + +[redis] +redisenabled = false +redisdbindex = 0 +redisaddr = "localhost:6379" +redispassword = "" +redishealthcheckinterval = "120s" + +[workers] +numworkers = 4 +uploadqueuesize = 5000 + +[file] +filerevision = 1 +``` + +### Quickstart with Docker Compose + +1. Place your `config.toml` in the `./config` directory. +2. Run: + +```zsh +docker compose up -d +``` + +3. The server will be available on `http://localhost:8080`. + +--- diff --git a/build-multi-arch-fixed.sh b/build-multi-arch-fixed.sh new file mode 100644 index 0000000..e69de29 diff --git a/build-multi-arch.sh b/build-multi-arch.sh new file mode 100755 index 0000000..e8b2ee3 --- /dev/null +++ b/build-multi-arch.sh @@ -0,0 +1,196 @@ +#!/bin/bash +# HMAC File Server v3.2 - Multi-Architecture Build Script +# Compiles binaries for AMD64, ARM64, and ARM32 architectures + +# Remove set -e to prevent early exit on errors + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +CYAN='\033[0;36m' +NC='\033[0m' + +print_status() { + echo -e "${GREEN}[BUILD]${NC} $1" +} + +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +print_arch() { + echo -e "${CYAN}[ARCH]${NC} $1" +} + +# Check if Go is installed +if ! command -v go &> /dev/null; then + print_error "Go is not installed or not in PATH" + exit 1 +fi + +# Create temp directory if it doesn't exist +TEMP_DIR="./temp" +if [[ ! 
-d "$TEMP_DIR" ]]; then + mkdir -p "$TEMP_DIR" + print_info "Created temp directory: $TEMP_DIR" +fi + +# Source files to compile +SOURCE_FILES="cmd/server/main.go cmd/server/helpers.go cmd/server/config_validator.go cmd/server/config_test_scenarios.go" + +print_status "Starting multi-architecture build for HMAC File Server v3.2" +print_info "Source files: $SOURCE_FILES" +print_info "Output directory: $TEMP_DIR" +echo "" + +# Build function +build_for_arch() { + local goos=$1 + local goarch=$2 + local output_name=$3 + local arch_description=$4 + + print_arch "Building for $arch_description ($goos/$goarch)..." + + # Set environment variables for cross-compilation + export GOOS=$goos + export GOARCH=$goarch + export CGO_ENABLED=0 + + # Build the binary + if go build -ldflags="-w -s" -o "$TEMP_DIR/$output_name" $SOURCE_FILES 2>/dev/null; then + # Get file size + if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS + SIZE=$(stat -f%z "$TEMP_DIR/$output_name" | awk '{printf "%.1fMB", $1/1024/1024}') + else + # Linux + SIZE=$(stat -c%s "$TEMP_DIR/$output_name" | awk '{printf "%.1fMB", $1/1024/1024}') + fi + + print_status "Build successful: $arch_description" + print_info " Binary: $TEMP_DIR/$output_name" + print_info " Size: $SIZE" + + # Test binary (version check) + if timeout 10s "$TEMP_DIR/$output_name" --version >/dev/null 2>&1; then + print_info " Version check: PASSED" + else + print_warning " Version check: SKIPPED (cross-compiled binary)" + fi + + return 0 + else + print_error "Build failed: $arch_description" + return 1 + fi +} + +# Track build results +BUILDS_ATTEMPTED=0 +BUILDS_SUCCESSFUL=0 +FAILED_BUILDS=() + +echo "Starting builds..." 
+echo "====================" +echo "" + +# Build for AMD64 (x86_64) +print_arch "AMD64 (Intel/AMD 64-bit)" +((BUILDS_ATTEMPTED++)) +if build_for_arch "linux" "amd64" "hmac-file-server-linux-amd64" "AMD64 Linux"; then + ((BUILDS_SUCCESSFUL++)) +else + FAILED_BUILDS+=("AMD64") +fi +echo "" + +# Build for ARM64 (AArch64) +print_arch "ARM64 (AArch64)" +((BUILDS_ATTEMPTED++)) +if build_for_arch "linux" "arm64" "hmac-file-server-linux-arm64" "ARM64 Linux"; then + ((BUILDS_SUCCESSFUL++)) +else + FAILED_BUILDS+=("ARM64") +fi +echo "" + +# Build for ARM32 (ARMv7) +print_arch "ARM32 (ARMv7)" +export GOARM=7 # ARMv7 with hardware floating point +((BUILDS_ATTEMPTED++)) +if build_for_arch "linux" "arm" "hmac-file-server-linux-arm32" "ARM32 Linux"; then + ((BUILDS_SUCCESSFUL++)) +else + FAILED_BUILDS+=("ARM32") +fi +echo "" + +# Reset environment variables +unset GOOS GOARCH CGO_ENABLED GOARM + +# Build summary +echo "Build Summary" +echo "================" +print_info "Builds attempted: $BUILDS_ATTEMPTED" +print_info "Builds successful: $BUILDS_SUCCESSFUL" + +if [[ $BUILDS_SUCCESSFUL -eq $BUILDS_ATTEMPTED ]]; then + print_status "ALL BUILDS SUCCESSFUL!" + echo "" + print_info "Generated binaries in $TEMP_DIR:" + ls -lh "$TEMP_DIR"/hmac-file-server-* | while read -r line; do + echo " $line" + done + echo "" + print_info "Usage examples:" + echo " - Copy to target system and run: ./hmac-file-server-linux-amd64 --version" + echo " - Deploy with installer: cp temp/hmac-file-server-linux-amd64 /opt/hmac-file-server/" + echo " - Docker deployment: COPY temp/hmac-file-server-linux-amd64 /usr/local/bin/" + +elif [[ $BUILDS_SUCCESSFUL -gt 0 ]]; then + print_warning "PARTIAL SUCCESS: $BUILDS_SUCCESSFUL/$BUILDS_ATTEMPTED builds completed" + if [[ ${#FAILED_BUILDS[@]} -gt 0 ]]; then + print_error "Failed architectures: ${FAILED_BUILDS[*]}" + fi + +else + print_error "ALL BUILDS FAILED!" 
+ exit 1 +fi + +echo "" +print_info "Architecture compatibility:" +echo " - AMD64: Intel/AMD 64-bit servers, desktops, cloud instances" +echo " - ARM64: Apple Silicon, AWS Graviton, modern ARM servers" +echo " - ARM32: Raspberry Pi, embedded systems, older ARM devices" + +echo "" +print_status "Multi-architecture build completed!" + +# Final verification +echo "" +print_info "Final verification:" +for binary in "$TEMP_DIR"/hmac-file-server-*; do + if [[ -f "$binary" ]]; then + filename=$(basename "$binary") + if file "$binary" >/dev/null 2>&1; then + file_info=$(file "$binary" | cut -d: -f2- | sed 's/^ *//') + print_info " OK $filename: $file_info" + else + print_info " OK $filename: Binary file" + fi + fi +done + +exit 0 diff --git a/builddebian.sh b/builddebian.sh new file mode 100755 index 0000000..613afe1 --- /dev/null +++ b/builddebian.sh @@ -0,0 +1,407 @@ +#!/bin/bash +# HMAC File Server v3.2 - Debian Package Builder +# Creates .deb packages for AMD64 and ARM64 architectures + +set -e + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' + +print_status() { + echo -e "${GREEN}[BUILD]${NC} $1" +} + +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Project configuration +PROJECT_DIR=$(pwd) +BUILD_DIR=$PROJECT_DIR/build +DEB_DIR=$PROJECT_DIR/debian +PACKAGE_NAME="hmac-file-server" +VERSION="3.2.0" +MAINTAINER="Alex Renz " + +# Source files for compilation +SOURCE_FILES="cmd/server/main.go cmd/server/helpers.go cmd/server/config_validator.go cmd/server/config_test_scenarios.go" + +print_status "Starting Debian package build for HMAC File Server v$VERSION" +print_info "Building packages for: AMD64, ARM64" + +# Check if Go is installed +if ! command -v go &> /dev/null; then + print_error "Go is not installed or not in PATH" + exit 1 +fi + +# Check if dpkg-deb is available +if ! 
command -v dpkg-deb &> /dev/null; then + print_error "dpkg-deb is not installed. Please install dpkg-dev package" + exit 1 +fi + +# Clean and create required directories +print_info "Setting up build directories..." +rm -rf $BUILD_DIR $DEB_DIR +mkdir -p $BUILD_DIR/{amd64,arm64} +mkdir -p $DEB_DIR/DEBIAN +mkdir -p $DEB_DIR/usr/local/bin +mkdir -p $DEB_DIR/etc/hmac-file-server +mkdir -p $DEB_DIR/var/lib/hmac-file-server/{uploads,deduplication,runtime} +mkdir -p $DEB_DIR/var/log/hmac-file-server +mkdir -p $DEB_DIR/usr/share/doc/hmac-file-server +mkdir -p $DEB_DIR/lib/systemd/system + +# Compile Go binaries for both architectures +print_status "Compiling binaries..." +for ARCH in amd64 arm64; do + print_info "Building for $ARCH..." + + # Set cross-compilation environment + export GOOS=linux + export GOARCH=$ARCH + export CGO_ENABLED=0 + + # Build hmac-file-server + if go build -ldflags="-w -s" -o $BUILD_DIR/$ARCH/hmac-file-server $SOURCE_FILES; then + SIZE=$(stat -c%s "$BUILD_DIR/$ARCH/hmac-file-server" | awk '{printf "%.1fMB", $1/1024/1024}') + print_info " $ARCH binary built successfully ($SIZE)" + else + print_error "Failed to build $ARCH binary" + exit 1 + fi +done + +# Reset environment variables +unset GOOS GOARCH CGO_ENABLED + +# Prepare Debian control file template +print_info "Creating package metadata..." +CONTROL_TEMPLATE=$DEB_DIR/DEBIAN/control.template +cat <<EOF > $CONTROL_TEMPLATE +Package: $PACKAGE_NAME +Version: $VERSION +Architecture: ARCH_PLACEHOLDER +Maintainer: $MAINTAINER +Depends: redis-server, clamav, clamav-daemon +Recommends: nginx +Section: net +Priority: optional +Homepage: https://github.com/PlusOne/hmac-file-server +Description: HMAC File Server v3.2 - Enterprise XMPP File Sharing + A lightweight, secure file server designed for XMPP environments with + enterprise-grade features including: + . 
+ * HMAC-based authentication and JWT support + * Redis integration for session management + * ClamAV virus scanning for uploaded files + * Prometheus metrics for monitoring + * Chunked upload/download support + * File deduplication capabilities + * Comprehensive configuration validation + . + Perfect for Prosody, Ejabberd, and other XMPP servers requiring + secure file sharing capabilities with professional deployment features. +EOF + +# Prepare systemd service file +print_info "Creating systemd service configuration..." +cat <<EOF > $DEB_DIR/lib/systemd/system/hmac-file-server.service +[Unit] +Description=HMAC File Server 3.2 +Documentation=https://github.com/PlusOne/hmac-file-server +After=network.target +Wants=network-online.target +After=redis.service +After=clamav-daemon.service + +[Service] +Type=simple +User=hmac-file-server +Group=hmac-file-server +ExecStart=/usr/local/bin/hmac-file-server -config /etc/hmac-file-server/config.toml +ExecReload=/bin/kill -SIGHUP \$MAINPID +WorkingDirectory=/var/lib/hmac-file-server +Restart=always +RestartSec=10 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=hmac-file-server + +# Security settings +NoNewPrivileges=true +PrivateTmp=true +ProtectSystem=strict +ProtectHome=true +ReadWritePaths=/var/lib/hmac-file-server /var/log/hmac-file-server +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +AmbientCapabilities=CAP_NET_BIND_SERVICE + +# Resource limits +LimitNOFILE=65536 +LimitNPROC=4096 + +[Install] +WantedBy=multi-user.target +EOF + +# Prepare example configuration file +print_info "Creating example configuration..." 
+cat <<EOF > $DEB_DIR/etc/hmac-file-server/config.toml +# HMAC File Server v3.2 Configuration +# Complete configuration reference: https://github.com/PlusOne/hmac-file-server/blob/main/WIKI.MD + +[server] +bind_ip = "127.0.0.1" +listenport = "8080" +unixsocket = false +storagepath = "/var/lib/hmac-file-server/uploads" +metricsenabled = true +metricsport = "9090" +deduplicationenabled = true +deduplicationpath = "/var/lib/hmac-file-server/deduplication" +filenaming = "HMAC" +force_protocol = "auto" +sslenabled = false +pidfilepath = "/var/lib/hmac-file-server/runtime/hmac-file-server.pid" + +[security] +secret = "CHANGE_THIS_SECRET_IN_PRODUCTION_USE_48_CHARS_MIN" +enablejwt = false +jwtsecret = "" +jwtalgorithm = "HS256" +jwtexpiration = "24h" + +[uploads] +allowedextensions = [".txt", ".pdf", ".jpg", ".jpeg", ".png", ".gif", ".webp", ".zip", ".tar", ".gz", ".7z", ".mp4", ".webm", ".ogg", ".mp3", ".wav", ".flac", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx", ".odt", ".ods", ".odp"] +maxfilesize = "100MB" +chunkeduploadsenabled = true +chunksize = "10MB" +resumableuploadsenabled = true +ttlenabled = false +ttl = "168h" + +[downloads] +chunkeddownloadsenabled = true +chunksize = "10MB" + +[logging] +level = "INFO" +file = "/var/log/hmac-file-server/hmac-file-server.log" +max_size = 100 +max_backups = 3 +max_age = 30 +compress = true + +[workers] +numworkers = 10 +uploadqueuesize = 1000 +autoscaling = true + +[timeouts] +readtimeout = "30s" +writetimeout = "30s" +idletimeout = "120s" +shutdown = "30s" + +[clamav] +enabled = false +socket = "/var/run/clamav/clamd.ctl" +timeout = "30s" + +[redis] +enabled = false +address = "localhost:6379" +database = 0 +password = "" +EOF + +# Prepare post-installation script +print_info "Creating installation scripts..." +cat <<EOF > $DEB_DIR/DEBIAN/postinst +#!/bin/bash +set -e + +# Create hmac-file-server user and group if they do not exist +if ! 
id -u hmac-file-server >/dev/null 2>&1; then + useradd --system --no-create-home --shell /usr/sbin/nologin --home-dir /var/lib/hmac-file-server hmac-file-server + echo "Created system user: hmac-file-server" +fi + +# Set proper ownership and permissions +chown -R hmac-file-server:hmac-file-server /var/lib/hmac-file-server +chown -R hmac-file-server:hmac-file-server /var/log/hmac-file-server +chown hmac-file-server:hmac-file-server /etc/hmac-file-server/config.toml + +# Set directory permissions +chmod 755 /var/lib/hmac-file-server +chmod 755 /var/lib/hmac-file-server/uploads +chmod 755 /var/lib/hmac-file-server/deduplication +chmod 755 /var/lib/hmac-file-server/runtime +chmod 755 /var/log/hmac-file-server +chmod 640 /etc/hmac-file-server/config.toml + +# Reload systemd and enable service +systemctl daemon-reload +systemctl enable hmac-file-server.service + +echo "" +echo "HMAC File Server v3.2 installed successfully!" +echo "" +echo "Next steps:" +echo "1. Edit /etc/hmac-file-server/config.toml (CHANGE THE SECRET!)" +echo "2. Enable Redis/ClamAV if needed: systemctl enable redis-server clamav-daemon" +echo "3. Start the service: systemctl start hmac-file-server" +echo "4. Check status: systemctl status hmac-file-server" +echo "" +echo "Documentation: https://github.com/PlusOne/hmac-file-server" +echo "" +EOF +chmod 0755 $DEB_DIR/DEBIAN/postinst + +# Prepare pre-removal script +cat <<EOF > $DEB_DIR/DEBIAN/prerm +#!/bin/bash +set -e + +# Stop the service before removal +if systemctl is-active --quiet hmac-file-server.service; then + echo "Stopping HMAC File Server service..." 
+ systemctl stop hmac-file-server.service || true +fi +EOF +chmod 0755 $DEB_DIR/DEBIAN/prerm + +# Prepare post-removal script +cat <<EOF > $DEB_DIR/DEBIAN/postrm +#!/bin/bash +set -e + +case "\$1" in + purge) + # Remove systemd service + systemctl disable hmac-file-server.service >/dev/null 2>&1 || true + rm -f /lib/systemd/system/hmac-file-server.service + systemctl daemon-reload >/dev/null 2>&1 || true + + # Remove user and group + if id -u hmac-file-server >/dev/null 2>&1; then + userdel hmac-file-server || true + fi + if getent group hmac-file-server >/dev/null 2>&1; then + groupdel hmac-file-server || true + fi + + # Remove data directories (ask user) + echo "" + echo "HMAC File Server has been removed." + echo "Data directories remain at:" + echo " - /var/lib/hmac-file-server/" + echo " - /var/log/hmac-file-server/" + echo " - /etc/hmac-file-server/" + echo "" + echo "Remove them manually if no longer needed:" + echo " sudo rm -rf /var/lib/hmac-file-server" + echo " sudo rm -rf /var/log/hmac-file-server" + echo " sudo rm -rf /etc/hmac-file-server" + echo "" + ;; + remove) + # Just disable service + systemctl disable hmac-file-server.service >/dev/null 2>&1 || true + systemctl daemon-reload >/dev/null 2>&1 || true + ;; +esac +EOF +chmod 0755 $DEB_DIR/DEBIAN/postrm + +# Prepare documentation +print_info "Including documentation..." +cp README.MD $DEB_DIR/usr/share/doc/hmac-file-server/ +cp INSTALL.MD $DEB_DIR/usr/share/doc/hmac-file-server/ +cp WIKI.MD $DEB_DIR/usr/share/doc/hmac-file-server/ +cp CHANGELOG.MD $DEB_DIR/usr/share/doc/hmac-file-server/ +cp config-example-xmpp.toml $DEB_DIR/usr/share/doc/hmac-file-server/ + +# Create .deb packages +print_status "Building Debian packages..." +for ARCH in amd64 arm64; do + print_info "Creating package for $ARCH..." 
+ + # Update control file for the current architecture + sed "s/ARCH_PLACEHOLDER/$ARCH/" $CONTROL_TEMPLATE > $DEB_DIR/DEBIAN/control + + # Copy binary for current architecture + cp $BUILD_DIR/$ARCH/hmac-file-server $DEB_DIR/usr/local/bin/ + + # Calculate installed size + INSTALLED_SIZE=$(du -sk $DEB_DIR | cut -f1) + echo "Installed-Size: $INSTALLED_SIZE" >> $DEB_DIR/DEBIAN/control + + # Ensure proper permissions + find $DEB_DIR -type d -exec chmod 755 {} \; + find $DEB_DIR -type f -exec chmod 644 {} \; + chmod 0755 $DEB_DIR/usr/local/bin/hmac-file-server + chmod 0755 $DEB_DIR/DEBIAN/postinst + chmod 0755 $DEB_DIR/DEBIAN/prerm + chmod 0755 $DEB_DIR/DEBIAN/postrm + + # Build the .deb package + PACKAGE_FILE="${PACKAGE_NAME}_${VERSION}_${ARCH}.deb" + if dpkg-deb --build $DEB_DIR $PACKAGE_FILE; then + SIZE=$(stat -c%s "$PACKAGE_FILE" | awk '{printf "%.1fMB", $1/1024/1024}') + print_info " Package created: $PACKAGE_FILE ($SIZE)" + else + print_error "Failed to create package for $ARCH" + exit 1 + fi + + # Clean up binary for next build + rm -f $DEB_DIR/usr/local/bin/hmac-file-server + rm -f $DEB_DIR/DEBIAN/control +done + +# Cleanup temporary directories +print_info "Cleaning up build directories..." +rm -rf $BUILD_DIR $DEB_DIR + +# Show results +print_status "Debian package build completed!" +echo "" +print_info "Generated packages:" +for PACKAGE in ${PACKAGE_NAME}_${VERSION}_*.deb; do + if [[ -f "$PACKAGE" ]]; then + SIZE=$(stat -c%s "$PACKAGE" | awk '{printf "%.1fMB", $1/1024/1024}') + print_info " $PACKAGE ($SIZE)" + fi +done + +echo "" +print_info "Installation commands:" +echo " sudo dpkg -i ${PACKAGE_NAME}_${VERSION}_amd64.deb" +echo " sudo dpkg -i ${PACKAGE_NAME}_${VERSION}_arm64.deb" +echo "" +print_info "Package information:" +echo " dpkg -I ${PACKAGE_NAME}_${VERSION}_amd64.deb" +echo " dpkg -c ${PACKAGE_NAME}_${VERSION}_amd64.deb" +echo "" +print_warning "Remember to:" +echo " 1. Edit /etc/hmac-file-server/config.toml" +echo " 2. 
Change the default secret" +echo " 3. Configure Redis/ClamAV if needed" +echo " 4. Start the service: systemctl start hmac-file-server" + +exit 0 diff --git a/builddocker.sh b/builddocker.sh new file mode 100755 index 0000000..9643764 --- /dev/null +++ b/builddocker.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -e + +IMAGE_NAME="hmac-file-server" +DOCKERFILE_PATH="dockerenv/dockerbuild/Dockerfile" +COMPOSE_FILE="dockerenv/docker-compose.yml" + +echo "Building Docker image: $IMAGE_NAME" +docker build -t "$IMAGE_NAME" -f "$DOCKERFILE_PATH" . + +#echo "Starting services using $COMPOSE_FILE" +#docker-compose -f "$COMPOSE_FILE" up -d + +echo "Build and deployment complete." diff --git a/buildgo.sh b/buildgo.sh new file mode 100755 index 0000000..bb0e1bb --- /dev/null +++ b/buildgo.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# HMAC File Server - Build Script + +set -e + +# Colors +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' + +print_status() { + echo -e "${GREEN}[BUILD]${NC} $1" +} + +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check if Go is installed +if ! command -v go &> /dev/null; then + print_error "Go is not installed or not in PATH" + exit 1 +fi + +# Build the application +print_status "Building HMAC File Server v3.2..." +go build -o hmac-file-server cmd/server/main.go cmd/server/helpers.go cmd/server/config_validator.go cmd/server/config_test_scenarios.go + +if [ $? -eq 0 ]; then + print_status "Build successful! Binary created: ./hmac-file-server" + + # Check binary size + SIZE=$(du -h hmac-file-server | cut -f1) + print_info "Binary size: $SIZE" + + # Show help to verify it works + print_info "Testing binary functionality..." + ./hmac-file-server --help > /dev/null 2>&1 + if [ $? -eq 0 ]; then + print_status "Binary is functional!" + else + print_error "Binary test failed" + exit 1 + fi +else + print_error "Build failed!" 
+	metricsEnabled bool   // Neue Variable für die Aktivierung von Metriken
+	bindIP         string // Neue Variable für die gebundene IP-Adresse
+		log.Println("Warning: 'server.metricsenabled' ist in der Konfiguration nicht gesetzt. Standardmäßig deaktiviert.")
Standardmäßig auf 'localhost' gesetzt.")
to fetch metrics: %w", err) + } + defer resp.Body.Close() + + // Limit response body size to prevent memory issues + limitedReader := io.LimitReader(resp.Body, 1024*1024) // 1MB limit + + parser := &expfmt.TextParser{} + metricFamilies, err := parser.TextToMetricFamilies(limitedReader) + if err != nil { + return nil, fmt.Errorf("failed to parse metrics: %w", err) + } + + metrics := make(map[string]float64) + + // More selective metric filtering to reduce processing + relevantPrefixes := []string{ + "hmac_file_server_", + "memory_usage_bytes", + "cpu_usage_percent", + "active_connections_total", + "goroutines_count", + "total_requests", + "average_response_time_ms", + } + + for name, mf := range metricFamilies { + // Quick prefix check to skip irrelevant metrics + relevant := false + for _, prefix := range relevantPrefixes { + if strings.HasPrefix(name, prefix) || name == prefix { + relevant = true + break + } + } + if !relevant { + continue + } + + for _, m := range mf.GetMetric() { + var value float64 + if m.GetGauge() != nil { + value = m.GetGauge().GetValue() + } else if m.GetCounter() != nil { + value = m.GetCounter().GetValue() + } else if m.GetUntyped() != nil { + value = m.GetUntyped().GetValue() + } else { + continue + } + + // Simplified label handling + if len(m.GetLabel()) > 0 { + labels := make([]string, 0, len(m.GetLabel())) + for _, label := range m.GetLabel() { + labels = append(labels, fmt.Sprintf("%s=\"%s\"", label.GetName(), label.GetValue())) + } + metricKey := fmt.Sprintf("%s{%s}", name, strings.Join(labels, ",")) + metrics[metricKey] = value + } else { + metrics[name] = value + } + } + } + + return metrics, nil +} + +// Function to fetch system data +func fetchSystemData() (float64, float64, int, error) { + v, err := mem.VirtualMemory() + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to fetch memory data: %w", err) + } + + c, err := cpu.Percent(0, false) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to fetch CPU data: %w", err) 
+ } + + cores, err := cpu.Counts(true) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to fetch CPU cores: %w", err) + } + + cpuUsage := 0.0 + if len(c) > 0 { + cpuUsage = c[0] + } + + return v.UsedPercent, cpuUsage, cores, nil +} + +// Optimized process list fetching with better resource management +func fetchProcessList() ([]ProcessInfo, error) { + processes, err := process.Processes() + if err != nil { + return nil, fmt.Errorf("failed to fetch processes: %w", err) + } + + // Pre-allocate slice with reasonable capacity + processList := make([]ProcessInfo, 0, len(processes)) + var mu sync.Mutex + var wg sync.WaitGroup + + // Limit concurrent goroutines to prevent resource exhaustion + sem := make(chan struct{}, 5) // Reduced from 10 to 5 + timeout := time.After(10 * time.Second) // Add timeout + + // Process only a subset of processes to reduce load + maxProcesses := 200 + if len(processes) > maxProcesses { + processes = processes[:maxProcesses] + } + + for _, p := range processes { + select { + case <-timeout: + log.Printf("Process list fetch timeout, returning partial results") + return processList, nil + default: + } + + wg.Add(1) + sem <- struct{}{} // Enter semaphore + + go func(p *process.Process) { + defer wg.Done() + defer func() { + <-sem // Exit semaphore + // Recover from any panics in process info fetching + if r := recover(); r != nil { + log.Printf("Process info fetch panic: %v", r) + } + }() + + // Set shorter timeout for individual process operations + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // Use context for process operations where possible + cpuPercent, err := p.CPUPercentWithContext(ctx) + if err != nil { + return + } + + memPercent, err := p.MemoryPercentWithContext(ctx) + if err != nil { + return + } + + name, err := p.NameWithContext(ctx) + if err != nil { + return + } + + // Skip if CPU and memory usage are both very low to reduce noise + if cpuPercent < 0.1 && memPercent < 0.1 { 
+			// Überprüfung, ob der Prozess aktiv ist
totalRequests = 0 + } + + activeConnections, ok := metrics["active_connections_total"] + if !ok { + activeConnections = 0 + } + + averageResponseTime, ok := metrics["average_response_time_ms"] + if !ok { + averageResponseTime = 0.0 + } + + return &ProcessInfo{ + PID: p.Pid, + Name: name, + CPUPercent: cpuPercent, + MemPercent: memPercent, + CommandLine: cmdline, + Uptime: uptime.String(), + Status: status, + ErrorCount: errorCount, + TotalRequests: int64(totalRequests), + ActiveConnections: int(activeConnections), + AverageResponseTime: averageResponseTime, + }, nil + } + } + + return nil, fmt.Errorf("hmac-file-server process not found") +} + +// Optimized error counting with caching and limits +var ( + errorCountCache int + errorCountCacheTime time.Time + errorCountMutex sync.RWMutex +) + +func countHmacErrors() (int, error) { + // Use cached value if recent (within 30 seconds) + errorCountMutex.RLock() + if time.Since(errorCountCacheTime) < 30*time.Second { + count := errorCountCache + errorCountMutex.RUnlock() + return count, nil + } + errorCountMutex.RUnlock() + + // Use the configured log file path + file, err := os.Open(logFilePath) + if err != nil { + return 0, err + } + defer file.Close() + + // Get file size to limit reading for very large files + stat, err := file.Stat() + if err != nil { + return 0, err + } + + // Limit to last 1MB for large log files + var startPos int64 = 0 + if stat.Size() > 1024*1024 { + startPos = stat.Size() - 1024*1024 + file.Seek(startPos, io.SeekStart) + } + + scanner := bufio.NewScanner(file) + errorCount := 0 + lineCount := 0 + maxLines := 1000 // Limit lines scanned + + for scanner.Scan() && lineCount < maxLines { + line := scanner.Text() + if strings.Contains(line, "level=error") { + errorCount++ + } + lineCount++ + } + + if err := scanner.Err(); err != nil { + return 0, err + } + + // Update cache + errorCountMutex.Lock() + errorCountCache = errorCount + errorCountCacheTime = time.Now() + errorCountMutex.Unlock() + + return 
errorCount, nil +} + +// Optimized data structure for caching +type cachedData struct { + systemData systemData + metrics map[string]float64 + processes []ProcessInfo + hmacInfo *ProcessInfo + lastUpdate time.Time + mu sync.RWMutex +} + +type systemData struct { + memUsage float64 + cpuUsage float64 + cores int +} + +var cache = &cachedData{} + +// Optimized updateUI with reduced frequency and better resource management +func updateUI(ctx context.Context, app *tview.Application, pages *tview.Pages, sysPage, hmacPage tview.Primitive) { + // Reduce update frequency significantly + fastTicker := time.NewTicker(5 * time.Second) // UI updates + slowTicker := time.NewTicker(15 * time.Second) // Process list updates + defer fastTicker.Stop() + defer slowTicker.Stop() + + // Worker pool to limit concurrent operations + workerPool := make(chan struct{}, 3) // Max 3 concurrent operations + + // Single goroutine for data collection + go func() { + defer func() { + if r := recover(); r != nil { + log.Printf("Data collection goroutine recovered from panic: %v", r) + } + }() + + for { + select { + case <-ctx.Done(): + return + case <-fastTicker.C: + // Only update system data and metrics (lightweight operations) + select { + case workerPool <- struct{}{}: + go func() { + defer func() { <-workerPool }() + updateSystemAndMetrics() + }() + default: + // Skip if worker pool is full + } + case <-slowTicker.C: + // Update process list less frequently (expensive operation) + select { + case workerPool <- struct{}{}: + go func() { + defer func() { <-workerPool }() + updateProcessData() + }() + default: + // Skip if worker pool is full + } + } + } + }() + + // UI update loop + uiTicker := time.NewTicker(2 * time.Second) + defer uiTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-uiTicker.C: + app.QueueUpdateDraw(func() { + updateUIComponents(pages, sysPage, hmacPage) + }) + } + } +} + +// Separate function to update system data and metrics +func 
updateSystemAndMetrics() { + defer func() { + if r := recover(); r != nil { + log.Printf("updateSystemAndMetrics recovered from panic: %v", r) + } + }() + + // Get system data + memUsage, cpuUsage, cores, err := fetchSystemData() + if err != nil { + log.Printf("Error fetching system data: %v", err) + return + } + + // Get metrics if enabled + var metrics map[string]float64 + if metricsEnabled { + metrics, err = fetchMetrics() + if err != nil { + log.Printf("Error fetching metrics: %v", err) + metrics = make(map[string]float64) // Use empty map on error + } + } + + // Update cache + cache.mu.Lock() + cache.systemData = systemData{memUsage, cpuUsage, cores} + cache.metrics = metrics + cache.lastUpdate = time.Now() + cache.mu.Unlock() +} + +// Separate function to update process data (expensive operation) +func updateProcessData() { + defer func() { + if r := recover(); r != nil { + log.Printf("updateProcessData recovered from panic: %v", r) + } + }() + + // Get process list + processes, err := fetchProcessList() + if err != nil { + log.Printf("Error fetching process list: %v", err) + return + } + + // Get HMAC info + hmacInfo, err := fetchHmacFileServerInfo() + if err != nil { + log.Printf("Error fetching HMAC info: %v", err) + } + + // Update cache + cache.mu.Lock() + cache.processes = processes + cache.hmacInfo = hmacInfo + cache.mu.Unlock() +} + +// Update UI components with cached data +func updateUIComponents(pages *tview.Pages, sysPage, hmacPage tview.Primitive) { + currentPage, _ := pages.GetFrontPage() + + cache.mu.RLock() + defer cache.mu.RUnlock() + + switch currentPage { + case "system": + sysFlex := sysPage.(*tview.Flex) + + // Update system table + sysTable := sysFlex.GetItem(0).(*tview.Table) + updateSystemTable(sysTable, cache.systemData.memUsage, cache.systemData.cpuUsage, cache.systemData.cores) + + // Update metrics table + if metricsEnabled && len(cache.metrics) > 0 { + metricsTable := sysFlex.GetItem(1).(*tview.Table) + 
updateMetricsTable(metricsTable, cache.metrics) + } + + // Update process table + if len(cache.processes) > 0 { + processTable := sysFlex.GetItem(2).(*tview.Table) + updateProcessTable(processTable, cache.processes) + } + + case "hmac": + if cache.hmacInfo != nil { + hmacFlex := hmacPage.(*tview.Flex) + hmacTable := hmacFlex.GetItem(0).(*tview.Table) + updateHmacTable(hmacTable, cache.hmacInfo, cache.metrics) + } + } +} + +// Helper function to update system data table +func updateSystemTable(sysTable *tview.Table, memUsage, cpuUsage float64, cores int) { + sysTable.Clear() + sysTable.SetCell(0, 0, tview.NewTableCell("Metric").SetAttributes(tcell.AttrBold)) + sysTable.SetCell(0, 1, tview.NewTableCell("Value").SetAttributes(tcell.AttrBold)) + + // CPU Usage Row + cpuUsageCell := tview.NewTableCell(fmt.Sprintf("%.2f%%", cpuUsage)) + if cpuUsage > HighUsage { + cpuUsageCell.SetTextColor(tcell.ColorRed) + } else if cpuUsage > MediumUsage { + cpuUsageCell.SetTextColor(tcell.ColorYellow) + } else { + cpuUsageCell.SetTextColor(tcell.ColorGreen) + } + sysTable.SetCell(1, 0, tview.NewTableCell("CPU Usage")) + sysTable.SetCell(1, 1, cpuUsageCell) + + // Memory Usage Row + memUsageCell := tview.NewTableCell(fmt.Sprintf("%.2f%%", memUsage)) + if memUsage > HighUsage { + memUsageCell.SetTextColor(tcell.ColorRed) + } else if memUsage > MediumUsage { + memUsageCell.SetTextColor(tcell.ColorYellow) + } else { + memUsageCell.SetTextColor(tcell.ColorGreen) + } + sysTable.SetCell(2, 0, tview.NewTableCell("Memory Usage")) + sysTable.SetCell(2, 1, memUsageCell) + + // CPU Cores Row + sysTable.SetCell(3, 0, tview.NewTableCell("CPU Cores")) + sysTable.SetCell(3, 1, tview.NewTableCell(fmt.Sprintf("%d", cores))) +} + +// Helper function to update metrics table +func updateMetricsTable(metricsTable *tview.Table, metrics map[string]float64) { + metricsTable.Clear() + metricsTable.SetCell(0, 0, tview.NewTableCell("Metric").SetAttributes(tcell.AttrBold)) + metricsTable.SetCell(0, 1, 
tview.NewTableCell("Value").SetAttributes(tcell.AttrBold)) + + row := 1 + for key, value := range metrics { + metricsTable.SetCell(row, 0, tview.NewTableCell(key)) + metricsTable.SetCell(row, 1, tview.NewTableCell(fmt.Sprintf("%.2f", value))) + row++ + } +} + +// Helper function to update process table +func updateProcessTable(processTable *tview.Table, processes []ProcessInfo) { + processTable.Clear() + processTable.SetCell(0, 0, tview.NewTableCell("PID").SetAttributes(tcell.AttrBold)) + processTable.SetCell(0, 1, tview.NewTableCell("Name").SetAttributes(tcell.AttrBold)) + processTable.SetCell(0, 2, tview.NewTableCell("CPU%").SetAttributes(tcell.AttrBold)) + processTable.SetCell(0, 3, tview.NewTableCell("Mem%").SetAttributes(tcell.AttrBold)) + processTable.SetCell(0, 4, tview.NewTableCell("Command").SetAttributes(tcell.AttrBold)) + + // Sort processes by CPU usage + sort.Slice(processes, func(i, j int) bool { + return processes[i].CPUPercent > processes[j].CPUPercent + }) + + // Limit to top 20 processes + maxRows := 20 + if len(processes) < maxRows { + maxRows = len(processes) + } + + for i := 0; i < maxRows; i++ { + p := processes[i] + processTable.SetCell(i+1, 0, tview.NewTableCell(fmt.Sprintf("%d", p.PID))) + processTable.SetCell(i+1, 1, tview.NewTableCell(p.Name)) + processTable.SetCell(i+1, 2, tview.NewTableCell(fmt.Sprintf("%.2f", p.CPUPercent))) + processTable.SetCell(i+1, 3, tview.NewTableCell(fmt.Sprintf("%.2f", p.MemPercent))) + processTable.SetCell(i+1, 4, tview.NewTableCell(p.CommandLine)) + } +} + +// Helper function to update hmac-table +func updateHmacTable(hmacTable *tview.Table, hmacInfo *ProcessInfo, metrics map[string]float64) { + hmacTable.Clear() + hmacTable.SetCell(0, 0, tview.NewTableCell("Property").SetAttributes(tcell.AttrBold)) + hmacTable.SetCell(0, 1, tview.NewTableCell("Value").SetAttributes(tcell.AttrBold)) + + // Process information + hmacTable.SetCell(1, 0, tview.NewTableCell("PID")) + hmacTable.SetCell(1, 1, 
tview.NewTableCell(fmt.Sprintf("%d", hmacInfo.PID))) + + hmacTable.SetCell(2, 0, tview.NewTableCell("CPU%")) + hmacTable.SetCell(2, 1, tview.NewTableCell(fmt.Sprintf("%.2f", hmacInfo.CPUPercent))) + + hmacTable.SetCell(3, 0, tview.NewTableCell("Mem%")) + hmacTable.SetCell(3, 1, tview.NewTableCell(fmt.Sprintf("%.2f", hmacInfo.MemPercent))) + + hmacTable.SetCell(4, 0, tview.NewTableCell("Command")) + hmacTable.SetCell(4, 1, tview.NewTableCell(hmacInfo.CommandLine)) + + hmacTable.SetCell(5, 0, tview.NewTableCell("Uptime")) + hmacTable.SetCell(5, 1, tview.NewTableCell(hmacInfo.Uptime)) // Neue Zeile fรผr Uptime + + hmacTable.SetCell(6, 0, tview.NewTableCell("Status")) + hmacTable.SetCell(6, 1, tview.NewTableCell(hmacInfo.Status)) // Neue Zeile fรผr Status + + hmacTable.SetCell(7, 0, tview.NewTableCell("Error Count")) + hmacTable.SetCell(7, 1, tview.NewTableCell(fmt.Sprintf("%d", hmacInfo.ErrorCount))) // Neue Zeile fรผr Error Count + + hmacTable.SetCell(8, 0, tview.NewTableCell("Total Requests")) + hmacTable.SetCell(8, 1, tview.NewTableCell(fmt.Sprintf("%d", hmacInfo.TotalRequests))) // Neue Zeile fรผr Total Requests + + hmacTable.SetCell(9, 0, tview.NewTableCell("Active Connections")) + hmacTable.SetCell(9, 1, tview.NewTableCell(fmt.Sprintf("%d", hmacInfo.ActiveConnections))) // Neue Zeile fรผr Active Connections + + hmacTable.SetCell(10, 0, tview.NewTableCell("Avg. 
Response Time (ms)")) + hmacTable.SetCell(10, 1, tview.NewTableCell(fmt.Sprintf("%.2f", hmacInfo.AverageResponseTime))) // Neue Zeile fรผr Average Response Time + + // Metrics related to hmac-file-server + row := 12 + hmacTable.SetCell(row, 0, tview.NewTableCell("Metric").SetAttributes(tcell.AttrBold)) + hmacTable.SetCell(row, 1, tview.NewTableCell("Value").SetAttributes(tcell.AttrBold)) + row++ + + for key, value := range metrics { + if strings.Contains(key, "hmac_file_server_") { + hmacTable.SetCell(row, 0, tview.NewTableCell(key)) + hmacTable.SetCell(row, 1, tview.NewTableCell(fmt.Sprintf("%.2f", value))) + row++ + } + } +} + +func createSystemPage() tview.Primitive { + // Create system data table + sysTable := tview.NewTable().SetBorders(false) + sysTable.SetTitle(" [::b]System Data ").SetBorder(true) + + // Create Prometheus metrics table + metricsTable := tview.NewTable().SetBorders(false) + metricsTable.SetTitle(" [::b]Prometheus Metrics ").SetBorder(true) + + // Create process list table + processTable := tview.NewTable().SetBorders(false) + processTable.SetTitle(" [::b]Process List ").SetBorder(true) + + // Create a flex layout to hold the tables + sysFlex := tview.NewFlex(). + SetDirection(tview.FlexRow). + AddItem(sysTable, 7, 0, false). + AddItem(metricsTable, 0, 1, false). + AddItem(processTable, 0, 2, false) + + return sysFlex +} + +func createHmacPage() tview.Primitive { + hmacTable := tview.NewTable().SetBorders(false) + hmacTable.SetTitle(" [::b]hmac-file-server Details ").SetBorder(true) + + hmacFlex := tview.NewFlex(). + SetDirection(tview.FlexRow). + AddItem(hmacTable, 0, 1, false) + + return hmacFlex +} + +func createLogsPage(ctx context.Context, app *tview.Application, logFilePath string) tview.Primitive { + logsTextView := tview.NewTextView(). + SetDynamicColors(true). + SetRegions(true). 
+ SetWordWrap(true) + logsTextView.SetTitle(" [::b]Logs ").SetBorder(true) + + const numLines = 50 // Reduced from 100 to 50 lines + + // Cache for log content to avoid reading file too frequently + var lastLogUpdate time.Time + var logMutex sync.RWMutex + + // Read logs less frequently and only when on logs page + go func() { + ticker := time.NewTicker(5 * time.Second) // Increased from 2 to 5 seconds + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + // Only update if we haven't updated recently + logMutex.RLock() + timeSinceUpdate := time.Since(lastLogUpdate) + logMutex.RUnlock() + + if timeSinceUpdate < 4*time.Second { + continue + } + + content, err := readLastNLines(logFilePath, numLines) + if err != nil { + app.QueueUpdateDraw(func() { + logsTextView.SetText(fmt.Sprintf("[red]Error reading log file: %v[white]", err)) + }) + continue + } + + // Process the log content with color coding + lines := strings.Split(content, "\n") + var coloredLines []string + + // Limit the number of lines processed + maxLines := min(len(lines), numLines) + coloredLines = make([]string, 0, maxLines) + + for i := len(lines) - maxLines; i < len(lines); i++ { + if i < 0 { + continue + } + line := lines[i] + if strings.Contains(line, "level=info") { + coloredLines = append(coloredLines, "[green]"+line+"[white]") + } else if strings.Contains(line, "level=warn") { + coloredLines = append(coloredLines, "[yellow]"+line+"[white]") + } else if strings.Contains(line, "level=error") { + coloredLines = append(coloredLines, "[red]"+line+"[white]") + } else { + coloredLines = append(coloredLines, line) + } + } + + logContent := strings.Join(coloredLines, "\n") + + // Update cache + logMutex.Lock() + lastLogUpdate = time.Now() + logMutex.Unlock() + + app.QueueUpdateDraw(func() { + logsTextView.SetText(logContent) + }) + } + } + }() + + return logsTextView +} + +// Helper function for min +func min(a, b int) int { + if a < b { + return a + } + return b +} 
+ +// Optimized readLastNLines to handle large files efficiently +func readLastNLines(filePath string, n int) (string, error) { + file, err := os.Open(filePath) + if err != nil { + return "", err + } + defer file.Close() + + const bufferSize = 1024 + buffer := make([]byte, bufferSize) + var content []byte + var fileSize int64 + + fileInfo, err := file.Stat() + if err != nil { + return "", err + } + fileSize = fileInfo.Size() + + var offset int64 = 0 + for { + if fileSize-offset < bufferSize { + offset = fileSize + } else { + offset += bufferSize + } + + _, err := file.Seek(-offset, io.SeekEnd) + if err != nil { + return "", err + } + + bytesRead, err := file.Read(buffer) + if err != nil && err != io.EOF { + return "", err + } + + content = append(buffer[:bytesRead], content...) + + if bytesRead < bufferSize || len(strings.Split(string(content), "\n")) > n+1 { + break + } + + if offset >= fileSize { + break + } + } + + lines := strings.Split(string(content), "\n") + if len(lines) > n { + lines = lines[len(lines)-n:] + } + return strings.Join(lines, "\n"), nil +} + +func main() { + app := tview.NewApplication() + + // Create a cancellable context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create pages + pages := tview.NewPages() + + // System page + sysPage := createSystemPage() + pages.AddPage("system", sysPage, true, true) + + // hmac-file-server page + hmacPage := createHmacPage() + pages.AddPage("hmac", hmacPage, true, false) + + // Logs page mit dem gelesenen logFilePath + logsPage := createLogsPage(ctx, app, logFilePath) + pages.AddPage("logs", logsPage, true, false) + + // Add key binding to switch views and handle exit + app.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Key() == tcell.KeyRune { + switch event.Rune() { + case 'q', 'Q': + cancel() + app.Stop() + return nil + case 's', 'S': + // Switch to system page + pages.SwitchToPage("system") + case 'h', 'H': + // Switch to hmac-file-server 
+	if err := app.SetRoot(pages, true).EnableMouse(true).Run(); err != nil {
+		log.Fatalf("Error running application: %v", err)
+	}
+}
a/cmd/server/config_test_scenarios.go b/cmd/server/config_test_scenarios.go new file mode 100644 index 0000000..0dac8bb --- /dev/null +++ b/cmd/server/config_test_scenarios.go @@ -0,0 +1,294 @@ +// config_test_scenarios.go +package main + +import ( + "fmt" + "os" + "path/filepath" +) + +// ConfigTestScenario represents a test scenario for configuration validation +type ConfigTestScenario struct { + Name string + Config Config + ShouldPass bool + ExpectedErrors []string + ExpectedWarnings []string +} + +// GetConfigTestScenarios returns a set of test scenarios for configuration validation +func GetConfigTestScenarios() []ConfigTestScenario { + baseValidConfig := Config{ + Server: ServerConfig{ + ListenAddress: "8080", + BindIP: "0.0.0.0", + StoragePath: "/tmp/test-storage", + MetricsEnabled: true, + MetricsPort: "9090", + FileTTLEnabled: true, + FileTTL: "24h", + MinFreeBytes: "1GB", + FileNaming: "HMAC", + ForceProtocol: "auto", + PIDFilePath: "/tmp/test.pid", + }, + Security: SecurityConfig{ + Secret: "test-secret-key-32-characters", + EnableJWT: false, + }, + Logging: LoggingConfig{ + Level: "info", + File: "/tmp/test.log", + MaxSize: 100, + MaxBackups: 3, + MaxAge: 30, + }, + Timeouts: TimeoutConfig{ + Read: "30s", + Write: "30s", + Idle: "60s", + }, + Workers: WorkersConfig{ + NumWorkers: 4, + UploadQueueSize: 50, + }, + Uploads: UploadsConfig{ + AllowedExtensions: []string{".txt", ".pdf", ".jpg"}, + ChunkSize: "10MB", + }, + Downloads: DownloadsConfig{ + AllowedExtensions: []string{".txt", ".pdf", ".jpg"}, + ChunkSize: "10MB", + }, + } + + return []ConfigTestScenario{ + { + Name: "Valid Basic Configuration", + Config: baseValidConfig, + ShouldPass: true, + }, + { + Name: "Missing Listen Address", + Config: func() Config { + c := baseValidConfig + c.Server.ListenAddress = "" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"server.listen_address is required"}, + }, + { + Name: "Invalid Port Number", + Config: func() Config { + c := 
baseValidConfig + c.Server.ListenAddress = "99999" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"invalid port number"}, + }, + { + Name: "Invalid IP Address", + Config: func() Config { + c := baseValidConfig + c.Server.BindIP = "999.999.999.999" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"invalid IP address format"}, + }, + { + Name: "Same Port for Server and Metrics", + Config: func() Config { + c := baseValidConfig + c.Server.ListenAddress = "8080" + c.Server.MetricsPort = "8080" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"metrics port cannot be the same as main listen port"}, + }, + { + Name: "JWT Enabled Without Secret", + Config: func() Config { + c := baseValidConfig + c.Security.EnableJWT = true + c.Security.JWTSecret = "" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"JWT secret is required when JWT is enabled"}, + }, + { + Name: "Short JWT Secret", + Config: func() Config { + c := baseValidConfig + c.Security.EnableJWT = true + c.Security.JWTSecret = "short" + c.Security.JWTAlgorithm = "HS256" + return c + }(), + ShouldPass: true, + ExpectedWarnings: []string{"JWT secret should be at least 32 characters"}, + }, + { + Name: "Invalid Log Level", + Config: func() Config { + c := baseValidConfig + c.Logging.Level = "invalid" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"invalid log level"}, + }, + { + Name: "Invalid Timeout Format", + Config: func() Config { + c := baseValidConfig + c.Timeouts.Read = "invalid" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"invalid read timeout format"}, + }, + { + Name: "Negative Worker Count", + Config: func() Config { + c := baseValidConfig + c.Workers.NumWorkers = -1 + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"number of workers must be positive"}, + }, + { + Name: "Extensions Without Dots", + Config: func() Config { + c := baseValidConfig + c.Uploads.AllowedExtensions = 
[]string{"txt", "pdf"} + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"file extensions must start with a dot"}, + }, + { + Name: "High Worker Count Warning", + Config: func() Config { + c := baseValidConfig + c.Workers.NumWorkers = 100 + return c + }(), + ShouldPass: true, + ExpectedWarnings: []string{"very high worker count may impact performance"}, + }, + { + Name: "Deduplication Without Directory", + Config: func() Config { + c := baseValidConfig + c.Deduplication.Enabled = true + c.Deduplication.Directory = "" + return c + }(), + ShouldPass: false, + ExpectedErrors: []string{"deduplication directory is required"}, + }, + } +} + +// RunConfigTests runs all configuration test scenarios +func RunConfigTests() { + scenarios := GetConfigTestScenarios() + passed := 0 + failed := 0 + + fmt.Println("๐Ÿงช Running Configuration Test Scenarios") + fmt.Println("=======================================") + fmt.Println() + + for i, scenario := range scenarios { + fmt.Printf("Test %d: %s\n", i+1, scenario.Name) + + // Create temporary directories for testing + tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("hmac-test-%d", i)) + os.MkdirAll(tempDir, 0755) + defer os.RemoveAll(tempDir) + + // Update paths in config to use temp directory + scenario.Config.Server.StoragePath = filepath.Join(tempDir, "storage") + scenario.Config.Logging.File = filepath.Join(tempDir, "test.log") + scenario.Config.Server.PIDFilePath = filepath.Join(tempDir, "test.pid") + if scenario.Config.Deduplication.Enabled { + scenario.Config.Deduplication.Directory = filepath.Join(tempDir, "dedup") + } + + result := ValidateConfigComprehensive(&scenario.Config) + + // Check if test passed as expected + testPassed := true + if scenario.ShouldPass && result.HasErrors() { + fmt.Printf(" โŒ Expected to pass but failed with errors:\n") + for _, err := range result.Errors { + fmt.Printf(" โ€ข %s\n", err.Message) + } + testPassed = false + } else if !scenario.ShouldPass && !result.HasErrors() { 
+ fmt.Printf(" โŒ Expected to fail but passed\n") + testPassed = false + } else if !scenario.ShouldPass && result.HasErrors() { + // Check if expected errors are present + expectedFound := true + for _, expectedError := range scenario.ExpectedErrors { + found := false + for _, actualError := range result.Errors { + if contains([]string{actualError.Message}, expectedError) || + contains([]string{actualError.Error()}, expectedError) { + found = true + break + } + } + if !found { + fmt.Printf(" โŒ Expected error not found: %s\n", expectedError) + expectedFound = false + } + } + if !expectedFound { + testPassed = false + } + } + + // Check expected warnings + if len(scenario.ExpectedWarnings) > 0 { + for _, expectedWarning := range scenario.ExpectedWarnings { + found := false + for _, actualWarning := range result.Warnings { + if contains([]string{actualWarning.Message}, expectedWarning) || + contains([]string{actualWarning.Error()}, expectedWarning) { + found = true + break + } + } + if !found { + fmt.Printf(" โš ๏ธ Expected warning not found: %s\n", expectedWarning) + } + } + } + + if testPassed { + fmt.Printf(" โœ… Passed\n") + passed++ + } else { + failed++ + } + fmt.Println() + } + + // Summary + fmt.Printf("๐Ÿ“Š Test Results: %d passed, %d failed\n", passed, failed) + if failed > 0 { + fmt.Printf("โŒ Some tests failed. 
Please review the implementation.\n") + os.Exit(1) + } else { + fmt.Printf("โœ… All tests passed!\n") + } +} diff --git a/cmd/server/config_validator.go b/cmd/server/config_validator.go new file mode 100644 index 0000000..b31bf39 --- /dev/null +++ b/cmd/server/config_validator.go @@ -0,0 +1,1131 @@ +// config_validator.go +package main + +import ( + "errors" + "fmt" + "net" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "time" +) + +// ConfigValidationError represents a configuration validation error +type ConfigValidationError struct { + Field string + Value interface{} + Message string +} + +func (e ConfigValidationError) Error() string { + return fmt.Sprintf("config validation error in field '%s': %s (value: %v)", e.Field, e.Message, e.Value) +} + +// ConfigValidationResult contains the results of config validation +type ConfigValidationResult struct { + Errors []ConfigValidationError + Warnings []ConfigValidationError + Valid bool +} + +// AddError adds a validation error +func (r *ConfigValidationResult) AddError(field string, value interface{}, message string) { + r.Errors = append(r.Errors, ConfigValidationError{Field: field, Value: value, Message: message}) + r.Valid = false +} + +// AddWarning adds a validation warning +func (r *ConfigValidationResult) AddWarning(field string, value interface{}, message string) { + r.Warnings = append(r.Warnings, ConfigValidationError{Field: field, Value: value, Message: message}) +} + +// HasErrors returns true if there are validation errors +func (r *ConfigValidationResult) HasErrors() bool { + return len(r.Errors) > 0 +} + +// HasWarnings returns true if there are validation warnings +func (r *ConfigValidationResult) HasWarnings() bool { + return len(r.Warnings) > 0 +} + +// ValidateConfigComprehensive performs comprehensive configuration validation +func ValidateConfigComprehensive(c *Config) *ConfigValidationResult { + result := &ConfigValidationResult{Valid: true} + + // Validate each 
section + validateServerConfig(&c.Server, result) + validateSecurityConfig(&c.Security, result) + validateLoggingConfig(&c.Logging, result) + validateTimeoutConfig(&c.Timeouts, result) + validateUploadsConfig(&c.Uploads, result) + validateDownloadsConfig(&c.Downloads, result) + validateClamAVConfig(&c.ClamAV, result) + validateRedisConfig(&c.Redis, result) + validateWorkersConfig(&c.Workers, result) + validateVersioningConfig(&c.Versioning, result) + validateDeduplicationConfig(&c.Deduplication, result) + validateISOConfig(&c.ISO, result) + + // Cross-section validations + validateCrossSection(c, result) + + // Enhanced validations + validateSystemResources(result) + validateNetworkConnectivity(c, result) + validatePerformanceSettings(c, result) + validateSecurityHardening(c, result) + + // Check disk space for storage paths + if c.Server.StoragePath != "" { + checkDiskSpace(c.Server.StoragePath, result) + } + if c.Deduplication.Enabled && c.Deduplication.Directory != "" { + checkDiskSpace(c.Deduplication.Directory, result) + } + + return result +} + +// validateServerConfig validates server configuration +func validateServerConfig(server *ServerConfig, result *ConfigValidationResult) { + // ListenAddress validation + if server.ListenAddress == "" { + result.AddError("server.listenport", server.ListenAddress, "listen address/port is required") + } else { + if !isValidPort(server.ListenAddress) { + result.AddError("server.listenport", server.ListenAddress, "invalid port number (must be 1-65535)") + } + } + + // BindIP validation + if server.BindIP != "" { + if ip := net.ParseIP(server.BindIP); ip == nil { + result.AddError("server.bind_ip", server.BindIP, "invalid IP address format") + } + } + + // StoragePath validation + if server.StoragePath == "" { + result.AddError("server.storagepath", server.StoragePath, "storage path is required") + } else { + if err := validateDirectoryPath(server.StoragePath, true); err != nil { + result.AddError("server.storagepath", 
server.StoragePath, err.Error()) + } + } + + // MetricsPort validation + if server.MetricsEnabled && server.MetricsPort != "" { + if !isValidPort(server.MetricsPort) { + result.AddError("server.metricsport", server.MetricsPort, "invalid metrics port number") + } + if server.MetricsPort == server.ListenAddress { + result.AddError("server.metricsport", server.MetricsPort, "metrics port cannot be the same as main listen port") + } + } + + // Size validations + if server.MaxUploadSize != "" { + if _, err := parseSize(server.MaxUploadSize); err != nil { + result.AddError("server.max_upload_size", server.MaxUploadSize, "invalid size format") + } + } + + if server.MinFreeBytes != "" { + if _, err := parseSize(server.MinFreeBytes); err != nil { + result.AddError("server.min_free_bytes", server.MinFreeBytes, "invalid size format") + } + } + + // TTL validation + if server.FileTTLEnabled { + if server.FileTTL == "" { + result.AddError("server.filettl", server.FileTTL, "file TTL is required when TTL is enabled") + } else { + if _, err := parseTTL(server.FileTTL); err != nil { + result.AddError("server.filettl", server.FileTTL, "invalid TTL format") + } + } + } + + // File naming validation + validFileNaming := []string{"HMAC", "original", "None"} + if !contains(validFileNaming, server.FileNaming) { + result.AddError("server.file_naming", server.FileNaming, "must be one of: HMAC, original, None") + } + + // Protocol validation + validProtocols := []string{"ipv4", "ipv6", "auto", ""} + if !contains(validProtocols, server.ForceProtocol) { + result.AddError("server.force_protocol", server.ForceProtocol, "must be one of: ipv4, ipv6, auto, or empty") + } + + // PID file validation + if server.PIDFilePath != "" { + dir := filepath.Dir(server.PIDFilePath) + if err := validateDirectoryPath(dir, false); err != nil { + result.AddError("server.pidfilepath", server.PIDFilePath, fmt.Sprintf("PID file directory invalid: %v", err)) + } + } + + // Worker threshold validation + if 
server.EnableDynamicWorkers { + if server.WorkerScaleUpThresh <= 0 { + result.AddError("server.worker_scale_up_thresh", server.WorkerScaleUpThresh, "must be positive when dynamic workers are enabled") + } + if server.WorkerScaleDownThresh <= 0 { + result.AddError("server.worker_scale_down_thresh", server.WorkerScaleDownThresh, "must be positive when dynamic workers are enabled") + } + if server.WorkerScaleDownThresh >= server.WorkerScaleUpThresh { + result.AddWarning("server.worker_scale_down_thresh", server.WorkerScaleDownThresh, "scale down threshold should be lower than scale up threshold") + } + } + + // Extensions validation + for _, ext := range server.GlobalExtensions { + if !strings.HasPrefix(ext, ".") { + result.AddError("server.global_extensions", ext, "file extensions must start with a dot") + } + } +} + +// validateSecurityConfig validates security configuration +func validateSecurityConfig(security *SecurityConfig, result *ConfigValidationResult) { + if security.EnableJWT { + // JWT validation + if strings.TrimSpace(security.JWTSecret) == "" { + result.AddError("security.jwtsecret", security.JWTSecret, "JWT secret is required when JWT is enabled") + } else if len(security.JWTSecret) < 32 { + result.AddWarning("security.jwtsecret", "[REDACTED]", "JWT secret should be at least 32 characters for security") + } + + validAlgorithms := []string{"HS256", "HS384", "HS512", "RS256", "RS384", "RS512", "ES256", "ES384", "ES512"} + if !contains(validAlgorithms, security.JWTAlgorithm) { + result.AddError("security.jwtalgorithm", security.JWTAlgorithm, "unsupported JWT algorithm") + } + + if security.JWTExpiration != "" { + if _, err := time.ParseDuration(security.JWTExpiration); err != nil { + result.AddError("security.jwtexpiration", security.JWTExpiration, "invalid JWT expiration format") + } + } + } else { + // HMAC validation + if strings.TrimSpace(security.Secret) == "" { + result.AddError("security.secret", security.Secret, "HMAC secret is required when JWT 
is disabled") + } else if len(security.Secret) < 16 { + result.AddWarning("security.secret", "[REDACTED]", "HMAC secret should be at least 16 characters for security") + } + } +} + +// validateLoggingConfig validates logging configuration +func validateLoggingConfig(logging *LoggingConfig, result *ConfigValidationResult) { + validLevels := []string{"panic", "fatal", "error", "warn", "warning", "info", "debug", "trace"} + if !contains(validLevels, strings.ToLower(logging.Level)) { + result.AddError("logging.level", logging.Level, "invalid log level") + } + + if logging.File != "" { + dir := filepath.Dir(logging.File) + if err := validateDirectoryPath(dir, false); err != nil { + result.AddError("logging.file", logging.File, fmt.Sprintf("log file directory invalid: %v", err)) + } + } + + if logging.MaxSize <= 0 { + result.AddWarning("logging.max_size", logging.MaxSize, "max size should be positive") + } + + if logging.MaxBackups < 0 { + result.AddWarning("logging.max_backups", logging.MaxBackups, "max backups should be non-negative") + } + + if logging.MaxAge < 0 { + result.AddWarning("logging.max_age", logging.MaxAge, "max age should be non-negative") + } +} + +// validateTimeoutConfig validates timeout configuration +func validateTimeoutConfig(timeouts *TimeoutConfig, result *ConfigValidationResult) { + if timeouts.Read != "" { + if duration, err := time.ParseDuration(timeouts.Read); err != nil { + result.AddError("timeouts.read", timeouts.Read, "invalid read timeout format") + } else if duration <= 0 { + result.AddError("timeouts.read", timeouts.Read, "read timeout must be positive") + } + } + + if timeouts.Write != "" { + if duration, err := time.ParseDuration(timeouts.Write); err != nil { + result.AddError("timeouts.write", timeouts.Write, "invalid write timeout format") + } else if duration <= 0 { + result.AddError("timeouts.write", timeouts.Write, "write timeout must be positive") + } + } + + if timeouts.Idle != "" { + if duration, err := 
time.ParseDuration(timeouts.Idle); err != nil { + result.AddError("timeouts.idle", timeouts.Idle, "invalid idle timeout format") + } else if duration <= 0 { + result.AddError("timeouts.idle", timeouts.Idle, "idle timeout must be positive") + } + } + + if timeouts.Shutdown != "" { + if duration, err := time.ParseDuration(timeouts.Shutdown); err != nil { + result.AddError("timeouts.shutdown", timeouts.Shutdown, "invalid shutdown timeout format") + } else if duration <= 0 { + result.AddError("timeouts.shutdown", timeouts.Shutdown, "shutdown timeout must be positive") + } + } +} + +// validateUploadsConfig validates uploads configuration +func validateUploadsConfig(uploads *UploadsConfig, result *ConfigValidationResult) { + // Validate extensions + for _, ext := range uploads.AllowedExtensions { + if !strings.HasPrefix(ext, ".") { + result.AddError("uploads.allowed_extensions", ext, "file extensions must start with a dot") + } + } + + // Validate chunk size + if uploads.ChunkSize != "" { + if _, err := parseSize(uploads.ChunkSize); err != nil { + result.AddError("uploads.chunk_size", uploads.ChunkSize, "invalid chunk size format") + } + } + + // Validate resumable age + if uploads.MaxResumableAge != "" { + if _, err := time.ParseDuration(uploads.MaxResumableAge); err != nil { + result.AddError("uploads.max_resumable_age", uploads.MaxResumableAge, "invalid resumable age format") + } + } +} + +// validateDownloadsConfig validates downloads configuration +func validateDownloadsConfig(downloads *DownloadsConfig, result *ConfigValidationResult) { + // Validate extensions + for _, ext := range downloads.AllowedExtensions { + if !strings.HasPrefix(ext, ".") { + result.AddError("downloads.allowed_extensions", ext, "file extensions must start with a dot") + } + } + + // Validate chunk size + if downloads.ChunkSize != "" { + if _, err := parseSize(downloads.ChunkSize); err != nil { + result.AddError("downloads.chunk_size", downloads.ChunkSize, "invalid chunk size format") + } + 
} +} + +// validateClamAVConfig validates ClamAV configuration +func validateClamAVConfig(clamav *ClamAVConfig, result *ConfigValidationResult) { + if clamav.ClamAVEnabled { + if clamav.ClamAVSocket == "" { + result.AddWarning("clamav.clamavsocket", clamav.ClamAVSocket, "ClamAV socket path not specified, using default") + } else { + // Check if socket file exists + if _, err := os.Stat(clamav.ClamAVSocket); os.IsNotExist(err) { + result.AddWarning("clamav.clamavsocket", clamav.ClamAVSocket, "ClamAV socket file does not exist") + } + } + + if clamav.NumScanWorkers <= 0 { + result.AddError("clamav.numscanworkers", clamav.NumScanWorkers, "number of scan workers must be positive") + } + + // Validate scan extensions + for _, ext := range clamav.ScanFileExtensions { + if !strings.HasPrefix(ext, ".") { + result.AddError("clamav.scanfileextensions", ext, "file extensions must start with a dot") + } + } + } +} + +// validateRedisConfig validates Redis configuration +func validateRedisConfig(redis *RedisConfig, result *ConfigValidationResult) { + if redis.RedisEnabled { + if redis.RedisAddr == "" { + result.AddError("redis.redisaddr", redis.RedisAddr, "Redis address is required when Redis is enabled") + } else { + // Validate address format (host:port) + if !isValidHostPort(redis.RedisAddr) { + result.AddError("redis.redisaddr", redis.RedisAddr, "invalid Redis address format (should be host:port)") + } + } + + if redis.RedisDBIndex < 0 || redis.RedisDBIndex > 15 { + result.AddWarning("redis.redisdbindex", redis.RedisDBIndex, "Redis DB index is typically 0-15") + } + + if redis.RedisHealthCheckInterval != "" { + if _, err := time.ParseDuration(redis.RedisHealthCheckInterval); err != nil { + result.AddError("redis.redishealthcheckinterval", redis.RedisHealthCheckInterval, "invalid health check interval format") + } + } + } +} + +// validateWorkersConfig validates workers configuration +func validateWorkersConfig(workers *WorkersConfig, result *ConfigValidationResult) { + if 
workers.NumWorkers <= 0 { + result.AddError("workers.numworkers", workers.NumWorkers, "number of workers must be positive") + } + + if workers.UploadQueueSize <= 0 { + result.AddError("workers.uploadqueuesize", workers.UploadQueueSize, "upload queue size must be positive") + } + + // Performance recommendations + if workers.NumWorkers > 50 { + result.AddWarning("workers.numworkers", workers.NumWorkers, "very high worker count may impact performance") + } + + if workers.UploadQueueSize > 1000 { + result.AddWarning("workers.uploadqueuesize", workers.UploadQueueSize, "very large queue size may impact memory usage") + } +} + +// validateVersioningConfig validates versioning configuration +func validateVersioningConfig(versioning *VersioningConfig, result *ConfigValidationResult) { + if versioning.Enabled { + if versioning.MaxRevs <= 0 { + result.AddError("versioning.maxversions", versioning.MaxRevs, "max versions must be positive when versioning is enabled") + } + + validBackends := []string{"filesystem", "database", "s3", ""} + if !contains(validBackends, versioning.Backend) { + result.AddWarning("versioning.backend", versioning.Backend, "unknown versioning backend") + } + } +} + +// validateDeduplicationConfig validates deduplication configuration +func validateDeduplicationConfig(dedup *DeduplicationConfig, result *ConfigValidationResult) { + if dedup.Enabled { + if dedup.Directory == "" { + result.AddError("deduplication.directory", dedup.Directory, "deduplication directory is required when deduplication is enabled") + } else { + if err := validateDirectoryPath(dedup.Directory, true); err != nil { + result.AddError("deduplication.directory", dedup.Directory, err.Error()) + } + } + } +} + +// validateISOConfig validates ISO configuration +func validateISOConfig(iso *ISOConfig, result *ConfigValidationResult) { + if iso.Enabled { + if iso.MountPoint == "" { + result.AddError("iso.mount_point", iso.MountPoint, "mount point is required when ISO is enabled") + } + + if 
iso.Size != "" { + if _, err := parseSize(iso.Size); err != nil { + result.AddError("iso.size", iso.Size, "invalid ISO size format") + } + } + + if iso.ContainerFile == "" { + result.AddWarning("iso.containerfile", iso.ContainerFile, "container file path not specified") + } + + validCharsets := []string{"utf-8", "iso-8859-1", "ascii", ""} + if !contains(validCharsets, strings.ToLower(iso.Charset)) { + result.AddWarning("iso.charset", iso.Charset, "uncommon charset specified") + } + } +} + +// validateCrossSection performs cross-section validations +func validateCrossSection(c *Config, result *ConfigValidationResult) { + // Storage path vs deduplication directory conflict + if c.Deduplication.Enabled && c.Server.StoragePath == c.Deduplication.Directory { + result.AddError("deduplication.directory", c.Deduplication.Directory, "deduplication directory cannot be the same as storage path") + } + + // ISO mount point vs storage path conflict + if c.ISO.Enabled && c.Server.StoragePath == c.ISO.MountPoint { + result.AddWarning("iso.mount_point", c.ISO.MountPoint, "ISO mount point is the same as storage path") + } + + // Extension conflicts between uploads and downloads + if len(c.Uploads.AllowedExtensions) > 0 && len(c.Downloads.AllowedExtensions) > 0 { + uploadExts := make(map[string]bool) + for _, ext := range c.Uploads.AllowedExtensions { + uploadExts[ext] = true + } + + hasCommonExtensions := false + for _, ext := range c.Downloads.AllowedExtensions { + if uploadExts[ext] { + hasCommonExtensions = true + break + } + } + + if !hasCommonExtensions { + result.AddWarning("uploads/downloads.allowed_extensions", "", "no common extensions between uploads and downloads - files may not be downloadable") + } + } + + // Global extensions override warning + if len(c.Server.GlobalExtensions) > 0 && (len(c.Uploads.AllowedExtensions) > 0 || len(c.Downloads.AllowedExtensions) > 0) { + result.AddWarning("server.global_extensions", c.Server.GlobalExtensions, "global extensions will 
override upload/download extension settings") + } +} + +// Enhanced Security Validation Functions + +// checkSecretStrength analyzes the strength of secrets/passwords +func checkSecretStrength(secret string) (score int, issues []string) { + if len(secret) == 0 { + return 0, []string{"secret is empty"} + } + + issues = []string{} + score = 0 + + // Length scoring + if len(secret) >= 32 { + score += 3 + } else if len(secret) >= 16 { + score += 2 + } else if len(secret) >= 8 { + score += 1 + } else { + issues = append(issues, "secret is too short") + } + + // Character variety scoring + hasLower := false + hasUpper := false + hasDigit := false + hasSpecial := false + + for _, char := range secret { + switch { + case char >= 'a' && char <= 'z': + hasLower = true + case char >= 'A' && char <= 'Z': + hasUpper = true + case char >= '0' && char <= '9': + hasDigit = true + case strings.ContainsRune("!@#$%^&*()_+-=[]{}|;:,.<>?", char): + hasSpecial = true + } + } + + varietyCount := 0 + if hasLower { + varietyCount++ + } + if hasUpper { + varietyCount++ + } + if hasDigit { + varietyCount++ + } + if hasSpecial { + varietyCount++ + } + + score += varietyCount + + if varietyCount < 3 { + issues = append(issues, "secret should contain uppercase, lowercase, numbers, and special characters") + } + + // Check for common patterns + lowerSecret := strings.ToLower(secret) + commonWeakPasswords := []string{ + "password", "123456", "qwerty", "admin", "root", "test", "guest", + "secret", "hmac", "server", "default", "changeme", "example", + "demo", "temp", "temporary", "fileserver", "upload", "download", + } + + for _, weak := range commonWeakPasswords { + if strings.Contains(lowerSecret, weak) { + issues = append(issues, fmt.Sprintf("contains common weak pattern: %s", weak)) + score -= 2 + } + } + + // Check for repeated characters + if hasRepeatedChars(secret) { + issues = append(issues, "contains too many repeated characters") + score -= 1 + } + + // Ensure score doesn't go negative + 
if score < 0 { + score = 0 + } + + return score, issues +} + +// hasRepeatedChars checks if a string has excessive repeated characters +func hasRepeatedChars(s string) bool { + if len(s) < 4 { + return false + } + + for i := 0; i <= len(s)-3; i++ { + if s[i] == s[i+1] && s[i+1] == s[i+2] { + return true + } + } + + return false +} + +// isDefaultOrExampleSecret checks if a secret appears to be a default/example value +func isDefaultOrExampleSecret(secret string) bool { + defaultSecrets := []string{ + "your-secret-key-here", + "change-this-secret", + "example-secret", + "default-secret", + "test-secret", + "demo-secret", + "sample-secret", + "placeholder", + "PUT_YOUR_SECRET_HERE", + "CHANGE_ME", + "YOUR_JWT_SECRET", + "your-hmac-secret", + "supersecret", + "secretkey", + "myverysecuresecret", + } + + lowerSecret := strings.ToLower(strings.TrimSpace(secret)) + + for _, defaultSecret := range defaultSecrets { + if strings.Contains(lowerSecret, strings.ToLower(defaultSecret)) { + return true + } + } + + // Check for obvious patterns + if strings.Contains(lowerSecret, "example") || + strings.Contains(lowerSecret, "default") || + strings.Contains(lowerSecret, "change") || + strings.Contains(lowerSecret, "replace") || + strings.Contains(lowerSecret, "todo") || + strings.Contains(lowerSecret, "fixme") { + return true + } + + return false +} + +// calculateEntropy calculates the Shannon entropy of a string +func calculateEntropy(s string) float64 { + if len(s) == 0 { + return 0 + } + + // Count character frequencies + freq := make(map[rune]int) + for _, char := range s { + freq[char]++ + } + + // Calculate entropy + entropy := 0.0 + length := float64(len(s)) + + for _, count := range freq { + if count > 0 { + p := float64(count) / length + entropy -= p * (float64(count) / length) // Simplified calculation + } + } + + return entropy +} + +// validateSecretSecurity performs comprehensive secret security validation +func validateSecretSecurity(fieldName, secret string, result 
*ConfigValidationResult) { + if secret == "" { + return // Already handled by other validators + } + + // Check for default/example secrets + if isDefaultOrExampleSecret(secret) { + result.AddError(fieldName, "[REDACTED]", "appears to be a default or example secret - must be changed") + return + } + + // Check secret strength + score, issues := checkSecretStrength(secret) + + if score < 3 { + for _, issue := range issues { + result.AddError(fieldName, "[REDACTED]", fmt.Sprintf("weak secret: %s", issue)) + } + } else if score < 6 { + for _, issue := range issues { + result.AddWarning(fieldName, "[REDACTED]", fmt.Sprintf("secret could be stronger: %s", issue)) + } + } + + // Check entropy (simplified) + entropy := calculateEntropy(secret) + if entropy < 3.0 { + result.AddWarning(fieldName, "[REDACTED]", "secret has low entropy - consider using more varied characters") + } + + // Length-specific warnings + if len(secret) > 256 { + result.AddWarning(fieldName, "[REDACTED]", "secret is very long - may impact performance") + } +} + +// validateSystemResources checks system resource availability +func validateSystemResources(result *ConfigValidationResult) { + // Check available CPU cores + cpuCores := runtime.NumCPU() + if cpuCores < 2 { + result.AddWarning("system.cpu", cpuCores, "minimum 2 CPU cores recommended for optimal performance") + } else if cpuCores < 4 { + result.AddWarning("system.cpu", cpuCores, "4+ CPU cores recommended for high-load environments") + } + + // Check available memory (basic check through runtime) + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + + // Basic memory availability check (simplified version) + // This checks current Go heap, but for production we'd want system memory + allocMB := float64(memStats.Alloc) / 1024 / 1024 + if allocMB > 512 { + result.AddWarning("system.memory", allocMB, "current memory usage is high - ensure adequate system memory") + } + + // Check for potential resource constraints + numGoroutines 
:= runtime.NumGoroutine() + if numGoroutines > 1000 { + result.AddWarning("system.goroutines", numGoroutines, "high goroutine count may indicate resource constraints") + } +} + +// validateNetworkConnectivity tests network connectivity to external services +func validateNetworkConnectivity(c *Config, result *ConfigValidationResult) { + // Test Redis connectivity if enabled + if c.Redis.RedisEnabled && c.Redis.RedisAddr != "" { + if err := testNetworkConnection("tcp", c.Redis.RedisAddr, 5*time.Second); err != nil { + result.AddWarning("redis.connectivity", c.Redis.RedisAddr, fmt.Sprintf("cannot connect to Redis: %v", err)) + } + } + + // Test ClamAV connectivity if enabled + if c.ClamAV.ClamAVEnabled && c.ClamAV.ClamAVSocket != "" { + // For Unix socket, test file existence and permissions + if strings.HasPrefix(c.ClamAV.ClamAVSocket, "/") { + if stat, err := os.Stat(c.ClamAV.ClamAVSocket); err != nil { + result.AddWarning("clamav.connectivity", c.ClamAV.ClamAVSocket, fmt.Sprintf("ClamAV socket not accessible: %v", err)) + } else if stat.Mode()&os.ModeSocket == 0 { + result.AddWarning("clamav.connectivity", c.ClamAV.ClamAVSocket, "specified path is not a socket file") + } + } else { + // Assume TCP connection format + if err := testNetworkConnection("tcp", c.ClamAV.ClamAVSocket, 5*time.Second); err != nil { + result.AddWarning("clamav.connectivity", c.ClamAV.ClamAVSocket, fmt.Sprintf("cannot connect to ClamAV: %v", err)) + } + } + } +} + +// testNetworkConnection attempts to connect to a network address +func testNetworkConnection(network, address string, timeout time.Duration) error { + conn, err := net.DialTimeout(network, address, timeout) + if err != nil { + return err + } + defer conn.Close() + return nil +} + +// validatePerformanceSettings analyzes configuration for performance implications +func validatePerformanceSettings(c *Config, result *ConfigValidationResult) { + // Check worker configuration against system resources + cpuCores := runtime.NumCPU() + + 
if c.Workers.NumWorkers > cpuCores*4 { + result.AddWarning("workers.performance", c.Workers.NumWorkers, + fmt.Sprintf("worker count (%d) significantly exceeds CPU cores (%d) - may cause context switching overhead", + c.Workers.NumWorkers, cpuCores)) + } + + // Check ClamAV scan workers + if c.ClamAV.ClamAVEnabled && c.ClamAV.NumScanWorkers > cpuCores { + result.AddWarning("clamav.performance", c.ClamAV.NumScanWorkers, + fmt.Sprintf("scan workers (%d) exceed CPU cores (%d) - may impact scanning performance", + c.ClamAV.NumScanWorkers, cpuCores)) + } + + // Check timeout configurations for performance balance + if c.Timeouts.Read != "" { + if duration, err := time.ParseDuration(c.Timeouts.Read); err == nil { + if duration > 300*time.Second { + result.AddWarning("timeouts.performance", c.Timeouts.Read, "very long read timeout may impact server responsiveness") + } + } + } + + // Check upload size vs available resources + if c.Server.MaxUploadSize != "" { + if size, err := parseSize(c.Server.MaxUploadSize); err == nil { + if size > 10*1024*1024*1024 { // 10GB + result.AddWarning("server.performance", c.Server.MaxUploadSize, "very large max upload size requires adequate disk space and memory") + } + } + } + + // Check for potential memory-intensive configurations + if c.Workers.UploadQueueSize > 500 && c.Workers.NumWorkers > 20 { + result.AddWarning("workers.memory", fmt.Sprintf("queue:%d workers:%d", c.Workers.UploadQueueSize, c.Workers.NumWorkers), + "high queue size with many workers may consume significant memory") + } +} + +// validateSecurityHardening performs advanced security validation +func validateSecurityHardening(c *Config, result *ConfigValidationResult) { + // Check for default or weak configurations + if c.Security.EnableJWT { + if c.Security.JWTSecret == "your-secret-key-here" || c.Security.JWTSecret == "changeme" { + result.AddError("security.jwtsecret", "[REDACTED]", "JWT secret appears to be a default value - change immediately") + } + + // Check JWT 
algorithm strength + weakAlgorithms := []string{"HS256"} // HS256 is considered less secure than RS256 + if contains(weakAlgorithms, c.Security.JWTAlgorithm) { + result.AddWarning("security.jwtalgorithm", c.Security.JWTAlgorithm, "consider using RS256 or ES256 for enhanced security") + } + } else { + if c.Security.Secret == "your-secret-key-here" || c.Security.Secret == "changeme" || c.Security.Secret == "secret" { + result.AddError("security.secret", "[REDACTED]", "HMAC secret appears to be a default value - change immediately") + } + } + + // Check for insecure bind configurations + if c.Server.BindIP == "0.0.0.0" { + result.AddWarning("server.bind_ip", c.Server.BindIP, "binding to 0.0.0.0 exposes service to all interfaces - ensure firewall protection") + } + + // Check for development/debug settings in production + if c.Logging.Level == "debug" || c.Logging.Level == "trace" { + result.AddWarning("logging.security", c.Logging.Level, "debug/trace logging may expose sensitive information - use 'info' or 'warn' in production") + } + + // Check file permissions for sensitive paths + if c.Server.StoragePath != "" { + if stat, err := os.Stat(c.Server.StoragePath); err == nil { + mode := stat.Mode().Perm() + if mode&0077 != 0 { // World or group writable + result.AddWarning("server.storagepath.permissions", c.Server.StoragePath, "storage directory permissions allow group/world access - consider restricting to owner-only") + } + } + } +} + +// checkDiskSpace validates available disk space for storage paths +func checkDiskSpace(path string, result *ConfigValidationResult) { + if stat, err := os.Stat(path); err == nil && stat.IsDir() { + // Get available space (platform-specific implementation would be more robust) + // This is a simplified check - in production, use syscall.Statfs on Unix or similar + + // For now, we'll just check if we can write a test file + testFile := filepath.Join(path, ".disk_space_test") + if f, err := os.Create(testFile); err != nil { + 
// isValidPort reports whether port is the decimal representation of a
// TCP/UDP port number in the range [1, 65535].
func isValidPort(port string) bool {
	n, err := strconv.Atoi(port)
	return err == nil && n >= 1 && n <= 65535
}

// hostnamePattern matches dot-separated hostname labels of up to 63
// characters that begin and end with an alphanumeric character
// (RFC 952/1123 style). Compiled once at package level rather than on
// every call.
var hostnamePattern = regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)*$`)

// isValidHostPort reports whether hostPort is a well-formed "host:port"
// pair. The host part may be an IP address, a hostname, or empty
// (meaning localhost/all interfaces); the port must be a valid port number.
func isValidHostPort(hostPort string) bool {
	host, port, err := net.SplitHostPort(hostPort)
	if err != nil || !isValidPort(port) {
		return false
	}
	switch {
	case host == "":
		return true // empty host is acceptable (e.g. ":8080")
	case net.ParseIP(host) != nil:
		return true // literal IPv4/IPv6 address
	default:
		return hostnamePattern.MatchString(host)
	}
}
directory: %v", err) + } else if !stat.IsDir() { + return fmt.Errorf("path exists but is not a directory: %s", path) + } + + // Check if directory is writable + testFile := filepath.Join(path, ".write_test") + if f, err := os.Create(testFile); err != nil { + return fmt.Errorf("directory is not writable: %v", err) + } else { + f.Close() + os.Remove(testFile) + } + + return nil +} + +// contains checks if a slice contains a string +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} + +// PrintValidationResults prints the validation results in a user-friendly format +func PrintValidationResults(result *ConfigValidationResult) { + if result.HasErrors() { + log.Error("โŒ Configuration validation failed with the following errors:") + for _, err := range result.Errors { + log.Errorf(" โ€ข %s", err.Error()) + } + fmt.Println() + } + + if result.HasWarnings() { + log.Warn("โš ๏ธ Configuration validation completed with warnings:") + for _, warn := range result.Warnings { + log.Warnf(" โ€ข %s", warn.Error()) + } + fmt.Println() + } + + if !result.HasErrors() && !result.HasWarnings() { + log.Info("โœ… Configuration validation passed successfully!") + } +} + +// runSpecializedValidation performs targeted validation based on flags +func runSpecializedValidation(c *Config, security, performance, connectivity, quiet, verbose, fixable bool) { + result := &ConfigValidationResult{Valid: true} + + if verbose { + log.Info("Running specialized validation with detailed output...") + fmt.Println() + } + + // Run only the requested validation types + if security { + if verbose { + log.Info("๐Ÿ” Running security validation checks...") + } + validateSecurityConfig(&c.Security, result) + validateSecurityHardening(c, result) + } + + if performance { + if verbose { + log.Info("โšก Running performance validation checks...") + } + validatePerformanceSettings(c, result) + validateSystemResources(result) + } + + 
if connectivity { + if verbose { + log.Info("๐ŸŒ Running connectivity validation checks...") + } + validateNetworkConnectivity(c, result) + } + + // If no specific type is requested, run basic validation + if !security && !performance && !connectivity { + if verbose { + log.Info("๐Ÿ” Running comprehensive validation...") + } + result = ValidateConfigComprehensive(c) + } + + // Filter results based on flags + if fixable { + filterFixableIssues(result) + } + + // Output results based on verbosity + if quiet { + printQuietValidationResults(result) + } else if verbose { + printVerboseValidationResults(result) + } else { + PrintValidationResults(result) + } + + // Exit with appropriate code + if result.HasErrors() { + os.Exit(1) + } +} + +// filterFixableIssues removes non-fixable issues from results +func filterFixableIssues(result *ConfigValidationResult) { + fixablePatterns := []string{ + "permissions", + "directory", + "default value", + "debug logging", + "size format", + "timeout format", + "port number", + "IP address", + } + + var fixableErrors []ConfigValidationError + var fixableWarnings []ConfigValidationError + + for _, err := range result.Errors { + for _, pattern := range fixablePatterns { + if strings.Contains(strings.ToLower(err.Message), pattern) { + fixableErrors = append(fixableErrors, err) + break + } + } + } + + for _, warn := range result.Warnings { + for _, pattern := range fixablePatterns { + if strings.Contains(strings.ToLower(warn.Message), pattern) { + fixableWarnings = append(fixableWarnings, warn) + break + } + } + } + + result.Errors = fixableErrors + result.Warnings = fixableWarnings + result.Valid = len(fixableErrors) == 0 +} + +// printQuietValidationResults prints only errors +func printQuietValidationResults(result *ConfigValidationResult) { + if result.HasErrors() { + for _, err := range result.Errors { + fmt.Printf("ERROR: %s\n", err.Error()) + } + } +} + +// printVerboseValidationResults prints detailed validation information 
+func printVerboseValidationResults(result *ConfigValidationResult) { + fmt.Println("๐Ÿ“Š DETAILED VALIDATION REPORT") + fmt.Println("============================") + fmt.Println() + + // System information + fmt.Printf("๐Ÿ–ฅ๏ธ System: %d CPU cores, %d goroutines\n", runtime.NumCPU(), runtime.NumGoroutine()) + + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + fmt.Printf("๐Ÿ’พ Memory: %.2f MB allocated\n", float64(memStats.Alloc)/1024/1024) + fmt.Println() + + // Validation summary + fmt.Printf("โœ… Checks passed: %d\n", countPassedChecks(result)) + fmt.Printf("โš ๏ธ Warnings: %d\n", len(result.Warnings)) + fmt.Printf("โŒ Errors: %d\n", len(result.Errors)) + fmt.Println() + + // Detailed results + if result.HasErrors() { + fmt.Println("๐Ÿšจ CONFIGURATION ERRORS:") + for i, err := range result.Errors { + fmt.Printf(" %d. Field: %s\n", i+1, err.Field) + fmt.Printf(" Issue: %s\n", err.Message) + fmt.Printf(" Value: %v\n", err.Value) + fmt.Println() + } + } + + if result.HasWarnings() { + fmt.Println("โš ๏ธ CONFIGURATION WARNINGS:") + for i, warn := range result.Warnings { + fmt.Printf(" %d. 
Field: %s\n", i+1, warn.Field) + fmt.Printf(" Issue: %s\n", warn.Message) + fmt.Printf(" Value: %v\n", warn.Value) + fmt.Println() + } + } + + if !result.HasErrors() && !result.HasWarnings() { + fmt.Println("๐ŸŽ‰ All validation checks passed successfully!") + } +} + +// countPassedChecks estimates the number of successful validation checks +func countPassedChecks(result *ConfigValidationResult) int { + // Rough estimate: total possible checks minus errors and warnings + totalPossibleChecks := 50 // Approximate number of validation checks + return totalPossibleChecks - len(result.Errors) - len(result.Warnings) +} diff --git a/cmd/server/helpers.go b/cmd/server/helpers.go new file mode 100644 index 0000000..01f267a --- /dev/null +++ b/cmd/server/helpers.go @@ -0,0 +1,713 @@ +package main + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net" + "net/http" + "os" + "os/signal" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/dutchcoders/go-clamd" + "github.com/go-redis/redis/v8" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/mem" + "gopkg.in/natefinch/lumberjack.v2" +) + +// WorkerPool represents a pool of workers +type WorkerPool struct { + workers int + taskQueue chan UploadTask + scanQueue chan ScanTask + ctx context.Context + cancel context.CancelFunc +} + +// NewWorkerPool creates a new worker pool +func NewWorkerPool(workers int, queueSize int) *WorkerPool { + ctx, cancel := context.WithCancel(context.Background()) + return &WorkerPool{ + workers: workers, + taskQueue: make(chan UploadTask, queueSize), + scanQueue: make(chan ScanTask, queueSize), + ctx: ctx, + cancel: cancel, + } +} + +// Start starts the worker pool +func (wp *WorkerPool) Start() { + for i := 0; i < wp.workers; i++ { + go wp.worker() + } +} + +// Stop stops the worker pool +func (wp *WorkerPool) Stop() { + 
wp.cancel() + close(wp.taskQueue) + close(wp.scanQueue) +} + +// worker is the worker function +func (wp *WorkerPool) worker() { + for { + select { + case <-wp.ctx.Done(): + return + case task := <-wp.taskQueue: + if task.Result != nil { + task.Result <- nil // Simple implementation + } + case scanTask := <-wp.scanQueue: + err := processScan(scanTask) + if scanTask.Result != nil { + scanTask.Result <- err + } + } + } +} + +// Stub for precacheStoragePath +func precacheStoragePath(storagePath string) error { + // TODO: Implement actual pre-caching logic + // This would typically involve walking the storagePath + // and loading file information into a cache. + log.Infof("Pre-caching for storage path '%s' is a stub and not yet implemented.", storagePath) + return nil +} + +func checkFreeSpaceWithRetry(path string, retries int, delay time.Duration) error { + for i := 0; i < retries; i++ { + minFreeBytes, err := parseSize(conf.Server.MinFreeBytes) + if err != nil { + log.Fatalf("Invalid MinFreeBytes: %v", err) + } + if err := checkStorageSpace(path, minFreeBytes); err != nil { + log.Warnf("Free space check failed (attempt %d/%d): %v", i+1, retries, err) + time.Sleep(delay) + continue + } + return nil + } + return fmt.Errorf("insufficient free space after %d attempts", retries) +} + +func handleFileCleanup(conf *Config) { + if !conf.Server.FileTTLEnabled { + log.Println("File TTL is disabled.") + return + } + + ttlDuration, err := parseTTL(conf.Server.FileTTL) + if err != nil { + log.Fatalf("Invalid TTL configuration: %v", err) + } + + log.Printf("TTL cleanup enabled. 
Files older than %v will be deleted.", ttlDuration) + ticker := time.NewTicker(24 * time.Hour) + defer ticker.Stop() + + for range ticker.C { + deleteOldFiles(conf, ttlDuration) + } +} + +func computeSHA256(ctx context.Context, filePath string) (string, error) { + if filePath == "" { + return "", fmt.Errorf("file path is empty") + } + file, err := os.Open(filePath) + if err != nil { + return "", fmt.Errorf("failed to open file %s: %w", filePath, err) + } + defer file.Close() + + hasher := sha256.New() + if _, err := io.Copy(hasher, file); err != nil { + return "", fmt.Errorf("failed to hash file: %w", err) + } + return hex.EncodeToString(hasher.Sum(nil)), nil +} + +func handleDeduplication(ctx context.Context, absFilename string) error { + checksum, err := computeSHA256(ctx, absFilename) + if err != nil { + return err + } + + dedupDir := conf.Deduplication.Directory + if dedupDir == "" { + return fmt.Errorf("deduplication directory not configured") + } + + dedupPath := filepath.Join(dedupDir, checksum) + if err := os.MkdirAll(dedupPath, os.ModePerm); err != nil { + return err + } + + existingPath := filepath.Join(dedupPath, filepath.Base(absFilename)) + if _, err := os.Stat(existingPath); err == nil { + return os.Link(existingPath, absFilename) + } + + if err := os.Rename(absFilename, existingPath); err != nil { + return err + } + + return os.Link(existingPath, absFilename) +} + +func handleISOContainer(absFilename string) error { + isoPath := filepath.Join(conf.ISO.MountPoint, "container.iso") + if err := CreateISOContainer([]string{absFilename}, isoPath, conf.ISO.Size, conf.ISO.Charset); err != nil { + return err + } + if err := MountISOContainer(isoPath, conf.ISO.MountPoint); err != nil { + return err + } + return UnmountISOContainer(conf.ISO.MountPoint) +} + +func sanitizeFilePath(baseDir, filePath string) (string, error) { + absBaseDir, err := filepath.Abs(baseDir) + if err != nil { + return "", err + } + absFilePath, err := 
filepath.Abs(filepath.Join(absBaseDir, filePath)) + if err != nil { + return "", err + } + if !strings.HasPrefix(absFilePath, absBaseDir) { + return "", fmt.Errorf("invalid file path: %s", filePath) + } + return absFilePath, nil +} + +// Stub for formatBytes +func formatBytes(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %ciB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} + +// Stub for deleteOldFiles +func deleteOldFiles(conf *Config, ttlDuration time.Duration) { + // TODO: Implement actual file deletion logic based on TTL + log.Infof("deleteOldFiles is a stub and not yet implemented. It would check for files older than %v.", ttlDuration) +} + +// Stub for CreateISOContainer +func CreateISOContainer(files []string, isoPath, size, charset string) error { + // TODO: Implement actual ISO container creation logic + log.Infof("CreateISOContainer is a stub and not yet implemented. It would create an ISO at %s.", isoPath) + return nil +} + +// Stub for MountISOContainer +func MountISOContainer(isoPath, mountPoint string) error { + // TODO: Implement actual ISO container mounting logic + log.Infof("MountISOContainer is a stub and not yet implemented. It would mount %s to %s.", isoPath, mountPoint) + return nil +} + +// Stub for UnmountISOContainer +func UnmountISOContainer(mountPoint string) error { + // TODO: Implement actual ISO container unmounting logic + log.Infof("UnmountISOContainer is a stub and not yet implemented. 
It would unmount %s.", mountPoint) + return nil +} + +func checkStorageSpace(storagePath string, minFreeBytes int64) error { + var stat syscall.Statfs_t + if err := syscall.Statfs(storagePath, &stat); err != nil { + return err + } + availableBytes := stat.Bavail * uint64(stat.Bsize) + if int64(availableBytes) < minFreeBytes { + return fmt.Errorf("not enough space: available %d < required %d", availableBytes, minFreeBytes) + } + return nil +} + +// setupLogging initializes logging configuration +func setupLogging() { + log.Infof("DEBUG: Starting setupLogging function") + if conf.Logging.File != "" { + log.Infof("DEBUG: Setting up file logging to: %s", conf.Logging.File) + log.SetOutput(&lumberjack.Logger{ + Filename: conf.Logging.File, + MaxSize: conf.Logging.MaxSize, + MaxBackups: conf.Logging.MaxBackups, + MaxAge: conf.Logging.MaxAge, + Compress: conf.Logging.Compress, + }) + log.Infof("Logging configured to file: %s", conf.Logging.File) + } + log.Infof("DEBUG: setupLogging function completed") +} + +// logSystemInfo logs system information +func logSystemInfo() { + memStats, err := mem.VirtualMemory() + if err != nil { + log.Warnf("Failed to get memory stats: %v", err) + } else { + log.Infof("System Memory: Total=%s, Available=%s, Used=%.1f%%", + formatBytes(int64(memStats.Total)), + formatBytes(int64(memStats.Available)), + memStats.UsedPercent) + } + + cpuStats, err := cpu.Info() + if err != nil { + log.Warnf("Failed to get CPU stats: %v", err) + } else if len(cpuStats) > 0 { + log.Infof("CPU: %s, Cores=%d", cpuStats[0].ModelName, len(cpuStats)) + } + + log.Infof("Go Runtime: Version=%s, NumCPU=%d, NumGoroutine=%d", + runtime.Version(), runtime.NumCPU(), runtime.NumGoroutine()) +} + +// initMetrics initializes Prometheus metrics +func initMetrics() { + uploadDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "upload_duration_seconds", + Help: "Duration of upload operations in seconds", + }) + + uploadErrorsTotal = 
prometheus.NewCounter(prometheus.CounterOpts{ + Name: "upload_errors_total", + Help: "Total number of upload errors", + }) + + uploadsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "uploads_total", + Help: "Total number of uploads", + }) + + downloadDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "download_duration_seconds", + Help: "Duration of download operations in seconds", + }) + + downloadsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "downloads_total", + Help: "Total number of downloads", + }) + + downloadErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "download_errors_total", + Help: "Total number of download errors", + }) + + memoryUsage = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "memory_usage_percent", + Help: "Current memory usage percentage", + }) + + cpuUsage = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "cpu_usage_percent", + Help: "Current CPU usage percentage", + }) + + activeConnections = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "active_connections_total", + Help: "Number of active connections", + }) + + requestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "requests_total", + Help: "Total number of requests", + }, []string{"method", "status"}) + + goroutines = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "goroutines_total", + Help: "Number of goroutines", + }) + + uploadSizeBytes = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "upload_size_bytes", + Help: "Size of uploaded files in bytes", + }) + + downloadSizeBytes = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "download_size_bytes", + Help: "Size of downloaded files in bytes", + }) + + filesDeduplicatedTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "files_deduplicated_total", + Help: "Total number of deduplicated files", + }) + + deduplicationErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "deduplication_errors_total", 
+ Help: "Total number of deduplication errors", + }) + + isoContainersCreatedTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "iso_containers_created_total", + Help: "Total number of ISO containers created", + }) + + isoCreationErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "iso_creation_errors_total", + Help: "Total number of ISO creation errors", + }) + + isoContainersMountedTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "iso_containers_mounted_total", + Help: "Total number of ISO containers mounted", + }) + + isoMountErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "iso_mount_errors_total", + Help: "Total number of ISO mount errors", + }) + + workerAdjustmentsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "worker_adjustments_total", + Help: "Total number of worker adjustments", + }) + + workerReAdjustmentsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "worker_readjustments_total", + Help: "Total number of worker readjustments", + }) + + // Register all metrics + prometheus.MustRegister( + uploadDuration, uploadErrorsTotal, uploadsTotal, + downloadDuration, downloadsTotal, downloadErrorsTotal, + memoryUsage, cpuUsage, activeConnections, requestsTotal, + goroutines, uploadSizeBytes, downloadSizeBytes, + filesDeduplicatedTotal, deduplicationErrorsTotal, + isoContainersCreatedTotal, isoCreationErrorsTotal, + isoContainersMountedTotal, isoMountErrorsTotal, + workerAdjustmentsTotal, workerReAdjustmentsTotal, + ) + + log.Info("Prometheus metrics initialized successfully") +} + +// scanFileWithClamAV scans a file using ClamAV +func scanFileWithClamAV(filename string) error { + if clamClient == nil { + return fmt.Errorf("ClamAV client not initialized") + } + + result, err := clamClient.ScanFile(filename) + if err != nil { + return fmt.Errorf("ClamAV scan failed: %w", err) + } + + // Handle the result channel + if result != nil { + select { + case scanResult := <-result: + if 
scanResult != nil && scanResult.Status != "OK" { + return fmt.Errorf("virus detected in %s: %s", filename, scanResult.Status) + } + case <-time.After(30 * time.Second): + return fmt.Errorf("ClamAV scan timeout for file: %s", filename) + } + } + + log.Debugf("File %s passed ClamAV scan", filename) + return nil +} + +// initClamAV initializes ClamAV client +func initClamAV(socketPath string) (*clamd.Clamd, error) { + if socketPath == "" { + socketPath = "/var/run/clamav/clamd.ctl" + } + + client := clamd.NewClamd(socketPath) + + // Test connection + err := client.Ping() + if err != nil { + return nil, fmt.Errorf("failed to ping ClamAV daemon: %w", err) + } + + log.Infof("ClamAV client initialized with socket: %s", socketPath) + return client, nil +} + +// initRedis initializes Redis client +func initRedis() { + redisClient = redis.NewClient(&redis.Options{ + Addr: conf.Redis.RedisAddr, + Password: conf.Redis.RedisPassword, + DB: conf.Redis.RedisDBIndex, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _, err := redisClient.Ping(ctx).Result() + if err != nil { + log.Warnf("Failed to connect to Redis: %v", err) + redisConnected = false + } else { + log.Info("Redis client initialized successfully") + redisConnected = true + } +} + +// monitorNetwork monitors network events +func monitorNetwork(ctx context.Context) { + log.Info("Starting network monitoring") + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + log.Info("Network monitoring stopped") + return + case <-ticker.C: + // Simple network monitoring - check interface status + interfaces, err := net.Interfaces() + if err != nil { + log.Warnf("Failed to get network interfaces: %v", err) + continue + } + + for _, iface := range interfaces { + if iface.Flags&net.FlagUp != 0 && iface.Flags&net.FlagLoopback == 0 { + select { + case networkEvents <- NetworkEvent{ + Type: "interface_up", + Details: 
fmt.Sprintf("Interface %s is up", iface.Name), + }: + default: + // Channel full, skip + } + } + } + } + } +} + +// handleNetworkEvents handles network events +func handleNetworkEvents(ctx context.Context) { + log.Info("Starting network event handler") + + for { + select { + case <-ctx.Done(): + log.Info("Network event handler stopped") + return + case event := <-networkEvents: + log.Debugf("Network event: %s - %s", event.Type, event.Details) + } + } +} + +// updateSystemMetrics updates system metrics +func updateSystemMetrics(ctx context.Context) { + ticker := time.NewTicker(15 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + // Update memory metrics + if memStats, err := mem.VirtualMemory(); err == nil { + memoryUsage.Set(memStats.UsedPercent) + } + + // Update CPU metrics + if cpuPercents, err := cpu.Percent(time.Second, false); err == nil && len(cpuPercents) > 0 { + cpuUsage.Set(cpuPercents[0]) + } + + // Update goroutine count + goroutines.Set(float64(runtime.NumGoroutine())) + } + } +} + +// setupRouter sets up HTTP routes +func setupRouter() *http.ServeMux { + mux := http.NewServeMux() + + mux.HandleFunc("/upload", handleUpload) + mux.HandleFunc("/download/", handleDownload) + mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("OK")) + }) + + if conf.Server.MetricsEnabled { + mux.Handle("/metrics", promhttp.Handler()) + } + + // Catch-all handler for all upload protocols (v, v2, token, v3) + // This must be added last as it matches all paths + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + // Handle PUT requests for all upload protocols + if r.Method == http.MethodPut { + query := r.URL.Query() + + // Check if this is a v3 request (mod_http_upload_external) + if query.Get("v3") != "" && query.Get("expires") != "" { + handleV3Upload(w, r) + return + } + + // Check if this is a legacy protocol request (v, v2, 
token) + if query.Get("v") != "" || query.Get("v2") != "" || query.Get("token") != "" { + handleLegacyUpload(w, r) + return + } + } + + // Handle GET/HEAD requests for downloads + if r.Method == http.MethodGet || r.Method == http.MethodHead { + // Only handle download requests if the path looks like a file + path := strings.TrimPrefix(r.URL.Path, "/") + if path != "" && !strings.HasSuffix(path, "/") { + handleLegacyDownload(w, r) + return + } + } + + // For all other requests, return 404 + http.NotFound(w, r) + }) + + log.Info("HTTP router configured successfully with full protocol support (v, v2, token, v3)") + return mux +} + +// setupGracefulShutdown sets up graceful shutdown +func setupGracefulShutdown(server *http.Server, cancel context.CancelFunc) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigChan + log.Info("Received shutdown signal, initiating graceful shutdown...") + + // Cancel context + cancel() + + // Shutdown server with timeout + ctx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer shutdownCancel() + + if err := server.Shutdown(ctx); err != nil { + log.Errorf("Server shutdown error: %v", err) + } else { + log.Info("Server shutdown completed") + } + + // Clean up PID file + if conf.Server.CleanUponExit { + removePIDFile(conf.Server.PIDFilePath) + } + + // Stop worker pool if it exists + if workerPool != nil { + workerPool.Stop() + log.Info("Worker pool stopped") + } + + os.Exit(0) + }() +} + +// ProgressWriter wraps an io.Writer to provide upload progress reporting +type ProgressWriter struct { + dst io.Writer + total int64 + written int64 + filename string + onProgress func(written, total int64, filename string) + lastReport time.Time +} + +// NewProgressWriter creates a new ProgressWriter +func NewProgressWriter(dst io.Writer, total int64, filename string) *ProgressWriter { + return &ProgressWriter{ + dst: dst, + total: total, + filename: 
filename, + onProgress: func(written, total int64, filename string) { + if total > 0 { + percentage := float64(written) / float64(total) * 100 + sizeMiB := float64(written) / (1024 * 1024) + totalMiB := float64(total) / (1024 * 1024) + log.Infof("Upload progress for %s: %.1f%% (%.1f/%.1f MiB)", + filepath.Base(filename), percentage, sizeMiB, totalMiB) + } + }, + lastReport: time.Now(), + } +} + +// Write implements io.Writer interface with progress reporting +func (pw *ProgressWriter) Write(p []byte) (int, error) { + n, err := pw.dst.Write(p) + if err != nil { + return n, err + } + + pw.written += int64(n) + + // Report progress every 30 seconds or every 50MB for large files + now := time.Now() + shouldReport := false + + if pw.total > 100*1024*1024 { // Files larger than 100MB + shouldReport = now.Sub(pw.lastReport) > 30*time.Second || + (pw.written%(50*1024*1024) == 0 && pw.written > 0) + } else if pw.total > 10*1024*1024 { // Files larger than 10MB + shouldReport = now.Sub(pw.lastReport) > 10*time.Second || + (pw.written%(10*1024*1024) == 0 && pw.written > 0) + } + + if shouldReport && pw.onProgress != nil { + pw.onProgress(pw.written, pw.total, pw.filename) + pw.lastReport = now + } + + return n, err +} + +// copyWithProgress copies data from src to dst with progress reporting +func copyWithProgress(dst io.Writer, src io.Reader, total int64, filename string) (int64, error) { + progressWriter := NewProgressWriter(dst, total, filename) + + // Use a pooled buffer for efficient copying + bufPtr := bufferPool.Get().(*[]byte) + defer bufferPool.Put(bufPtr) + buf := *bufPtr + + return io.CopyBuffer(progressWriter, src, buf) +} diff --git a/cmd/server/main.go b/cmd/server/main.go index 65039c2..ac4af87 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -8,140 +8,285 @@ import ( "crypto/hmac" "crypto/sha256" "encoding/hex" + "encoding/json" + "errors" "flag" "fmt" "io" "mime" "net" "net/http" - "net/url" "os" - "os/signal" + "os/exec" "path/filepath" - 
"runtime" "strconv" "strings" + "sync" "syscall" "time" - "sync" - "github.com/dutchcoders/go-clamd" // ClamAV integration "github.com/go-redis/redis/v8" // Redis integration + jwt "github.com/golang-jwt/jwt/v5" "github.com/patrickmn/go-cache" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/disk" - "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/mem" "github.com/sirupsen/logrus" "github.com/spf13/viper" ) -// Configuration structure +// parseSize converts a human-readable size string to bytes +func parseSize(sizeStr string) (int64, error) { + sizeStr = strings.TrimSpace(sizeStr) + if len(sizeStr) < 2 { + return 0, fmt.Errorf("invalid size string: %s", sizeStr) + } + + unit := strings.ToUpper(sizeStr[len(sizeStr)-2:]) + valueStr := sizeStr[:len(sizeStr)-2] + value, err := strconv.Atoi(valueStr) + if err != nil { + return 0, fmt.Errorf("invalid size value: %v", err) + } + + switch unit { + case "KB": + return int64(value) * 1024, nil + case "MB": + return int64(value) * 1024 * 1024, nil + case "GB": + return int64(value) * 1024 * 1024 * 1024, nil + default: + return 0, fmt.Errorf("unknown size unit: %s", unit) + } +} + +// parseTTL converts a human-readable TTL string to a time.Duration +func parseTTL(ttlStr string) (time.Duration, error) { + ttlStr = strings.ToLower(strings.TrimSpace(ttlStr)) + if ttlStr == "" { + return 0, fmt.Errorf("TTL string cannot be empty") + } + var valueStr string + var unit rune + for _, r := range ttlStr { + if r >= '0' && r <= '9' { + valueStr += string(r) + } else { + unit = r + break + } + } + val, err := strconv.Atoi(valueStr) + if err != nil { + return 0, fmt.Errorf("invalid TTL value: %v", err) + } + switch unit { + case 's': + return time.Duration(val) * time.Second, nil + case 'm': + return time.Duration(val) * time.Minute, nil + case 'h': + return time.Duration(val) * time.Hour, nil + case 'd': + 
return time.Duration(val) * 24 * time.Hour, nil + case 'w': + return time.Duration(val) * 7 * 24 * time.Hour, nil + case 'y': + return time.Duration(val) * 365 * 24 * time.Hour, nil + default: + return 0, fmt.Errorf("unknown TTL unit: %c", unit) + } +} + +// Configuration structures type ServerConfig struct { - ListenPort string `mapstructure:"ListenPort"` - UnixSocket bool `mapstructure:"UnixSocket"` - StoragePath string `mapstructure:"StoragePath"` - LogLevel string `mapstructure:"LogLevel"` - LogFile string `mapstructure:"LogFile"` - MetricsEnabled bool `mapstructure:"MetricsEnabled"` - MetricsPort string `mapstructure:"MetricsPort"` - FileTTL string `mapstructure:"FileTTL"` - MinFreeBytes int64 `mapstructure:"MinFreeBytes"` // Minimum free bytes required - DeduplicationEnabled bool `mapstructure:"DeduplicationEnabled"` -} - -type TimeoutConfig struct { - ReadTimeout string `mapstructure:"ReadTimeout"` - WriteTimeout string `mapstructure:"WriteTimeout"` - IdleTimeout string `mapstructure:"IdleTimeout"` -} - -type SecurityConfig struct { - Secret string `mapstructure:"Secret"` -} - -type VersioningConfig struct { - EnableVersioning bool `mapstructure:"EnableVersioning"` - MaxVersions int `mapstructure:"MaxVersions"` + ListenAddress string `toml:"listenport" mapstructure:"listenport"` // Fixed to match config file field + StoragePath string `toml:"storagepath" mapstructure:"storagepath"` // Fixed to match config + MetricsEnabled bool `toml:"metricsenabled" mapstructure:"metricsenabled"` // Fixed to match config + MetricsPath string `toml:"metrics_path" mapstructure:"metrics_path"` + PidFile string `toml:"pid_file" mapstructure:"pid_file"` + MaxUploadSize string `toml:"max_upload_size" mapstructure:"max_upload_size"` + MaxHeaderBytes int `toml:"max_header_bytes" mapstructure:"max_header_bytes"` + CleanupInterval string `toml:"cleanup_interval" mapstructure:"cleanup_interval"` + MaxFileAge string `toml:"max_file_age" mapstructure:"max_file_age"` + PreCache bool 
`toml:"pre_cache" mapstructure:"pre_cache"` + PreCacheWorkers int `toml:"pre_cache_workers" mapstructure:"pre_cache_workers"` + PreCacheInterval string `toml:"pre_cache_interval" mapstructure:"pre_cache_interval"` + GlobalExtensions []string `toml:"global_extensions" mapstructure:"global_extensions"` + DeduplicationEnabled bool `toml:"deduplication_enabled" mapstructure:"deduplication_enabled"` + MinFreeBytes string `toml:"min_free_bytes" mapstructure:"min_free_bytes"` + FileNaming string `toml:"file_naming" mapstructure:"file_naming"` + ForceProtocol string `toml:"force_protocol" mapstructure:"force_protocol"` + EnableDynamicWorkers bool `toml:"enable_dynamic_workers" mapstructure:"enable_dynamic_workers"` + WorkerScaleUpThresh int `toml:"worker_scale_up_thresh" mapstructure:"worker_scale_up_thresh"` + WorkerScaleDownThresh int `toml:"worker_scale_down_thresh" mapstructure:"worker_scale_down_thresh"` + UnixSocket bool `toml:"unixsocket" mapstructure:"unixsocket"` // Added missing field from example/logs + MetricsPort string `toml:"metricsport" mapstructure:"metricsport"` // Fixed to match config + FileTTL string `toml:"filettl" mapstructure:"filettl"` // Fixed to match config + FileTTLEnabled bool `toml:"filettlenabled" mapstructure:"filettlenabled"` // Fixed to match config + AutoAdjustWorkers bool `toml:"autoadjustworkers" mapstructure:"autoadjustworkers"` // Fixed to match config + NetworkEvents bool `toml:"networkevents" mapstructure:"networkevents"` // Fixed to match config + PIDFilePath string `toml:"pidfilepath" mapstructure:"pidfilepath"` // Fixed to match config + CleanUponExit bool `toml:"clean_upon_exit" mapstructure:"clean_upon_exit"` // Added missing field + PreCaching bool `toml:"precaching" mapstructure:"precaching"` // Fixed to match config + BindIP string `toml:"bind_ip" mapstructure:"bind_ip"` // Added missing field } type UploadsConfig struct { - ResumableUploadsEnabled bool `mapstructure:"ResumableUploadsEnabled"` - ChunkedUploadsEnabled bool 
`mapstructure:"ChunkedUploadsEnabled"` - ChunkSize int64 `mapstructure:"ChunkSize"` - AllowedExtensions []string `mapstructure:"AllowedExtensions"` + AllowedExtensions []string `toml:"allowedextensions" mapstructure:"allowedextensions"` + ChunkedUploadsEnabled bool `toml:"chunkeduploadsenabled" mapstructure:"chunkeduploadsenabled"` + ChunkSize string `toml:"chunksize" mapstructure:"chunksize"` + ResumableUploadsEnabled bool `toml:"resumableuploadsenabled" mapstructure:"resumableuploadsenabled"` + MaxResumableAge string `toml:"max_resumable_age" mapstructure:"max_resumable_age"` +} + +type DownloadsConfig struct { + AllowedExtensions []string `toml:"allowedextensions" mapstructure:"allowedextensions"` + ChunkedDownloadsEnabled bool `toml:"chunkeddownloadsenabled" mapstructure:"chunkeddownloadsenabled"` + ChunkSize string `toml:"chunksize" mapstructure:"chunksize"` + ResumableDownloadsEnabled bool `toml:"resumable_downloads_enabled" mapstructure:"resumable_downloads_enabled"` +} + +type SecurityConfig struct { + Secret string `toml:"secret" mapstructure:"secret"` + EnableJWT bool `toml:"enablejwt" mapstructure:"enablejwt"` // Added EnableJWT field + JWTSecret string `toml:"jwtsecret" mapstructure:"jwtsecret"` + JWTAlgorithm string `toml:"jwtalgorithm" mapstructure:"jwtalgorithm"` + JWTExpiration string `toml:"jwtexpiration" mapstructure:"jwtexpiration"` +} + +type LoggingConfig struct { + Level string `mapstructure:"level"` + File string `mapstructure:"file"` + MaxSize int `mapstructure:"max_size"` + MaxBackups int `mapstructure:"max_backups"` + MaxAge int `mapstructure:"max_age"` + Compress bool `mapstructure:"compress"` +} + +type DeduplicationConfig struct { + Enabled bool `mapstructure:"enabled"` + Directory string `mapstructure:"directory"` +} + +type ISOConfig struct { + Enabled bool `mapstructure:"enabled"` + MountPoint string `mapstructure:"mountpoint"` + Size string `mapstructure:"size"` + Charset string `mapstructure:"charset"` + ContainerFile string 
`mapstructure:"containerfile"` // Added missing field +} + +type TimeoutConfig struct { + Read string `mapstructure:"readtimeout" toml:"readtimeout"` + Write string `mapstructure:"writetimeout" toml:"writetimeout"` + Idle string `mapstructure:"idletimeout" toml:"idletimeout"` + Shutdown string `mapstructure:"shutdown" toml:"shutdown"` +} + +type VersioningConfig struct { + Enabled bool `mapstructure:"enableversioning" toml:"enableversioning"` // Corrected to match example config + Backend string `mapstructure:"backend" toml:"backend"` + MaxRevs int `mapstructure:"maxversions" toml:"maxversions"` // Corrected to match example config } type ClamAVConfig struct { - ClamAVEnabled bool `mapstructure:"ClamAVEnabled"` - ClamAVSocket string `mapstructure:"ClamAVSocket"` - NumScanWorkers int `mapstructure:"NumScanWorkers"` + ClamAVEnabled bool `mapstructure:"clamavenabled"` + ClamAVSocket string `mapstructure:"clamavsocket"` + NumScanWorkers int `mapstructure:"numscanworkers"` + ScanFileExtensions []string `mapstructure:"scanfileextensions"` } type RedisConfig struct { - RedisEnabled bool `mapstructure:"RedisEnabled"` - RedisDBIndex int `mapstructure:"RedisDBIndex"` - RedisAddr string `mapstructure:"RedisAddr"` - RedisPassword string `mapstructure:"RedisPassword"` - RedisHealthCheckInterval string `mapstructure:"RedisHealthCheckInterval"` + RedisEnabled bool `mapstructure:"redisenabled"` + RedisDBIndex int `mapstructure:"redisdbindex"` + RedisAddr string `mapstructure:"redisaddr"` + RedisPassword string `mapstructure:"redispassword"` + RedisHealthCheckInterval string `mapstructure:"redishealthcheckinterval"` } type WorkersConfig struct { - NumWorkers int `mapstructure:"NumWorkers"` - UploadQueueSize int `mapstructure:"UploadQueueSize"` + NumWorkers int `mapstructure:"numworkers"` + UploadQueueSize int `mapstructure:"uploadqueuesize"` } type FileConfig struct { - FileRevision int `mapstructure:"FileRevision"` } +type BuildConfig struct { + Version string 
`mapstructure:"version"` // Updated version +} + +// This is the main Config struct to be used type Config struct { - Server ServerConfig `mapstructure:"server"` - Timeouts TimeoutConfig `mapstructure:"timeouts"` - Security SecurityConfig `mapstructure:"security"` - Versioning VersioningConfig `mapstructure:"versioning"` - Uploads UploadsConfig `mapstructure:"uploads"` - ClamAV ClamAVConfig `mapstructure:"clamav"` - Redis RedisConfig `mapstructure:"redis"` - Workers WorkersConfig `mapstructure:"workers"` - File FileConfig `mapstructure:"file"` + Server ServerConfig `mapstructure:"server"` + Logging LoggingConfig `mapstructure:"logging"` + Deduplication DeduplicationConfig `mapstructure:"deduplication"` // Added + ISO ISOConfig `mapstructure:"iso"` // Added + Timeouts TimeoutConfig `mapstructure:"timeouts"` // Added + Security SecurityConfig `mapstructure:"security"` + Versioning VersioningConfig `mapstructure:"versioning"` // Added + Uploads UploadsConfig `mapstructure:"uploads"` + Downloads DownloadsConfig `mapstructure:"downloads"` + ClamAV ClamAVConfig `mapstructure:"clamav"` + Redis RedisConfig `mapstructure:"redis"` + Workers WorkersConfig `mapstructure:"workers"` + File FileConfig `mapstructure:"file"` + Build BuildConfig `mapstructure:"build"` } -// UploadTask represents a file upload task type UploadTask struct { AbsFilename string Request *http.Request Result chan error } -// ScanTask represents a file scan task type ScanTask struct { AbsFilename string Result chan error } -// NetworkEvent represents a network-related event type NetworkEvent struct { Type string Details string } -var ( - conf Config - versionString string = "v2.0-dev" - log = logrus.New() - uploadQueue chan UploadTask - networkEvents chan NetworkEvent - fileInfoCache *cache.Cache - clamClient *clamd.Clamd // Added for ClamAV integration - redisClient *redis.Client // Redis client - redisConnected bool // Redis connection status - mu sync.RWMutex +// Add a new field to store the creation 
date of files +type FileMetadata struct { + CreationDate time.Time +} + +// processScan processes a scan task +func processScan(task ScanTask) error { + log.Infof("Started processing scan for file: %s", task.AbsFilename) + semaphore <- struct{}{} + defer func() { <-semaphore }() + + err := scanFileWithClamAV(task.AbsFilename) + if err != nil { + log.WithFields(logrus.Fields{"file": task.AbsFilename, "error": err}).Error("Failed to scan file") + return err + } + + log.Infof("Finished processing scan for file: %s", task.AbsFilename) + return nil +} + +var ( + conf Config + versionString string + log = logrus.New() + fileInfoCache *cache.Cache + fileMetadataCache *cache.Cache + clamClient *clamd.Clamd + redisClient *redis.Client + redisConnected bool + confMutex sync.RWMutex // Protects the global 'conf' variable and related critical sections. + // Use RLock() for reading, Lock() for writing. - // Prometheus metrics uploadDuration prometheus.Histogram uploadErrorsTotal prometheus.Counter uploadsTotal prometheus.Counter @@ -156,1768 +301,1692 @@ var ( uploadSizeBytes prometheus.Histogram downloadSizeBytes prometheus.Histogram - // Constants for worker pool - MinWorkers = 5 // Increased from 10 to 20 for better concurrency - UploadQueueSize = 10000 // Increased from 5000 to 10000 + filesDeduplicatedTotal prometheus.Counter + deduplicationErrorsTotal prometheus.Counter + isoContainersCreatedTotal prometheus.Counter + isoCreationErrorsTotal prometheus.Counter + isoContainersMountedTotal prometheus.Counter + isoMountErrorsTotal prometheus.Counter - // Channels - scanQueue chan ScanTask - ScanWorkers = 5 // Number of ClamAV scan workers + workerPool *WorkerPool + networkEvents chan NetworkEvent + + workerAdjustmentsTotal prometheus.Counter + workerReAdjustmentsTotal prometheus.Counter ) -func main() { - // Set default configuration values - setDefaults() +var bufferPool = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 32*1024) + return &buf + }, +} + +const 
maxConcurrentOperations = 10 + +var semaphore = make(chan struct{}, maxConcurrentOperations) + +var logMessages []string +var logMu sync.Mutex + +func flushLogMessages() { + logMu.Lock() + defer logMu.Unlock() + for _, msg := range logMessages { + log.Info(msg) + } + logMessages = []string{} +} + +// writePIDFile writes the current process ID to the specified pid file +func writePIDFile(pidPath string) error { + pid := os.Getpid() + pidStr := strconv.Itoa(pid) + err := os.WriteFile(pidPath, []byte(pidStr), 0644) + if err != nil { + log.Errorf("Failed to write PID file: %v", err) // Improved error logging + return err + } + log.Infof("PID %d written to %s", pid, pidPath) + return nil +} + +// removePIDFile removes the PID file +func removePIDFile(pidPath string) { + err := os.Remove(pidPath) + if err != nil { + log.Errorf("Failed to remove PID file: %v", err) // Improved error logging + } else { + log.Infof("PID file %s removed successfully", pidPath) + } +} + +// createAndMountISO creates an ISO container and mounts it to the specified mount point +func createAndMountISO(size, mountpoint, charset string) error { + isoPath := conf.ISO.ContainerFile + + // Create an empty ISO file + cmd := exec.Command("dd", "if=/dev/zero", fmt.Sprintf("of=%s", isoPath), fmt.Sprintf("bs=%s", size), "count=1") + if err := cmd.Run(); err != nil { + isoCreationErrorsTotal.Inc() + return fmt.Errorf("failed to create ISO file: %w", err) + } + + // Format the ISO file with a filesystem + cmd = exec.Command("mkfs", "-t", "iso9660", "-input-charset", charset, isoPath) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to format ISO file: %w", err) + } + + // Create the mount point directory if it doesn't exist + if err := os.MkdirAll(mountpoint, os.ModePerm); err != nil { + return fmt.Errorf("failed to create mount point: %w", err) + } + + // Mount the ISO file + cmd = exec.Command("mount", "-o", "loop", isoPath, mountpoint) + if err := cmd.Run(); err != nil { + 
isoMountErrorsTotal.Inc() + return fmt.Errorf("failed to mount ISO file: %w", err) + } + + isoContainersCreatedTotal.Inc() + isoContainersMountedTotal.Inc() + return nil +} + +func initializeNetworkProtocol(forceProtocol string) (*net.Dialer, error) { + switch forceProtocol { + case "ipv4": + return &net.Dialer{ + Timeout: 5 * time.Second, + DualStack: false, + Control: func(network, address string, c syscall.RawConn) error { + if network == "tcp6" { + return fmt.Errorf("IPv6 is disabled by forceprotocol setting") + } + return nil + }, + }, nil + case "ipv6": + return &net.Dialer{ + Timeout: 5 * time.Second, + DualStack: false, + Control: func(network, address string, c syscall.RawConn) error { + if network == "tcp4" { + return fmt.Errorf("IPv4 is disabled by forceprotocol setting") + } + return nil + }, + }, nil + case "auto": + return &net.Dialer{ + Timeout: 5 * time.Second, + DualStack: true, + }, nil + default: + return nil, fmt.Errorf("invalid forceprotocol value: %s", forceProtocol) + } +} + +var dualStackClient *http.Client + +func main() { + setDefaults() // Call setDefaults before parsing flags or reading config - // Flags for configuration file var configFile string flag.StringVar(&configFile, "config", "./config.toml", "Path to configuration file \"config.toml\".") + var genConfig bool + var genConfigPath string + var validateOnly bool + var runConfigTests bool + var validateQuiet bool + var validateVerbose bool + var validateFixable bool + var validateSecurity bool + var validatePerformance bool + var validateConnectivity bool + var listValidationChecks bool + var showVersion bool + + flag.BoolVar(&genConfig, "genconfig", false, "Print example configuration and exit.") + flag.StringVar(&genConfigPath, "genconfig-path", "", "Write example configuration to the given file and exit.") + flag.BoolVar(&validateOnly, "validate-config", false, "Validate configuration and exit without starting server.") + flag.BoolVar(&runConfigTests, "test-config", false, "Run 
configuration validation test scenarios and exit.") + flag.BoolVar(&validateQuiet, "validate-quiet", false, "Only show errors during validation (suppress warnings and info).") + flag.BoolVar(&validateVerbose, "validate-verbose", false, "Show detailed validation information including system checks.") + flag.BoolVar(&validateFixable, "check-fixable", false, "Only show validation issues that can be automatically fixed.") + flag.BoolVar(&validateSecurity, "check-security", false, "Run only security-related validation checks.") + flag.BoolVar(&validatePerformance, "check-performance", false, "Run only performance-related validation checks.") + flag.BoolVar(&validateConnectivity, "check-connectivity", false, "Run only network connectivity validation checks.") + flag.BoolVar(&listValidationChecks, "list-checks", false, "List all available validation checks and exit.") + flag.BoolVar(&showVersion, "version", false, "Show version information and exit.") flag.Parse() - // Load configuration + if showVersion { + fmt.Printf("HMAC File Server v3.2\n") + os.Exit(0) + } + + if listValidationChecks { + printValidationChecks() + os.Exit(0) + } + + if genConfig { + printExampleConfig() + os.Exit(0) + } + if genConfigPath != "" { + f, err := os.Create(genConfigPath) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create file: %v\n", err) + os.Exit(1) + } + defer f.Close() + w := bufio.NewWriter(f) + fmt.Fprint(w, getExampleConfigString()) + w.Flush() + fmt.Printf("Example config written to %s\n", genConfigPath) + os.Exit(0) + } + if runConfigTests { + RunConfigTests() + os.Exit(0) + } + + // Initialize Viper + viper.SetConfigType("toml") + + // Set default config path + defaultConfigPath := "/etc/hmac-file-server/config.toml" + + // Attempt to load the default config + viper.SetConfigFile(defaultConfigPath) + if err := viper.ReadInConfig(); err != nil { + // If default config not found, fallback to parent directory + parentDirConfig := "../config.toml" + 
viper.SetConfigFile(parentDirConfig) + if err := viper.ReadInConfig(); err != nil { + // If still not found and -config is provided, use it + if configFile != "" { + viper.SetConfigFile(configFile) + if err := viper.ReadInConfig(); err != nil { + fmt.Printf("Error loading config file: %v\n", err) + os.Exit(1) + } + } else { + fmt.Println("No configuration file found. Please create a config file with the following content:") + printExampleConfig() + os.Exit(1) + } + } + } + err := readConfig(configFile, &conf) if err != nil { - log.Fatalf("Error reading config: %v", err) // Fatal: application cannot proceed + log.Fatalf("Failed to load configuration: %v\nPlease ensure your config.toml is present at one of the following paths:\n%v", err, []string{ + "/etc/hmac-file-server/config.toml", + "../config.toml", + "./config.toml", + }) } log.Info("Configuration loaded successfully.") - // Initialize file info cache - fileInfoCache = cache.New(5*time.Minute, 10*time.Minute) - - // Create store directory - err = os.MkdirAll(conf.Server.StoragePath, os.ModePerm) + err = validateConfig(&conf) if err != nil { - log.Fatalf("Error creating store directory: %v", err) + log.Fatalf("Configuration validation failed: %v", err) } - log.WithField("directory", conf.Server.StoragePath).Info("Store directory is ready") + log.Info("Configuration validated successfully.") + + // Perform comprehensive configuration validation + validationResult := ValidateConfigComprehensive(&conf) + PrintValidationResults(validationResult) + + if validationResult.HasErrors() { + log.Fatal("Cannot start server due to configuration errors. 
Please fix the above issues and try again.") + } + + // Handle specialized validation flags + if validateSecurity || validatePerformance || validateConnectivity || validateQuiet || validateVerbose || validateFixable { + runSpecializedValidation(&conf, validateSecurity, validatePerformance, validateConnectivity, validateQuiet, validateVerbose, validateFixable) + os.Exit(0) + } + + // If only validation was requested, exit now + if validateOnly { + if validationResult.HasErrors() { + log.Error("Configuration validation failed with errors. Review the errors above.") + os.Exit(1) + } else if validationResult.HasWarnings() { + log.Info("Configuration is valid but has warnings. Review the warnings above.") + os.Exit(0) + } else { + log.Info("Configuration validation completed successfully!") + os.Exit(0) + } + } + + // Set log level based on configuration + level, err := logrus.ParseLevel(conf.Logging.Level) + if err != nil { + log.Warnf("Invalid log level '%s', defaulting to 'info'", conf.Logging.Level) + level = logrus.InfoLevel + } + log.SetLevel(level) + log.Infof("Log level set to: %s", level.String()) + + // Log configuration settings using [logging] section + log.Infof("Server ListenAddress: %s", conf.Server.ListenAddress) // Corrected field name + log.Infof("Server UnixSocket: %v", conf.Server.UnixSocket) + log.Infof("Server StoragePath: %s", conf.Server.StoragePath) + log.Infof("Logging Level: %s", conf.Logging.Level) + log.Infof("Logging File: %s", conf.Logging.File) + log.Infof("Server MetricsEnabled: %v", conf.Server.MetricsEnabled) + log.Infof("Server MetricsPort: %s", conf.Server.MetricsPort) // Corrected field name + log.Infof("Server FileTTL: %s", conf.Server.FileTTL) // Corrected field name + log.Infof("Server MinFreeBytes: %s", conf.Server.MinFreeBytes) + log.Infof("Server AutoAdjustWorkers: %v", conf.Server.AutoAdjustWorkers) // Corrected field name + log.Infof("Server NetworkEvents: %v", conf.Server.NetworkEvents) // Corrected field name + 
log.Infof("Server PIDFilePath: %s", conf.Server.PIDFilePath) // Corrected field name + log.Infof("Server CleanUponExit: %v", conf.Server.CleanUponExit) // Corrected field name + log.Infof("Server PreCaching: %v", conf.Server.PreCaching) // Corrected field name + log.Infof("Server FileTTLEnabled: %v", conf.Server.FileTTLEnabled) // Corrected field name + log.Infof("Server DeduplicationEnabled: %v", conf.Server.DeduplicationEnabled) + log.Infof("Server BindIP: %s", conf.Server.BindIP) // Corrected field name + log.Infof("Server FileNaming: %s", conf.Server.FileNaming) + log.Infof("Server ForceProtocol: %s", conf.Server.ForceProtocol) + + err = writePIDFile(conf.Server.PIDFilePath) // Corrected field name + if err != nil { + log.Fatalf("Error writing PID file: %v", err) + } + log.Debug("DEBUG: PID file written successfully") + + log.Debugf("DEBUG: Config logging file: %s", conf.Logging.File) - // Setup logging setupLogging() + log.Debug("DEBUG: Logging setup completed") - // Log system information logSystemInfo() + log.Debug("DEBUG: System info logged") - // Initialize Prometheus metrics + // Initialize metrics before using any Prometheus counters initMetrics() - log.Info("Prometheus metrics initialized.") + log.Debug("DEBUG: Metrics initialized") - // Initialize upload and scan queues - uploadQueue = make(chan UploadTask, conf.Workers.UploadQueueSize) - scanQueue = make(chan ScanTask, conf.Workers.UploadQueueSize) - networkEvents = make(chan NetworkEvent, 100) - log.Info("Upload, scan, and network event channels initialized.") + initializeWorkerSettings(&conf.Server, &conf.Workers, &conf.ClamAV) + log.Debug("DEBUG: Worker settings initialized") - // Context for goroutines - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Start network monitoring - go monitorNetwork(ctx) - go handleNetworkEvents(ctx) - - // Update system metrics - go updateSystemMetrics(ctx) - - // Initialize ClamAV client if enabled - if conf.ClamAV.ClamAVEnabled { - 
clamClient, err = initClamAV(conf.ClamAV.ClamAVSocket) + if conf.ISO.Enabled { + err := createAndMountISO(conf.ISO.Size, conf.ISO.MountPoint, conf.ISO.Charset) if err != nil { - log.WithFields(logrus.Fields{ - "error": err.Error(), - }).Warn("ClamAV client initialization failed. Continuing without ClamAV.") - } else { - log.Info("ClamAV client initialized successfully.") + log.Fatalf("Failed to create and mount ISO container: %v", err) } + log.Infof("ISO container mounted at %s", conf.ISO.MountPoint) } - // Initialize Redis client if enabled - if conf.Redis.RedisEnabled { - initRedis() + // Set storage path to ISO mount point if ISO is enabled + storagePath := conf.Server.StoragePath + if conf.ISO.Enabled { + storagePath = conf.ISO.MountPoint } - // Redis Initialization - initRedis() - log.Info("Redis client initialized and connected successfully.") + fileInfoCache = cache.New(5*time.Minute, 10*time.Minute) + fileMetadataCache = cache.New(5*time.Minute, 10*time.Minute) - // ClamAV Initialization - if conf.ClamAV.ClamAVEnabled { - clamClient, err = initClamAV(conf.ClamAV.ClamAVSocket) - if err != nil { - log.WithFields(logrus.Fields{ - "error": err.Error(), - }).Warn("ClamAV client initialization failed. 
Continuing without ClamAV.") - } else { - log.Info("ClamAV client initialized successfully.") - } - } - - // Initialize worker pools - initializeUploadWorkerPool(ctx) - if conf.ClamAV.ClamAVEnabled && clamClient != nil { - initializeScanWorkerPool(ctx) - } - - // Start Redis health monitor if Redis is enabled - if conf.Redis.RedisEnabled && redisClient != nil { - go MonitorRedisHealth(ctx, redisClient, parseDuration(conf.Redis.RedisHealthCheckInterval)) - } - - // Setup router - router := setupRouter() - - // Start file cleaner - fileTTL, err := time.ParseDuration(conf.Server.FileTTL) - if err != nil { - log.Fatalf("Invalid FileTTL: %v", err) - } - go runFileCleaner(ctx, conf.Server.StoragePath, fileTTL) - - // Parse timeout durations - readTimeout, err := time.ParseDuration(conf.Timeouts.ReadTimeout) - if err != nil { - log.Fatalf("Invalid ReadTimeout: %v", err) - } - - writeTimeout, err := time.ParseDuration(conf.Timeouts.WriteTimeout) - if err != nil { - log.Fatalf("Invalid WriteTimeout: %v", err) - } - - idleTimeout, err := time.ParseDuration(conf.Timeouts.IdleTimeout) - if err != nil { - log.Fatalf("Invalid IdleTimeout: %v", err) - } - - // Configure HTTP server - server := &http.Server{ - Addr: ":" + conf.Server.ListenPort, // Prepend colon to ListenPort - Handler: router, - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - IdleTimeout: idleTimeout, - } - - // Start metrics server if enabled - if conf.Server.MetricsEnabled { + if conf.Server.PreCaching { // Corrected field name go func() { - http.Handle("/metrics", promhttp.Handler()) - log.Infof("Metrics server started on port %s", conf.Server.MetricsPort) - if err := http.ListenAndServe(":"+conf.Server.MetricsPort, nil); err != nil { - log.Fatalf("Metrics server failed: %v", err) + log.Info("Starting pre-caching of storage path...") + // Use helper function + err := precacheStoragePath(storagePath) + if err != nil { + log.Warnf("Pre-caching storage path failed: %v", err) + } else { + 
log.Info("Pre-cached all files in the storage path.") + log.Info("Pre-caching status: complete.") } }() } - // Setup graceful shutdown - setupGracefulShutdown(server, cancel) + err = os.MkdirAll(storagePath, os.ModePerm) + if err != nil { + log.Fatalf("Error creating store directory: %v", err) + } + log.WithField("directory", storagePath).Info("Store directory is ready") + + // Use helper function + err = checkFreeSpaceWithRetry(storagePath, 3, 5*time.Second) + if err != nil { + log.Fatalf("Insufficient free space: %v", err) + } + + initializeWorkerSettings(&conf.Server, &conf.Workers, &conf.ClamAV) + log.Info("Prometheus metrics initialized.") + + networkEvents = make(chan NetworkEvent, 100) + log.Info("Network event channel initialized.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if conf.Server.NetworkEvents { // Corrected field name + go monitorNetwork(ctx) // Assuming monitorNetwork is defined in helpers.go or elsewhere + go handleNetworkEvents(ctx) // Assuming handleNetworkEvents is defined in helpers.go or elsewhere + } + go updateSystemMetrics(ctx) + + if conf.ClamAV.ClamAVEnabled { + var clamErr error + clamClient, clamErr = initClamAV(conf.ClamAV.ClamAVSocket) // Assuming initClamAV is defined in helpers.go or elsewhere + if clamErr != nil { + log.WithError(clamErr).Warn("ClamAV client initialization failed. 
Continuing without ClamAV.") + } else { + log.Info("ClamAV client initialized successfully.") + } + } + + if conf.Redis.RedisEnabled { + initRedis() // Assuming initRedis is defined in helpers.go or elsewhere + } + + router := setupRouter() // Assuming setupRouter is defined (likely in this file or router.go + + go handleFileCleanup(&conf) // Directly call handleFileCleanup + + readTimeout, err := time.ParseDuration(conf.Timeouts.Read) // Corrected field name + if err != nil { + log.Fatalf("Invalid ReadTimeout: %v", err) + } + + writeTimeout, err := time.ParseDuration(conf.Timeouts.Write) // Corrected field name + if err != nil { + log.Fatalf("Invalid WriteTimeout: %v", err) + } + + idleTimeout, err := time.ParseDuration(conf.Timeouts.Idle) // Corrected field name + if err != nil { + log.Fatalf("Invalid IdleTimeout: %v", err) + } + + // Initialize network protocol based on forceprotocol setting + dialer, err := initializeNetworkProtocol(conf.Server.ForceProtocol) + if err != nil { + log.Fatalf("Failed to initialize network protocol: %v", err) + } + // Enhanced dual-stack HTTP client for robust IPv4/IPv6 and resource management + // See: https://pkg.go.dev/net/http#Transport for details on these settings + dualStackClient = &http.Client{ + Transport: &http.Transport{ + DialContext: dialer.DialContext, + IdleConnTimeout: 90 * time.Second, // Close idle connections after 90s + MaxIdleConns: 100, // Max idle connections across all hosts + MaxIdleConnsPerHost: 10, // Max idle connections per host + TLSHandshakeTimeout: 10 * time.Second, // Timeout for TLS handshake + ResponseHeaderTimeout: 15 * time.Second, // Timeout for reading response headers + }, + } + + server := &http.Server{ + Addr: conf.Server.BindIP + ":" + conf.Server.ListenAddress, // Use BindIP + ListenAddress (port) + Handler: router, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + IdleTimeout: idleTimeout, + MaxHeaderBytes: 1 << 20, // 1 MB + } + + if conf.Server.MetricsEnabled { + var wg 
sync.WaitGroup + go func() { + http.Handle("/metrics", promhttp.Handler()) + log.Infof("Metrics server started on port %s", conf.Server.MetricsPort) // Corrected field name + if err := http.ListenAndServe(":"+conf.Server.MetricsPort, nil); err != nil { // Corrected field name + log.Fatalf("Metrics server failed: %v", err) + } + wg.Wait() + }() + } + + setupGracefulShutdown(server, cancel) // Assuming setupGracefulShutdown is defined + + if conf.Server.AutoAdjustWorkers { // Corrected field name + go monitorWorkerPerformance(ctx, &conf.Server, &conf.Workers, &conf.ClamAV) + } + + versionString = "3.2" // Set a default version for now + if conf.Build.Version != "" { + versionString = conf.Build.Version + } + log.Infof("Running version: %s", versionString) - // Start server log.Infof("Starting HMAC file server %s...", versionString) if conf.Server.UnixSocket { - // Listen on Unix socket - if err := os.RemoveAll(conf.Server.ListenPort); err != nil { + socketPath := "/tmp/hmac-file-server.sock" // Use a default socket path since ListenAddress is now a port + if err := os.RemoveAll(socketPath); err != nil { log.Fatalf("Failed to remove existing Unix socket: %v", err) } - listener, err := net.Listen("unix", conf.Server.ListenPort) + listener, err := net.Listen("unix", socketPath) if err != nil { - log.Fatalf("Failed to listen on Unix socket %s: %v", conf.Server.ListenPort, err) + log.Fatalf("Failed to listen on Unix socket %s: %v", socketPath, err) } defer listener.Close() + log.Infof("Server listening on Unix socket: %s", socketPath) if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { log.Fatalf("Server failed: %v", err) } } else { - // Listen on TCP port + if conf.Server.BindIP == "0.0.0.0" { + log.Info("Binding to 0.0.0.0. 
Any net/http logs you see are normal for this universal address.") + } + log.Infof("Server listening on %s", server.Addr) if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Fatalf("Server failed: %v", err) } } + + // Start file cleanup in a separate goroutine + // Use helper function + go handleFileCleanup(&conf) } -// Function to load configuration using Viper -func readConfig(configFilename string, conf *Config) error { - viper.SetConfigFile(configFilename) - viper.SetConfigType("toml") +func printExampleConfig() { + fmt.Print(` +[server] +bind_ip = "0.0.0.0" +listenport = "8080" +unixsocket = false +storagepath = "./uploads" +logfile = "/var/log/hmac-file-server.log" +metricsenabled = true +metricsport = "9090" +minfreebytes = "100MB" +filettl = "8760h" +filettlenabled = true +autoadjustworkers = true +networkevents = true +pidfilepath = "/var/run/hmacfileserver.pid" +cleanuponexit = true +precaching = true +deduplicationenabled = true +globalextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"] +# FileNaming options: "HMAC", "None" +filenaming = "HMAC" +forceprotocol = "auto" - // Read in environment variables that match - viper.AutomaticEnv() - viper.SetEnvPrefix("HMAC") // Prefix for environment variables +[logging] +level = "info" +file = "/var/log/hmac-file-server.log" +max_size = 100 +max_backups = 7 +max_age = 30 +compress = true - // Read the config file - if err := viper.ReadInConfig(); err != nil { - return fmt.Errorf("error reading config file: %w", err) - } +[deduplication] +enabled = true +directory = "./deduplication" - // Unmarshal the config into the Config struct - if err := viper.Unmarshal(conf); err != nil { - return fmt.Errorf("unable to decode into struct: %w", err) - } +[iso] +enabled = true +size = "1GB" +mountpoint = "/mnt/iso" +charset = "utf-8" +containerfile = "/mnt/iso/container.iso" - // Debug log the loaded configuration - log.Debugf("Loaded Configuration: 
%+v", conf.Server) +[timeouts] +readtimeout = "4800s" +writetimeout = "4800s" +idletimeout = "4800s" - // Validate the configuration - if err := validateConfig(conf); err != nil { - return fmt.Errorf("configuration validation failed: %w", err) - } +[security] +secret = "changeme" +enablejwt = false +jwtsecret = "anothersecretkey" +jwtalgorithm = "HS256" +jwtexpiration = "24h" - // Set Deduplication Enabled - conf.Server.DeduplicationEnabled = viper.GetBool("deduplication.Enabled") +[versioning] +enableversioning = false +maxversions = 1 - return nil +[uploads] +resumableuploadsenabled = true +chunkeduploadsenabled = true +chunksize = "8192" +allowedextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"] + +[downloads] +resumabledownloadsenabled = true +chunkeddownloadsenabled = true +chunksize = "8192" +allowedextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"] + +[clamav] +clamavenabled = true +clamavsocket = "/var/run/clamav/clamd.ctl" +numscanworkers = 2 +scanfileextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"] + +[redis] +redisenabled = true +redisdbindex = 0 +redisaddr = "localhost:6379" +redispassword = "" +redishealthcheckinterval = "120s" + +[workers] +numworkers = 4 +uploadqueuesize = 50 + +[file] +# Add file-specific configurations here + +[build] +version = "3.2" +`) } -// Set default configuration values -func setDefaults() { - // Server defaults - viper.SetDefault("server.ListenPort", "8080") - viper.SetDefault("server.UnixSocket", false) - viper.SetDefault("server.StoragePath", "./uploads") - viper.SetDefault("server.LogLevel", "info") - viper.SetDefault("server.LogFile", "") - viper.SetDefault("server.MetricsEnabled", true) - viper.SetDefault("server.MetricsPort", "9090") - viper.SetDefault("server.FileTTL", "8760h") // 365d -> 8760h - viper.SetDefault("server.MinFreeBytes", 100<<20) // 100 MB +func 
getExampleConfigString() string { + return `[server] +listen_address = ":8080" +storage_path = "/srv/hmac-file-server/uploads" +metrics_enabled = true +metrics_path = "/metrics" +pid_file = "/var/run/hmac-file-server.pid" +max_upload_size = "10GB" # Supports B, KB, MB, GB, TB +max_header_bytes = 1048576 # 1MB +cleanup_interval = "24h" +max_file_age = "720h" # 30 days +pre_cache = true +pre_cache_workers = 4 +pre_cache_interval = "1h" +global_extensions = [".txt", ".dat", ".iso"] # If set, overrides upload/download extensions +deduplication_enabled = true +min_free_bytes = "1GB" # Minimum free space required for uploads +file_naming = "original" # Options: "original", "HMAC" +force_protocol = "" # Options: "http", "https" - if set, redirects to this protocol +enable_dynamic_workers = true # Enable dynamic worker scaling +worker_scale_up_thresh = 50 # Queue length to scale up workers +worker_scale_down_thresh = 10 # Queue length to scale down workers - // Timeout defaults - viper.SetDefault("timeouts.ReadTimeout", "4800s") // supports 's' - viper.SetDefault("timeouts.WriteTimeout", "4800s") - viper.SetDefault("timeouts.IdleTimeout", "4800s") +[uploads] +allowed_extensions = [".zip", ".rar", ".7z", ".tar.gz", ".tgz", ".gpg", ".enc", ".pgp"] +chunked_uploads_enabled = true +chunk_size = "10MB" +resumable_uploads_enabled = true +max_resumable_age = "48h" - // Security defaults - viper.SetDefault("security.Secret", "changeme") +[downloads] +allowed_extensions = [".zip", ".rar", ".7z", ".tar.gz", ".tgz", ".gpg", ".enc", ".pgp"] +chunked_downloads_enabled = true +chunk_size = "10MB" +resumable_downloads_enabled = true - // Versioning defaults - viper.SetDefault("versioning.EnableVersioning", false) - viper.SetDefault("versioning.MaxVersions", 1) +[security] +secret = "your-very-secret-hmac-key" +enablejwt = false +jwtsecret = "anothersecretkey" +jwtalgorithm = "HS256" +jwtexpiration = "24h" - // Uploads defaults - viper.SetDefault("uploads.ResumableUploadsEnabled", true) - 
viper.SetDefault("uploads.ChunkedUploadsEnabled", true) - viper.SetDefault("uploads.ChunkSize", 8192) - viper.SetDefault("uploads.AllowedExtensions", []string{ - ".txt", ".pdf", - ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp", - ".wav", ".mp4", ".avi", ".mkv", ".mov", ".wmv", ".flv", ".webm", ".mpeg", ".mpg", ".m4v", ".3gp", ".3g2", - ".mp3", ".ogg", - }) +[logging] +level = "info" +file = "/var/log/hmac-file-server.log" +max_size = 100 +max_backups = 7 +max_age = 30 +compress = true - // ClamAV defaults - viper.SetDefault("clamav.ClamAVEnabled", true) - viper.SetDefault("clamav.ClamAVSocket", "/var/run/clamav/clamd.ctl") - viper.SetDefault("clamav.NumScanWorkers", 2) +[deduplication] +enabled = true +directory = "./deduplication" - // Redis defaults - viper.SetDefault("redis.RedisEnabled", true) - viper.SetDefault("redis.RedisAddr", "localhost:6379") - viper.SetDefault("redis.RedisPassword", "") - viper.SetDefault("redis.RedisDBIndex", 0) - viper.SetDefault("redis.RedisHealthCheckInterval", "120s") +[iso] +enabled = true +size = "1GB" +mountpoint = "/mnt/iso" +charset = "utf-8" +containerfile = "/mnt/iso/container.iso" - // Workers defaults - viper.SetDefault("workers.NumWorkers", 2) - viper.SetDefault("workers.UploadQueueSize", 50) +[timeouts] +readtimeout = "4800s" +writetimeout = "4800s" +idletimeout = "4800s" - // Deduplication defaults - viper.SetDefault("deduplication.Enabled", true) +[security] +secret = "changeme" +enablejwt = false +jwtsecret = "anothersecretkey" +jwtalgorithm = "HS256" +jwtexpiration = "24h" + +[versioning] +enableversioning = false +maxversions = 1 + +[uploads] +resumableuploadsenabled = true +chunkeduploadsenabled = true +chunksize = "8192" +allowedextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"] + +[downloads] +resumabledownloadsenabled = true +chunkeddownloadsenabled = true +chunksize = "8192" +allowedextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", 
".bmp", ".tiff", ".svg", ".webp"] + +[clamav] +clamavenabled = true +clamavsocket = "/var/run/clamav/clamd.ctl" +numscanworkers = 2 +scanfileextensions = [".txt", ".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"] + +[redis] +redisenabled = true +redisdbindex = 0 +redisaddr = "localhost:6379" +redispassword = "" +redishealthcheckinterval = "120s" + +[workers] +numworkers = 4 +uploadqueuesize = 50 + +[file] +# Add file-specific configurations here + +[build] +version = "3.2" +` } -// Validate configuration fields -func validateConfig(conf *Config) error { - if conf.Server.ListenPort == "" { - return fmt.Errorf("ListenPort must be set") +func max(a, b int) int { + if a > b { + return a } - if conf.Security.Secret == "" { - return fmt.Errorf("secret must be set") - } - if conf.Server.StoragePath == "" { - return fmt.Errorf("StoragePath must be set") - } - if conf.Server.FileTTL == "" { - return fmt.Errorf("FileTTL must be set") - } - - // Validate timeouts - if _, err := time.ParseDuration(conf.Timeouts.ReadTimeout); err != nil { - return fmt.Errorf("invalid ReadTimeout: %v", err) - } - if _, err := time.ParseDuration(conf.Timeouts.WriteTimeout); err != nil { - return fmt.Errorf("invalid WriteTimeout: %v", err) - } - if _, err := time.ParseDuration(conf.Timeouts.IdleTimeout); err != nil { - return fmt.Errorf("invalid IdleTimeout: %v", err) - } - - // Validate Redis configuration if enabled - if conf.Redis.RedisEnabled { - if conf.Redis.RedisAddr == "" { - return fmt.Errorf("RedisAddr must be set when Redis is enabled") - } - } - - // Add more validations as needed - - return nil + return b } -// Setup logging -func setupLogging() { - level, err := logrus.ParseLevel(conf.Server.LogLevel) - if err != nil { - log.Fatalf("Invalid log level: %s", conf.Server.LogLevel) - } - log.SetLevel(level) - - if conf.Server.LogFile != "" { - logFile, err := os.OpenFile(conf.Server.LogFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - if err != nil { - 
log.Fatalf("Failed to open log file: %v", err) - } - log.SetOutput(io.MultiWriter(os.Stdout, logFile)) - } else { - log.SetOutput(os.Stdout) - } - - // Use Text formatter for human-readable logs - log.SetFormatter(&logrus.TextFormatter{ - FullTimestamp: true, - // You can customize the format further if needed - }) -} - -// Log system information -func logSystemInfo() { - log.Info("========================================") - log.Infof(" HMAC File Server - %s ", versionString) - log.Info(" Secure File Handling with HMAC Auth ") - log.Info("========================================") - - log.Info("Features: Prometheus Metrics, Chunked Uploads, ClamAV Scanning") - log.Info("Build Date: 2024-10-28") - - log.Infof("Operating System: %s", runtime.GOOS) - log.Infof("Architecture: %s", runtime.GOARCH) - log.Infof("Number of CPUs: %d", runtime.NumCPU()) - log.Infof("Go Version: %s", runtime.Version()) - +func autoAdjustWorkers() (int, int) { v, _ := mem.VirtualMemory() - log.Infof("Total Memory: %v MB", v.Total/1024/1024) - log.Infof("Free Memory: %v MB", v.Free/1024/1024) - log.Infof("Used Memory: %v MB", v.Used/1024/1024) + cpuCores, _ := cpu.Counts(true) - cpuInfo, _ := cpu.Info() - for _, info := range cpuInfo { - log.Infof("CPU Model: %s, Cores: %d, Mhz: %f", info.ModelName, info.Cores, info.Mhz) + numWorkers := cpuCores * 2 + if v.Available < 4*1024*1024*1024 { // Less than 4GB available + numWorkers = max(numWorkers/2, 1) + } else if v.Available < 8*1024*1024*1024 { // Less than 8GB available + numWorkers = max(numWorkers*3/4, 1) } + queueSize := numWorkers * 10 - partitions, _ := disk.Partitions(false) - for _, partition := range partitions { - usage, _ := disk.Usage(partition.Mountpoint) - log.Infof("Disk Mountpoint: %s, Total: %v GB, Free: %v GB, Used: %v GB", - partition.Mountpoint, usage.Total/1024/1024/1024, usage.Free/1024/1024/1024, usage.Used/1024/1024/1024) - } - - hInfo, _ := host.Info() - log.Infof("Hostname: %s", hInfo.Hostname) - log.Infof("Uptime: %v 
seconds", hInfo.Uptime) - log.Infof("Boot Time: %v", time.Unix(int64(hInfo.BootTime), 0)) - log.Infof("Platform: %s", hInfo.Platform) - log.Infof("Platform Family: %s", hInfo.PlatformFamily) - log.Infof("Platform Version: %s", hInfo.PlatformVersion) - log.Infof("Kernel Version: %s", hInfo.KernelVersion) + log.Infof("Auto-adjusting workers: NumWorkers=%d, UploadQueueSize=%d", numWorkers, queueSize) + workerAdjustmentsTotal.Inc() + return numWorkers, queueSize } -// Initialize Prometheus metrics -// Duplicate initMetrics function removed -func initMetrics() { - uploadDuration = prometheus.NewHistogram(prometheus.HistogramOpts{Namespace: "hmac", Name: "file_server_upload_duration_seconds", Help: "Histogram of file upload duration in seconds.", Buckets: prometheus.DefBuckets}) - uploadErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{Namespace: "hmac", Name: "file_server_upload_errors_total", Help: "Total number of file upload errors."}) - uploadsTotal = prometheus.NewCounter(prometheus.CounterOpts{Namespace: "hmac", Name: "file_server_uploads_total", Help: "Total number of successful file uploads."}) - downloadDuration = prometheus.NewHistogram(prometheus.HistogramOpts{Namespace: "hmac", Name: "file_server_download_duration_seconds", Help: "Histogram of file download duration in seconds.", Buckets: prometheus.DefBuckets}) - downloadsTotal = prometheus.NewCounter(prometheus.CounterOpts{Namespace: "hmac", Name: "file_server_downloads_total", Help: "Total number of successful file downloads."}) - downloadErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{Namespace: "hmac", Name: "file_server_download_errors_total", Help: "Total number of file download errors."}) - memoryUsage = prometheus.NewGauge(prometheus.GaugeOpts{Namespace: "hmac", Name: "memory_usage_bytes", Help: "Current memory usage in bytes."}) - cpuUsage = prometheus.NewGauge(prometheus.GaugeOpts{Namespace: "hmac", Name: "cpu_usage_percent", Help: "Current CPU usage as a percentage."}) - 
activeConnections = prometheus.NewGauge(prometheus.GaugeOpts{Namespace: "hmac", Name: "active_connections_total", Help: "Total number of active connections."}) - requestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{Namespace: "hmac", Name: "http_requests_total", Help: "Total number of HTTP requests received, labeled by method and path."}, []string{"method", "path"}) - goroutines = prometheus.NewGauge(prometheus.GaugeOpts{Namespace: "hmac", Name: "goroutines_count", Help: "Current number of goroutines."}) - uploadSizeBytes = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "hmac", - Name: "file_server_upload_size_bytes", - Help: "Histogram of uploaded file sizes in bytes.", - Buckets: prometheus.ExponentialBuckets(100, 10, 8), - }) - downloadSizeBytes = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "hmac", - Name: "file_server_download_size_bytes", - Help: "Histogram of downloaded file sizes in bytes.", - Buckets: prometheus.ExponentialBuckets(100, 10, 8), - }) +func initializeWorkerSettings(server *ServerConfig, workers *WorkersConfig, clamav *ClamAVConfig) { + if server.AutoAdjustWorkers { + numWorkers, queueSize := autoAdjustWorkers() + workers.NumWorkers = numWorkers + workers.UploadQueueSize = queueSize + clamav.NumScanWorkers = max(numWorkers/2, 1) - if conf.Server.MetricsEnabled { - prometheus.MustRegister(uploadDuration, uploadErrorsTotal, uploadsTotal) - prometheus.MustRegister(downloadDuration, downloadsTotal, downloadErrorsTotal) - prometheus.MustRegister(memoryUsage, cpuUsage, activeConnections, requestsTotal, goroutines) - prometheus.MustRegister(uploadSizeBytes, downloadSizeBytes) + log.Infof("AutoAdjustWorkers enabled: NumWorkers=%d, UploadQueueSize=%d, NumScanWorkers=%d", + workers.NumWorkers, workers.UploadQueueSize, clamav.NumScanWorkers) + } else { + log.Infof("Manual configuration in effect: NumWorkers=%d, UploadQueueSize=%d, NumScanWorkers=%d", + workers.NumWorkers, workers.UploadQueueSize, 
clamav.NumScanWorkers) } } -// Update system metrics -func updateSystemMetrics(ctx context.Context) { - ticker := time.NewTicker(10 * time.Second) +func monitorWorkerPerformance(ctx context.Context, server *ServerConfig, w *WorkersConfig, clamav *ClamAVConfig) { + ticker := time.NewTicker(5 * time.Minute) defer ticker.Stop() for { select { case <-ctx.Done(): - log.Info("Stopping system metrics updater.") + log.Info("Stopping worker performance monitor.") return case <-ticker.C: - v, _ := mem.VirtualMemory() - memoryUsage.Set(float64(v.Used)) + if server.AutoAdjustWorkers { + numWorkers, queueSize := autoAdjustWorkers() + w.NumWorkers = numWorkers + w.UploadQueueSize = queueSize + clamav.NumScanWorkers = max(numWorkers/2, 1) - cpuPercent, _ := cpu.Percent(0, false) - if len(cpuPercent) > 0 { - cpuUsage.Set(cpuPercent[0]) + log.Infof("Re-adjusted workers: NumWorkers=%d, UploadQueueSize=%d, NumScanWorkers=%d", + w.NumWorkers, w.UploadQueueSize, clamav.NumScanWorkers) + workerReAdjustmentsTotal.Inc() } - - goroutines.Set(float64(runtime.NumGoroutine())) } } } -// Function to check if a file exists and return its size -func fileExists(filePath string) (bool, int64) { - if cachedInfo, found := fileInfoCache.Get(filePath); found { - if info, ok := cachedInfo.(os.FileInfo); ok { - return !info.IsDir(), info.Size() - } - } - - fileInfo, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false, 0 - } else if err != nil { - log.Error("Error checking file existence:", err) - return false, 0 - } - - fileInfoCache.Set(filePath, fileInfo, cache.DefaultExpiration) - return !fileInfo.IsDir(), fileInfo.Size() -} - -// Function to check file extension -func isExtensionAllowed(filename string) bool { - if len(conf.Uploads.AllowedExtensions) == 0 { - return true // No restrictions if the list is empty - } - ext := strings.ToLower(filepath.Ext(filename)) - for _, allowedExt := range conf.Uploads.AllowedExtensions { - if strings.ToLower(allowedExt) == ext { - return true - } - } 
- return false -} - -// Version the file by moving the existing file to a versioned directory -func versionFile(absFilename string) error { - versionDir := absFilename + "_versions" - - err := os.MkdirAll(versionDir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create version directory: %v", err) - } - - timestamp := time.Now().Format("20060102-150405") - versionedFilename := filepath.Join(versionDir, filepath.Base(absFilename)+"."+timestamp) - - err = os.Rename(absFilename, versionedFilename) - if err != nil { - return fmt.Errorf("failed to version the file: %v", err) - } - - log.WithFields(logrus.Fields{ - "original": absFilename, - "versioned_as": versionedFilename, - }).Info("Versioned old file") - return cleanupOldVersions(versionDir) -} - -// Clean up older versions if they exceed the maximum allowed -func cleanupOldVersions(versionDir string) error { - files, err := os.ReadDir(versionDir) - if err != nil { - return fmt.Errorf("failed to list version files: %v", err) - } - - if conf.Versioning.MaxVersions > 0 && len(files) > conf.Versioning.MaxVersions { - excessFiles := len(files) - conf.Versioning.MaxVersions - for i := 0; i < excessFiles; i++ { - err := os.Remove(filepath.Join(versionDir, files[i].Name())) - if err != nil { - return fmt.Errorf("failed to remove old version: %v", err) - } - log.WithField("file", files[i].Name()).Info("Removed old version") - } - } - - return nil -} - -// Process the upload task -func processUpload(task UploadTask) error { - absFilename := task.AbsFilename - tempFilename := absFilename + ".tmp" - r := task.Request - - log.Infof("Processing upload for file: %s", absFilename) - startTime := time.Now() - - // Handle uploads and write to a temporary file - if conf.Uploads.ChunkedUploadsEnabled { - log.Debugf("Chunked uploads enabled. 
Handling chunked upload for %s", tempFilename) - err := handleChunkedUpload(tempFilename, r) - if err != nil { - uploadDuration.Observe(time.Since(startTime).Seconds()) - log.WithFields(logrus.Fields{ - "file": tempFilename, - "error": err, - }).Error("Failed to handle chunked upload") - return err - } - } else { - log.Debugf("Handling standard upload for %s", tempFilename) - err := createFile(tempFilename, r) - if err != nil { - log.WithFields(logrus.Fields{ - "file": tempFilename, - "error": err, - }).Error("Error creating file") - uploadDuration.Observe(time.Since(startTime).Seconds()) - return err - } - } - - // Perform ClamAV scan on the temporary file - if clamClient != nil { - log.Debugf("Scanning %s with ClamAV", tempFilename) - err := scanFileWithClamAV(tempFilename) - if err != nil { - log.WithFields(logrus.Fields{ - "file": tempFilename, - "error": err, - }).Warn("ClamAV detected a virus or scan failed") - os.Remove(tempFilename) - uploadErrorsTotal.Inc() - return err - } - log.Infof("ClamAV scan passed for file: %s", tempFilename) - } - - // Handle file versioning if enabled - if conf.Versioning.EnableVersioning { - existing, _ := fileExists(absFilename) - if existing { - log.Infof("File %s exists. 
Initiating versioning.", absFilename) - err := versionFile(absFilename) - if err != nil { - log.WithFields(logrus.Fields{ - "file": absFilename, - "error": err, - }).Error("Error versioning file") - os.Remove(tempFilename) - return err - } - log.Infof("File versioned successfully: %s", absFilename) - } - } - - // Rename temporary file to final destination - err := os.Rename(tempFilename, absFilename) - if err != nil { - log.WithFields(logrus.Fields{ - "temp_file": tempFilename, - "final_file": absFilename, - "error": err, - }).Error("Failed to move file to final destination") - os.Remove(tempFilename) +func readConfig(configFilename string, conf *Config) error { + viper.SetConfigFile(configFilename) + if err := viper.ReadInConfig(); err != nil { + log.WithError(err).Errorf("Unable to read config from %s", configFilename) return err } - log.Infof("File moved to final destination: %s", absFilename) - - // Handle deduplication if enabled - if conf.Server.DeduplicationEnabled { - log.Debugf("Deduplication enabled. Checking duplicates for %s", absFilename) - err = handleDeduplication(context.Background(), absFilename) - if err != nil { - log.WithError(err).Error("Deduplication failed") - uploadErrorsTotal.Inc() - return err - } - log.Infof("Deduplication handled successfully for file: %s", absFilename) + if err := viper.Unmarshal(conf); err != nil { + return fmt.Errorf("unable to decode config into struct: %v", err) } - - log.WithFields(logrus.Fields{ - "file": absFilename, - }).Info("File uploaded and processed successfully") - - uploadDuration.Observe(time.Since(startTime).Seconds()) - uploadsTotal.Inc() return nil } -// uploadWorker processes upload tasks from the uploadQueue -func uploadWorker(ctx context.Context, workerID int) { - log.Infof("Upload worker %d started.", workerID) - defer log.Infof("Upload worker %d stopped.", workerID) - for { - select { - case <-ctx.Done(): - return - case task, ok := <-uploadQueue: - if !ok { - log.Warnf("Upload queue closed. 
Worker %d exiting.", workerID) - return - } - log.Infof("Worker %d processing upload for file: %s", workerID, task.AbsFilename) - err := processUpload(task) - if err != nil { - log.Errorf("Worker %d failed to process upload for %s: %v", workerID, task.AbsFilename, err) - uploadErrorsTotal.Inc() - } else { - log.Infof("Worker %d successfully processed upload for %s", workerID, task.AbsFilename) - } - task.Result <- err - close(task.Result) +func setDefaults() { + viper.SetDefault("server.listen_address", ":8080") + viper.SetDefault("server.storage_path", "./uploads") + viper.SetDefault("server.metrics_enabled", true) + viper.SetDefault("server.metrics_path", "/metrics") + viper.SetDefault("server.pid_file", "/var/run/hmac-file-server.pid") + viper.SetDefault("server.max_upload_size", "10GB") + viper.SetDefault("server.max_header_bytes", 1048576) // 1MB + viper.SetDefault("server.cleanup_interval", "24h") + viper.SetDefault("server.max_file_age", "720h") // 30 days + viper.SetDefault("server.pre_cache", true) + viper.SetDefault("server.pre_cache_workers", 4) + viper.SetDefault("server.pre_cache_interval", "1h") + viper.SetDefault("server.global_extensions", []string{}) + viper.SetDefault("server.deduplication_enabled", true) + viper.SetDefault("server.min_free_bytes", "1GB") + viper.SetDefault("server.file_naming", "original") + viper.SetDefault("server.force_protocol", "") + viper.SetDefault("server.enable_dynamic_workers", true) + viper.SetDefault("server.worker_scale_up_thresh", 50) + viper.SetDefault("server.worker_scale_down_thresh", 10) + + viper.SetDefault("uploads.allowed_extensions", []string{".zip", ".rar", ".7z", ".tar.gz", ".tgz", ".gpg", ".enc", ".pgp"}) + viper.SetDefault("uploads.chunked_uploads_enabled", true) + viper.SetDefault("uploads.chunk_size", "10MB") + viper.SetDefault("uploads.resumable_uploads_enabled", true) + viper.SetDefault("uploads.max_resumable_age", "48h") + + viper.SetDefault("downloads.allowed_extensions", []string{".zip", ".rar", 
".7z", ".tar.gz", ".tgz", ".gpg", ".enc", ".pgp"}) + viper.SetDefault("downloads.chunked_downloads_enabled", true) + viper.SetDefault("downloads.chunk_size", "10MB") + viper.SetDefault("downloads.resumable_downloads_enabled", true) + + viper.SetDefault("security.secret", "your-very-secret-hmac-key") + viper.SetDefault("security.enablejwt", false) + viper.SetDefault("security.jwtsecret", "your-256-bit-secret") + viper.SetDefault("security.jwtalgorithm", "HS256") + viper.SetDefault("security.jwtexpiration", "24h") + + // Logging defaults + viper.SetDefault("logging.level", "info") + viper.SetDefault("logging.file", "/var/log/hmac-file-server.log") + viper.SetDefault("logging.max_size", 100) + viper.SetDefault("logging.max_backups", 7) + viper.SetDefault("logging.max_age", 30) + viper.SetDefault("logging.compress", true) + + // Deduplication defaults + viper.SetDefault("deduplication.enabled", false) + viper.SetDefault("deduplication.directory", "./dedup_store") + + // ISO defaults + viper.SetDefault("iso.enabled", false) + viper.SetDefault("iso.mount_point", "/mnt/hmac_iso") + viper.SetDefault("iso.size", "1GB") + viper.SetDefault("iso.charset", "utf-8") + viper.SetDefault("iso.containerfile", "/var/lib/hmac-file-server/data.iso") + + // Timeouts defaults + viper.SetDefault("timeouts.read", "60s") + viper.SetDefault("timeouts.write", "60s") + viper.SetDefault("timeouts.idle", "120s") + viper.SetDefault("timeouts.shutdown", "30s") + + // Versioning defaults + viper.SetDefault("versioning.enabled", false) + viper.SetDefault("versioning.backend", "simple") + viper.SetDefault("versioning.max_revisions", 5) + + // ... 
other defaults for Uploads, Downloads, ClamAV, Redis, Workers, File, Build + viper.SetDefault("build.version", "dev") +} + +func validateConfig(c *Config) error { + if c.Server.ListenAddress == "" { // Corrected field name + return errors.New("server.listen_address is required") + } + + if c.Server.FileTTL == "" && c.Server.FileTTLEnabled { // Corrected field names + return errors.New("server.file_ttl is required when server.file_ttl_enabled is true") + } + + if _, err := time.ParseDuration(c.Timeouts.Read); err != nil { // Corrected field name + return fmt.Errorf("invalid timeouts.read: %v", err) + } + if _, err := time.ParseDuration(c.Timeouts.Write); err != nil { // Corrected field name + return fmt.Errorf("invalid timeouts.write: %v", err) + } + if _, err := time.ParseDuration(c.Timeouts.Idle); err != nil { // Corrected field name + return fmt.Errorf("invalid timeouts.idle: %v", err) + } + + // Corrected VersioningConfig field access + if c.Versioning.Enabled { // Use the Go struct field name 'Enabled' + if c.Versioning.MaxRevs <= 0 { // Use the Go struct field name 'MaxRevs' + return errors.New("versioning.max_revisions must be positive if versioning is enabled") } } -} -// Initialize upload worker pool -func initializeUploadWorkerPool(ctx context.Context) { - for i := 0; i < MinWorkers; i++ { - go uploadWorker(ctx, i) - } - log.Infof("Initialized %d upload workers", MinWorkers) -} - -// Worker function to process scan tasks -func scanWorker(ctx context.Context, workerID int) { - log.WithField("worker_id", workerID).Info("Scan worker started") - for { - select { - case <-ctx.Done(): - log.WithField("worker_id", workerID).Info("Scan worker stopping") - return - case task, ok := <-scanQueue: - if !ok { - log.WithField("worker_id", workerID).Info("Scan queue closed") - return - } - log.WithFields(logrus.Fields{ - "worker_id": workerID, - "file": task.AbsFilename, - }).Info("Processing scan task") - err := scanFileWithClamAV(task.AbsFilename) - if err != nil { - 
log.WithFields(logrus.Fields{ - "worker_id": workerID, - "file": task.AbsFilename, - "error": err, - }).Error("Failed to scan file") - } else { - log.WithFields(logrus.Fields{ - "worker_id": workerID, - "file": task.AbsFilename, - }).Info("Successfully scanned file") - } - task.Result <- err - close(task.Result) - } - } -} - -// Initialize scan worker pool -func initializeScanWorkerPool(ctx context.Context) { - for i := 0; i < ScanWorkers; i++ { - go scanWorker(ctx, i) - } - log.Infof("Initialized %d scan workers", ScanWorkers) -} - -// Setup router with middleware -func setupRouter() http.Handler { - mux := http.NewServeMux() - mux.HandleFunc("/", handleRequest) - if conf.Server.MetricsEnabled { - mux.Handle("/metrics", promhttp.Handler()) + // Validate JWT secret if JWT is enabled + if c.Security.EnableJWT && strings.TrimSpace(c.Security.JWTSecret) == "" { + return errors.New("security.jwtsecret is required when security.enablejwt is true") } - // Apply middleware - handler := loggingMiddleware(mux) - handler = recoveryMiddleware(handler) - handler = corsMiddleware(handler) - return handler -} - -// Middleware for logging -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestsTotal.WithLabelValues(r.Method, r.URL.Path).Inc() - next.ServeHTTP(w, r) - }) -} - -// Middleware for panic recovery -func recoveryMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if rec := recover(); rec != nil { - log.WithFields(logrus.Fields{ - "method": r.Method, - "url": r.URL.String(), - "error": rec, - }).Error("Panic recovered in HTTP handler") - http.Error(w, "Internal Server Error", http.StatusInternalServerError) - } - }() - next.ServeHTTP(w, r) - }) -} - -// corsMiddleware handles CORS by setting appropriate headers -func corsMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { - // Set CORS headers - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-File-MAC") - w.Header().Set("Access-Control-Max-Age", "86400") // Cache preflight response for 1 day - - // Handle preflight OPTIONS request - if r.Method == http.MethodOptions { - w.WriteHeader(http.StatusOK) - return - } - - // Proceed to the next handler - next.ServeHTTP(w, r) - }) -} - -// Handle file uploads and downloads -func handleRequest(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodPost && strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") { - absFilename, err := sanitizeFilePath(conf.Server.StoragePath, strings.TrimPrefix(r.URL.Path, "/")) - if err != nil { - log.WithError(err).Error("Invalid file path") - http.Error(w, "Invalid file path", http.StatusBadRequest) - return - } - err = handleMultipartUpload(w, r, absFilename) - if err != nil { - log.WithError(err).Error("Failed to handle multipart upload") - http.Error(w, "Failed to handle multipart upload", http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusCreated) - return + // Validate HMAC secret if JWT is not enabled (as it's the fallback) + if !c.Security.EnableJWT && strings.TrimSpace(c.Security.Secret) == "" { + return errors.New("security.secret is required for HMAC authentication (when JWT is disabled)") } - // Get client IP address - clientIP := r.Header.Get("X-Real-IP") - if clientIP == "" { - clientIP = r.Header.Get("X-Forwarded-For") - } - if clientIP == "" { - // Fallback to RemoteAddr - host, _, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - log.WithError(err).Warn("Failed to parse RemoteAddr") - clientIP = r.RemoteAddr + return nil +} + +// validateJWTFromRequest extracts and validates a JWT from the request. 
+func validateJWTFromRequest(r *http.Request, secret string) (*jwt.Token, error) { + authHeader := r.Header.Get("Authorization") + tokenString := "" + + if authHeader != "" { + splitToken := strings.Split(authHeader, "Bearer ") + if len(splitToken) == 2 { + tokenString = splitToken[1] } else { - clientIP = host + return nil, errors.New("invalid Authorization header format") + } + } else { + // Fallback to checking 'token' query parameter + tokenString = r.URL.Query().Get("token") + if tokenString == "" { + return nil, errors.New("missing JWT in Authorization header or 'token' query parameter") } } - // Log the request with the client IP - log.WithFields(logrus.Fields{ - "method": r.Method, - "url": r.URL.String(), - "remote": clientIP, - }).Info("Incoming request") + token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return []byte(secret), nil + }) - // Parse URL and query parameters - p := r.URL.Path - a, err := url.ParseQuery(r.URL.RawQuery) if err != nil { - log.Warn("Failed to parse query parameters") - http.Error(w, "Internal Server Error", http.StatusInternalServerError) - return + return nil, fmt.Errorf("JWT validation failed: %w", err) } - fileStorePath := strings.TrimPrefix(p, "/") - if fileStorePath == "" || fileStorePath == "/" { - log.Warn("Access to root directory is forbidden") - http.Error(w, "Forbidden", http.StatusForbidden) - return - } else if fileStorePath[0] == '/' { - fileStorePath = fileStorePath[1:] + if !token.Valid { + return nil, errors.New("invalid JWT") } - absFilename, err := sanitizeFilePath(conf.Server.StoragePath, fileStorePath) - if err != nil { - log.WithFields(logrus.Fields{ - "file": fileStorePath, - "error": err, - }).Warn("Invalid file path") - http.Error(w, "Invalid file path", http.StatusBadRequest) - return - } - - switch r.Method { - case 
http.MethodPut: - handleUpload(w, r, absFilename, fileStorePath, a) - case http.MethodHead, http.MethodGet: - handleDownload(w, r, absFilename, fileStorePath) - case http.MethodOptions: - // Handled by NGINX; no action needed - w.Header().Set("Allow", "OPTIONS, GET, PUT, HEAD") - return - default: - log.WithField("method", r.Method).Warn("Invalid HTTP method for upload directory") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } + return token, nil } -// Handle file uploads with extension restrictions and HMAC validation -func handleUpload(w http.ResponseWriter, r *http.Request, absFilename, fileStorePath string, a url.Values) { - // Log the storage path being used - log.Infof("Using storage path: %s", conf.Server.StoragePath) +// validateHMAC validates the HMAC signature of the request for legacy protocols and POST uploads. +func validateHMAC(r *http.Request, secret string) error { + log.Debugf("validateHMAC: Validating request to %s with query: %s", r.URL.Path, r.URL.RawQuery) + // Check for X-Signature header (for POST uploads) + signature := r.Header.Get("X-Signature") + if signature != "" { + // This is a POST upload with X-Signature header + message := r.URL.Path + h := hmac.New(sha256.New, []byte(secret)) + h.Write([]byte(message)) + expectedSignature := hex.EncodeToString(h.Sum(nil)) - // Determine protocol version based on query parameters - var protocolVersion string - if a.Get("v2") != "" { - protocolVersion = "v2" - } else if a.Get("token") != "" { - protocolVersion = "token" - } else if a.Get("v") != "" { - protocolVersion = "v" - } else { - log.Warn("No HMAC attached to URL. Expecting 'v', 'v2', or 'token' parameter as MAC") - http.Error(w, "No HMAC attached to URL. 
Expecting 'v', 'v2', or 'token' parameter as MAC", http.StatusForbidden) - return + if !hmac.Equal([]byte(signature), []byte(expectedSignature)) { + return errors.New("invalid HMAC signature in X-Signature header") + } + return nil } - log.Debugf("Protocol version determined: %s", protocolVersion) - // Initialize HMAC - mac := hmac.New(sha256.New, []byte(conf.Security.Secret)) + // Check for legacy URL-based HMAC protocols (v, v2, token) + query := r.URL.Query() + + var protocolVersion string + var providedMACHex string + + if query.Get("v2") != "" { + protocolVersion = "v2" + providedMACHex = query.Get("v2") + } else if query.Get("token") != "" { + protocolVersion = "token" + providedMACHex = query.Get("token") + } else if query.Get("v") != "" { + protocolVersion = "v" + providedMACHex = query.Get("v") + } else { + return errors.New("no HMAC signature found (missing X-Signature header or v/v2/token query parameter)") + } + + // Extract file path from URL + fileStorePath := strings.TrimPrefix(r.URL.Path, "/") + + // Calculate HMAC based on protocol version (matching legacy behavior) + mac := hmac.New(sha256.New, []byte(secret)) - // Calculate MAC based on protocolVersion if protocolVersion == "v" { - mac.Write([]byte(fileStorePath + "\x20" + strconv.FormatInt(r.ContentLength, 10))) - } else if protocolVersion == "v2" || protocolVersion == "token" { + // Legacy v protocol: fileStorePath + "\x20" + contentLength + message := fileStorePath + "\x20" + strconv.FormatInt(r.ContentLength, 10) + mac.Write([]byte(message)) + } else { + // v2 and token protocols: fileStorePath + "\x00" + contentLength + "\x00" + contentType contentType := mime.TypeByExtension(filepath.Ext(fileStorePath)) if contentType == "" { contentType = "application/octet-stream" } - mac.Write([]byte(fileStorePath + "\x00" + strconv.FormatInt(r.ContentLength, 10) + "\x00" + contentType)) + message := fileStorePath + "\x00" + strconv.FormatInt(r.ContentLength, 10) + "\x00" + contentType + 
log.Debugf("validateHMAC: %s protocol message: %q (len=%d)", protocolVersion, message, len(message)) + mac.Write([]byte(message)) } calculatedMAC := mac.Sum(nil) - log.Debugf("Calculated MAC: %x", calculatedMAC) + calculatedMACHex := hex.EncodeToString(calculatedMAC) - // Decode provided MAC from hex - providedMACHex := a.Get(protocolVersion) + // Decode provided MAC providedMAC, err := hex.DecodeString(providedMACHex) if err != nil { - log.Warn("Invalid MAC encoding") - http.Error(w, "Invalid MAC encoding", http.StatusForbidden) - return + return fmt.Errorf("invalid MAC encoding for %s protocol: %v", protocolVersion, err) } - log.Debugf("Provided MAC: %x", providedMAC) - // Validate the HMAC + log.Debugf("validateHMAC: %s protocol - calculated: %s, provided: %s", protocolVersion, calculatedMACHex, providedMACHex) + + // Compare MACs if !hmac.Equal(calculatedMAC, providedMAC) { - log.Warn("Invalid MAC") - http.Error(w, "Invalid MAC", http.StatusForbidden) - return - } - log.Debug("HMAC validation successful") - - // Validate file extension - if !isExtensionAllowed(fileStorePath) { - log.WithFields(logrus.Fields{ - // No need to sanitize and validate the file path here since absFilename is already sanitized in handleRequest - "file": fileStorePath, - "error": err, - }).Warn("Invalid file path") - http.Error(w, "Invalid file path", http.StatusBadRequest) - uploadErrorsTotal.Inc() - return - } - // absFilename = sanitizedFilename - - // Check if there is enough free space - err = checkStorageSpace(conf.Server.StoragePath, conf.Server.MinFreeBytes) - if err != nil { - log.WithFields(logrus.Fields{ - "storage_path": conf.Server.StoragePath, - "error": err, - }).Warn("Not enough free space") - http.Error(w, "Not enough free space", http.StatusInsufficientStorage) - uploadErrorsTotal.Inc() - return + return fmt.Errorf("invalid MAC for %s protocol", protocolVersion) } - // Create an UploadTask with a result channel - result := make(chan error) - task := UploadTask{ - 
AbsFilename: absFilename, - Request: r, - Result: result, - } - - // Submit task to the upload queue - select { - case uploadQueue <- task: - // Successfully added to the queue - log.Debug("Upload task enqueued successfully") - default: - // Queue is full - log.Warn("Upload queue is full. Rejecting upload") - http.Error(w, "Server busy. Try again later.", http.StatusServiceUnavailable) - uploadErrorsTotal.Inc() - return - } - - // Wait for the worker to process the upload - err = <-result - if err != nil { - // The worker has already logged the error; send an appropriate HTTP response - http.Error(w, fmt.Sprintf("Upload failed: %v", err), http.StatusInternalServerError) - return - } - - // Upload was successful - w.WriteHeader(http.StatusCreated) + log.Debugf("%s HMAC authentication successful for request: %s", protocolVersion, r.URL.Path) + return nil } -// Handle file downloads -func handleDownload(w http.ResponseWriter, r *http.Request, absFilename, fileStorePath string) { - fileInfo, err := getFileInfo(absFilename) +// validateV3HMAC validates the HMAC signature for v3 protocol (mod_http_upload_external). 
+func validateV3HMAC(r *http.Request, secret string) error { + query := r.URL.Query() + + // Extract v3 signature and expires from query parameters + signature := query.Get("v3") + expiresStr := query.Get("expires") + + if signature == "" { + return errors.New("missing v3 signature parameter") + } + + if expiresStr == "" { + return errors.New("missing expires parameter") + } + + // Parse expires timestamp + expires, err := strconv.ParseInt(expiresStr, 10, 64) if err != nil { - log.WithError(err).Error("Failed to get file information") - http.Error(w, "Not Found", http.StatusNotFound) - downloadErrorsTotal.Inc() + return fmt.Errorf("invalid expires parameter: %v", err) + } + + // Check if signature has expired + now := time.Now().Unix() + if now > expires { + return errors.New("signature has expired") + } + + // Construct message for HMAC verification + // Format: METHOD\nEXPIRES\nPATH + message := fmt.Sprintf("%s\n%s\n%s", r.Method, expiresStr, r.URL.Path) + + // Calculate expected HMAC signature + h := hmac.New(sha256.New, []byte(secret)) + h.Write([]byte(message)) + expectedSignature := hex.EncodeToString(h.Sum(nil)) + + // Compare signatures + if !hmac.Equal([]byte(signature), []byte(expectedSignature)) { + return errors.New("invalid v3 HMAC signature") + } + + return nil +} + +// handleUpload handles file uploads. 
+func handleUpload(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + activeConnections.Inc() + defer activeConnections.Dec() + + // Only allow POST method + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + uploadErrorsTotal.Inc() return - } else if fileInfo.IsDir() { - log.Warn("Directory listing forbidden") - http.Error(w, "Forbidden", http.StatusForbidden) + } + + // Authentication + if conf.Security.EnableJWT { + _, err := validateJWTFromRequest(r, conf.Security.JWTSecret) + if err != nil { + http.Error(w, fmt.Sprintf("JWT Authentication failed: %v", err), http.StatusUnauthorized) + uploadErrorsTotal.Inc() + return + } + log.Debugf("JWT authentication successful for upload request: %s", r.URL.Path) + } else { + err := validateHMAC(r, conf.Security.Secret) + if err != nil { + http.Error(w, fmt.Sprintf("HMAC Authentication failed: %v", err), http.StatusUnauthorized) + uploadErrorsTotal.Inc() + return + } + log.Debugf("HMAC authentication successful for upload request: %s", r.URL.Path) + } + + // Parse multipart form + err := r.ParseMultipartForm(32 << 20) // 32MB max memory + if err != nil { + http.Error(w, fmt.Sprintf("Error parsing multipart form: %v", err), http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + + // Get file from form + file, header, err := r.FormFile("file") + if err != nil { + http.Error(w, fmt.Sprintf("Error getting file from form: %v", err), http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + defer file.Close() + + // Validate file extension if configured + if len(conf.Uploads.AllowedExtensions) > 0 { + ext := strings.ToLower(filepath.Ext(header.Filename)) + allowed := false + for _, allowedExt := range conf.Uploads.AllowedExtensions { + if ext == allowedExt { + allowed = true + break + } + } + if !allowed { + http.Error(w, fmt.Sprintf("File extension %s not allowed", ext), http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + } + + // 
Generate filename based on configuration + var filename string + switch conf.Server.FileNaming { + case "HMAC": + // Generate HMAC-based filename + h := hmac.New(sha256.New, []byte(conf.Security.Secret)) + h.Write([]byte(header.Filename + time.Now().String())) + filename = hex.EncodeToString(h.Sum(nil)) + filepath.Ext(header.Filename) + default: // "original" or "None" + filename = header.Filename + } + + // Create full file path + storagePath := conf.Server.StoragePath + if conf.ISO.Enabled { + storagePath = conf.ISO.MountPoint + } + + absFilename := filepath.Join(storagePath, filename) + + // Create the file + dst, err := os.Create(absFilename) + if err != nil { + http.Error(w, fmt.Sprintf("Error creating file: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + return + } + defer dst.Close() + + // Copy file content + written, err := io.Copy(dst, file) + if err != nil { + http.Error(w, fmt.Sprintf("Error saving file: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + // Clean up partial file + os.Remove(absFilename) + return + } + + // Handle deduplication if enabled + if conf.Server.DeduplicationEnabled { + ctx := context.Background() + err = handleDeduplication(ctx, absFilename) + if err != nil { + log.Warnf("Deduplication failed for %s: %v", absFilename, err) + } + } + + // Update metrics + duration := time.Since(startTime) + uploadDuration.Observe(duration.Seconds()) + uploadsTotal.Inc() + uploadSizeBytes.Observe(float64(written)) + + // Return success response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + response := map[string]interface{}{ + "success": true, + "filename": filename, + "size": written, + "duration": duration.String(), + } + + // Create JSON response + if jsonBytes, err := json.Marshal(response); err == nil { + w.Write(jsonBytes) + } else { + fmt.Fprintf(w, `{"success": true, "filename": "%s", "size": %d}`, filename, written) + } + + log.Infof("Successfully 
uploaded %s (%s) in %s", filename, formatBytes(written), duration) +} + +// handleDownload handles file downloads. +func handleDownload(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + activeConnections.Inc() + defer activeConnections.Dec() + + // Authentication + if conf.Security.EnableJWT { + _, err := validateJWTFromRequest(r, conf.Security.JWTSecret) + if err != nil { + http.Error(w, fmt.Sprintf("JWT Authentication failed: %v", err), http.StatusUnauthorized) + downloadErrorsTotal.Inc() + return + } + log.Debugf("JWT authentication successful for download request: %s", r.URL.Path) + } else { + err := validateHMAC(r, conf.Security.Secret) + if err != nil { + http.Error(w, fmt.Sprintf("HMAC Authentication failed: %v", err), http.StatusUnauthorized) + downloadErrorsTotal.Inc() + return + } + log.Debugf("HMAC authentication successful for download request: %s", r.URL.Path) + } + + filename := strings.TrimPrefix(r.URL.Path, "/download/") + if filename == "" { + http.Error(w, "Filename not specified", http.StatusBadRequest) downloadErrorsTotal.Inc() return } + absFilename, err := sanitizeFilePath(conf.Server.StoragePath, filename) // Use sanitizeFilePath from helpers.go + if err != nil { + http.Error(w, fmt.Sprintf("Invalid file path: %v", err), http.StatusBadRequest) + downloadErrorsTotal.Inc() + return + } + + fileInfo, err := os.Stat(absFilename) + if os.IsNotExist(err) { + http.Error(w, "File not found", http.StatusNotFound) + downloadErrorsTotal.Inc() + return + } + if err != nil { + http.Error(w, fmt.Sprintf("Error accessing file: %v", err), http.StatusInternalServerError) + downloadErrorsTotal.Inc() + return + } + + if fileInfo.IsDir() { + http.Error(w, "Cannot download a directory", http.StatusBadRequest) + downloadErrorsTotal.Inc() + return + } + + file, err := os.Open(absFilename) + if err != nil { + http.Error(w, fmt.Sprintf("Error opening file: %v", err), http.StatusInternalServerError) + downloadErrorsTotal.Inc() + return + } + defer 
file.Close() + + w.Header().Set("Content-Disposition", "attachment; filename=\""+filepath.Base(absFilename)+"\"") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", fmt.Sprintf("%d", fileInfo.Size())) + + // Use a pooled buffer for copying + bufPtr := bufferPool.Get().(*[]byte) + defer bufferPool.Put(bufPtr) + buf := *bufPtr + + n, err := io.CopyBuffer(w, file, buf) + if err != nil { + log.Errorf("Error during download of %s: %v", absFilename, err) + // Don't write http.Error here if headers already sent + downloadErrorsTotal.Inc() + return // Ensure we don't try to record metrics if there was an error during copy + } + + duration := time.Since(startTime) + downloadDuration.Observe(duration.Seconds()) + downloadsTotal.Inc() + downloadSizeBytes.Observe(float64(n)) + log.Infof("Successfully downloaded %s (%s) in %s", absFilename, formatBytes(n), duration) +} + +// handleV3Upload handles PUT requests for v3 protocol (mod_http_upload_external). +func handleV3Upload(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + activeConnections.Inc() + defer activeConnections.Dec() + + // Only allow PUT method for v3 + if r.Method != http.MethodPut { + http.Error(w, "Method not allowed for v3 uploads", http.StatusMethodNotAllowed) + uploadErrorsTotal.Inc() + return + } + + // Validate v3 HMAC signature + err := validateV3HMAC(r, conf.Security.Secret) + if err != nil { + http.Error(w, fmt.Sprintf("v3 Authentication failed: %v", err), http.StatusUnauthorized) + uploadErrorsTotal.Inc() + return + } + log.Debugf("v3 HMAC authentication successful for upload request: %s", r.URL.Path) + + // Extract filename from the URL path + // Path format: /uuid/subdir/filename.ext + pathParts := strings.Split(strings.Trim(r.URL.Path, "/"), "/") + if len(pathParts) < 1 { + http.Error(w, "Invalid upload path", http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + + // Use the last part as filename + originalFilename := 
pathParts[len(pathParts)-1] + if originalFilename == "" { + http.Error(w, "No filename specified", http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + + // Validate file extension if configured + if len(conf.Uploads.AllowedExtensions) > 0 { + ext := strings.ToLower(filepath.Ext(originalFilename)) + allowed := false + for _, allowedExt := range conf.Uploads.AllowedExtensions { + if ext == allowedExt { + allowed = true + break + } + } + if !allowed { + http.Error(w, fmt.Sprintf("File extension %s not allowed", ext), http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + } + + // Generate filename based on configuration + var filename string + switch conf.Server.FileNaming { + case "HMAC": + // Generate HMAC-based filename + h := hmac.New(sha256.New, []byte(conf.Security.Secret)) + h.Write([]byte(originalFilename + time.Now().String())) + filename = hex.EncodeToString(h.Sum(nil)) + filepath.Ext(originalFilename) + default: // "original" or "None" + filename = originalFilename + } + + // Create full file path + storagePath := conf.Server.StoragePath + if conf.ISO.Enabled { + storagePath = conf.ISO.MountPoint + } + + absFilename := filepath.Join(storagePath, filename) + + // Create the file + dst, err := os.Create(absFilename) + if err != nil { + http.Error(w, fmt.Sprintf("Error creating file: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + return + } + defer dst.Close() + + // Copy file content from request body + written, err := io.Copy(dst, r.Body) + if err != nil { + http.Error(w, fmt.Sprintf("Error saving file: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + // Clean up partial file + os.Remove(absFilename) + return + } + + // Handle deduplication if enabled + if conf.Server.DeduplicationEnabled { + ctx := context.Background() + err = handleDeduplication(ctx, absFilename) + if err != nil { + log.Warnf("Deduplication failed for %s: %v", absFilename, err) + } + } + + // Update metrics + duration := 
time.Since(startTime) + uploadDuration.Observe(duration.Seconds()) + uploadsTotal.Inc() + uploadSizeBytes.Observe(float64(written)) + + // Return success response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + response := map[string]interface{}{ + "success": true, + "filename": filename, + "size": written, + "duration": duration.String(), + } + + // Create JSON response + if jsonBytes, err := json.Marshal(response); err == nil { + w.Write(jsonBytes) + } else { + fmt.Fprintf(w, `{"success": true, "filename": "%s", "size": %d}`, filename, written) + } + + log.Infof("Successfully uploaded %s via v3 protocol (%s) in %s", filename, formatBytes(written), duration) +} + +// handleLegacyUpload handles PUT requests for legacy protocols (v, v2, token). +func handleLegacyUpload(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + activeConnections.Inc() + defer activeConnections.Dec() + + log.Debugf("handleLegacyUpload: Processing request to %s with query: %s", r.URL.Path, r.URL.RawQuery) + + // Only allow PUT method for legacy uploads + if r.Method != http.MethodPut { + http.Error(w, "Method not allowed for legacy uploads", http.StatusMethodNotAllowed) + uploadErrorsTotal.Inc() + return + } + + // Validate legacy HMAC signature + err := validateHMAC(r, conf.Security.Secret) + if err != nil { + http.Error(w, fmt.Sprintf("Legacy Authentication failed: %v", err), http.StatusUnauthorized) + uploadErrorsTotal.Inc() + return + } + + // Extract filename from the URL path + fileStorePath := strings.TrimPrefix(r.URL.Path, "/") + if fileStorePath == "" { + http.Error(w, "No filename specified", http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + + // Validate file extension if configured + if len(conf.Uploads.AllowedExtensions) > 0 { + ext := strings.ToLower(filepath.Ext(fileStorePath)) + allowed := false + for _, allowedExt := range conf.Uploads.AllowedExtensions { + if ext == allowedExt { + allowed = true + break 
+ } + } + if !allowed { + http.Error(w, fmt.Sprintf("File extension %s not allowed", ext), http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + } + + // Create full file path + storagePath := conf.Server.StoragePath + if conf.ISO.Enabled { + storagePath = conf.ISO.MountPoint + } + + // Generate filename based on configuration + var absFilename string + var filename string + switch conf.Server.FileNaming { + case "HMAC": + // Generate HMAC-based filename + h := hmac.New(sha256.New, []byte(conf.Security.Secret)) + h.Write([]byte(fileStorePath + time.Now().String())) + filename = hex.EncodeToString(h.Sum(nil)) + filepath.Ext(fileStorePath) + absFilename = filepath.Join(storagePath, filename) + default: // "original" or "None" + // Preserve full directory structure for legacy XMPP compatibility + var sanitizeErr error + absFilename, sanitizeErr = sanitizeFilePath(storagePath, fileStorePath) + if sanitizeErr != nil { + http.Error(w, fmt.Sprintf("Invalid file path: %v", sanitizeErr), http.StatusBadRequest) + uploadErrorsTotal.Inc() + return + } + filename = filepath.Base(fileStorePath) // For logging purposes + } + + // Create directory structure if it doesn't exist + if err := os.MkdirAll(filepath.Dir(absFilename), 0755); err != nil { + http.Error(w, fmt.Sprintf("Error creating directory: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + return + } + + // Create the file + dst, err := os.Create(absFilename) + if err != nil { + http.Error(w, fmt.Sprintf("Error creating file: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + return + } + defer dst.Close() + + // Log upload start for large files + if r.ContentLength > 10*1024*1024 { // Log for files > 10MB + log.Infof("Starting upload of %s (%.1f MiB)", filename, float64(r.ContentLength)/(1024*1024)) + } + + // Copy file content from request body with progress reporting + written, err := copyWithProgress(dst, r.Body, r.ContentLength, filename) + if err != nil { + 
http.Error(w, fmt.Sprintf("Error saving file: %v", err), http.StatusInternalServerError) + uploadErrorsTotal.Inc() + // Clean up partial file + os.Remove(absFilename) + return + } + + // Handle deduplication if enabled + if conf.Server.DeduplicationEnabled { + ctx := context.Background() + err = handleDeduplication(ctx, absFilename) + if err != nil { + log.Warnf("Deduplication failed for %s: %v", absFilename, err) + } + } + + // Update metrics + duration := time.Since(startTime) + uploadDuration.Observe(duration.Seconds()) + uploadsTotal.Inc() + uploadSizeBytes.Observe(float64(written)) + + // Return success response (201 Created for legacy compatibility) + w.WriteHeader(http.StatusCreated) + + log.Infof("Successfully uploaded %s via legacy protocol (%s) in %s", filename, formatBytes(written), duration) +} + +// handleLegacyDownload handles GET/HEAD requests for legacy downloads. +func handleLegacyDownload(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + activeConnections.Inc() + defer activeConnections.Dec() + + // Extract filename from the URL path + fileStorePath := strings.TrimPrefix(r.URL.Path, "/") + if fileStorePath == "" { + http.Error(w, "No filename specified", http.StatusBadRequest) + downloadErrorsTotal.Inc() + return + } + + // Create full file path + storagePath := conf.Server.StoragePath + if conf.ISO.Enabled { + storagePath = conf.ISO.MountPoint + } + + absFilename := filepath.Join(storagePath, fileStorePath) + + fileInfo, err := os.Stat(absFilename) + if os.IsNotExist(err) { + http.Error(w, "File not found", http.StatusNotFound) + downloadErrorsTotal.Inc() + return + } + if err != nil { + http.Error(w, fmt.Sprintf("Error accessing file: %v", err), http.StatusInternalServerError) + downloadErrorsTotal.Inc() + return + } + + if fileInfo.IsDir() { + http.Error(w, "Cannot download a directory", http.StatusBadRequest) + downloadErrorsTotal.Inc() + return + } + + // Set appropriate headers contentType := 
mime.TypeByExtension(filepath.Ext(fileStorePath)) if contentType == "" { contentType = "application/octet-stream" } w.Header().Set("Content-Type", contentType) + w.Header().Set("Content-Length", strconv.FormatInt(fileInfo.Size(), 10)) - // Handle resumable downloads - if conf.Uploads.ResumableUploadsEnabled { - handleResumableDownload(absFilename, w, r, fileInfo.Size()) - return - } - + // For HEAD requests, only send headers if r.Method == http.MethodHead { - w.Header().Set("Content-Length", strconv.FormatInt(fileInfo.Size(), 10)) - downloadsTotal.Inc() - return - } else { - // Measure download duration - startTime := time.Now() - log.Infof("Initiating download for file: %s", absFilename) - http.ServeFile(w, r, absFilename) - downloadDuration.Observe(time.Since(startTime).Seconds()) - downloadSizeBytes.Observe(float64(fileInfo.Size())) - downloadsTotal.Inc() - log.Infof("File downloaded successfully: %s", absFilename) - return - } -} - -// Create the file for upload with buffered Writer -func createFile(tempFilename string, r *http.Request) error { - absDirectory := filepath.Dir(tempFilename) - err := os.MkdirAll(absDirectory, os.ModePerm) - if err != nil { - log.WithError(err).Errorf("Failed to create directory %s", absDirectory) - return fmt.Errorf("failed to create directory %s: %w", absDirectory, err) - } - - // Open the file for writing - targetFile, err := os.OpenFile(tempFilename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - log.WithError(err).Errorf("Failed to create file %s", tempFilename) - return fmt.Errorf("failed to create file %s: %w", tempFilename, err) - } - defer targetFile.Close() - - // Use a large buffer for efficient file writing - bufferSize := 4 * 1024 * 1024 // 4 MB buffer - writer := bufio.NewWriterSize(targetFile, bufferSize) - buffer := make([]byte, bufferSize) - - totalBytes := int64(0) - for { - n, readErr := r.Body.Read(buffer) - if n > 0 { - totalBytes += int64(n) - _, writeErr := writer.Write(buffer[:n]) - if 
writeErr != nil { - log.WithError(writeErr).Errorf("Failed to write to file %s", tempFilename) - return fmt.Errorf("failed to write to file %s: %w", tempFilename, writeErr) - } - } - if readErr != nil { - if readErr == io.EOF { - break - } - log.WithError(readErr).Error("Failed to read request body") - return fmt.Errorf("failed to read request body: %w", readErr) - } - } - - err = writer.Flush() - if err != nil { - log.WithError(err).Errorf("Failed to flush buffer to file %s", tempFilename) - return fmt.Errorf("failed to flush buffer to file %s: %w", tempFilename, err) - } - - log.WithFields(logrus.Fields{ - "temp_file": tempFilename, - "total_bytes": totalBytes, - }).Info("File uploaded successfully") - - uploadSizeBytes.Observe(float64(totalBytes)) - return nil -} - -// Scan the uploaded file with ClamAV (Optional) -func scanFileWithClamAV(filePath string) error { - log.WithField("file", filePath).Info("Scanning file with ClamAV") - - scanResultChan, err := clamClient.ScanFile(filePath) - if err != nil { - log.WithError(err).Error("Failed to initiate ClamAV scan") - return fmt.Errorf("failed to initiate ClamAV scan: %w", err) - } - - // Receive scan result - scanResult := <-scanResultChan - if scanResult == nil { - log.Error("Failed to receive scan result from ClamAV") - return fmt.Errorf("failed to receive scan result from ClamAV") - } - - // Handle scan result - switch scanResult.Status { - case clamd.RES_OK: - log.WithField("file", filePath).Info("ClamAV scan passed") - return nil - case clamd.RES_FOUND: - log.WithFields(logrus.Fields{ - "file": filePath, - "description": scanResult.Description, - }).Warn("ClamAV detected a virus") - return fmt.Errorf("virus detected: %s", scanResult.Description) - default: - log.WithFields(logrus.Fields{ - "file": filePath, - "status": scanResult.Status, - "description": scanResult.Description, - }).Warn("ClamAV scan returned unexpected status") - return fmt.Errorf("ClamAV scan returned unexpected status: %s", 
scanResult.Description) - } -} - -// initClamAV initializes the ClamAV client and logs the status -func initClamAV(socket string) (*clamd.Clamd, error) { - if socket == "" { - log.Error("ClamAV socket path is not configured.") - return nil, fmt.Errorf("ClamAV socket path is not configured") - } - - clamClient := clamd.NewClamd("unix:" + socket) - err := clamClient.Ping() - if err != nil { - log.Errorf("Failed to connect to ClamAV at %s: %v", socket, err) - return nil, fmt.Errorf("failed to connect to ClamAV: %w", err) - } - - log.Info("Connected to ClamAV successfully.") - return clamClient, nil -} - -// Handle resumable downloads -func handleResumableDownload(absFilename string, w http.ResponseWriter, r *http.Request, fileSize int64) { - rangeHeader := r.Header.Get("Range") - if rangeHeader == "" { - // If no Range header, serve the full file - startTime := time.Now() - http.ServeFile(w, r, absFilename) - downloadDuration.Observe(time.Since(startTime).Seconds()) - downloadSizeBytes.Observe(float64(fileSize)) + w.WriteHeader(http.StatusOK) downloadsTotal.Inc() return } - // Parse Range header - ranges := strings.Split(strings.TrimPrefix(rangeHeader, "bytes="), "-") - if len(ranges) != 2 { - http.Error(w, "Invalid Range", http.StatusRequestedRangeNotSatisfiable) - downloadErrorsTotal.Inc() - return - } - - start, err := strconv.ParseInt(ranges[0], 10, 64) - if err != nil { - http.Error(w, "Invalid Range", http.StatusRequestedRangeNotSatisfiable) - downloadErrorsTotal.Inc() - return - } - - // Calculate end byte - end := fileSize - 1 - if ranges[1] != "" { - end, err = strconv.ParseInt(ranges[1], 10, 64) - if err != nil || end >= fileSize { - http.Error(w, "Invalid Range", http.StatusRequestedRangeNotSatisfiable) - downloadErrorsTotal.Inc() - return - } - } - - // Set response headers for partial content - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fileSize)) - w.Header().Set("Content-Length", strconv.FormatInt(end-start+1, 10)) - 
w.Header().Set("Accept-Ranges", "bytes") - w.WriteHeader(http.StatusPartialContent) - - // Serve the requested byte range + // For GET requests, serve the file file, err := os.Open(absFilename) if err != nil { - http.Error(w, "Internal Server Error", http.StatusInternalServerError) + http.Error(w, fmt.Sprintf("Error opening file: %v", err), http.StatusInternalServerError) downloadErrorsTotal.Inc() return } defer file.Close() - // Seek to the start byte - _, err = file.Seek(start, 0) + // Use a pooled buffer for copying + bufPtr := bufferPool.Get().(*[]byte) + defer bufferPool.Put(bufPtr) + buf := *bufPtr + + n, err := io.CopyBuffer(w, file, buf) if err != nil { - http.Error(w, "Internal Server Error", http.StatusInternalServerError) + log.Errorf("Error during download of %s: %v", absFilename, err) downloadErrorsTotal.Inc() return } - // Create a buffer and copy the specified range to the response writer - buffer := make([]byte, 32*1024) // 32KB buffer - remaining := end - start + 1 - startTime := time.Now() - for remaining > 0 { - if int64(len(buffer)) > remaining { - buffer = buffer[:remaining] - } - n, err := file.Read(buffer) - if n > 0 { - if _, writeErr := w.Write(buffer[:n]); writeErr != nil { - log.WithError(writeErr).Error("Failed to write to response") - downloadErrorsTotal.Inc() - return - } - remaining -= int64(n) - } - if err != nil { - if err != io.EOF { - log.WithError(err).Error("Error reading file during resumable download") - http.Error(w, "Internal Server Error", http.StatusInternalServerError) - downloadErrorsTotal.Inc() - } - break - } - } - downloadDuration.Observe(time.Since(startTime).Seconds()) - downloadSizeBytes.Observe(float64(end - start + 1)) + duration := time.Since(startTime) + downloadDuration.Observe(duration.Seconds()) downloadsTotal.Inc() + downloadSizeBytes.Observe(float64(n)) + log.Infof("Successfully downloaded %s (%s) in %s", absFilename, formatBytes(n), duration) } -// Handle chunked uploads with bufio.Writer -func 
handleChunkedUpload(tempFilename string, r *http.Request) error { - log.WithField("file", tempFilename).Info("Handling chunked upload to temporary file") +// printValidationChecks prints all available validation checks +func printValidationChecks() { + fmt.Println("HMAC File Server Configuration Validation Checks") + fmt.Println("=================================================") + fmt.Println() - // Ensure the directory exists - absDirectory := filepath.Dir(tempFilename) - err := os.MkdirAll(absDirectory, os.ModePerm) - if err != nil { - log.WithError(err).Errorf("Failed to create directory %s for chunked upload", absDirectory) - return fmt.Errorf("failed to create directory %s: %w", absDirectory, err) - } + fmt.Println("๐Ÿ” CORE VALIDATION CHECKS:") + fmt.Println(" โœ“ server.* - Server configuration (ports, paths, protocols)") + fmt.Println(" โœ“ security.* - Security settings (secrets, JWT, authentication)") + fmt.Println(" โœ“ logging.* - Logging configuration (levels, files, rotation)") + fmt.Println(" โœ“ timeouts.* - Timeout settings (read, write, idle)") + fmt.Println(" โœ“ uploads.* - Upload configuration (extensions, chunk size)") + fmt.Println(" โœ“ downloads.* - Download configuration (extensions, chunk size)") + fmt.Println(" โœ“ workers.* - Worker pool configuration (count, queue size)") + fmt.Println(" โœ“ redis.* - Redis configuration (address, credentials)") + fmt.Println(" โœ“ clamav.* - ClamAV antivirus configuration") + fmt.Println(" โœ“ versioning.* - File versioning configuration") + fmt.Println(" โœ“ deduplication.* - File deduplication configuration") + fmt.Println(" โœ“ iso.* - ISO filesystem configuration") + fmt.Println() - targetFile, err := os.OpenFile(tempFilename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - log.WithError(err).Error("Failed to open temporary file for chunked upload") - return err - } - defer targetFile.Close() + fmt.Println("๐Ÿ” SECURITY CHECKS:") + fmt.Println(" โœ“ Secret strength analysis 
(length, entropy, patterns)") + fmt.Println(" โœ“ Default/example value detection") + fmt.Println(" โœ“ JWT algorithm security recommendations") + fmt.Println(" โœ“ Network binding security (0.0.0.0 warnings)") + fmt.Println(" โœ“ File permission analysis") + fmt.Println(" โœ“ Debug logging security implications") + fmt.Println() - writer := bufio.NewWriterSize(targetFile, int(conf.Uploads.ChunkSize)) - buffer := make([]byte, conf.Uploads.ChunkSize) + fmt.Println("โšก PERFORMANCE CHECKS:") + fmt.Println(" โœ“ Worker count vs CPU cores optimization") + fmt.Println(" โœ“ Queue size vs memory usage analysis") + fmt.Println(" โœ“ Timeout configuration balance") + fmt.Println(" โœ“ Large file handling preparation") + fmt.Println(" โœ“ Memory-intensive configuration detection") + fmt.Println() - totalBytes := int64(0) - for { - n, err := r.Body.Read(buffer) - if n > 0 { - totalBytes += int64(n) - _, writeErr := writer.Write(buffer[:n]) - if writeErr != nil { - log.WithError(writeErr).Error("Failed to write chunk to temporary file") - return writeErr - } - } - if err != nil { - if err == io.EOF { - break // Finished reading the body - } - log.WithError(err).Error("Error reading from request body") - return err - } - } + fmt.Println("๐ŸŒ CONNECTIVITY CHECKS:") + fmt.Println(" โœ“ Redis server connectivity testing") + fmt.Println(" โœ“ ClamAV socket accessibility") + fmt.Println(" โœ“ Network address format validation") + fmt.Println(" โœ“ DNS resolution testing") + fmt.Println() - err = writer.Flush() - if err != nil { - log.WithError(err).Error("Failed to flush buffer to temporary file") - return err - } + fmt.Println("๐Ÿ’พ SYSTEM RESOURCE CHECKS:") + fmt.Println(" โœ“ CPU core availability analysis") + fmt.Println(" โœ“ Memory usage monitoring") + fmt.Println(" โœ“ Disk space validation") + fmt.Println(" โœ“ Directory write permissions") + fmt.Println(" โœ“ Goroutine count analysis") + fmt.Println() - log.WithFields(logrus.Fields{ - "temp_file": tempFilename, - 
"total_bytes": totalBytes, - }).Info("Chunked upload completed successfully") + fmt.Println("๐Ÿ”„ CROSS-SECTION VALIDATION:") + fmt.Println(" โœ“ Path conflict detection") + fmt.Println(" โœ“ Extension compatibility checks") + fmt.Println(" โœ“ Configuration consistency validation") + fmt.Println() - uploadSizeBytes.Observe(float64(totalBytes)) - return nil -} - -// Get file information with caching -func getFileInfo(absFilename string) (os.FileInfo, error) { - if cachedInfo, found := fileInfoCache.Get(absFilename); found { - if info, ok := cachedInfo.(os.FileInfo); ok { - return info, nil - } - } - - fileInfo, err := os.Stat(absFilename) - if err != nil { - return nil, err - } - - fileInfoCache.Set(absFilename, fileInfo, cache.DefaultExpiration) - return fileInfo, nil -} - -// Monitor network changes -func monitorNetwork(ctx context.Context) { - currentIP := getCurrentIPAddress() // Placeholder for the current IP address - - for { - select { - case <-ctx.Done(): - log.Info("Stopping network monitor.") - return - case <-time.After(10 * time.Second): - newIP := getCurrentIPAddress() - if newIP != currentIP && newIP != "" { - currentIP = newIP - select { - case networkEvents <- NetworkEvent{Type: "IP_CHANGE", Details: currentIP}: - log.WithField("new_ip", currentIP).Info("Queued IP_CHANGE event") - default: - log.Warn("Network event channel is full. 
Dropping IP_CHANGE event.") - } - } - } - } -} - -// Handle network events -func handleNetworkEvents(ctx context.Context) { - for { - select { - case <-ctx.Done(): - log.Info("Stopping network event handler.") - return - case event, ok := <-networkEvents: - if !ok { - log.Info("Network events channel closed.") - return - } - switch event.Type { - case "IP_CHANGE": - log.WithField("new_ip", event.Details).Info("Network change detected") - // Example: Update Prometheus gauge or trigger alerts - // activeConnections.Set(float64(getActiveConnections())) - } - // Additional event types can be handled here - } - } -} - -// Get current IP address (example) -func getCurrentIPAddress() string { - interfaces, err := net.Interfaces() - if err != nil { - log.WithError(err).Error("Failed to get network interfaces") - return "" - } - - for _, iface := range interfaces { - if iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 { - continue // Skip interfaces that are down or loopback - } - addrs, err := iface.Addrs() - if err != nil { - log.WithError(err).Errorf("Failed to get addresses for interface %s", iface.Name) - continue - } - for _, addr := range addrs { - if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() && ipnet.IP.To4() != nil { - return ipnet.IP.String() - } - } - } - return "" -} - -// setupGracefulShutdown sets up handling for graceful server shutdown -func setupGracefulShutdown(server *http.Server, cancel context.CancelFunc) { - quit := make(chan os.Signal, 1) - signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) - go func() { - sig := <-quit - log.Infof("Received signal %s. Initiating shutdown...", sig) - - // Create a deadline to wait for. 
- ctxShutdown, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer shutdownCancel() - - // Attempt graceful shutdown - if err := server.Shutdown(ctxShutdown); err != nil { - log.Errorf("Server shutdown failed: %v", err) - } else { - log.Info("Server shutdown gracefully.") - } - - // Signal other goroutines to stop - cancel() - - // Close the upload, scan, and network event channels - close(uploadQueue) - log.Info("Upload queue closed.") - close(scanQueue) - log.Info("Scan queue closed.") - close(networkEvents) - log.Info("Network events channel closed.") - - log.Info("Shutdown process completed. Exiting application.") - os.Exit(0) - }() -} - -// Initialize Redis client -func initRedis() { - if !conf.Redis.RedisEnabled { - log.Info("Redis is disabled in configuration.") - return - } - - redisClient = redis.NewClient(&redis.Options{ - Addr: conf.Redis.RedisAddr, - Password: conf.Redis.RedisPassword, - DB: conf.Redis.RedisDBIndex, - }) - - // Test the Redis connection - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - _, err := redisClient.Ping(ctx).Result() - if err != nil { - log.Fatalf("Failed to connect to Redis: %v", err) - } - log.Info("Connected to Redis successfully") - - // Set initial connection status - mu.Lock() - redisConnected = true - mu.Unlock() - - // Start monitoring Redis health - go MonitorRedisHealth(context.Background(), redisClient, parseDuration(conf.Redis.RedisHealthCheckInterval)) -} - -// MonitorRedisHealth periodically checks Redis connectivity and updates redisConnected status. 
-func MonitorRedisHealth(ctx context.Context, client *redis.Client, checkInterval time.Duration) { - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - log.Info("Stopping Redis health monitor.") - return - case <-ticker.C: - err := client.Ping(ctx).Err() - mu.Lock() - if err != nil { - if redisConnected { - log.Errorf("Redis health check failed: %v", err) - } - redisConnected = false - } else { - if !redisConnected { - log.Info("Redis reconnected successfully") - } - redisConnected = true - log.Debug("Redis health check succeeded.") - } - mu.Unlock() - } - } -} - -// Helper function to parse duration strings -func parseDuration(durationStr string) time.Duration { - duration, err := time.ParseDuration(durationStr) - if err != nil { - log.WithError(err).Warn("Invalid duration format, using default 30s") - return 30 * time.Second - } - return duration -} - -// RunFileCleaner periodically deletes files that exceed the FileTTL duration. -func runFileCleaner(ctx context.Context, storeDir string, ttl time.Duration) { - ticker := time.NewTicker(1 * time.Hour) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - log.Info("Stopping file cleaner.") - return - case <-ticker.C: - now := time.Now() - err := filepath.Walk(storeDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } - if now.Sub(info.ModTime()) > ttl { - err := os.Remove(path) - if err != nil { - log.WithError(err).Errorf("Failed to remove expired file: %s", path) - } else { - log.Infof("Removed expired file: %s", path) - } - } - return nil - }) - if err != nil { - log.WithError(err).Error("Error walking store directory for file cleaning") - } - } - } -} - -// DeduplicateFiles scans the store directory and removes duplicate files based on SHA256 hash. -// It retains one copy of each unique file and replaces duplicates with hard links. 
-func DeduplicateFiles(storeDir string) error { - hashMap := make(map[string]string) // map[hash]filepath - var mu sync.Mutex - var wg sync.WaitGroup - fileChan := make(chan string, 100) - - // Worker to process files - numWorkers := 10 - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for filePath := range fileChan { - hash, err := computeFileHash(filePath) - if err != nil { - logrus.WithError(err).Errorf("Failed to compute hash for %s", filePath) - continue - } - - mu.Lock() - original, exists := hashMap[hash] - if !exists { - hashMap[hash] = filePath - mu.Unlock() - continue - } - mu.Unlock() - - // Duplicate found - err = os.Remove(filePath) - if err != nil { - logrus.WithError(err).Errorf("Failed to remove duplicate file %s", filePath) - continue - } - - // Create hard link to the original file - err = os.Link(original, filePath) - if err != nil { - logrus.WithError(err).Errorf("Failed to create hard link from %s to %s", original, filePath) - continue - } - - logrus.Infof("Removed duplicate %s and linked to %s", filePath, original) - } - }() - } - - // Walk through the store directory - err := filepath.Walk(storeDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - logrus.WithError(err).Errorf("Error accessing path %s", path) - return nil - } - if !info.Mode().IsRegular() { - return nil - } - fileChan <- path - return nil - }) - if err != nil { - return fmt.Errorf("error walking the path %s: %w", storeDir, err) - } - - close(fileChan) - wg.Wait() - return nil -} - -// computeFileHash computes the SHA256 hash of the given file. 
-func computeFileHash(filePath string) (string, error) { - file, err := os.Open(filePath) - if err != nil { - return "", fmt.Errorf("unable to open file %s: %w", filePath, err) - } - defer file.Close() - - hasher := sha256.New() - if _, err := io.Copy(hasher, file); err != nil { - return "", fmt.Errorf("error hashing file %s: %w", filePath, err) - } - - return hex.EncodeToString(hasher.Sum(nil)), nil -} - -// Handle multipart uploads -func handleMultipartUpload(w http.ResponseWriter, r *http.Request, absFilename string) error { - err := r.ParseMultipartForm(32 << 20) // 32MB is the default used by FormFile - if err != nil { - log.WithError(err).Error("Failed to parse multipart form") - http.Error(w, "Failed to parse multipart form", http.StatusBadRequest) - return err - } - - file, handler, err := r.FormFile("file") - if err != nil { - log.WithError(err).Error("Failed to retrieve file from form data") - http.Error(w, "Failed to retrieve file from form data", http.StatusBadRequest) - return err - } - defer file.Close() - - // Validate file extension - if !isExtensionAllowed(handler.Filename) { - log.WithFields(logrus.Fields{ - "filename": handler.Filename, - "extension": filepath.Ext(handler.Filename), - }).Warn("Attempted upload with disallowed file extension") - http.Error(w, "Disallowed file extension. 
Allowed extensions are: "+strings.Join(conf.Uploads.AllowedExtensions, ", "), http.StatusForbidden) - uploadErrorsTotal.Inc() - return fmt.Errorf("disallowed file extension") - } - - // Create a temporary file - tempFilename := absFilename + ".tmp" - tempFile, err := os.OpenFile(tempFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - log.WithError(err).Error("Failed to create temporary file") - http.Error(w, "Failed to create temporary file", http.StatusInternalServerError) - return err - } - defer tempFile.Close() - - // Copy the uploaded file to the temporary file - _, err = io.Copy(tempFile, file) - if err != nil { - log.WithError(err).Error("Failed to copy uploaded file to temporary file") - http.Error(w, "Failed to copy uploaded file", http.StatusInternalServerError) - return err - } - - // Perform ClamAV scan on the temporary file - if clamClient != nil { - err := scanFileWithClamAV(tempFilename) - if err != nil { - log.WithFields(logrus.Fields{ - "file": tempFilename, - "error": err, - }).Warn("ClamAV detected a virus or scan failed") - os.Remove(tempFilename) - uploadErrorsTotal.Inc() - return err - } - } - - // Handle file versioning if enabled - if conf.Versioning.EnableVersioning { - existing, _ := fileExists(absFilename) - if existing { - err := versionFile(absFilename) - if err != nil { - log.WithFields(logrus.Fields{ - "file": absFilename, - "error": err, - }).Error("Error versioning file") - os.Remove(tempFilename) - return err - } - } - } - - // Move the temporary file to the final destination - err = os.Rename(tempFilename, absFilename) - if err != nil { - log.WithFields(logrus.Fields{ - "temp_file": tempFilename, - "final_file": absFilename, - "error": err, - }).Error("Failed to move file to final destination") - os.Remove(tempFilename) - return err - } - - log.WithFields(logrus.Fields{ - "file": absFilename, - }).Info("File uploaded and scanned successfully") - - uploadsTotal.Inc() - return nil -} - -// sanitizeFilePath 
ensures that the file path is within the designated storage directory -func sanitizeFilePath(baseDir, filePath string) (string, error) { - // Resolve the absolute path - absBaseDir, err := filepath.Abs(baseDir) - if err != nil { - return "", fmt.Errorf("failed to resolve base directory: %w", err) - } - - absFilePath, err := filepath.Abs(filepath.Join(absBaseDir, filePath)) - if err != nil { - return "", fmt.Errorf("failed to resolve file path: %w", err) - } - - // Check if the resolved file path is within the base directory - if !strings.HasPrefix(absFilePath, absBaseDir) { - return "", fmt.Errorf("invalid file path: %s", filePath) - } - - return absFilePath, nil -} - -// checkStorageSpace ensures that there is enough free space in the storage path -func checkStorageSpace(storagePath string, minFreeBytes int64) error { - var stat syscall.Statfs_t - err := syscall.Statfs(storagePath, &stat) - if err != nil { - return fmt.Errorf("failed to get filesystem stats: %w", err) - } - - // Calculate available bytes - availableBytes := stat.Bavail * uint64(stat.Bsize) - if int64(availableBytes) < minFreeBytes { - return fmt.Errorf("not enough free space: %d bytes available, %d bytes required", availableBytes, minFreeBytes) - } - - return nil -} - -// Function to compute SHA256 checksum of a file -func computeSHA256(filePath string) (string, error) { - file, err := os.Open(filePath) - if err != nil { - return "", fmt.Errorf("failed to open file for checksum: %w", err) - } - defer file.Close() - - hasher := sha256.New() - if _, err := io.Copy(hasher, file); err != nil { - return "", fmt.Errorf("failed to compute checksum: %w", err) - } - - return hex.EncodeToString(hasher.Sum(nil)), nil -} - -// handleDeduplication handles file deduplication using SHA256 checksum and hard links -func handleDeduplication(ctx context.Context, absFilename string) error { - // Compute checksum of the uploaded file - checksum, err := computeSHA256(absFilename) - if err != nil { - log.Errorf("Failed 
to compute SHA256 for %s: %v", absFilename, err) - return fmt.Errorf("checksum computation failed: %w", err) - } - log.Debugf("Computed checksum for %s: %s", absFilename, checksum) - - // Check Redis for existing checksum - existingPath, err := redisClient.Get(ctx, checksum).Result() - if err != nil && err != redis.Nil { - log.Errorf("Redis error while fetching checksum %s: %v", checksum, err) - return fmt.Errorf("redis error: %w", err) - } - - if err != redis.Nil { - // Duplicate found, create hard link - log.Infof("Duplicate detected: %s already exists at %s", absFilename, existingPath) - err = os.Link(existingPath, absFilename) - if err != nil { - log.Errorf("Failed to create hard link from %s to %s: %v", existingPath, absFilename, err) - return fmt.Errorf("failed to create hard link: %w", err) - } - log.Infof("Created hard link from %s to %s", existingPath, absFilename) - return nil - } - - // No duplicate found, store checksum in Redis - err = redisClient.Set(ctx, checksum, absFilename, 0).Err() - if err != nil { - log.Errorf("Failed to store checksum %s in Redis: %v", checksum, err) - return fmt.Errorf("failed to store checksum in Redis: %w", err) - } - - log.Infof("Stored new file checksum in Redis: %s -> %s", checksum, absFilename) - return nil + fmt.Println("๐Ÿ“‹ USAGE EXAMPLES:") + fmt.Println(" hmac-file-server --validate-config # Full validation") + fmt.Println(" hmac-file-server --check-security # Security checks only") + fmt.Println(" hmac-file-server --check-performance # Performance checks only") + fmt.Println(" hmac-file-server --check-connectivity # Network checks only") + fmt.Println(" hmac-file-server --validate-quiet # Errors only") + fmt.Println(" hmac-file-server --validate-verbose # Detailed output") + fmt.Println(" hmac-file-server --check-fixable # Auto-fixable issues") + fmt.Println() } diff --git a/config-example-xmpp.toml b/config-example-xmpp.toml new file mode 100644 index 0000000..e69de29 diff --git a/dashboard/dashboard.json 
b/dashboard/dashboard.json index b17d0fc..43695e8 100644 --- a/dashboard/dashboard.json +++ b/dashboard/dashboard.json @@ -27,8 +27,8 @@ "overrides": [] }, "gridPos": { - "h": 6, - "w": 24, + "h": 7, + "w": 3, "x": 0, "y": 0 }, @@ -39,12 +39,11 @@ "showLineNumbers": false, "showMiniMap": false }, - "content": "
\n

HMAC Dashboard

\n \"HMAC\n

\n This dashboard monitors key metrics for the \n HMAC File Server.\n

\n
\n", + "content": "
\n

HMAC Dashboard

\n \"HMAC\n

\n This dashboard monitors key metrics for the \n HMAC File Server.\n

\n
\n", "mode": "html" }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "title": "HMAC Dashboard", - "transparent": true, "type": "text" }, { @@ -76,10 +75,10 @@ "overrides": [] }, "gridPos": { - "h": 5, + "h": 7, "w": 6, - "x": 0, - "y": 6 + "x": 3, + "y": 0 }, "id": 14, "options": { @@ -106,7 +105,7 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", @@ -142,10 +141,10 @@ "overrides": [] }, "gridPos": { - "h": 5, + "h": 7, "w": 6, - "x": 6, - "y": 6 + "x": 9, + "y": 0 }, "id": 18, "options": { @@ -165,7 +164,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", @@ -192,6 +191,10 @@ { "color": "green", "value": null + }, + { + "color": "red", + "value": 80 } ] } @@ -199,10 +202,68 @@ "overrides": [] }, "gridPos": { - "h": 5, - "w": 6, - "x": 12, - "y": 6 + "h": 7, + "w": 5, + "x": 15, + "y": 0 + }, + "id": 10, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "value", + "wideLayout": true + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "go_threads", + "format": "table", + "legendFormat": "{{hmac-file-server}}", + "range": true, + "refId": "A" + } + ], + "title": "HMAC GO Threads", + "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 20, + "y": 0 }, "id": 17, "options": { @@ -222,7 +283,7 @@ "textMode": "auto", "wideLayout": true }, - 
"pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", @@ -262,10 +323,10 @@ "overrides": [] }, "gridPos": { - "h": 5, - "w": 3, - "x": 18, - "y": 6 + "h": 7, + "w": 5, + "x": 0, + "y": 7 }, "id": 11, "options": { @@ -285,11 +346,11 @@ "textMode": "value", "wideLayout": true }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", - "expr": "hmac_file_server_uploads_total", + "expr": "increase(hmac_file_server_uploads_total[1h])", "format": "table", "legendFormat": "Uploads", "range": true, @@ -325,10 +386,10 @@ "overrides": [] }, "gridPos": { - "h": 5, - "w": 3, - "x": 21, - "y": 6 + "h": 7, + "w": 5, + "x": 5, + "y": 7 }, "id": 12, "options": { @@ -348,7 +409,7 @@ "textMode": "value", "wideLayout": true }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", @@ -390,10 +451,10 @@ "overrides": [] }, "gridPos": { - "h": 5, + "h": 7, "w": 3, - "x": 0, - "y": 11 + "x": 10, + "y": 7 }, "id": 15, "options": { @@ -413,7 +474,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -457,201 +518,12 @@ "overrides": [] }, "gridPos": { - "h": 5, + "h": 7, "w": 3, - "x": 3, - "y": 11 + "x": 13, + "y": 7 }, - "id": 10, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "value", - "wideLayout": true - }, - "pluginVersion": "11.3.1", - "targets": [ - { - "editorMode": "code", - "expr": "go_threads", - "format": "table", - "legendFormat": "{{hmac-file-server}}", - "range": true, - "refId": "A" - } - ], - "title": "HMAC GO Threads", - "type": "stat" - }, - { - "datasource": { - "default": true, - "type": "prometheus" - }, - "fieldConfig": { - 
"defaults": { - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 6, - "y": 11 - }, - "id": 21, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "value", - "wideLayout": true - }, - "pluginVersion": "11.3.1", - "targets": [ - { - "editorMode": "code", - "expr": "hmac_file_deletions_total", - "format": "table", - "legendFormat": "{{hmac-file-server}}", - "range": true, - "refId": "A" - } - ], - "title": "HMAC FileTTL Deletion(s)", - "type": "stat" - }, - { - "datasource": { - "default": true, - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 9, - "y": 11 - }, - "id": 20, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "value", - "wideLayout": true - }, - "pluginVersion": "11.3.1", - "targets": [ - { - "editorMode": "code", - "expr": "hmac_cache_misses_total", - "format": "table", - "legendFormat": "{{hmac-file-server}}", - "range": true, - "refId": "A" - } - ], - "title": "HMAC Cache Misses", - "type": "stat" - }, - { - "datasource": { - "default": true, - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": 
[], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 12, - "y": 11 - }, - "id": 16, + "id": 13, "options": { "colorMode": "value", "graphMode": "area", @@ -669,81 +541,17 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", - "exemplar": false, - "expr": "hmac_active_connections_total", - "format": "table", - "instant": false, - "legendFormat": "__auto", + "expr": "hmac_file_server_download_errors_total", + "legendFormat": "Download Errors", "range": true, "refId": "A" } ], - "title": "HMAC Active Connections", - "type": "stat" - }, - { - "datasource": { - "default": true, - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 15, - "y": 11 - }, - "id": 19, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.3.1", - "targets": [ - { - "editorMode": "code", - "expr": "hmac_infected_files_total", - "format": "table", - "legendFormat": "__auto", - "range": true, - "refId": "A" - } - ], - "title": "HMAC infected file(s)", + "title": "HMAC Download Errors", "type": "stat" }, { @@ -771,10 +579,10 @@ "overrides": [] }, "gridPos": { - "h": 5, + "h": 7, "w": 3, - "x": 18, - "y": 11 + "x": 16, + "y": 7 }, "id": 2, "options": { @@ -794,7 +602,7 @@ "textMode": "auto", "wideLayout": true }, - 
"pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", @@ -832,12 +640,73 @@ "overrides": [] }, "gridPos": { - "h": 5, - "w": 3, - "x": 21, - "y": 11 + "h": 7, + "w": 5, + "x": 19, + "y": 7 }, - "id": 13, + "id": 21, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "value", + "wideLayout": true + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "hm", + "format": "table", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "HMAC FileTTL Deletion(s)", + "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 0, + "y": 14 + }, + "id": 19, "options": { "colorMode": "value", "graphMode": "area", @@ -855,18 +724,254 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "11.3.1", + "pluginVersion": "11.4.0", "targets": [ { "editorMode": "code", - "expr": "hmac_file_server_download_errors_total", - "legendFormat": "Download Errors", + "expr": "hmac_infected_files_total", + "format": "table", + "legendFormat": "__auto", "range": true, "refId": "A" } ], - "title": "HMAC Download Errors", + "title": "HMAC infected file(s)", "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"lineWidth": 1, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "files" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 3, + "y": 14 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "editorMode": "code", + "exemplar": false, + "expr": "increase(hmac_file_server_clamav_scans_total[24h])", + "format": "time_series", + "instant": true, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "HMAC ClamAV San (24h)", + "type": "histogram" + }, + { + "datasource": { + "default": true, + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "files" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 10, + "y": 14 + }, + "id": 23, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "editorMode": "code", + "exemplar": false, + "expr": "increase(hmac_file_server_clamav_errors_total[24h])", + "format": "time_series", + "instant": true, + "interval": "", + 
"legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "HMAC ClamAV SanError(s) (24h)", + "type": "histogram" + }, + { + "datasource": { + "default": true, + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 17, + "y": 14 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.95, sum(rate(hmac_file_server_request_duration_seconds_bucket[5m])) by (le))", + "format": "time_series", + "instant": true, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "HMAC Request Duration", + "type": "histogram" } ], "preload": false, @@ -876,13 +981,13 @@ "list": [] }, "time": { - "from": "now-5m", + "from": "now-24h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "HMAC File Server Metrics", "uid": "de0ye5t0hzq4ge", - "version": 129, + "version": 153, "weekStart": "" } diff --git a/dashboard/hmac_icon.png b/dashboard/hmac_icon.png new file mode 100644 index 0000000000000000000000000000000000000000..9d5843c22d8eb0acc89df172d9895a865fbb5006 GIT binary patch literal 61498 zcmXt9bx>5_+ux%?|90LRbVJj)hL4g1F|Gm*rfWNUv2I3$PIMPm5R#QnhK3g=Zh7L#4{`6wZjP6rb>I z&A|1{vie|cqpUQdlol;GR7g~arA0CBMFoT1LQLpnpfw=5)hD!5-@1fM6;uKx%hrYr 
zZtF%(u-qTfgX4iWkv7aw!|b?`nVNr2c3-ntt3)KxkcJQn!---jqrs|O$wh9 zD(LG&V#ni{uv%!(U(Z@+t_o#R!Y@aDrH+meD#S%WNQR9^{zNN_sZqYyPP}*$&U`gy zm^W>oUw=AeO3Gr(I&Ab)IG%{O8Yxxela>PV2ff&-l2i+N*#{-um)K2`p*Lxz2=9`R z|9zEle_RVWYc;GD;@@YK|0Fe@;1u!^Txk4ty6X7Ua?v>-6lSxge0b}iW(H5YdZ&;; z1x8%#>hn@ay9phpGP_mE#2QLP;Ct$$KMQ`hnvNRn9Ja#W*a(+UrwL{A%IE;&;DntM zSXTzLi7qWKcG@gFJ%O~&-Lq-GR-^!r1;s)|K@P+TVh6F0XT>`LchFrG^*ul!j@SRa z!M-p_Z{Q}Xr;?gH>K-OO8a>zt?^OwKi_BAA&r{aL$;sN;6C~?yZSHApMeAedX-li1 zq^7AGhC>1Z(SnrZq_uq)j@tcFC})EhZ=Y6%%UWCSPx^zdq9_t6Xwe85qz#I!hCU^q z`7=WMlZ;~oEso-{u}=+ZbQh}Ghijlm^C?H?)fcvG_BL;ZFX;UGiCO+`Acy~!4;NAd zA^jD)mpB|7xe0zypw7ttu#7$V`Hg;=SvAIo>5S}*yY|zEW|1kws`>aw;mA z0!|B&PKyoZhYjU=?e?z>3=GV&pswrFWKoD;r}Y=x9Y(wRnu8v2mf8bI`Q~AR9s}mK&B!MHEW0)d3Oj`tdTd0Z+(_yxvFN0MVLFxh1^vp17UZP<fCCBV#sy3zE4v`tT3>P^^d$^g1fMQ<)WX#kPt@n=_l%Qab&1QXnt8U7 z$bB9wRla^$F{b~`asyldjV?vWCPk5fi<=uf;3oFtO$u6A>-x{#UyL@<=A?>@@43`? zZ;}5;*xsuE=nFz-}ycUEPqpdFHdg-(Ax}* zA3uK>@#7;lKRjviuZL8+Q))Z#kBp6N^e0mIUQg?~t@j|SYN&O)<@tm?N4Tpn5Rg(+ z_YV%DHJr{ReJItT%N?Rm=AvNFI#3UDp26Jqklw3*4=V_xRel!5B`BybXR#2 zlsSVTgSK_)NBWNHpW6gKqVp2&TE~yV^q#9aRzu_Yazyme6}NvR`S>Gc5dEZ*D--_}8Kk2PlZmlwZLaD2-r^{Ct%uJ&rz6mM%`&SEi zGG+RW#5(NIO8S1^6(6CbaBD+6xxJGUX<+x8ld32?ua4#AN-Cf^zK3-)uIW8JJqX0aB&l4ZX6p&L zjBcy5#&T~Vy6fS3i_o}S}}6XL5q(O z(%euIZn+3-n1u_!x`rKNvjpbQ$75y~m#}c0{WP+ht}Y386I~mzG=_YcA4b$HGm**b zotDir6V52mksKvNu#oest5Kq?mH|%v+Yn98YgyV9{t5V&{cfe|X*ZYG~X{uOh9Ekmt`%vwFY@e=Tjq6K~ z#D~EFff-fSeIxR3X-!pz(PGa>l7$(R$M5fxv2k$Fe{p#O^$jJK7>q?|Yf1iSX1NtL zGKeVDTu*S`{!jJc553pg|Z6rky>$Y4eH~wv&oj4+z@xiz6|V7{p98qf8v2vY2lq<1ri?%8%vn(tw zE`A!a%1y7ySFFR=5)JjH=Q&($Y~`K#8_?2i0%Jg(U^I9vBYq)mCc6`_Esl3s37w!arn#sL7%;j`Q)^*z@iAm5aoALbKz0urL zgr|Vx2D>T7j~_pp?T+Q|ot`QHuPz*HHuWxwO4GiDdWVVC)#U8#>>q9ip<0fZGRmHH zK1c+gesS!w5BF?r(3jM|jp}vSn)1&7fD8hMgKEv9wg1pTiH(hoOR;;G*dq6sih}9M zX^VoP&R#H)G#hSAE<(hf04H?^6SEzH(4v`=U;LS?{)goe+wOjeGYc~Y!XvJA5FFmc zoO%M8EE%--DO^e^L^7lh#RHnXmr^-;J2Yv1`qbA&l=#+Bc*Y=t6^pN6+ta_94>{Q` z;HLf&-TIl?42U2`-$w4XdocqHOczNX4vmI~u7ZM(!C&g5X*W~_5_HD5;s=&hU<5lP 
z#Af6uAGwAf;zQ9496Y^zHvjyGG4lI&~9zHAo+)q-O%uI@q7AW zB%Ubvh`uPXKn#@^v+Jo%MZjL0Wp`(9Z?Av9ct5aOk1{tqzd2z0!rIc`{y?juw$Y)t zHl)4wFzZq_a-H(pwL=2g>^g*xxI=dA30oh{+-bG?vQnqgWI1ls6!2wS}jJIxWjj~AlEU%v(b(z_0J8g#EHDyOuxG*?PgR2=HA zt>51rJYr<5!lo1*s-Sgs4M>v{YV2N|6_Db{s6OP27lH2sL7~l#$KYwLB+aDP{s9ls z1aZIVShlt3MMg%HDi&9wZ+r*b@=7x~O+^GG1w%Y6+&4x^3Id+BU>|w{zGOg-7WU%n zC`6f7SG-Ph>L1#-qGo@c1pU;T;HuIv3OX$zfGG(diWN&>5xu zU0u%{5D_Dc^YincdJUt;w`-a?Sga-Stz!q>x-}MRUpw<$)+RxiNOpELUMa&Ydidr| z87$%pr8gS4-1CO45QuNmJkmG6YIA(kDV?^wGUsf+nVX%Bu_^FFX*R+EK9+h8-#w%0 zrM8UxWT`m zY-^)}tG`ht&cLc4HA0c(d3(h`tmRU)TkgW7KfqJ2mrKTQZ-B-C%{>-L8p(w3;@F`( z6yz{8QUld5s)xOqot*Ul$Mw;YVULuwow^FUyne!9Rr^nK@OqB}Diae^rAdpY+eSHs zU~pvi8;2QX;jJbTPL^${fN3XX7WXt4KD+WP{fK(TaS3bA%S5~_L?jl`;k;+p>i9Df z6?oc+@mE4ZB{Vukv~C%I%IPbkpN>JuQ=+X{z7xsV67-0;ZwRZVW24~r?0ecuePWdR2LLhQ=PjXib(@XSN)LWFjz04kUUt>P zuJx$hikTf8O}=Jm#&uLIudicMxw^V?RAfxMQDLD0yUtmf7uO5a%`{>}^-%y#_cUPt zUvX)p%j&b%jDh{kf*U0y32^|iev{dDI|bLd{8jbjL|LIeIA@uGEVH{^v4Q5r#4I zBD_HW{wmAGVBVxqwceoo!9-?EXK-PSPPHAP^#h<(DO5-Nir;N)=v7&64yQ9;hfH1g9wygYB-Q@R(Nln3^jKc1+ zxbUj?w@E3z+|NycZ0s8YXdI}kM}^U|kGqth)nTx!lUvHK*;&+&oJ*5NcBGC?*ZYo5 zor0KjgDI22rw=PW_KP#+?xQ*UPBMQI<*YC>k4r}$eUc!iUvhH%-#HkkLEf(Cfa3!{ z`_zAg_d1A+i?hLyKtd)35g$ssYs&oFgGkR-{7h@1*cGOoCY?GK!PGFKA3uJ$?u?MN zT}`U^ZYBy=H8tIGOuQ624{2lM3cB=nN1(dxO-PsNw=)tfVA$yCe~vP4^2xI0{SBsn<}eOfGHQ1mzOi;j!H1X9zmhT1`gdL=;has z8yg$b9=l`P&llq_ftlaZi_?1>tkawptwaSV7a6zbG?k@U<&{}UMkJ~bPg`(c>+Xbj zf)ej)`f`y!=H@vk@%kg}UC;VP-8>Dncno`;q~hT*to5%%4PQ{NwIk1jAfXrHQVN|N zB~ghs98;4uvqa6klMxD6M5l{4IILW3coHN`OXE~wahgrgNFr&R#j^dft$)b8){c;w zV=XPL9waH-$0rvV_8e^Fd(2y-^uO_>WvD$1;QvQGj3cdN@oJ?t&8{D&IiLPq>lqi^gDdhUrd~ z*JI<`NQRtyvN0Sd7Y@UlYY2K+r55wF9Y0xa9f09Q$e|;QR1GGw~LEY{^DPJR}TrZUcA1T#nZYivD!_d@;&b(G3Mnc_^^xrCs)vU z?QR!<{+Xuz`Jz5wJ7PR`_5;Yafb-)&!k#3425P{7Gy4cN< zhbDEGykj4+oFW6kAhgyiR73q1-gwGSLf!O+H}|u3TvAff^t>ZnA$IQzc<<`SN$YI} zN4{u)z_m>XJi{qzGotm~XY1Yuwi+smKMg}3U&HDR5a#H9X%bezplinHXE*fgnVUX#7ez*WWhGP`N?-V3>g#1d`GcTU 
zM@LpgI#fzpijIZkW@Y(~8KOB}fHl60e`%kg?tKWY)G7U?%?f~o>saP+0)5Fz_q8st z*yRW>_A9qD;>6-1r8eIKqJjXpew*KB|Bd6wQvku94B8bOzCIYFaMo3Asn;1S{;61- zLE^wvAJ8^PDJU+L`Cko|`oT#MQOp$Mj5v0$17&IdM&U_BJWLw#)Y>gJ{9RlO;7V0| z#025`dHcw)cwJ`bj_r2d{S(wkOiBtKbaQO5=hv!bBUx%-8qEmT!QtcWKFlC4$BsK^ ziTdJkgye>HGzwb%h4Kr1!cU*ch_sH#(!<{DX3yiLF^S!{y1_m6bMM!`YHQK(N7o2v4kj_#fSy9T0Si`I@19!S6-Hipo93b{f@kGD;{SYey#VRh$%Vs z|CDn_^7i)k{p83mY|!hB9oXwaAVoT*sUJ#Ht)~YY*ylcT z4pZ5VBF$jK3e6q0D37qaOqJsYq|_+=saWSOY}4n}x><|5e!N?Yv3JNJC)J8}fs0XM z$5!A;dnKXz_oBAj-D4$N@e~vOP@)iUejJXW3BFw$98}aotA2-Oj<2QW@YLtt{K({S zd1?8>P9%H~2Sj6W+tDEbKfZ~ILKae<&iG1`blLKB<>_Myh`N}Umx|)z(g_#m6oKl2XT66z7`h%j0-&!|~wI zIV{thba3|)5Y#L!Ex}#wMO7kQ^#a&-Rw5!IE1a-LVW`RB{K{ozp5zzb`~9C9?RPu5 zl8%PNvp|)UPMaC>*Er^;Z%PT{WGxGMH`yUYO{f%FVg1f))=F#EiYLS?d4=9debaHt zlhnE$wlcUvpT5LoLF;nzwRAI+ym2IU=&rw?8m;CA&rW_j+ni%hyelA)bFNc`jKMG5qM3YYmZN1vRkQ=v|>^?5AiO@*33JZ2~Bu zM>#Jwg}I17@fCzT`>}Db0jkOEC>PezN;^ezo>16wt9P?JHYA!SV<}BiR50N~P^sCX zKwzb_P$vKwW&)}0#&adOxupy8ZYoR!`F_+3MBx>moSfjI(>-JJ`q-8e8XSt+sDJsv zRN!k-3sL@8@}XqfUc0pU=va?fxMDyI zL<~-T;OfH))55|+13FzDUXS_CPy70Xkeq^o;HfjRmHPEpGI!TO?d@ghJxh;bl#9_R zJpVkptZ(G#*P|V&P}_ro+y*d8GBBK1I}-l`<{>veAu+|p#XeyttEyAKfdl${@O82o zfV^!J>d`GgvlS+?rzO;72ga||LZ!q7m$|S204b|X2LF!98O$ioWEh@J2yClUeyq(^ zZjUcBBE{(~7aK!q_rs>o7ZL*v#el?QU#_fEk22hV0G|maHdvGT#_TZQpM@_^rK#0Q zf!%S~La9KQa`-OAgY@hMPtXP99RyTiUn_ZWK`vPQa|ii~ z^=bj`kpFiG%a`qH%iS@ikfVX%k3fyW1Lgy9cth&7IbL?}nwuA?*Qk8gg`1AX+S}WY zL=Sg%ch_N0@02Z`{xq}dR+v5AU*pr>C9$~98w0czn#tn2@8aTWf~!&snR3)YoP`qovBm@@M=8slwu1; zD`Z-DIV1RE%wNf%d4pYOQx)CcW^7sU^c8DkPzX5Q4u5&h{-WgL=}CD(jHW<1ogYI2K%$F#^C+=B}<)(qVH+5$ES_c_LmI zArFZzYcoF0Ti2}^SE?e3mbjF?|5-oXg>+UtcE1e~jXKZb8>U{##0HqekbHa6DnDe^Q7!eNCG0NaL1qrm9)BD14zwSgTVHd@zKEl&Cz zT^aO(ama~fRQo33(US@?uLlo5i4|1Tq#sk+xGrxA>#zx$qF9>OE(93&cd;9Hz0aEN z;~7~ws-b6dX%h55Iw(8QzgU;2?cnm=eU}o76}1Z~Ci)3s$ybJPwOJ3ZKK|2q(sl?4 zc;KLmB#=q3pXn6m7QTA}`XFtUil;soY{NNR0V%gI4_?7;_B*N&|1b+!VS9$(n%p)~ z0Al}2*dE5u$i(P+0LwfZ{aMfhIA|efp#{U+XI&Av8{MchzbDpcA^V;qm#R&HcRVMp 
zrxI*z9Pe`A4l|lbG8~z*0=a$v{t-nTsJ4C56iA1}d5SDA%*?<&mVxwBKr%>lEvhf3o%?FlNQznfm4Cul#zj*UWSw*W~49(wxU0##{k$e9zpjz)JBn`g6AaN_Ju*@(%Bm}1fy z<{GnRB#^LM5JD~LbMi5AUC%*PRdwxZf9iIpu_KB#lQTruKyCOGW$^M&EQv-kX>%ai z($;p+EAMWY2fxConPBc1AVXRy0sMnh`S}2Y%E~EVq(kzH+y9iUHX~A)u6zTBcSvRq zKc&vxOTg0e05-CoDS7`l6{t*jy2=}UX5wGkZ{w9=Y-I+Clmm?A{LM+7U7Ls3;v~+a z&3M;3Lq*RWv1+Dv-Xuhn7JFl%55gjo(P{HW&Zu6f$tD?K-Itwa?-@;P8sv~7#$ZE zhdnOwplVtUHKrmZ)kB(aA?<&-EHBT^zO#TqieqgB_1|VF1K;xB^fYq0_A)*tkplR$ zljR}&*!TL;&(qT@DzEC~Yf1y$`}>q#dVJq$e8dS2)t-}+lTh$u)9{w8taDoIgi`S3 zz1Nb@7FsMC%v~6we2{#$zqi-5bi|f1Y)#;-Y1CD|uP&67Hquz{bZ~U^NJW2d2h%=$ ze0(hDd>s1HcK#D+!MGp(SZlEn$rtN=S8k>UmMr}kVd=EXpt2ih+df80oqF(T9GOWR z#TS1Br}xvE=NAWD^>u~hW~JWY10$^*c2sARcS;_LJqlj}%G9Xm5Z~RL6r|&ZaJU-y zn(dxBaHl97bje~$51!-uCHA<%2e*jjqGcgSXMfl!A@RWD8E{i%O$4}~xA^tH_sh%1 z#>0{WfU<1NOq8G-bjR2hP5FF1H~$xGM+&$w*G-*`oYVHNdJP-@b4mgC zIpNooBVz;{Sg`jj>BH8l{>#oUN(ew-r^1!ErTwqay1To-MlVTnZ*Fb|zHi+8|15xz z>z?Ms0?W~dS%cU4>fh??rz_b!q)&d7Ai!hvRs2<%5S(vvV4V-wLg$a+Cek2OXe zaflc3z*a%7Psv(!D_$Sv(6fMsVMy^j+VO}5Hh6EVM1Q&pK4Y;8loP+WR`;&Xz~%PF z0k?-uTPCBa<;F#DP_|e>Cl?UgHo4n2Q)b9XI8Ba30#2410Z~OFO&JE5ze)8CbxTaJ zK$zZJc9bA_8MDf1`+qAdNe1OQoIwCd-{$c?nF|}v{XBzNO~+Arc5lBK-~D0}}{jqdRp>o)eya4O3CQN5-5_US)tW8D2a0j@n!1gn9c zu|aCpQYL3E(Tw<{xF;T`t6QhsQ7e_J*?3~_tU_#IC8eQFb(OhpM3&cQukkHv=O(Ib ze;@o&a!^a6c>88sXp31s17f!qMN+V;_cmh{5ML_@{<_kq-o_cJVCu4Fs2H%k`xP7x zs?aX|w=#pW=+*|#@v0)hK(1?ODC+E_`S$Ib^8@Rt&}1=DOG^u2uxqkqgwMObFodrB z{TtQ`2t>c*2uaB)Jyld?1^k*J-SFc@ZUF&eeY&UJa`tk7R4^#sZhzw$*(lX1zc>5%nyK;NiR%FR+yFu~E` zvo-X#3;o(RhpqCf^hloJMfWQuTwN5OIps(tf=zL@%)N1OwT(V4_f|&DQJZ(gylIC! 
z`hHo?__5gNaw7(mdw#&p@xwcj{1vuHp{!9T4L8=NXR)5q#|NX5X$-*p1fk_-`=X8S$Y!x}cG*pb_)L`%eV}LN7}NOLYC- zjugGUy-gmw>faqFIGJtj?79IvH+5jc^}Vhd%(DarvsP137zjZ*1LO&A3GU~_VPd7jwrjNi?gY}JsOBCsbvemwe6+F+X?PVCR#J^2LS^akgE2qcr2tT8}81^^);D6JE6vol;lI7{FFQI(pVl z0Mvc8T!H`cACsrrDv?(7VSp)#MhD1avBECY%QtT(01K?zsV~t7hj2DGx ziHnWBK2vV^cfqWQr5cdXftW*uwvVMK05i=Oy-O-9PtUdAUJI9Bzk~f7(e1d%h~tzv z|J@51ZA3ul;>i4EP&|B9W%;|Yd3j-T2lR8WX)|k?N++$_igXnv*?+t}pv%LYIf};5 z?`bsAY?qOaAN~Z529qvh)uNVo-F~0DxvYfF9Eme83ZgZy--gR(3mfMtRx5t`fy9Tz zBBW{7=#n@rDF=r`9oRpX;hM4uZHmxXr^Sj0 zalZ46Y@iQU`&~i>!-*DR03vi!K+P2zb>}z2$rd=!^-P864*)A5kFs)EaAUUE;8d~6 zp66c7?|n2ECb$~NYfG?RS@**ZNOA!D44m))S?2oqd8K0|gfsFM8Kw=iHu!XM@Yd^A zK+0?=0WwQ43oYQ3G5_GO#T?T!&*9~ofcJg?ObtDwwSpYkKyasF&@uv!wqu zc;UTv?%`Crl{|W8wNfuQZ4)u=z|Hfv{IZ9adE+at?kSQ$%Pa;IoyjC?o~0oNqS_-^ zzbf1H!}S94adh*JJlOC^YAk?le8lr2isU{6(ujhBA|=KBD?1Xok&T{F|Sra155&5qWs8%s*e15A)L0ee47YH}79q&=oU)~59* zhV;ec7|zJM+P;Tlm%Nyx7P@828(Lic2D8c)O$zvj3?Oe|%T5 ziXAiCQGkB49qgO+<;%c-(luan+}mBg``~kP)#Xh&qN>rV|8@0HN>Gi^u+h0MfF^`8 z;N83Mw>&Rmt3FrEV|fyr^~-TMYG>I-O??{(p@rFm4c2c0`~#@f-n@U2cWCc-%R85R z-W+cS6t@nyGEKHvi$dl<+n=iJE84f9$#-njnFr>f16?cs{t|a|P1FTO&`(1}cKk8o zbm8C=zJ-n9YPdVLWuT}LLGKahjlYr)P5Y<02M?dL<=#a_Qho0Le$+J_ZWh0Fo=@-G zxtje??NzKWb?l6I-k@25xX@D8Q)E(5JJ}|R_4}nPNogs4Vq-Sw&u`GOC}3jzmT;lL z*E|wCxUmiC?(@s1B$Y%N@qL8=^zU%E85}gV=FO&~`<-gUG3%l~qcq?~gx>v3Z)r%J zm0bjl4oKAan7lbxZP`TjCx}F*Y326B8IVT+cmJCaN60CHv2t8kw{+W$_-Xg^(?diw z?lWhbpNAxeX$MldR}fAEQ$AL2=%&zmG+y+Kh?3lVM@NTg3gkH+@>Wz((C7La&D|gz zSaheiL>M*NS7HY`gT%OE+eOrj38DIBDSgBr@}=R#=Zw(^K#ag>*RI^hvK6{fQC38kN=<-U)j+sz?ua zm)0=rSNW;#WK(_$$HgL*iDd`&rduzU$=l4~OCse8FuovS>kPS%^J@RvnwerhA_Ec- zg%S}J)vD0u+D@E$E(z69%+E|uU)zBsE7Cs+0x1L+$?M!szapczTGg5A&(2D=bT8&;z6&KDiV7a2MrLcG8QFUPLy>^<%0!^AR_-7N8QdZyfVrElG zR(Gpfd2=gO5`G)lk~YAR2W=}HXB@uqQE6I=s_NUD`&X<(Bhm|tLe|4UsJq(+EJWx-M}(HfJL|F}*D;IKOi zLFYCu9v=EgyhGKgM^};jj=H8M-n^=Si3H*;|LomfeR@>xp?!MrNU zU4sBog1oAldJB6=XJ%-J|FylC`vy`HwfMmgbPRo;wrUhK{FE43+Y+3MuKmxW(1(SE 
z#TQmy0LvCv1|2)_nm%rjBm=GMEdXVWI8r~91Ktg@*RcJ~-+P}KJ3pb`JCr|S^^Bu*4V#9MZ?$p`VM*%%7 zCzm%EwQ*~Bg9zR97VL@qd-F3<1?{F?zoR&8OOeYC@4T?3NK}Ts?Oh|xvyVdkiA{P3 zEO12GY+5-9qcOvRg`v0u%gbTpC+T@n&CR2$_cM(mxKtvZ!r7{Dvgq>yM-D+Ce$+7-H|BLM-c78AM#9v`%p}OA(^U-e6 zCXcs>#!5}Xw;dGn|Ju)3{Z@4&wKqq{1<;0{6fyT>?WWOumt@1$gm$PqKt-?myu5uL z&K>CK+s+;@0k!-10VBUPR*^ERVrS=1+GbUS<& zyB94w08pt~EQ?+Wf;L}PqyRuCTBBtf#sKXsN*4UW zrxi3^)>5x2tv%Fyo8d3xu-s8qrTKhhBjK^L(U*`6Bp2oXHV!P)r`vYa3Ud4k?nF%% zk#`uO08@WD;trrpME^=>lY3FniKsv^KrJSkhm~dy=lLm+__zCIvo!l!L0$26^H}Cy z$2YcgahiQ4!o;OkshiLAbF_M(Bh7#q&6^wq#?_{ewVjRlV-`3IMgG~2h`;QtzfSSI z5gz5edwOyK`NaP1rE3-{o#LSr;xqTLr{C{2^9i_gXq?3F8X_^JJlH=DY6eO{0gZd| z)38&?jQzMBES;$=ONM|Q2VPOZVFcvP0M6L`0HqHLlt>2P*LfWJ$oOc`O=y84gX`KF z7=W#~g&~(a(b6%xmBzh*8>j^@nJ^;TRDXSLlZ~HhHu_^zXb4cwH{;|sa5EtDcXxM! zlF!x2P6atR3lKe{beP58`T_Auf4P!}IL=^`7TXh7%)I_l1`%fHb6>9wHiFQBCuYFE z7dpMA%zYH@5p~+?7DmCBJxBYS&cW=sBQTm0bwbQ{f96E~nx9%eW?dUddK_u8$nP_< zyUES^vS{)LvL%%*9${Xt@k)CbU;!{W!Eb$fhS*333no{8G(M%*_VmlTpHI(kyc7+m zG4&5V;{3MQT~9F}BuiFAKbkxbgMDw4bV3~gbiE6M^6T5Ku23ZaZ+ZGVl)S)e0URHuT?ybqQq%bWjj=>T;Q7C^#e zMe=c;*19r=ugADL*hg}@%`P0Rr9&r1p_Lxm7(rd59Su%MH4Mbjm(0n z5dul)V*R%N{${1gy!z`ZAx|7Zi?ln_)=KD-&CM^|3=MQV8aYW#)uH>nqbd?KQGNWA za$66djD{i`+@$sf7%oW8v6k0X1TvMtRc+Kix7>Pa8-FN@;P2xjNF8`4O)2hA1cVx; zY1r{xxBnbAz3geRjc_7$-B988$o|xoEvRx5{X08*W6rY8lmUr5KRXFpUM);ca=s$vOLi1~hcMKu8JX<4^xFx|^#?yM?CszrYNgOh1~QzdP)x@nWqr&ZNiSAD&2 zRX?J2+`Bu6cYOr!A6povVpk72L{GLWnSuNMaJjzPm@FWIF6EOmlhcc1XzU;!^t1!Z zr&Ch@F`?R^1`@r;+G-zeA#BGwcQ7jBsS>b_U;Khw?f2UxS!BmnSjQA+&#fzfVMloP z(Hys~s;X-LV7io(QplC|P2^mjIt*vN;psc7HZArreA?(>2y)^2 zxk;N0fLa`)r+k;2%3`g<9`D0-R4;gV2)iQlZRmM%QCJS{g};xK0$^W38Ht4}-{Pr!JD3 zndjQ+Q}s3oKO+Rn=7|%>JM?FZagn7qJ5P*|(UKeXTm0A>r z%5<00G&)fjuoh`bD{B|b!bCrpll(+Uw%=l~WZ7RC+;I+E0#_gFx7~Ctje<@V)an z$KPR!nrR@6F-YsuW&b@!$kWB#TOHc;pxOKVT9`2R&2;W@EQtJ*RE4k4?leliFCBa2JToYOW2s9IvtBuN~Zkz{RL}H$Q2VY_Eufl*Ozt03anI*31$*((!s+r#= z5NzJ}%|_I1!N&_K54evOFTr*7jm;k4+D!|MI4lR?RHUr|g12;Sc8m;) 
zDZ9SBRNLSE{r#x{@$^MU8P-2A@Urv%%j4FIHXD(qBgTa*rBPYRx1=3M>JursQIFcs zggwCos^SN22!px*KB8`aBM7A=Z)cBr+1~#*gL{$zwa1Js8$fxTL215Ya-v>6 zc6!u|olQ04=m0xv&L#A{ldQLRyu)^7M9xDGuaM@NSO*8bl77)fsqLJ^E)|b@zzQe2 zp7EudynHv9c7?;X&^i5ukCV1$ajNA>HhTei_|vo-yGRL$wdfUHd$_lAi&z>eoU1CX z2>zgq)6%q@#E2t8YftdIyFwg&9P}!oFU;`Ux1Tph%ZmqLLAvz@K@iq$-~AdByS7Y< zQ^9iP%xeGZ16r|T>@RX0nGxosS^4>Z)%j0+{_qCr%26OeJOIA79z)X)gz=Z(!B@Zw z4T*)_bdA{QFA=`4q_(_g2uB>>mVe5p$`=LXnB8>9{?O2nOUQ*_@Mc7m1%97}*5}-? z!0asF$WdwRyWi_`_F~icwjvl+_-p51i9}h$1DjpR$jWIwyyVLs)@<#^U$9{Ku8y|oYcLLid7r#IskB{_445a9x%Q)#D2Io6gAUR5Uaz&I zw_Q7kH?@n#B%;s}+G6tki}m%62{1!b^A4N+C$gq+I;ctzq`=d1j5zcsq}7l&PhZpY z4P~-1&Wxu^-FT{3J$l-E!UrlfhQP8zHkoQTR7YO6*&%X1J1{rM(edaA$oaJ5Sy&3B z={M>zKjx1g6aZNG*I=SlGCmzkzTD zpl@=?s!?T=(rE=Z}vV|gG)zI>%EI^66Z zg=Ms*iijaoYK837C1UvzepOdZl3VY?h?fT7_cjYGL0aQk^RI3bD;{Q8&_A;g7!aOHtk2+ zgs>iPx>-^SrH`@7yuoAb2=|Q_w-Gca-QR=}5qU<|h-C2!mBchr{7%mA4r1#A&H;&0xU#VpR3Y}R3%e9bKy2&0^M(@p&avZIb}$>m_oGTJWUaq+&?^``hk zl{1AECq#x-m%>hXOL*yU*hEcir1NmxbFC5f10#c{C$7D z@(azBYtCm`zWcVnkp0Pej!>2Am1I~Gncs>B4;6CRN|OS2)xe^R<#r@F8zTG%&x%_1 zbvlRZ1N`{0k zb8zrbA)b3P(JUZ!iurFK0TTT|B31k1`s)}sASKMU3hTXUkBwGOSn36C?dRG2z@SA^pm2XoV{w&q5r-cAh65g|qd` z-40W4nRcGcx@)!>=`n3>BioEvFnjZ^P5s~MW)hjfDym=&@P_sG*3OHn%!WT@^9`l5 zKRBMJO=nkbIZq20*y)b-|1hgjVO@dt7+rsY#6NZA(j4}cFAF;gAWcr}iANA|mka`0Qnp=%s^w_2GZS~dLnqy39n(ZyY({SQ5)Yi)W9vpCHm40QgjKEx&P zN3E&>?LfApg)G&Gfa=-Yuw@l4C{(xK;pP{IaYvgQFuk@qXvGup!%hNg?eMd8M!P!y z!lFa&eSMEhAwH^vN zqOhtpgfre6KpqA@ zn!Vfi3MpH>x*ZYq|F875E6rqZSsbk4XlG*T*4gv+`QP3n7tZZAtVv{)O>G+JV4#zL zP665}aQWYN2o6y)*I%1b4~1crM6BSA!2 zL+*ZWR<-vr%}gj^)BstfX4NS7MCf48122FE4?M-Jk_H*GOft=i5cU7<3`NR&PFQXyxH)WoSMSq z5)xi6n3Uwv)2Uxik!R-u$bk&L$$oNrN($0u_%FHBk5f@`L~Lz!>9Of7j7;DF3A8LODm z8>hs9U;O`4>+XA47>V^1E{eZLnZANu9*pjED*#osGI!%<_0*?&M*c1H2qg>gH{_ zVi5_qfoiZ_;YoPqIJk4?4i*;|R|Yx}VdJJvIPvJC7#BX?##*h0WHK3ai}C*~ zn4qFLnM$WRT3TA3&{}uR%*>qq%x6Axs8}e-VzEfIYK0uz@#dXTeKLx8J0K5Pdx*>S zM*AF1vf!F0$1K6-4dF5ZWR}V31tnHh#D@1zCwKkxyphNrh}6o~y@T&)JT=Dv?mjBE 
zYvuFh7k*=vy8Snu!WxyPU8b}G{P3Fm!TO#h;5hd(5#ZahSeKHP1Fp>2(u zH!ad<7mcrfd@j4`i>1u!zpke8&pFAhUoT}d8{nMkGm!Fys*)t+6j4OL+3v;np#5(v1ba9rKEps4Pks8cS?!u7CR6YqS3xwyiP zMEl4`z-|}@Pft%@-o1Nwmy~i_E}NZQTwJ^)nt&ZZz}A*!fr4BXf;0?dG8xRz&trUS z47FNqd7yW6bl~Xy_v7%PLuhSnja(T@DYUe-z;T=agdxc;wbluxb*ihg>(c=CmP)0W zkALiAyUV4LC>D#NTB*4G-TYEhu`-)44RVC+rPz;6O~_*@%a`L>a6X%Jv-8F<(k{S_ zE@0KeuO|2YY}M@OUr$1s(Hz~7G5F5fbk{wA?B=*Br*?bx>eEl-wa-38wYNyx3zTq} zG8$;{0G^ki2NJmYD}5Neq02E_#(ZS7Ei@5Zwv*`I^k-Gn~Vy6Gr zF}J-1nY_t@rdZGv6ODny3s!2~ljXFr%TVKO2GW6IiC3%L3(%up(m3EDh$tz=hGQAk z{!?%usS5H3>DLDPsh(BCP@V<(4JNS34}Y;z7*q^U+7Ex=^7F`2!D)?L@k1HW)O z*|*~e38N#Hgda5`-Vw-yMlZ~)n6+9cO?HHEicFJtm=Ch%v!UBm-lEurU03ma}T61Q2jkSa_~1_g;X&U6A;8HPJd=!0+$LsEi>*cVzfs^8hE79PHc}NDG~Q*I}12h^VHoneBXY5R8@CXbyauu(O^0E{C>wemd-idU0v&2d#}Cs+KjBDOvb#T1K5k4 zX|jYP9iCH@)kVNWc3xiOW| zk&&Ghr}EcE2YuiC<~I-Ce%o!=E-fyqT5W}0w*eh_yk$f6EYQ;%N%KHX`5#YT0q^Xpn!z9F_nsr4pWml>y_a6Gnh&Z3$cq|jvfXp3A+_wZTM4kE zOLZ{L*W+Kt%yRc#ckOuXYhSycnK$?K^$pL>&)=?;itFnVR=Bvi1?J(0AI93qktRNX zY}CxOmSVTzB1dn3$LdYv$3tr;Wk3ZQI~B+~)ppF@`v7 zcxN%j0WdH=KK>D7%o#H?GkZVs;lI6dIrd#|G{^)&zQWEehgRecq~w7-Ne1m6WgSEH z)cJuxKI0qfS`!RFY1%^pIxzNF|C>v=4vVj?3`C>%MQMzdNZ=!ap_#0a>Y2wkL43( zx5zaWmStgjYHI)Pi!Yv3N?kBGF!03k^726row$Vn(Z&$7f-wd=ckIBu_uh-ib?Z`M zVzt@_KM3%`3ol?H66RF=k6X8G#r4-;k8^jOn{IOj@bJSAW8QlnQp0P$z5?rZ4?AZC&b)wT1vCTjF##O|Xayjh z%y63ImS}6GOpTl}`;nrreGsMr^)=5qG*H+l?fO18Z`lI3(MXuTgtP0FN(HmCvp912Fnr$+fj%@egv&3x9M@iZ zEk;I1k_|B7$#K?M2m+lTFc33yR7$I?S+i!hWmzB4T3es{+~;0=`st@OEH5po+DeT) zFI=wBF20qmmluGXI^7slqTei0Op>H)jLE%A$Ql;v-_o$wUsM4pO7~25u8~pxKeW{J z(8_7Wn|bCj_ntr8Ow-@mDc&@RuL(Nz!3cdKA!iew{KIkF{E9JbJmh2W_80M$KU>0O zUs}f6+Zx#Lgu>Me0$vc%0>JYC%?W54pgDw=&KYoLo@4^xZHpC=DPtrGn*wTpa0J9( zZCC?~EBfzu_sOs#9!GnRI2CGWodf-5!?FSJsbw28Z9p%_>eMZaFFbuRJ@7ey_SD1| z9`qD}9n4Jo_wS#$`s%CqS1Oft)oOLa!otEW9EI-$h%pANR8wqwN^eAR$XQ7xGJ9h5GtFFHuJ9g{{J=qh^83zs=z?zAPsAe9j>>MIhi4g}t|LExG z-xy;y9y@mIoB#ZYPh7vW6gI`U4HqUSFXvuLX{n2t7UObnk`C*>(mTsDUT}Hsu1TQt zAZTE7-CDo9k_p*SE@YP+fD0hT`0mQR?swk1#T@$I8{i$b#P?cDR-+$d0wxH{KT*N$ 
zA6UT~|8*IY|6a%FBfv#90j~&nNkB^iT0-*^3(?m>US5;?HD!RFvwjIFIU+OfNmLWy zUf7p>vF?md2KpOY9T^~^!K6f81V<`C&H_RpSh!)uuFm8!dci?&b!7E+u&4Wb^6aV# zz+MLMlsW!ILYGep4PFi&IB?I^*IcuSna}F$?|))>W#s^e!klTbdUzXc-MSU^dcD~) z3nD0HRQvj{xVVTThYv%IjEvxlE3d@WS6zjH!NIm=@jG_xfakiQ%C0pjr9eaqk$da< z_3M8Mzz=GzkNwVj-gDK;$_lM4FL9&ZAl)G^&u&d=UyYk>Kg40O>o2Yr1HEjRZo>I8ukqaSqyP3LdF(^46Zas)t7ie8Y8I~(oVyLq z_X)NSkSn0NfNKEP0jfp$cKq2OklU%oocp!p06lMsBHD{osgf6A381eA*7mQ|oXQ^7 z`_|Yp1dyFdZF9V2x|BvmK=6&cc*BZ4I$ff~mSuDW6prAuHPGZdsnqS(k$*F~$H$-9v}x0XWm%Vcp7&X8j7LOqX-}bBbaP@&Kv}s0Q!~z!A7n2gn7;57|3* zvs+!KPE(vTC^saVda0?vNet1`2KEmxRQLSwi~ZFZU+xJ6I_f)28s_xW3amwDbgzJ&Q>|80pSt0O^A{EukBG?Hv9YmV z^?lzK5eo!j=H|8-F$Nd!-kmV+j?7qt29Q*IuwF|MN zuZ;<7<^YL^(m~+#_xJw~V@!2+cJ}^n+X=5_<;{#*jJ)$ZIsH zR;!Wgx_sm3K6gZi;`dKZu3fuZMC`DuH+EKm&e|m4iB<&k=Rx(S zUo+Le^_E4xBQQ7?WhylarI+t=qOj zDHYvkE$ZKO?zthGht2fOmBwO30jOG*^_yC2=f3;yTR3v)&>L&DIyD+D1wnwKkiBg+ zEm20mO>}x-^&l_*c4yD|PT%ek{6%gBds60p1R!GEMR<3-?_9OA_jTef5WL7^vK6Mc zkwP{NE^7EpUdp)DHJOWA0Biyr0&Id<;b!8_Axu+J$n114<8zY-{-h2XxpW;fpx>@l zuKmTS{ZQ*w=*z^9(0eoulV;!7-MXRq*z@0ls6d-dfQ_uIo}gTq??U-g)POF{WZ!)^D&<10vF5b*+h`T>YZm zyPLvZ^F9r`Pg|QJ5f%}8)j^C$NXZ#01eKqK}lM1WDZjqNN!!<=tti@^m}MQ;i%{ zZ1Iq^XtWq&DJ8ZwCbqpvY-g)f?5(IeThQlhkv?a$RO}66sRuKA9`cHT@B3)f>r}7R=rf=C z)Pnv1~u&LZzD`o#a z^pG>!e?nQ6GkQ8h2(4}tNj$sE7;P$lq}KN&|Lk;w#$%@viYg*%6tG4`)L=kk7c862 z-}6sF|JY`RXuG~}SPtFwHh1}jV^Tl7JHGLdw}xWOaaKEpcViQPQpjwe0f16*L>wfphSM$dY>sW91Vk+U!TD{JdYE^E!=_dZMANw)evaGic4i0{<-e??QKr@jEv<629^;K70g}d&) z8*yuG+(VEOlDBQ!hN-D35D@~c$x?~|un}X8j*f05qBjRYpuX^hFD!bl%k_Gle9xu0 zF(zr+h$59eGu~I6m$wzu3f`2Gpzd-Wbowo!D5(+njmK1F@KuR#r|!QQDh#C4FS89A ziE>XK$dhoQyyqSJ2`U9xlLC7})4Hpcs%O96W46``(XYfel2nHwV`o6bY+kI*eTg6W z)J0b9;O4-0$TxGMO`X!hTwa?=^5qv@io#1IAjoS}{dR(SWqS{j*|r@JV3=9Q<`fS<{BSr1n+S~=iC-!Jj!jJ=&>9r&91mkqQGp*=0|NtZGsal+ z^Yi=m-hclq>-9RhZi50pXzqM&2ROQh>?o+4r$Z;|-VH}MSsxKP0^s>?zqwU&moiiU z02Q#sZMSkx>`mF`uFU00ZUxG2_GJP{G1yu!T-hMbmA`%8;I1DSu`FkjlpH!HwH~sA0BS9Ix)9aj5FiKqeq)* 
zXOq!6(HR$LO%%rMY>cr+Mn^Uf(N7p-__Lq=Y~Z>s8jS`8eyf}Y%C}?gF2HqA*$Yq9 zPSz}aV=)ns*!WB4cb?}uesEytIzKEzsUYoNdX&BhpOVPWBc zM;?CY)s1?cJkO&b(5dE_yafw{bUCQ4F{VIer}DE3$3g4uA$#ZPR!#6{Hi{0OD@|b0 zn`yT8qQ^YGq)z>RYdwtFXvv zcGxvnRi*ND6y)k%Dvt!LB?M!ZIoc_B%bphk;PbV?pKD0ZZ}xF9d8hk~it_awGm2EH zt)Q-s%%E4u>XKsh8$b6sZH%#q=uQ3o{lj96igWcim6oQ`k1%u>5nO)x<#1g$97Anw zK(#8BO21O-%_3rd`O9B++=h#W>r&wR88(lB+iGh~Ejpv@`r-~Dds!457tX2ow_Xe& z0SA8l&ipxy+_5VH0DExaT%vx2ji3eeoPTLLKHC?66bKmz!{V?^vJgd>Rgi(L`TamW>;gj~OibtAHh!5afC*xph9E zi$eJ9zh{}foz=rvpJerY`}VzRadGi+5$PKp8Tlb&j4gnQ*H%;HSo8SfkB6^a5c~RVfSSFiI2F zT3gKg)5aL*_S6d`qpJEvOl zYX`6=>VMofM&SOSqt85b2r)a&Xvf!|Z3{%I=;lUAkQ0a;W$$2cbJI!7MYThIUamDb z?CT-H5Ax#wU_vDi8&sgUyT&|oNazgd`Vi$oPRAY6Q_rlPc0)=6x|r2P1X^o&u8W52 z@>jp|mG9A7?-!9x>(;G%m55jpiE>Cd#y<7bQ(zOd+5w2$UBg8TBKG?A>t6xjOs(|` zH{Ep8rHw{|JkN!W68yGh3Q0Ccmd*18xZF6mRCYzHenUW=MYW%w^Agv-O!=);_*JIxO6|s!R4^QqP~gdRT?9eE_uO;OfHB6hEbFIz z&r=9lJ*uVT<1D|*9xzJiiD_w~qqdGM-I5Z02)y5dU_10VOaT^WxJeRcAQF~qf9<*GO zy^`g$-m|6Ej7oLBpgE2~8{KnY+`mce9;yX^dg1A8L%Ljao;7$!n z;3Z63BsHm%XF8w0(PKASf%}jvtl0nnAOJ~3K~&{qr# zBp?zWFm<>y2fhw55}w$}rIOtqCt7YV0x?0uKJx9KgQN`@#Jd&0 z3JFAJ_i5hXp8%lKDyd~P!dz)qZ~M{ATzj0W8`D(aJ=c}5e)X#pB4R70-e`=8MY)K# zg@PbJw1WXj5l;C*z|8zZ#u)3qd+)V^z?ZmgZd%(VQrjil^7HI{-bHCY!I!*4h@QFC zDX+5JNsI$vmSI*7Ew;R&rPRU0ePb5XaFANoUGx*u!z&9N0s*}UJpHe)feBVxS#4`b zu81os204sCjMQeH{ubP0eK3o3yv&`Wr$GVo^Ofa~5}{JwJ`tGrf-kIuOWAuC;<$?_ zc`}m1p1!-F*FAd}#KSXMYxrKs>UZ98$CX;^Cyg;{)~{cGr5IyDM8$ilMI;pCM5NgX zCn9RyuHQh8Jo#@NMgY>#VdNADgf8Y^FQ~p1Vi* z@o-Dy$a?L&k0*f8IeaGqu^NFIRK(q1T-!r;t6m8JJx^%7(6@5%u8+fL12GbVy`#t! z%|(ha68P@Zc`Nf_;)8XZ2gJv6u!UF4+0hu1`$!HyM;iHA4?!? zj8V32zuFkX^YiofYpoqG4%>|($qEOAv{#Cg#KamkxK`0Xg< zE$L}MRh;>Q9|w;j)gjm6sGv4L7A3F|A^S&39sK;SxKmGlE%X)Y4EFHvMPRfT?HzOP z{fqVRjtxmLMTo8EC9j~i`uAoM&}%Hy7v3`63uR%zLh@ulON~{e)?Bi@cCzlcF7bJL z_gts$I<#{2Kx=prtAFR)-@Z2r*}u=WtUeK8h%f;p#)L^}D{KdQp2y7mS`o4DxZ{o? 
z-}8*;d65X0r`S@i$$g#(xB9yI@hPF5uY7TyUiiZXfLQ^(aQlxm_m+2kBH`iatl9b_ zP(!BJzG;<5UUOHZOYjeq`R+%)zxMpze-abS6+TCamq}<7)Mt;>@A{J}Y6k}p93dDJ zk6E`l;ji-D5rnKNTUcfl(O7rq-By0yFy^7h46WCzcwjC8_7e+i>RP6wJf=^jxcLV_ za;i;h4d3_4b6tAs$tT|s1i=F$Qkj^TxYmeR5~=SX0!x^*Q=Ff#Teof-5$!U@EZqFH zuU+eT9{Ii>ZDka+75GzOdp8lTmmBza8WhWP$Yt|%PP2N45MB@hU6%HbiwTyKC97`X zJ6DDo)|imu;&`zjOfR9%Q@BEIh2XtN^wg)`;@Fl?_>Dz7od@Z#d)qgrbx z6yf}4qkMaCP6g~qnKiSW&$biJ<|y%aS?7wJdU|2)+5~`h*>5asHE%Z(YGILNE|mL<2ov1>O=`*m4K4d0(!9QV{alKHULtfqp|>MZ zPj_Kqh@Q&+s4Y)7LC{p+AA0DaS8J{J0~p=3Y11Xf7!@@&5=S0lB!F78X3bgv7m0}X zZ(sc4MV^;X+tWr@2|}dY$qDN1OgnkO)2j1yPXCH*_$~nS0?gdQqcDNny(P9evrAMR z8H7D9q*OUVlvP}%c#TxsZzw3Vwd}+wy_v2Ba-EyYWcN(w#?JzRtOMlNMjFpE!72*{ z0r1F*+`Ir%y>uKm4;SP&%bc_jz4X0ExgmZ2^UL2lBgQ~$O}_8L^E?=1uy5bKZN?aO z9OqSm)=G?Fh(v`R5fWome}DfT5neumIw9ZssEEM2=@F+sTrUdWb5VFj~V8^K^Kwi+}&dH(n(o%MdwxVq#+47^6}^cU_m5`ARXye&(5H z?vKLuu&fq0fOTTVj?ePANBmDL#*Zg_&m@sB3y{SpMveB5Cdyb{n`cyYsEWQVv!n*4 zG^>|n`J&e+1$>{~7B2uy%;uB?Ina6VwZJihAR7sC^@#IJoav*=KGW9ACNTg$zSwu} z>?+juWY}Jmqf@V<*-5w=S68T%=E4_N{Nd+V?yPd_NpXHQigf^UwG0mBf>)ws!z@+3=mZh3^5utZ?Jl zLOqXEx#p|zF=9(Z84F-BRIby*MuY(zq!i-_v)@4rk$$n(6tTI)(wKW{R6a_@4_ zxdu+vaY?KE|Ge;B0cj71yn8zg-}NHsz*l|>V*HM#kttD*$?6~94rkpAsfwf~+Dfvw zl(-&J@c-$7)i_*NDV049a;z@PC15jP-Zff#P7&-9c240v49J1HeC=7G&A}?}1XaM~ zooX)h(!swxlgIhF2u#H20RYeU;nwTqxh@?zaNq$Ev55J~dcDrZ7-kbrw-7U5WQ+7v|?$lSoOqlThrQOef)mpN#OGxII?~3f-o{@O=en99r)$9Jv)@qRHv* zHFdBWR2RHuAJhceI_0_ua^*bDDfeKI%s-oE_|7RN_bFLYL*m*{nOt2!5DSpANsg^| z!rD37_LR0iKO+PKm`|)!UpW=-hb}%@RuQ)2B@5e&NRqk(JT`iaS3X7E&2g&bi=bYs zq0wmI8@Jy2y@@=1*l4X-jEVE}3q-^^eB{Uzp6`?I`{YNGeR3MJt4ms&DS7t`d+JZm z4&i%Sj?UeDhzUj$%rf*-U%oOe%1I|yClHZ6yy*%!lN!ouce5%0dzppJomym);e1Mb zBQ6Np^FsG>Ehd`@=6A+cp0J1p8C@M-nnlWnmi>1HJXS9q%%(veZvzbJZ===rR#d zg3XJ<^OrtsUyTOW65*0-)Z&jo1oe6y+8BBKvB%C85mHL+GFmf>2yflGbtiy+t@U%h z@7uBB9?hS_Bz8E(o<7p?%;^!_Csc?#o(1Ze-uEo4cWjCE33`cxM?b&4c`|mB%c3a4 z9^ZO3Dx0QYRoioQNefhYf?V-Ck=&-zDp{r3T*L^Bfj~xI~gcr!*RwK93py}!9 
zIT2CJe6H(x%uyH3u1JV`(io$fj2nCxTwNvE??NfP&mqXg$}DcE@E%Gmh^EejB?gd>&R2$x3CKwAWo}M{?qw_fFIB8r zDj^Z1%4tcEo1C9%xGr4R#estd_lt-{M7taHI&lyLq?9^OMCjp%AHLdXtv%0!8w9P~ zEy3tgQud|O7T|?pH1dp(^F+DAi8JF8^b-5u{rp4^tWF>%YiRA|=-)mU^}t8AFXcoz z$;sbC>AD8-X9oQG;&)@|pn1E9h=_qPM)Up~-w+rVzPKGgUL}~Z~Z3`sS(i` zlarG}?D;+u(HSD5rlzKz^?aY)MuUQ6*bx8X#L3d_Nu9PV-*tUy)`TL#5mbWJgISo_ z2L~kZj+SP15J0Np?0##F?8#`0R_~&mB>!FjF@_{qB=^COoo$RiXN(pxMvO5cqQ!_d zGxN`XJ6Jy0ALtc$qT5-5(19ECd~itC5agHarT33pSi|Xky2U3$#O7aHP`4ZtI9ht| zolI_&$#d#|Db)2+K*+;WCTvgU5N454BjrNzc z_GXVAd-5iF@Ncj4gL!hz9BIk*l0@f1Rr8t(D73BLLGb_68vfa^3oiB;lrC%z1cK#H zyUvwUrR|)hP=xE%4DwU}pH_Q5B5^T76Ne$IX2W?%HILqt!_xD^#F{h+58$<;ID8&hz3;_8DH|0x+5QNp@fHWSSIM&`fn zkn?&53f&6Ym3ER3&sD5vgw6<7!R8#Zc^47x)Q9cmqTC{2r|Rd)9KFNu^20fGdGygo zzxUEhFBMCvt-N#R&W!*nTI-{M?>nyNHN*DQW1E~jJIA*O-ou|6ZRI>A6nG-Aveu~GKmTo5n+z@v-+%8 zr`Wt}ZVoiGdEx6GSB@PAn=5qhX@5E7<%K(H1+6Mrm*3$w8dzFfoQVXv&0MWkH$^+0 zj%X3~!mz!~fYhLSo_>1ayUg25T|#@#^uI^9QkPskj6L8o9uQwWb-uOCCa!3Om~WKX!pO24ply>wp4(k%C~3ykL=B?ZYz#m3txp6@Ui? 
zJP7cBKn)nEegXGs@ZdB^Nz^STOu#uL-tSM;Y8Fu?Q2_74<|GDWJ~Y=id|1F*uxd7s zMYy~gdN($INr0a@wFSu6znYP>iTplI*TvNJMuTQ%W)4Yc`+O!Vr8bEO`M!Txw5EpV z<-Yd#G)d;^=@Um@HiX{(>bjNM5^DydT%Eco@M1iIo+t02f7u1mI#ZYI@T8bPY*#LM zTV1N#r(ij;y40qCh_s7vNj7henuY@+2n;ANAm4yI1M>26asomC4-3?&K#d5j5d&*D z{2bo3jH}-L9aP2z!~zP(Y_}222S(?6dyra*)jv+){y13(~Z-hh(fqy;Gk4atYsaOKn+J=NwkIP^PXfIw0sd z3Lg1~SHJ|zO^@}?svt=e!mIwVojCvJKLLA#fkJa`U6PcT3D-@A&6DO4J^&AyhM0Jg zRaGK@uM^l42F@D6z~*IK__lw>HSc6>-SciJTX6r!8TBKh5KVxALn4>3%>Ke!T&)0$ zix%c)E|Lk$X*k$0|N5PPCl|Y6vS58p9-DK?+REw9@aZ0j&h_)`M<4>vbsb~OiiiwY z044x%!*v&eAXpGV+Ec&EAZ)GLlB7Qd60W#edCeO z)~Xl2NgyO)GibDTI=33oQsJwU=e(&ge32{jjjG5@%jGp$FQ-;M9@{?|3FpdnShhky z8IVUFz#s+iD4>P}Y5-6J1_PVsaNdvHiN29ngZRynmB}>j{TQKsWE{pLNB~6!yGYFY z*DS2qY;7)Kz!jcPLSg{%jm*ApVc?qQCA*DIP(0gKj?G_=EMAzqCn9MPDZQ;L2m)xW z7nD*%Ohju$MAd4w`OpNi_#+PAlZ>7{b^gy_^>kgmP~{O2j3Ei8NM;`B3%uH0>1KrX z5MEL$JQJ9m%B4Tsz`!|kum?p{=s%k*rN!ZS^6NNUkEhT)ggxxhG^#+cK#dFR%>rkG zL1ofl?1B|s{jMi*+1rLNF#dY5s)Y35KZASz#-RS z5lQ|CYQ`DPL}B}1%vP=aLL1#&V~K>~P*8(UHppbzylhGBq#g#cx^Iz!NI7qirPHrA z;g;k%Al$XK762_REzO5Z8j62UmeI?AyDawQB|iu{*dtL^fe(X4g4Nye*Z?qfLNH7I zgMYsT5;W3$TzCy)g9?zcM=MvnV?iq0=V04N=IeRln~|2GV*tP-z;6K@u*L-T7J;)- z(7$C7XaAQ!z%{?Mggrl9MP=}8Fx!aifl*&LfV=-{0`+6-p;tl+X#}!`jQr2Og%5A9 zT9>KxX5Y>?U(^A2*fY1?;`88((o;`MN2R?Ro1b=l?7fuQ3L4frUTb4uj9Cysn23e| zsKtearFaip#|M?frh-h3=jAip(j%+q)zq5`yei(>mk{P8m?kVgwQ1$hzQ2OeWrVq~ z)hx=_*>biym%n2U&NhLqL{ykbBu7k}!W*@i+-Hc-5LgL-S2hU-ch<1`?Kj}+-ym$h z=smFehrx=&6eYT|mXF z%O-s9h1JpSC)WhImCj6yHn18^5*#Cb@>9PN)Mp<~sPGA+xqKc9drK8r{cCpco_E|O z&Nf6~7vYx2Tbz??0s@Bk3gRmQ?2!oIn+--Un8B65eG9JoRmQ}&{|w6>0x3f1{7q5O zOJnBQPh#&!uY|Wi2$m?^mF{5;G_F^=@e7mkJytldlCyCo4YT-OHGpV={;S!lJYr~F zW0e~llexJvdm;jNaYtPhM8DsjFc1 z>;zj;xh#pqfn>Pcokg|b{gkqIh+7?sgYSE)#O^(OzcD6=567yA@bc1;k&S{4+5|+`Dk$T~+(`<9CYxBZ8=ZAjafPhyUvS$UjGQ+c{ zbxExZo_Z@`H!p)RhKw=3L~&@9h~&9vpZ#&8^~=68al%=>sF5-o!j}=|A{8FNQJCrb z`@DripGh~;q={Uz>>lOoAm-}m=4<)VcRe80og&UU5obh9Wu4L0Ger+wRHuvI@#*nv 
z-;Ke|d;Yt&`-gp2svY3X-RK6(QxDdo$0fl7O>ZPVbtq8Wg}fq>(uJ{AL%BR-qT_-#>lZ_$95rF z;7}0}G9q@Fu%OIdftNzFIJK-^)I?)-O!J#6ylxPJX{Go5%};2*{$yfeC})2|+M7Kr zVllvqt0NosSeL)+p!99grteH6qnFO;tA6*JV^{wMjZAF&39alQmle6hBuKQfv`hvB zCh&0di4Wnvzr7Id)ByZ>f?fioE`To+Wbhix)Zafjs~wi%7RZS@xv_;)vIGMXEifOQ zt;~H}s}1w98_g=b;H&}>u8hO$%grCIi0yk6<794bZjXq-0uT_9qL?vG5M*9{tS)U| z76yV8$r}{C8Kj~=t*DvCq?>0FUrW?TgJ6z`s^s4Hk-q*bf29Ux^-FUz3{m?V9D`j# zumd8j$Qhj6rmuR})0V(uyKkc*x;`MLfCwIlTx4d*qUKYG2(&+o2mZ;%(t%%wzs%4p zfYb@_1pqcO$@R9l?_IlSY{rm62IL71gA!0n99E)*f;?yxF@TZNUaS#dy29*7#rLD3Y8YmLRDcjAse zsbFFM5WHDNFb|kTg4BheaVCrT9j^bu`_?X!!x&KE9@qqvXF;BdK*H%*A(;>4nO~n9 zdc)H~EGYE2EzRcL7BieSY%V7RsU7s)64ePJ-mzoHjYK3CfCdrukB*LNR!VSs_pF{G zR*yPV+gf9j+EZjo(G1}w0(q2~lHXP$Z~n@dqKyXpqcAfM3@#tN;?D=ypZ)9Mzi`v2 zBJ01NWOxIRArb>n(u>^#!A+S`L}d~Yfese&@TcpTd;BT{ON?MS6x`4Npv5A>PFCg* z2I~K~XH@^TB3K;u=tVC&KG@n}CQTcXa17oT%)V>7f8cQ;ZcfU9dS-LGAUE&ick|NU zz;fd0sYgE!R!S_(A|e74kqOGR$jj zYJ6xzz$4%Cn>PReAOJ~3K~y%jTLj|NGtWSbI0N#~i!uVa-nZ!e>HqdswS6kBH<8Cm z0KE*8pFXwmlXF-0_!@-)V&;Az9BP>bV1|ZK3{uj*RqaQxfUY;V)0C@STc< zIq4|OMFA(-X|N~r^~A3hfM5(nzX&uAt*hPp*MTwqj7aF|liZV$T%jZ_u!|NEAJL`; zV+7h?#A9Fm4DR~i|9unVdsY zjFiGoNsycGF1dgC?%LQp4;UzEyMX0yQA(NZjkO8!T~{s?gpi$ zyJKijKpI54LqNK_L0UpWy1T#qe8=(rguU0j*4@`hvU|H!@<*`k(#E8=e@>!~bFsrw zWROwG;Vj4f(%%1GMDgAfA~lJCq#(`KTHwE?fUC*%5XJzTOTdeOgTHuw>IXQGP(7nPe=@PTfTQ} zubjQi`krwQfmZ(GWD2Qj?BFU~nx;RsQ#1xuk|(O5s0va&h(?U?n#a>M zd`Yd?za;Gb#ULWRM@Xr!7oS>Rjz*OaGmK^jZHrxX{!f7K&h+hsglR6~JC<+a0j`*f zK{KwK!{~@!iA3n%8?{I*b-PE+KkrJ&LYnm|;&fAxXS-H1-o~t6=N9BZ1S1aIFWw2i ztLAu~-?A|s#*dAz?LBmsPe$KNv^AMYs=%i4js(JE-9H`vUMDY&X0Fu~7_N+~1xS&^ zrDRhCySc}0zg~X)+I&Tq8ejoh}<^#w`^GF&6w*z z#stgJb$?S#w&nRN{*#%GcJAky2W%GOI4;XL=}HXhwnzUz>dqi~c}3Om+mzpZ#Sy1j zpc*^T571Z;H#=AzV&M*m+c8+`9xk}^Or~M>35lqYD%taViR(0eDsp|wh9MAbqq_)S zPrf(htVAo4=G3jmr11{w&7ox%6KHaM86b|uDU?%wy*CAA2r#ugP5}ybrHt|Zk2_OB zZ;@V@X@#KN4yli?EkbEp2)EvbHFae41}XKQX=QwfgjyrVRgeh^3ytnn66LQ96@P6@`vk8qCRtzDjHq0*nO_kb=+El{50 zv6=(FwY+P8Dc=|Kfh+HZIo1*A}_KdF`TN% 
zxOJ;B`KEJ2Pry|;ULlbHhx71S_y(2lRw#M+p6a%59pP4p`ELrx7KGS-*GB8YFd@$S z42v{nJwUHb39^sbThkBev1Pq|PgE&y`>N?@Q0odq8(Af0Gzuy%r)R)p_R_7RIPC{% z!36aP4+(unc&|F+nZaD+Z7z(NgC{JUB9YriHq&7QyDB>pD6DU%BCL7zfj^7Kalq2n%1F5g&~F zP*x31W0-CJZR~v%)Lf01&c?`;3&pP$isaI{TUR!IGIw~nk^D4>(O5+wtrbs;8OZS| zVP|$kRVUq#JkXbaf|e!JV`K&$Z=m5W)3D_^3m!9fPqX~})*TKX1vLH#?3U$cKQCRl z{3|5YzTb#;y?gmaw-BAqc~P`N|H^$R(9ldGTI23__E)5JxloAM+gbv08O2H8NggSI zw-&GIJUt{O&bzRSD=Rk4*z}4MF01TOjFX5~g%GaMwoFJvX*{rTo~~=2o4CAeb_4O= z*+yB;>SzqL8i;VcV<(p_*aZ2WfbC8?I%bY9j3B?PN=LEc&M))gOE&{KMlP?e-uFs1 zWJ?$JH%f7~6EaCX>U0F#IvFgcxD+Dx7Qi_<8 zZyv=AL5cy!)a(T@mb%t>_*z<8*_Hzha*%9#u;1S>5(yK1V$o!b!;hU|dDU5U^)7lV zK8CwyGkitgUTevV@;mxa4bT_lSopPF)zdGiFQu-aOAdWHVo(*R_$kqcVoo~OeNxf) zJ2wm)>F4~2^OOvJNTZBfq!d-HkP;1rSj}O;;f}RDXY0giI?IsCc@m*$0n%Wiy1de{ z6Z6o9GpkB)>s$SyT78=qHM1M9#Z2*%RuaRA=O6^(lgEpW-qC}|kK21iITCg}byIqh z4$BQ#I5?HoR+~aN+7zhLKk%JZRKB+#yjzyGJLo@UqA1i5!20&3ON6#VIA_vfB{M1L z7k?GbYjn3zEf!7*ybYB3+lTbZNqgMgAPXw7b0zihNopKHhXn}>|4alWXTIsVx^b&+ zR@SUAWvXC`kpO`e$M(NvIbN*UOk4#GZYgbOM;Jb$3(HLC-c1ZD+Oo*PD$<;@Kl`z^ z4&VJumZ{h43nMZ5l?eX{if0>{9v!uEa5z>26O7c3Yo%;2#nGu0fVEtmo#aD*5PXNa zzyG>b{!O^xOy}5oNd4DLJa}2|o2kbzIWOU8?Ld>ya7iJ<0=^yIVq{^)=?vW)m+Fst z?EVwelNry>HpTzml<$6I_`Cs6a3|IVB|&INUf2FgySbEoIqD1Q<9e|1?2z(mV*6U$ zh{3KkbG#H5>u18eeD(069!!OcHb~~7NWXh9@S)A9dAZN5n z1AvXFr(*b5hJIuG#%>g-ahsjB<(D){x?LP~zBRjj{6|DiB5ia786;Z~=OtAoCi@&^ zqq0GYLB*E&kTV3lfbKjwW(s^cF7g70o1?!TZ9+ylXSaVr7C#fvgUV?>fJ;7z_idns zwnmVINct>a?uOlv)}I9;e2UcK9XgwrF%Mfw`FT5c5T0x#bM-Z6)y?aV1EniN_f0;H zLfWbLgP&hWNOjT%CRgj}IOL}TJL6lT0enzA{#2TNgPrhzJ4Fqpg55GB591Y0lbJqm z>z9ASm0K?!7Umf?^RfXLQWA<-T68{=b6y&8P9yRZZ{T^0O$@A2ehS023=0Sm%m(FL%P;$wz zcbYZi)Q0Y`hrjN+A?vnxml6NsnScmBWL#G>Dyh&^B^jvAhgHbvrZm|5FyX)ZG36PW z#{W7|D2}c_H3F_^?0eRys*}>!u-WNM;Uu&_=Vrq=p$Ws#F>KCsa;}v~pmLMJI$ z&mDO-%==N9R!6`uo|mzn3#oFE$09`FAQ+0m;{f#HZD*E?=ImRCr>=~3)Oj)ZNcWJf zmM%{2HWzDLs0ANw6+%C<^2-_tI2FbP%px|q{;8Z+n$Nhz8_vO*`tDj%prj_0gX~+V z#5q2L95@zZQxD+zz&_W9i)8ift3Q&aX@9)x9{smF^#eELqZ&*7W_jKC{g)#lvqga< 
z%PSu;T{C)(th9K`C&C7UQFFHwt7QdT9}{zX0T#Y=EP)GcM)_S8I;d|4y+x4y9C zeD~zuh9pgGY4~Aa&HL+fo) zb=gCg58mh2<0d}9cOQVysoSg>pnARB@I5K`-SDlj$r$kogE!(?2g9Q1WAl@J(pPce z-{R;-+E0#(*J_V z0pdif0}=Od3U1vSR=mYjNf#j=OJTD8$2?`mL>cv+t4O|#@KjF3QDT2mH+|oS=?kH++jnntY$kRZaH}s+cG7D7L4@yH9bdS182OOPOYUagKllU?y~J-Er*Yox#23AUygPB zVvTisz4$|gwVe{VEq?=pQiCET(z^s2_=62aZ)`u%@4qi*<6Vn>qBFL#)YNv<76eh% zVi@~j=a#Y7y*eytM=1OhY`F{oi5>s^IwJ$?VS{D_9sUR@K%(##L00l$dB|S# zG5&Ic@YkVB{6xO@>Z#!foNC;k>cRGb(gsE&;^?H(PjyzAl^wkYOA;a*?|#?*_QbW9 z598kvPW8sBWYJ0rN8Wsmq|FzeNe8a@SC0SNAQVXeFMh)G5AtXwJ<*)F1XA;2IBSbn${kiZ zt{%QEXKAO=-&%#Y@ImYtmGr*9PTNU)dP;Uwx~vKss&xxafnJ-3!kfp)LKH+gC4Xrr z_S#|m+ZV!^fUpb;cF_olISeHv4tBtxi5SP3lJdaG*PgSVwGew(dcmxw;9kS zzfa#Dtaq3GKI=g2B&;3Ri2}xi2n@ck;H6dDY{`ac;gxNUFRsLt&M9rJCZD`Zn9N+6 zNci!}(dwnS)|;e^)S)=Jr}BdQiR|xP;t5a0PVq!Rdso>~=C8Sqt}13CnQE=tVA{}( zAqxR;;(wrJDtrboE0T$902*E1TV?g8%7@HP>5?y}|HibD)ylT(PIg6b9tys6DaVzr zaMG;jjl63)-U|3c69A`s#fw6z(H*VO8%oM2(aSug?Uo2iRbwH#Hmzu0?DF~!vq=dm z)*-waZ2sro!nS*5FBuA()9HX)-Fx~ggx{WuJ{(D$YKtKwl&}@q7lmwUJWu^ z6kxG1Ah#lbYw={h;Bnpy4kf@aKr{t}T1`eVaCWQ^tR-fNBu%Ezs4IPI-z|!$OtmrO>*FRtdPdUol&0L4%7l7KVr- z2@?~fcHi|$!Isdzz!BcRkXO?`QuhPG*)taPPsVKF@(q@ifY7-&NQ$9=s-){e{}OJ>0Kp^QDzy9y zi2fbZ-rx)BBjh<7Hll&Jz#FQ-2>4SMZ;@);l@5~WN~GKu(5@SJMU26F93>kHYLH z4aJTiw4T7YxVS&zs8p%IX^T&|;IyAZxmqf}|CB3UsJryQ3-2oYetSoRe-PuhmXM zv{(;<=ZYq-Gd0?oUXBr(YfeLWF4&kmg0K#kZMafBl@kW$pc2%KkRRS2BrT6%isAp(ez*6AURvgrMcZ?%!{vCw!L zAi5-(|0aS5|L?eAi&5ObW*NAjo}<9-TFlYJ%xuH4%AonuvGc|ZKfnBIPL=82Rk)H& zwPm{9q5vlCA7Ci)JtW`CxoPJe8X0018P-T`r<j^sTYPHQ zfC`6`ZjZ%9@CLT+yuZ)gbU_YdFPtwS$Sy;9Z@EEnBCUw$D;{AG41`vIo-Ll0mF3mLE#SD)aCG;?)$s_} zhUJZT^*$gTm& zV+OI1>nTD**x=>z_eAVArk_`y@y&Q{Lj>*bA20b{4HKonz*_bu^hg6{XZrDWi(hE+ z7(pg1+hM?k@o02h^Y^&y#tkxF+b#@=BV3=j%?C4ZljK z>D&>%Yd$&}x9LlrDv5@Gx;1d!CC?`;F8-u2eAB`3aD$Et{GN!o2>f9l5gR)LQLhb1 zMt^FJ`_a+?U}If6>j-^!Jm1!x5kocMjzkG3(*};GO-?gSAUyq?As}%4FDC zh**ZXhtJTEyC+l;VUjG^t(;3IRd??D514jB@jTxcs)&f_%klWa0;*PpLE043fDza` zIgxFg%R-At~ZZ*)5xZRHxf_58qI&b@ 
z;XdqQN3uNsvDFMYq|wpQ$_f?K?D{7n*20zU=Lo6Wlh7q(-C2~3- z7u#Xs$LM1zIVRxYhq)J2QA)ZWazIORH2gDTVbDvl_&gawtgyyGP%)=*^CeqTS{mcF z3qx)a;>3mGyLVz4LI|Khvl>!1LX0s`;4AH_XPC9MH5J0fkVUPq3~w#0^c{_+b(RjHAKR%)&_%^KYEjHLYeEPJQGh{(U3-++G z`bS;A%=zTA*D`j=zgFKqfhb1c#cVRzL>u&S^#*b6+mv-FC6bHd;tOE z;qd2IjraC&GCzNP5K|DOwXo`h0PJ89Xgi~>P7JcZn_BwyX=d;1K)fSNkzG2RhlAiV z@ZmUqIE|b}LNBHh7tHJIKSRjO!yWpdEvwkEwy^|k^hhks9+%Q6C`nKxKBg3gR@zlo z2=kO7K95PaA8*L^V!Xeb&o3SjV)wb_NQW_VGNUewR=E^w7;jVSug`w#H^nl?J(}jk zJnY{sjoA#50%K_Ou7K!Yc)~3{8CnqidhxLDZ&%lgCQI1x+OV+5py=sZ3YYPjTam^v zOF$h$3E>|y>Krmks%Pg4a+JkYZ*ulqGf4N~yx$9u8;C z8sbZil0$*m+1R9`1ag7`4|{tp`&wh^j6TuVSyUY#JxRK0MI^&$3VMTV=U4ZcDKdTydcX-SCY^diPA<~!ygiIS+uNMuHQ|HfdvWsej6Tv(qrn>VrPizn zefp!CEmiTxFL$bDB7A-D9)C=KpUYMbKHh9+r_=?&h??S#LG_t0A3^{D@K;r@?D+}~ zn(;5LcfGh4Z`0n$z)^t^q;9dsSE3^$xj7W>m!@2p9BPt8z+=)p5?w=7(}58i1O=aL@Lt<8l2HthwfjAZphPl;i-h3xA{!q4Zx-N+KLNHh z@A&67T9Zxdk%~r-XIUl4{sdY8i*KlJ=-$^XtDQ$dc{W?$pIt+>Dj%MqLxlfJ`KKx? zEZ$#EXfeI_A|N7WP(OmKgud4mK?Ma}(1Xw*zN`!XP*G6a_a|hmP+%CGMywDmI( zK>+RU={oD|EdFbbxGp+q9Y9dTZMJg2YII8&Aa)0bhmZ3V@19r+xHHC90f-Ynza->$ zqsx3Hx;WJkq<7$Ic^1g4gtcr~w4<$9EtZns>VP+B3am&`?AF@-;PC0}9lw!U_DJvA zhUpVpaPF?z>>iS2kpn4=VC3npRw%;0<**}R?5x3%r7O1P588bV1Bgx{^9lj z@RIXkW@lCTN!I_S(G>_Xh2Dz&DNAZD$w;zDU2JiB_T`;Vz)nHux1bN6ZEHgf7PDsj zy5|Gz(-7g;+eUX-yi=VJd^JK{v%864T9}j$&)+K*hSMZAm@M)mrU#O~>Lf+%1@WL@ zxr>WavJt&9;oa}mdX<2POi4frzb{F1jv4)OHU`)5Z%^mvK`JNhAtw6Z=nYM%Gc_r2 z0uEq;THO05w}8a6F|dQdR?xbsjx-ot>@_=_oYP?cpi;5Ag>Y7QDc>W41%r}-u57pP zbBXYm@oE?rW}?EnjbpKJJ@&4S0&sXm3SiN&{X)jNRV7GmW7UZ+p!czHaRrT=eDBny$JOwr#uccaM8Fcg1|5T; z!t^BsfLzy4=WS>o=mR?;3bbHaPph~~R^(};bE0Ilb7nPR%N4+;GcOq6#y<5n*0S3t5z zD^YZv<;Cdv<=)I|z!Sz`obNz`E?bSStGLxKKO*o10PY|WR`fxqLR*?FtLIK-E`|RU zFt#p4KvNwJWU_oc478l68`<^sQnq!0-dGBUXiM|+$h76ivwYnXHq>S$%Yt(L%DKrv z+rg^P2Ust8=fK#QpD+=zMI|pjsOYDtA)`m@EenxFt5$w(H-fax*F}Kx2J)SMfO`>l zU8KN16dpt(1!qSyl(xKMrR5$gt>|%9ast=;_Cl4K<*fi%Ux@Dk`!R$l&iE=x;B)tK2G`Ez9|<`v5<&0)_)6DvVJ_QaB#k 
zP18rZNNgzCwwyDB=rwG^<5>wD!Yw!mn$-TP(_Bvtj9N1>j@k*Z)h@EXj!;(eQMPL7zt*WOpYdVwLa?|w1D}6DsrqW+AuHO;~E_+0cMuKYD(n& zrVcauXIZ{FQF%U+>afMoX97I-x3n}rb*9=~p4Z6mO@W3r*Q?H!G~FyAe_z=e9bcIu zZIh9?iU9KXfdESU4DW~g=jX>J7s;IwyQdS6v+l6%H`Klf_=!u)%Z)$l>n+JlpaE4# zpvU3$$#rAg)%LNNE-wu-q;lPyT!tix61oEVJzFB~QwN#*Jl`H&HWSvZ)G74QNKfCJ z>k54(e2>@BeP+kIk*_PDd#hsxDTnq;zorlZ5B}cm<^WEt-DCTFd*Y+Asqg~V`~)ih z6huR>&1P2h`s!NXRVN`$3SOsNFA~l5o@SZB{1|T_QH_Qk9mYo2`;2oeZn9X-0+{E< zLe2P)T?32FldPjLG$9)mc!_fMb(>w$@I{xGmLy`}x<})v>(W|Ljw`>;B&+o5Z9);2 zfVVCJpQa%7Z-49?rTXqrFowZVF^Y}t;Q~-P0%;E$tFdgi-*#h~rYq-kj33r~$wtKj z$;!$~espknmILLsyB$cVPF3r_p|M!g?<1bT)A5W{`TQOf05Dp#K@07%!mMpq&0f4G0^RlZ~ zENjx;HsYY?HLr8~2rUl)Zq0nGoYB8&H#JUiYFhX!bml?b>NpE5}_<}Ng zB=Vy$LBk#rzE){{je}?eymCddI4SDsC%Jql*=+Ug3$?e{-@fv4_TQKv@c0KdKl9?p zzlnr&E7KFmzfFUzi9|Hznf#E47(~MT;J;E5?$JJT&|WiaoT+s~wT(0WMTM(t()lA3 z@V_=A0du|zoC{oMONRPfG#PXozi!_t5d3Iih1jc!R!xi_Voi--f!{7KkTlLx(7LPl z1r~fsul)Y~3eK^rzJ3P&oMe)$WD@i<=Qqr`Pj7ECBSk%|f`3~fZAoZiR9x!oQ@B5@nn*M{Lg>0bY#T%C$>LK^S$5mUe$&VE;qZCS*I5Q z$q7(tLhC{M^sgD4%%~01x@#{gsN?v1$K!H}wdnUGE(Jw@-v2QDl|XDlcF)V^^xA$7LRD$l@to=ZlF3k; zeh{d`j#A^9Dfu`Rzsi_LPjiingH0)$bDA_-9$JNmG*X*eRyK0u)Nzq@5=y=B=MNS@ zNJ=FO!-K-aV^gC~+-v zV+nBBPS+msT-NWt+w(Gn4!ncpE6f;WW>VZ-@GV!3ROtVUd+ z9||oE=^!c4w7AW3v?%w4gfMQ3k%lVF3XGWk!Y^dv6R#^b%eA1iG04cu>eiQ5_}8HaRdS zZJ^<`in!hw|1R6zmRp`V_Htu}z$UKJ1ZA0-5JvkCRsbD$t892SbhbbCu z&3a@O{&F#S*w4OgS42Ty%WKGYAX zMZalhN+75EBALjO{=15`<2S9^in5GuS<#6269EiP@I}qGXyqCn6ED-mJojlBFEl_p zC!>0cuhrc~E1mate5yVeR31SXZp%r4Xj08tR!S*LN4)#fcDq&9f|4K8tq|~)B>Wh3 z8=WFbN=mr3%^3vc4e9om!h>7e9 zW#}m7fN>e9UFH$)ZxCUzdup6lY*x^y#M}py27hcQq_WLl4X%~x9sWs$2xjY$LMnr1 z)#N?@!pPNRZ|^{VN9NAKJP>1wj42>u!$df*Sm~!ySIAe*H4Td?M<}lnlxIkSzC$Yb zh0++0|BaAt&!y9w;g+Z(ZvSS4g=E2|K?AzAkCU5tv8@;!gOA|rH0$83wrIve{=>ZD zTG?$*AxU}w4llf4n%}1gp8B${$6{RUU_;^567>{)$|(I9T0p)&=yWA9#2jp;T^Z_ z#(N{4b5*{fQQ9$4&X0}%SA`HW$V0b630>yRNG%OjybG90oBD27!I40X7nDgxPVSwJ zw-WdXx0P$&%@n{eB@^XOiGCtQ$rSqB&9psCl8zunkS8j}clip^stDnrez+ZoAczc& 
zvNekfEq>1k3FS1{{P(%d$kAR+;e;1mLq_a1L{i#*DD;hSwobqDfO>xGC!jX0H!j1zSUbE>J zd|_KQVIPTf4dVypCA0e7+568t4boBy8XVUgKG{AOi8vl31j0tyT|J{UXS8LbsTfG- z%5`67^WpP6IONJeRPlr>51t)m(5!)|JuvGA#b8Tyn`S`%p$p+pG`4Bw7%|C3`ApeS zwnVAV?ATSaygY3}h1r1riB41`5~yAWtJSX#msAq+X+gwQ(k- z@BjK;_DHD<2mbpb`7c)_?<=w>KdTYnSCXg_{>BWKtr)M0{a!jn_ZOUVg9+hZc@o_J zVM9JFGjGZNU1!8=libKbgTg$WvDY`@0tjt8=9iZ4^YOZY*z_gBl#~+;<&fIUZ7^a% zHZ(Mx0#i`$_Q5`-{1dWTrfrg!sXng|p-bfB7~vqqcnN~H0(q-~$MI4M z(V>RF+`I2ILhh$=zXpO0&50E?9>6xnSXHo=MyB7hDe&x6}N^irqv(T zKT`a^XIqWo6$W(^A%WgvHT=Yn?K3<58}FMhcWcuaKtV<(90uXs%|1%x6zm)|A(!vd zv$o1zZgdDcpI?uDQ;2?abVLKs;|Z0aECU!KhNl0j@Dp#^Z`!Jc;`%hei6({u10Q60 zP~FN!1W9-JFwTAUS64900vx|rxxh3bhOyS>$ESZxj3!e)<8}wnk6e7bq%wciAMvJo ztG?Vz?r#*HB_d~|!j>tIqcp?Y_JMwPwjVl0w&f0w8y zOzf~V_tJn2THHAvC25gR&tVlQE=){JFUn{u#l9#qYKkSA%rr=UG^?a7 zGwSi%Y58S(gE+cSJkp?pa**w3kmX51{xixcXJxl}!1ssGksD}j_r8>83(2N38U!`mG>%JQ6@zTSOqw3%lPH8$auPL993RH;iVg|TMiPBpn+$En&YwF8C1(KZ{${@6AJ7G z1f7cE!iMO}4Vnn-mN^AWQ3K27D}yZ4O?iDdM9{f9e_vj17b;tcg0Jm?PXFM@!*zvK zLFWag3M-LW;*BP*(*Wx~3OXkJr@C~DRNSqbz9ac!o`m+i*%`xyf9H%&Rgbei8wBQ* z3GLw-{VUDq(Wa+w^aQa-RWs((QH5QW!y&AFd&=R0lIMeUC%!wPjpX5f0p-+115S^y zP$xX8T9~qo(}$n+R%7I(9WF&NbcTv9_9md~wo3vytOhL*Z6t*=?E2cBrsJqCzyzYx z?kl|I+{Ep^?%d%n1>(c{u~u7d{L|jYvP8rC>OS^ORACpEr_NYYz!cX4CF&a@k>RN9FCg z{7p#VxlJVGx{HTqg)(tLlcwAqMZ`u!6|LE7YG;R~RiTFrklwmbO0|NDC7UBIj?$$nTBzG)0zu=$x#K8yv3N=o8=x9Exd+7XXu3cvcsX&78BN0^k+n zX|R_;-{Z4pe%VK{({6`!v)?r7qBr^@-L#6T45>Z?3NgI)1a}Q-LqKi`={M)WTFPO!<7;> zKWTD$2}4{d<(AR{OtKFQ9!xGR>8V|lBJU}43Z_8|&LNjNj*eVeq8<(tqnDQ`y2sCN z*^F|Kr;8s9a8K1>l~fBmi1)S#r-DiJM+3Nlx|B}DBoPzpA!-%(<=f3JuG5KgwF9mO z*+UlVE;%ocg8!AT1hC9M0+Sj6rvJz|+%&%w=L&(j5zcf*Nv;%urWb3=vebvpve>m51OO-hTi7y^3~&E!?y>;7U8d z?L9d0!;l8FZ{w_%xE+PIYgV1LRJ3cnAS{O~-fK-;m9EB-q0V2M_1ioVi{p6fZSb`D ztZk9}*J-`E%$?M4TDEQGe90-w&To@psuFiwC6$$xjhM8Vu#(G%KC{nYB;mR!;%QfR zcbR~Ij+e{`%kkuQSzHDE8x(AYou7yN?(4cGO?`S-En`^| zHG_c4!K1!(8}{G!`$lX$506BWfI7*2JJ7709)E>rn}!}c%8O`y2I*up; zlB&@`>cawD|K!vZf{e$U6zHR^ZAec9PS_S-v$3_D!m#gg--zpZshyp)3I-)K&(7vUY 
zK=R|3b9T1b_ZG>D!=Iu9HZf~};jgyTtkm7F&w=+JeR6b(eYCJNnXDY)sb$DEpMwj2Lz7w>1My3Rt{!U6)v_*T*=!{3%JB*mX?~;&=SYCovDGc z)n@1}(im0AA974M+JC<}#nnUGPNu*NUnNTf8CX(4{4mAFkXC1sd*3-AN3UO5fxcgb6@$OKLK2U~mligFco44{>b0L;V(%@nDI95mOp z=M;NLV}v1c^Rn;l1^xT?FDlemK>;;W+=m!w(a6|RU5?|BO0C#f8;el|beNJ-wzwaQ zwFbOA6M;b9d`_0FMz40=Hm{I_pDGP*1p)G|VrPIl646CqYa({AZ9>0G-!|UfNTqb; z+q4+(%bEcruM?lO78$hfpmoXrI5{KSV=($;;@U$j|2xls0Lhi8!*I428|xWA4BS?X5X?ZJS~FmU`fHi*1=A@d4-itJ-?n?fm;S=?BgZSL$1 z8;KnH&wqjL3N8vZZCMwNOW&c#yKxTkL&vwdf{5)8$XoLpqJKt6TZsn`nh*K)a>o>! z<%R@lkrrx5>HR2MmQFigP5ADs=Pa3!hKIo?fUeg|B3_;%B#u)j(wz@wx#90{GF!D- zUsTllG7Z+Qy_q7@iEMGFr~9ksjFi-a?j$G!P=f{DSd5jG6;pJIk5kGHRw!SF=oMLH zt+Gc}Y)J7<8Na=W9M}}Z>hWYvQdZYI=5irbw+>(aL`7Fj+VH}n zB1t{g8<6x^bpE@-3ZlTeUy7lpVgqw$lXNNzPWH$(t3vP0EzTJ*V zon~+7OrcY6Lj<(&AM|e5Em$KkI)Petqz3Ekdq5EpaIo#XJfDHVELYu00qx`n5e-C} zAd`+Nq?P!t=X?Ea}o{@olh^EJoi(kj2|OJaDD& zX%}jSf`S6jX^Q9CId@HIL3xnjJBzOLeyI3nVgJ8b0G{==Nl?{6>3q$f*+cKJ(0ZJ~ z9oNM!?YQ(52SIv!2MV7^>yM*ad-_3~-@JdXcF zHXqFn3o|62i_NdRjvrW)fhY+$u4Xm?!?q(cGc&fqWO}Tr*=j4d0TQ z9^wm$jFrWV5YEFA9G47n*}Yi5s_kh+y#SHR!^8Oacn*O~QWDIVl#1$^sfSN#cmA7_ z_&vLbJ{}yc(x^@EOq`H+R9Ur$t*RA~tNj)XtSZXP8iwg;>OZxaAoOz zjV>NJ6A%*@Cv%d{4}6d=ZEndvvbek|w10alVzb!^Xkd5(J6!7T^fK$KN=JIy#CG27+q7I?ACa)bT*Mu#;CzQnOglmH1$?m&k* z!U%rOw?PJPtA@)z^Yni+KVU9eyMW% z9%Y<}l8Z>iCPWPhWl&1#1cnyanpu3ZQU43@t#kZ{RkFm;|F5H~j*9YaqDyxO64D_^ zNl7dX3;t9T3F+>X?(UEj1?dJ6X~_lY?q)$)x|aqeh41m(EQblx8QZftGHAK$nY=U64ST9)H(yITFgSd{QmG4-DMm?-Fm7p>KrTRKHJ z=ZOdj58PrQvn+a`uGvt1_}Hs@2BZiXdVY2aSTHkotO&wvxwJMF(NHpwJEhrB}r7s$`5qIN5%YnjGivSo?(p za@Db|kqo`QqeU?R5s`ZR<)G7_=Wt3n{tJ8Q*UjVqeZI(F{N1N{SJC0G1GoT6CN7RdwgV<@VxZ9R{Yc{c9q zk8m={nc!$g!FMmBy-I0nNn`rtKRzMDyPIUZTjjR5w_gY&ea^XgL_$ImICO00Vnx}{ zx{aKf!FrKEANr@h{xqVS5QH0IdTGLcfJqTukOb}C!{@l8(yTgi*w@T$NJ#6!(3I~ywJ#gU2t$AZ)(SJ zK8pF22!8C^;_v_ysUKv-??n6Z(cQ}+p9d?3;7bSl$j$zIfE0hm^dNPUODK8NT%Ct< zJ@8_a&&bZfW|soCL67+RZ}+@Zgh>($QZJc(0ooKVE-t>Z(h)jd<7U%y!ATk8^u3Mo zkK_$crkKBv+{S_wmUs|fVpe)~gn}e7h;t^4+>oFzUnrpgYnWwv4aCvul2Ly=6LuCJ 
zJxETP9NIn`WB)uL!t$#TgsC||ytjG-h#Tov<3p(9eK2^2j`S)!bM1)r!|>mwywz@B ztQ6%*J;G`{deFXYHd|VLnaNgs{o99^2J~Xz7ENW2nooyji1;6!<=jgG(Ee7=tyrDQ zAGCS*qfDp+@Z)sCzc=9aKY|c4|BI~S3k!ND^mKi}KqJ!b>iefXk@W=)SEVRsH-65(tT9%>;rubX(B?~zen-KZ)|U-G5)<_a(7lK*eroa%{}%65 z7^vTAeB5OWNNP|N3_5OcET7IcFx+4E$t+o=TRwYWkTtL=Etoc9IKJORRfU>iB6(c< z)dt4?8841Jluv?@nmTAZCy?RK(K5-E-(7`cT%+hkiixQydUOwqP(!V(o}C?hlIXTV zs4syra%pL)s*^q4tkQIyg5`8$W8(vmXN-`pm3T?Eu2mzOarlhjr!XKZNJv~4eCARv zFq~-M${cg-pyt#FksC9RTEdl@JN$uWCBIT}asxL#_WiGE#(JO(hw;md!>*9u{yO9G zMSVkyeU=F|I}5|GWlhfD_E{(l_X+H$0M?$0k~U6G*2~9$3&R+vKcFmVZahRb1F{1t z;aELrhkqXQfsvjTqt7|FyoEkJBcmfqq}19m@(!R%pfNHb(2afA?zGa0YBa#@09h@? zej%U-*a-CMk*7Na3H4O?;W1sTtjL#PD$l2d1wSOyI&E9^EMBl~$9vts|J^c&dno?| z*%RWcTITZWiTe>ZleC6T%qB|7U~f%d4VklE5I{!ac&sp>+cF?ZzoQuepG|KuzFHBWrJ&KCmZ(lD?5M! z5#T}}OI8v_{GeqFLVB)$m~+nb1L{Nvmc%FmW`;xXnXj`$a98dxw{`Lp5~tqvJ4jW! zSBjBVqO~tSebzRlom-xmy83=qqm{rxa=E{!i|yOPc8P~(OAWpMLuFQidXn1k2WnzWEWp!5LT+2l4ewv>?rg#r;n`< zK|UN~F-?Vc*n|F)^5b^(T0n~C!S~`Ro41cY?~zDfz#RdF<6mk*9TH`jnwol<@(jMH zA&(pRJ&d2ZpH)&-MNe8v7)xnngp^1EsIOM%s?Cm(#W@kLQ+UHSmmIxvF)qd0tE&Yd z$32h_3i;^!VYna>6eb0rc5=_SSzdAigry?x_^n)m{DHEPvXKbUy6+bYd)EA!f7?X2 zKV@eVY>#Fi2ohp&7@xays5nn;>v&PzXZIBa%71dd2rX=m(axgkC>(3j(ztI;sqk@g za}KBAtJ&KF98KVH+VXX|7XZ_t5hjhpw~Uc5{<~{Q66B7eQ*fJ4sdZ8MXcy$^;Sqv@ zw%#y^-KXgjzs-cAE&E^FK=1cTJ(IZu4T6YQJU`LXGG25n&EuVT{Y2jf41s2aKHj^j zshV~m^_mh2z6>EiL_L>~8j0-x^5)biFPaOVweAL$olJ zDr<$G)C!_RvsA;RVdv_=k>w(bcJUz%`UTx=GegD_M>}^wWu>!Lt~w1^k0ao!ar z`0tPVF5fKStbb5=LyF`{R?MMeUY81v?BE)!(M9))TXzgV8%d{}*-h73B=oIQoaM8#v#(4boBx(1BoYgw z%@K7$IN3rp$FZuLP5=Cu-|tXhah?SZaw6ygTD3Hxnu{t~PFk?!d zT&_Ev_a~l;k8i7SJp5PvW(RCNf=Vfy^@(DQqum|~k&~0R&ln>TXc3iNARQZoFuyXrw1KjPsenVQwCjCO8q@Oyk9n3?=ZE>gVIKo6H7%_Ks1jQu!XWf* zW{+D09B-uwjwj?k{?@;NuU&{|Jnv#_s0^0~Y=ED@myGPpvWAo@oA@Z6jeb9REfs_@ zy}ubjvyw+aVtS$w5_mo~wesd>T7Y1&=5TOuOyV4L zRVPGr+m_u8a=KlS#+J{R5?K^BeGHv#T~6o03!CSUMb_IlfZ;3lyWkE5gMLk&&ewrL z*K2BOn9*Aaf_6zpW+AE_+VPp?I*Gtak)Ru_op9Y#@Ajpr3~7S#-;jzKdyu1f(2w23 
zi55Kfy$+|^PH0VMzH1ECQrzExLcQ{hdCe1|E)KFpg3Vpd#p&s8#mXTarY9jFa&``U zeqv=o0Lsww9C55Mzaw9#y^R6id>`K~h}7jLpyuEg7RFNKw03liRA5n^Q1t-*s|uqw zVqmBu^C!wV_}GV!~U)!`pm%lRTNjs5NH%S&etPZ!?dSnW@b+Rc>Y`ud9n${ zIvJ;!P)o0{wyz5wqtFB1)_b+q>SNqz53TVVo9v}z=85!Vk9IY`dgxIt41pJQk~hte$X76 zG8Ve+O+k?+=b5@Wl+qO*L)B$By28B_T||~C=}i`J9Lm`K?{^cyZWAFsPUP%Qo|xF$ z;uos@`(Nr@Hthi;20 zeh)I$?Q0_1UyKVaH|OKW#}oRz4R3Kf6vBig0%aC#hZ|(KFY$=beN-L(w#;E3R#Q~I z_7E|x+r~}as#y~@J7#JwKy#QE0d=d<<)?vf=7Ixl=hX@H_uWrqO4LfGb(zB=qwYZ! z{=0Pk{hvb8{D;+R(f+eUg1bUv!^25>87MQH&q~eB&8vV%wm68AuJn^8dGajMg9t2gZ znX|gW7by_#_mMz!!Q=y_jHx>U*`f#F-2>oVno}J5p`FV9iPXZS3jf&k6U-HUcoyW} zD(!XV(c<4ycj)>dk&-5c&v(k74uR_~`JrK-?&yp2^LEeHGkShPRytesmC{ug zI31l#09|(Aoi{XKf6i{JVr=_)s|e04?O=)f_x86iUxX*94WpGFp)5D^8-KX<(h}b2a)qs3p=^ z5d&)Wm~iwjifnFlI7QpRNn{0t_T`Bq6fHaXl_l{7rN#ZzrlYjXhm;T(eU3+nsBhmA z5r|i+vo*hJc)X<6?1?|VFmqx-pLA}t(4mXzfl?;8@=%5zNOOlK=79*RtEb2P0?LU6(5Tb-oPgI! z57p{rwb@0ML135m`Hq>33mN#ya*B8Z=465zPYZ#c>t8jEe7F1RUzHFIg zi+{RHS!M+tw*oC?Wb`*K?y=vJ!McQ7p#6IhxGoy#>W3K3BIzUlz-~g#!2R0_B!Zl*^mK7K1v$gT%_s^- zM@OqEex{6(h{t+$JWT+d$J0yUAJEg+2%-V@DY)Q8LMOqP3<;=gn=8FfMa!NiNbBnA zt~?1?d$MxU#~2C18jOb_$KrQ|K{K|5$3e(Rb+w8*dEZoBRt73s+^&TUui#9(_TXBbvS(6J!&a65wE&wyzN zW&-g&rtcR--Un$oZxxpMXt(lpuwm3gO zpr)9bofd3by^}zQ=pz)#sfZ`uk#WWT0oHjpTPkf{SVZKlnt*HYZ(k%8m}W?YMBjVy zk68pKARF8SueA?PjJqO;KE5fk%vuCJYUAC>qAHWFh^jEYvJ(yb!X(m!KIS3mhgI5KdGC1g*PC|W zyk6Pc7}7K;2n^r299IX3Cz?klYqF;(Q%*wG_J2IyK);G zLpzLbFV>JmE2!Tu+o+gwQ1^ScvAs)j(Xm49z~nkq4=j}b-aj>V+l|zbw+HC=4*?xK zK7HtGh}Tzbp1UhR8$F-;AqjIsQw?!|mkAJqR$-ajo2;>Ie{Kc=7PRUwZ`fBiQw&2Q zMI0*{m*lQ5dPg#xc2?xPOpXj2rTLE|I%a9#{udWj<&M(yIvsejnit6c7@c^Ri2!rv zEYMf-kh6PEhaV!=r zHa0IcnCrXGe4}zzzG;ME(skp}r@BcyQd`6OEmX(u=utU*EUh z{RTeWDb8f|0q1_sf@JOiSz>> z=m(PB$*eM`WBbnRxz0py9k+(2r_}{tjr4K8c=Z^bAI+w?hOaUWQ$+V9>xQ9wy}t1C zLXyP2_?Wal7ti8B@wyN~*6!PFnhF@j{axXK%gi z#<>e$8FH!0M8e?1k{$6Cm94J9zcUG32=PGEvFLYiB~t^u7_A_+k;C3A?zRri1wI6( zaxTgky(W{!$q;VdCK9K1R8CcMGeJsqQ~XP^l{apoa8cp#iHV7(S*uf5aA+jGPccOu 
zm%58{0(=OjHT^#>uXp3uIT57s2TzrGsK3HQ#O-~!DQGOi@_vzY5 zngzO@)V21HL5>@fuA}Y?^>x}jUqCCLQB6_CJU0B18TR(i)S=C@|Zq|^(plp~5+73zxIP?0E z3hq}MpC2<$UcZf`udRv}?MJGe@_GK0pD5x@3 z@Da``Jd~=uDgL4c2XBU{<)L)F*S*-9T3XI`aiYWqo(T8Ep-p}nnFLx#AV06C?Ig2v!O=8&F4?f#< zhM*$Ql%_HR?~c1lKv_TW<;xd>BsH$Gh7szUIt!gPrJZzN-!%L}OKe&m^4YnXYB! zmA;lOTa$=IpM=IKdc@3{fdLg!*Ch_DU&eWKVZ$u8`U>omJM?|`jsO50nXTB4ANF%! zp1<#7{?zVD7s-zZNA;Ww#{`ne#+$CTL{iE^-)pV1Eq&GtoaaB$idkVpf9GX78=#MsZvUtV4gC7rz+7p7n^DN*lSTVruRHiZL4dztyF#B=COaobd30ptDYDJ;2q#ux#3B}t z`$*3xXPA7AZq}N?Q%UTWjIBv_qt4$(4JjYY6p@r1XL1zf!n*UDHkzWEg$=fo z)bef8{{paaJP)eR_r}J?IcnlCPKz*7t)Lq;v;EJubv|bP{(qf09-1bpY2oUpk~o;O zY}V}_?ygQM>U|0S``16G01sVXL7a%N=r+wY~V%{8IE7f0(;dJR=z$$Rp2 zV%rzl6ahm!JMRDbdXJHw%RL(#k4Lkm2;|*2F^t(@Kn@le;6(oz)os#A+=xw$^#F?L7B8Dn2P>Q9(DFiAG41d+VgAiQRg!vW&|-o~cb)i!NQ6?YYS3 zNhMXhqV4KxOH0k%2O40r5_PTuln*|HB8&z@Wh?W3!X3E`&iUdJP)>*_O<^?P1jrm} znmQV7C707%Sn{V+=ch(_w_ zRf=WrU9_Ndz^2ySz-!dIpjDB}ml$M|F0ESZ;6>c-PNxrd!2fq}7PS=~_T-`0}_oDA8r`el^r!j6l>n+SvKg(=>X+X5lYNMw!dc{N` z`x5bI99eIb0S7GY)7W_V-oxXM{|Viom=*f5Leo_RW=qoMtO$P!Zy@qvhMAhB+@!K| z3;fng`WbIO_XviFkaAuxR^ETl2lx@fBX=DnfnGJm#a1qEZbJ0LkvAMu*5e`&2vO@R z1v&L1f3m6{#Axl3+qV(x(7>hnc{pey7oo(@f|YQ|>j(m#HafPTx%I6t9;J;^f6q35 z^u<2(nWhYr0vePV6BIa?`!!=U+!Q@7bT~l#K|nz8Pl-oKNm*4>b0(_P^cRU{pu6Jb zDcbvvQ{Vf>J~l2szo-Z+y#g`kfwe!G)eIH;K#`|=x?Ybhs@s^!|Azdli7ZmR-rmgD zDpCdO{9B?|nA2M^=I?dIEBYX{aT*}_-YK@px=cgQ~_ zKRAB@j^ZI*7))%Ur1;^vwAgR>HrHi%PqzS588)l0mrv>)_8O+F?dMc6U#|q+t}QL) z&O`YqPSrkS-pGKcLDJDw&=z&QR_r_Jw*9)?ks}}g_W#=3quo@rK>R2(EmGtBqF7{F zt-`^}%WGJ!&h@JQ?vLrYP-3<1fl&pi&Ed-Ucp!k*tdom$Kq+O57v5E#r}AxcIPKX1 z6|eqfK?O=c(T0k-Fou*2J-2s8U)vpfG{t8;=7zKgKJvJ>}jM1p8#~?xgAZN_2d`J6)39 zqGxv6lAG9Z;&7aqx~?_xP7ul4+1u>mhQqSbzq*sbOv64B!(K`-7t+guzp`AHh+)4v z;8$XYHJC=hq(8cX>7dJ-ncL7Rj8V@=W_p=tU{G4aQ1YM$kPoN|*d*D~6q5~SFN~Ns z81_&^Di?k*C9|mm1CQX$djlE?uUuHcgV5(K&@Z{?`?QU*9K^gtUUoUwG zi0#Dp;#3!IR=%`pZ?J1>YF-vkY3t~CTOiL7=sPZKWO#soViA^(Ctzg-c@8`~c}89P zhgER+E?S!FPyfwmEqrcCv`JZARJ77J8=Cy`^6~CMYCNASefX==87{Czp~S4Z 
z(KP)X!_b1EO~h7_ERY4;YEYeqy<-)M3BeN;e$-T92ma6$(G;n#Lrh}fD~lGf0t?Ri zef3$2b$hs^&TNFMA$}X^jjcsmPg`lUZc;Ry`!*kwr|sp39dEtF1TfzxDjjEy@hkYf zy5)d^_j#P_lBx(xj`{eSq4T7I*YCPmChuza5s(qj5@Ff6jS=*)Xnsjad|NC4kh~jv z=pc8Ji0?r#F9I-ufG`sPWKc&Qvs-P0FD0u}6OJB*MU9S7cvqv>2}^x_gTF8ZmHg%O z_h-ABzv_t;Ie#f&5PsKYn8DKEk^0y6Hn@OG8dbAR7%k+vV>y`C%rfG3DyyV<=8xt| zr0+mdp4PS?wiVF6Cy<=nqp2xl)x}o&lfBW^F+jj=(gUsqq5o-zEI8AF5Adj8n}2g$ zr3m_d@tu`Y-Y0+|Ol^PD<>j9X#^{cj8$z#lYClR#wS6v0&j<*>`}YC)6_2&zAI$ej^?Szq;bH@Y&@YZ|j{|T8am9#8~Aw zz(S4-=7UDn)=m>x(R}n!cwhU5J=B~94aP|}@EHHf&mMrcT?(xU3&ME&0$ZJNVItrP zh%i13?R?0h;jnhEk9eM^62v;Usa6u^0^ff&@xk1kumVD#$x0Kqc6GL$ zn;y~QM)R49Z`PnK_pz>F0+@N7l|^4xR%U#%DnC3j!u9*D%za2?C0OkU0F5Vo9Pa}@3D9`@ed1sLd8gc!P?7$Cyeq2HoD{qm9| zo)Yq3$x$z;7!(C6=&bI~oUrteW75=5Mp|e%;nwQcl~>`?9aZuOcZGrpc^q}pey*F5 zWNnF@G1iKSMeH?FPI9_|78{-&KYy=g%j%~Vt zys7PTjtiOrscG2(2Y+tu@Z55$Ub9Q31o+YUf4~Gv98%Sd$~Y01tSxx3l(ZmFNu)HI3;|NeFO8$q)p*A5J6%*>Ewx({B5<^>}qg zNh}Nr2tD5-rpG&SPHdLj`ROAR4xl5?FV9??B^BcY)$iNKkqZ8gh&3L|Wrc}p5c8Kf z6F!0cya+KdF;I!iLdIaU5jlTO^#tgth%w7N05|aKS{!PoZD{zP?fc<2&7ava&|w`+ z(_=sB1YxveI9%)vviTw$=Vdnf-y`cR)IpCISGYP?xPst^w`i9iS!%y6RN<$pqZA&F zOY7;JWaxtf*7>6>ePJheU zq?SnI(T0kCPs}{%(UL<;8Z8$?WP;JpKCr5c0+H}>^9ZD}m< zO^iIkJ5505S?lb_9ZiT`(N`wPpWx_Rb)rvmqvM?U*2SrXv>B}i9X;I1SFw-~`voXx zOD{$j;okX@5mQS91Vs-PoWHJo6tSAn$D1-##Y1rB>gsjTy!^Wf9V?w%WdD=zo5odf zVxFPSjt;c?hKA~&KVv}>C@Nwrv#hB!x0vS~zo+E=XszfB-0h&3Xku=T2?m+$SUbLO zb*<@3C3sFJga775a7#y+3TX%sXHhp1zq4*(W350p^|vo~5V_DwHk@*&2a1D3F<`23 zSq%-vc<*R`l^{H5FN%4lN^TuSJ>X2I{*=HMPB%M?IJrDA<9YgYb)w+K9AC2S2-{l( z2z}$>WQSI-UlRbiIyMfD02CUfz!D79j^HjC_h+wsGLga4M`OZTU`1${VL8lcbA{K< z%!sGle0+1khbhk7ZD&W}6__cIDd9;BhN!Ksj&nSQ7OF_t7&Z@QH1pDRFg`r>es6F8 z4^}5I%JiNN;@3CSt5E*NEJi)tF&g|-;9M}lfsKtLz|S89g3)pv(5p(9xa-?({dd+H zN-G0(JN~#(7F1J#XUW9(_lK-jxCAa)?2r9tq`coM10RoL&~-9bvNDA}JvcH07-K9) zrf+J~LelpSpa|rdEd?1{`dTY4;J0C0a4G#V3E#NdlH2&Ny-vn*> /dev/null; then + echo -e "${RED}Error: systemctl not found. 
This installer requires a systemd-based Linux distribution.${NC}" + exit 1 +fi + +# Pre-installation checks +pre_installation_checks() { + echo -e "${YELLOW}Running pre-installation checks...${NC}" + + # Check if service already exists + if systemctl is-enabled hmac-file-server.service &>/dev/null; then + echo -e "${YELLOW}Warning: HMAC File Server service already exists${NC}" + read -p "Do you want to continue and overwrite the existing installation? (y/N): " OVERWRITE + if [[ ! $OVERWRITE =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Installation cancelled${NC}" + exit 0 + fi + + # Stop existing service + echo -e "${YELLOW}Stopping existing service...${NC}" + systemctl stop hmac-file-server.service || true + fi + + # Check available disk space (minimum 1GB) + AVAILABLE_SPACE=$(df / | awk 'NR==2 {print $4}') + if [[ $AVAILABLE_SPACE -lt 1048576 ]]; then + echo -e "${RED}Error: Insufficient disk space. At least 1GB required${NC}" + exit 1 + fi + + # Check if we're in the correct directory (should contain go.mod) + if [[ ! -f "go.mod" ]]; then + echo -e "${RED}Error: go.mod not found. Please run this installer from the HMAC File Server source directory${NC}" + exit 1 + fi + + echo -e "${GREEN}โœ… Pre-installation checks passed${NC}" + echo "" +} + +# Check for Go installation +check_go() { + if ! command -v go &> /dev/null; then + echo -e "${YELLOW}Go is not installed. 
Installing Go 1.24...${NC}" + + # Detect architecture + ARCH=$(uname -m) + case $ARCH in + x86_64) GO_ARCH="amd64" ;; + aarch64|arm64) GO_ARCH="arm64" ;; + armv7l) GO_ARCH="armv6l" ;; + *) echo -e "${RED}Unsupported architecture: $ARCH${NC}"; exit 1 ;; + esac + + # Download and install Go + cd /tmp + wget -q "https://go.dev/dl/go1.24.linux-${GO_ARCH}.tar.gz" + tar -C /usr/local -xzf "go1.24.linux-${GO_ARCH}.tar.gz" + + # Add Go to PATH + echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile + export PATH=$PATH:/usr/local/go/bin + + echo -e "${GREEN}Go 1.24 installed successfully${NC}" + else + GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//') + echo -e "${GREEN}Go $GO_VERSION is already installed${NC}" + fi +} + +# User input function +get_user_input() { + echo -e "${BLUE}Configuration Setup${NC}" + echo "Please provide the following information (or press Enter for defaults):" + echo "" + + # System user + read -p "System user for HMAC File Server [$DEFAULT_USER]: " HMAC_USER + HMAC_USER=${HMAC_USER:-$DEFAULT_USER} + + # Installation directory + read -p "Installation directory [$DEFAULT_INSTALL_DIR]: " INSTALL_DIR + INSTALL_DIR=${INSTALL_DIR:-$DEFAULT_INSTALL_DIR} + + # Data directory + read -p "Data directory (uploads) [$DEFAULT_DATA_DIR]: " DATA_DIR + DATA_DIR=${DATA_DIR:-$DEFAULT_DATA_DIR} + + # Server port + read -p "Server port [$DEFAULT_PORT]: " SERVER_PORT + SERVER_PORT=${SERVER_PORT:-$DEFAULT_PORT} + + # Metrics port + read -p "Metrics port [$DEFAULT_METRICS_PORT]: " METRICS_PORT + METRICS_PORT=${METRICS_PORT:-$DEFAULT_METRICS_PORT} + + # HMAC secret + if [[ -n "$HMAC_SECRET" ]]; then + # Use environment variable if provided + if [[ ${#HMAC_SECRET} -ge 32 ]]; then + echo -e "${GREEN}โœ… Using HMAC secret from environment variable${NC}" + else + echo -e "${RED}Error: HMAC_SECRET environment variable must be at least 32 characters long${NC}" + echo -e "${YELLOW}Current length: ${#HMAC_SECRET}${NC}" + exit 1 + fi + else + # Interactive input with 
auto-generation option + echo "" + echo -e "${BLUE}HMAC Secret Configuration${NC}" + echo "Choose how to set the HMAC secret:" + echo " 1) Generate automatically (recommended)" + echo " 2) Enter manually" + echo "" + + while true; do + read -p "Choice [1]: " hmac_choice + hmac_choice=${hmac_choice:-1} + + case $hmac_choice in + 1) + echo -e "${YELLOW}Generating secure HMAC secret...${NC}" + HMAC_SECRET=$(generate_random_key 48) + echo -e "${GREEN}โœ… Generated 48-character HMAC secret${NC}" + echo -e "${BLUE}Secret preview: ${HMAC_SECRET:0:8}...${HMAC_SECRET: -8}${NC}" + break + ;; + 2) + while true; do + echo -n "HMAC secret (minimum 32 characters): " + # Use bash built-in silent read if available + if read -s -r HMAC_SECRET 2>/dev/null; then + echo "" + else + # Fallback: use regular read with warning + echo "" + echo -e "${YELLOW}โš ๏ธ Note: Input will be visible (your terminal doesn't support hidden input)${NC}" + echo -n "HMAC secret (minimum 32 characters): " + read -r HMAC_SECRET + fi + + if [[ ${#HMAC_SECRET} -ge 32 ]]; then + echo -e "${GREEN}โœ… HMAC secret accepted (${#HMAC_SECRET} characters)${NC}" + break 2 + else + echo -e "${RED}HMAC secret must be at least 32 characters long (you entered ${#HMAC_SECRET} characters)${NC}" + echo -e "${YELLOW}Tip: Choose option 1 for automatic generation${NC}" + fi + done + ;; + *) + echo -e "${RED}Please enter 1 or 2${NC}" + ;; + esac + done + fi + + # JWT settings + echo "" + read -p "Enable JWT authentication? 
(y/N): " ENABLE_JWT + if [[ $ENABLE_JWT =~ ^[Yy]$ ]]; then + ENABLE_JWT="true" + + # JWT secret + if [[ -n "$JWT_SECRET" ]]; then + # Use environment variable if provided + if [[ ${#JWT_SECRET} -ge 32 ]]; then + echo -e "${GREEN}โœ… Using JWT secret from environment variable${NC}" + else + echo -e "${RED}Error: JWT_SECRET environment variable must be at least 32 characters long${NC}" + echo -e "${YELLOW}Current length: ${#JWT_SECRET}${NC}" + exit 1 + fi + else + # Interactive input with auto-generation option + echo "" + echo -e "${BLUE}JWT Secret Configuration${NC}" + echo "Choose how to set the JWT secret:" + echo " 1) Generate automatically (recommended)" + echo " 2) Enter manually" + echo "" + + while true; do + read -p "Choice [1]: " jwt_choice + jwt_choice=${jwt_choice:-1} + + case $jwt_choice in + 1) + echo -e "${YELLOW}Generating secure JWT secret...${NC}" + JWT_SECRET=$(generate_random_key 48) + echo -e "${GREEN}โœ… Generated 48-character JWT secret${NC}" + echo -e "${BLUE}Secret preview: ${JWT_SECRET:0:8}...${JWT_SECRET: -8}${NC}" + break + ;; + 2) + while true; do + echo -n "JWT secret (minimum 32 characters): " + # Use bash built-in silent read if available + if read -s -r JWT_SECRET 2>/dev/null; then + echo "" + else + # Fallback: use regular read with warning + echo "" + echo -e "${YELLOW}โš ๏ธ Note: Input will be visible (your terminal doesn't support hidden input)${NC}" + echo -n "JWT secret (minimum 32 characters): " + read -r JWT_SECRET + fi + + if [[ ${#JWT_SECRET} -ge 32 ]]; then + echo -e "${GREEN}โœ… JWT secret accepted (${#JWT_SECRET} characters)${NC}" + break 2 + else + echo -e "${RED}JWT secret must be at least 32 characters long (you entered ${#JWT_SECRET} characters)${NC}" + echo -e "${YELLOW}Tip: Choose option 1 for automatic generation${NC}" + fi + done + ;; + *) + echo -e "${RED}Please enter 1 or 2${NC}" + ;; + esac + done + fi + + # JWT expiration + read -p "JWT token expiration [24h]: " JWT_EXPIRATION + 
JWT_EXPIRATION=${JWT_EXPIRATION:-"24h"} + + # JWT algorithm + read -p "JWT algorithm (HS256/HS384/HS512) [HS256]: " JWT_ALGORITHM + JWT_ALGORITHM=${JWT_ALGORITHM:-"HS256"} + else + ENABLE_JWT="false" + JWT_SECRET="" + JWT_EXPIRATION="24h" + JWT_ALGORITHM="HS256" + fi + + # Redis settings + echo "" + read -p "Enable Redis integration? (y/N): " ENABLE_REDIS + if [[ $ENABLE_REDIS =~ ^[Yy]$ ]]; then + ENABLE_REDIS="true" + read -p "Redis host [localhost]: " REDIS_HOST + REDIS_HOST=${REDIS_HOST:-"localhost"} + read -p "Redis port [6379]: " REDIS_PORT + REDIS_PORT=${REDIS_PORT:-"6379"} + read -p "Redis database [0]: " REDIS_DB + REDIS_DB=${REDIS_DB:-"0"} + read -s -p "Redis password (optional): " REDIS_PASSWORD + echo "" + else + ENABLE_REDIS="false" + fi + + # ClamAV settings + echo "" + read -p "Enable ClamAV virus scanning? (y/N): " ENABLE_CLAMAV + if [[ $ENABLE_CLAMAV =~ ^[Yy]$ ]]; then + ENABLE_CLAMAV="true" + CLAMAV_CONFIG="socket = \"/var/run/clamav/clamd.ctl\"" # Default, will be updated during installation + else + ENABLE_CLAMAV="false" + CLAMAV_CONFIG="" + fi + + # SSL/TLS settings + echo "" + read -p "Enable SSL/TLS? 
(y/N): " ENABLE_TLS + if [[ $ENABLE_TLS =~ ^[Yy]$ ]]; then + ENABLE_TLS="true" + read -p "SSL certificate path: " SSL_CERT + read -p "SSL private key path: " SSL_KEY + else + ENABLE_TLS="false" + fi + + # Show configuration summary + # Professional configuration summary + echo "" + echo -e "${BLUE} โ–ˆ Configuration Summary โ–ˆ${NC}" + echo -e "${YELLOW}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo -e "${YELLOW}System User:${NC} $HMAC_USER" + echo -e "${YELLOW}Install Dir:${NC} $INSTALL_DIR" + echo -e "${YELLOW}Data Dir:${NC} $DATA_DIR" + echo -e "${YELLOW}Server Port:${NC} $SERVER_PORT" + echo -e "${YELLOW}Metrics Port:${NC} $METRICS_PORT" + echo -e "${YELLOW}JWT Auth:${NC} $([[ "$ENABLE_JWT" == "true" ]] && echo "โœ… Enabled" || echo "โŒ Disabled")" + echo -e "${YELLOW}Redis:${NC} $([[ "$ENABLE_REDIS" == "true" ]] && echo "โœ… Enabled ($REDIS_HOST:$REDIS_PORT)" || echo "โŒ Disabled")" + echo -e "${YELLOW}ClamAV:${NC} $([[ "$ENABLE_CLAMAV" == "true" ]] && echo "โœ… Enabled" || echo "โŒ Disabled")" + echo -e "${YELLOW}SSL/TLS:${NC} $([[ "$ENABLE_TLS" == "true" ]] && echo "โœ… Enabled" || echo "โŒ Disabled")" + echo -e "${YELLOW}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo "" + read -p "Continue with installation? (y/N): " CONFIRM_INSTALL + if [[ ! $CONFIRM_INSTALL =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Installation cancelled by user${NC}" + exit 0 + fi +} + +# Create system user +create_user() { + if ! 
id "$HMAC_USER" &>/dev/null; then + echo -e "${YELLOW}Creating system user: $HMAC_USER${NC}" + useradd --system --home-dir "$INSTALL_DIR" --shell /bin/false --comment "HMAC File Server" "$HMAC_USER" + else + echo -e "${GREEN}User $HMAC_USER already exists${NC}" + fi +} + +# Create directories +create_directories() { + echo -e "${YELLOW}Creating directories...${NC}" + + mkdir -p "$INSTALL_DIR" + mkdir -p "$DEFAULT_CONFIG_DIR" + mkdir -p "$DATA_DIR/uploads" + mkdir -p "$DATA_DIR/deduplication" + mkdir -p "$DATA_DIR/runtime" + mkdir -p "$DEFAULT_LOG_DIR" + + # Set ownership + chown -R "$HMAC_USER:$HMAC_USER" "$INSTALL_DIR" + chown -R "$HMAC_USER:$HMAC_USER" "$DATA_DIR" + chown -R "$HMAC_USER:$HMAC_USER" "$DEFAULT_LOG_DIR" + + # Set permissions + chmod 755 "$INSTALL_DIR" + chmod 755 "$DATA_DIR" + chmod 750 "$DEFAULT_LOG_DIR" +} + +# Build HMAC File Server +build_server() { + echo -e "${YELLOW}Building HMAC File Server...${NC}" + + # Build the server + cd "$(dirname "$0")" + go build -o "$INSTALL_DIR/hmac-file-server" cmd/server/main.go cmd/server/helpers.go cmd/server/config_validator.go cmd/server/config_test_scenarios.go + + # Set ownership and permissions + chown "$HMAC_USER:$HMAC_USER" "$INSTALL_DIR/hmac-file-server" + chmod 755 "$INSTALL_DIR/hmac-file-server" + + echo -e "${GREEN}HMAC File Server built successfully${NC}" +} + +# Generate configuration file +generate_config() { + echo -e "${YELLOW}Generating configuration file...${NC}" + + cat > "$DEFAULT_CONFIG_DIR/config.toml" << EOF +# HMAC File Server Configuration +# Generated by installer on $(date) + +[server] +bind_ip = "0.0.0.0" +listenport = "$SERVER_PORT" +unixsocket = false +storagepath = "$DATA_DIR/uploads" +metricsenabled = true +metricsport = "$METRICS_PORT" +deduplicationenabled = true +deduplicationpath = "$DATA_DIR/deduplication" +filenaming = "HMAC" +force_protocol = "auto" +pidfilepath = "$DATA_DIR/runtime/hmac-file-server.pid" +EOF + + if [[ $ENABLE_TLS == "true" ]]; then + cat >> 
"$DEFAULT_CONFIG_DIR/config.toml" << EOF +sslenabled = true +sslcert = "$SSL_CERT" +sslkey = "$SSL_KEY" +EOF + else + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF +sslenabled = false +EOF + fi + + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF + +[security] +secret = "$HMAC_SECRET" +enablejwt = $ENABLE_JWT +EOF + + if [[ $ENABLE_JWT == "true" ]]; then + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF +jwtsecret = "$JWT_SECRET" +jwtalgorithm = "$JWT_ALGORITHM" +jwtexpiration = "$JWT_EXPIRATION" +EOF + fi + + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF + +[uploads] +allowedextensions = [".txt", ".pdf", ".jpg", ".jpeg", ".png", ".gif", ".webp", ".zip", ".tar", ".gz", ".7z", ".mp4", ".webm", ".ogg", ".mp3", ".wav", ".flac", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx", ".odt", ".ods", ".odp"] +maxfilesize = "100MB" +chunkeduploadsenabled = true +chunksize = "10MB" +ttlenabled = false +ttl = "168h" + +[downloads] +chunkeddownloadsenabled = true +chunksize = "10MB" + +[logging] +level = "INFO" +file = "$DEFAULT_LOG_DIR/hmac-file-server.log" +max_size = 100 +max_backups = 3 +max_age = 30 +compress = true + +[workers] +numworkers = 10 +uploadqueuesize = 1000 +autoscaling = true + +[timeouts] +readtimeout = "30s" +writetimeout = "30s" +idletimeout = "120s" +shutdown = "30s" +EOF + + if [[ $ENABLE_CLAMAV == "true" ]]; then + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF + +[clamav] +enabled = true +${CLAMAV_CONFIG} +timeout = "30s" +EOF + else + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF + +[clamav] +enabled = false +EOF + fi + + if [[ $ENABLE_REDIS == "true" ]]; then + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF + +[redis] +enabled = true +host = "$REDIS_HOST" +port = $REDIS_PORT +database = $REDIS_DB +password = "$REDIS_PASSWORD" +timeout = "5s" +EOF + else + cat >> "$DEFAULT_CONFIG_DIR/config.toml" << EOF + +[redis] +enabled = false +EOF + fi + + # Set ownership and permissions + chown "$HMAC_USER:$HMAC_USER" "$DEFAULT_CONFIG_DIR/config.toml" + 
chmod 640 "$DEFAULT_CONFIG_DIR/config.toml" + + echo -e "${GREEN}Configuration file created: $DEFAULT_CONFIG_DIR/config.toml${NC}" +} + +# Create systemd service +create_systemd_service() { + echo -e "${YELLOW}Creating systemd service...${NC}" + + cat > /etc/systemd/system/hmac-file-server.service << EOF +[Unit] +Description=HMAC File Server 3.2 +Documentation=https://github.com/PlusOne/hmac-file-server +After=network.target +Wants=network-online.target +EOF + + if [[ $ENABLE_REDIS == "true" ]]; then + echo "After=redis.service" >> /etc/systemd/system/hmac-file-server.service + fi + + if [[ $ENABLE_CLAMAV == "true" ]]; then + echo "After=clamav-daemon.service" >> /etc/systemd/system/hmac-file-server.service + fi + + cat >> /etc/systemd/system/hmac-file-server.service << EOF + +[Service] +Type=simple +User=$HMAC_USER +Group=$HMAC_USER +ExecStart=$INSTALL_DIR/hmac-file-server -config $DEFAULT_CONFIG_DIR/config.toml +ExecReload=/bin/kill -SIGHUP \$MAINPID +WorkingDirectory=$INSTALL_DIR +Restart=always +RestartSec=10 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=hmac-file-server + +# Security settings +NoNewPrivileges=true +PrivateTmp=true +ProtectSystem=strict +ProtectHome=true +ReadWritePaths=$DATA_DIR $DEFAULT_LOG_DIR +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +AmbientCapabilities=CAP_NET_BIND_SERVICE + +# Resource limits +LimitNOFILE=65536 +LimitNPROC=4096 + +[Install] +WantedBy=multi-user.target +EOF + + # Reload systemd and enable service + systemctl daemon-reload + systemctl enable hmac-file-server.service + + echo -e "${GREEN}Systemd service created and enabled${NC}" +} + +# Install dependencies +install_dependencies() { + echo -e "${YELLOW}Installing dependencies...${NC}" + + # Detect package manager and install dependencies + if command -v apt-get &> /dev/null; then + apt-get update + if [[ $ENABLE_REDIS == "true" ]]; then + apt-get install -y redis-server + systemctl enable redis-server + fi + if [[ $ENABLE_CLAMAV == "true" ]]; then + 
apt-get install -y clamav clamav-daemon + systemctl enable clamav-daemon + # Update virus definitions + freshclam || true + + # Detect ClamAV configuration and configure accordingly + echo -e "${YELLOW}Configuring ClamAV connection...${NC}" + + # Check if ClamAV daemon is running and detect socket/port + if systemctl is-active --quiet clamav-daemon; then + echo " โœ“ ClamAV daemon is running" + + # Check for Unix socket (preferred) + if [[ -S "/var/run/clamav/clamd.ctl" ]]; then + echo " โœ“ Unix socket detected: /var/run/clamav/clamd.ctl" + CLAMAV_CONFIG="socket = \"/var/run/clamav/clamd.ctl\"" + elif [[ -S "/run/clamav/clamd.ctl" ]]; then + echo " โœ“ Unix socket detected: /run/clamav/clamd.ctl" + CLAMAV_CONFIG="socket = \"/run/clamav/clamd.ctl\"" + elif [[ -S "/tmp/clamd" ]]; then + echo " โœ“ Unix socket detected: /tmp/clamd" + CLAMAV_CONFIG="socket = \"/tmp/clamd\"" + # Check for TCP port + elif netstat -ln | grep -q ":3310"; then + echo " โœ“ TCP port detected: 127.0.0.1:3310" + CLAMAV_CONFIG="address = \"127.0.0.1:3310\"" + else + echo " โš  ClamAV socket/port not detected, using default Unix socket" + CLAMAV_CONFIG="socket = \"/var/run/clamav/clamd.ctl\"" + fi + else + echo " โš  ClamAV daemon not running, using default configuration" + CLAMAV_CONFIG="socket = \"/var/run/clamav/clamd.ctl\"" + + # Try to start the daemon + echo " ๐Ÿ”„ Attempting to start ClamAV daemon..." 
+ systemctl start clamav-daemon || echo " โš  Failed to start ClamAV daemon" + fi + fi + elif command -v yum &> /dev/null; then + if [[ $ENABLE_REDIS == "true" ]]; then + yum install -y redis + systemctl enable redis + fi + if [[ $ENABLE_CLAMAV == "true" ]]; then + yum install -y clamav clamav-update clamd + systemctl enable clamd + freshclam || true + fi + elif command -v dnf &> /dev/null; then + if [[ $ENABLE_REDIS == "true" ]]; then + dnf install -y redis + systemctl enable redis + fi + if [[ $ENABLE_CLAMAV == "true" ]]; then + dnf install -y clamav clamav-update clamd + systemctl enable clamd + freshclam || true + fi + else + echo -e "${YELLOW}Unknown package manager. Please install Redis and/or ClamAV manually if needed.${NC}" + fi +} + +# Generate secure random key +generate_random_key() { + local length=${1:-48} # Default 48 characters for extra security + local key="" + + # Try different methods in order of preference + if command -v openssl &> /dev/null; then + # Method 1: OpenSSL (most common and secure) + key=$(openssl rand -base64 $((length * 3 / 4 + 1)) | tr -d "=+/\n" | cut -c1-$length) + elif command -v head &> /dev/null && [[ -r /dev/urandom ]]; then + # Method 2: /dev/urandom with head (Linux/Unix) + key=$(head -c $((length * 3 / 4 + 1)) /dev/urandom | base64 | tr -d "=+/\n" | cut -c1-$length) + elif command -v dd &> /dev/null && [[ -r /dev/urandom ]]; then + # Method 3: dd with /dev/urandom + key=$(dd if=/dev/urandom bs=$((length * 3 / 4 + 1)) count=1 2>/dev/null | base64 | tr -d "=+/\n" | cut -c1-$length) + elif command -v date &> /dev/null; then + # Method 4: Fallback using date and process info (less secure but works) + local timestamp=$(date +%s%N) + local random_data="${timestamp}${RANDOM}${$}$(hostname)" + key=$(echo -n "$random_data" | sha256sum | cut -c1-$length) + else + # Method 5: Last resort - basic fallback + echo -e "${YELLOW}Warning: Using basic key generation (consider installing openssl)${NC}" >&2 + key="hmac-file-server-$(date 
+%s)-$(hostname | cut -c1-16)" + key=$(echo -n "$key" | sha256sum | cut -c1-$length) + fi + + # Ensure exact length + key=$(echo -n "$key" | cut -c1-$length) + + # If still too short, pad with additional random data + while [[ ${#key} -lt $length ]]; do + local padding=$(date +%s | sha256sum | cut -c1-$((length - ${#key}))) + key="${key}${padding}" + key=$(echo -n "$key" | cut -c1-$length) + done + + echo "$key" +} + +# Main installation function +main() { + echo -e "${BLUE}Starting HMAC File Server installation...${NC}" + echo "" + + # Run pre-installation checks + pre_installation_checks + + # Get user input + get_user_input + + echo "" + echo -e "${BLUE}Installation Summary:${NC}" + echo "User: $HMAC_USER" + echo "Install Directory: $INSTALL_DIR" + echo "Data Directory: $DATA_DIR" + echo "Config Directory: $DEFAULT_CONFIG_DIR" + echo "Server Port: $SERVER_PORT" + echo "Metrics Port: $METRICS_PORT" + echo "JWT Enabled: $ENABLE_JWT" + echo "Redis Enabled: $ENABLE_REDIS" + echo "ClamAV Enabled: $ENABLE_CLAMAV" + echo "TLS Enabled: $ENABLE_TLS" + echo "" + + read -p "Continue with installation? (y/N): " CONFIRM + if [[ ! $CONFIRM =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Installation cancelled.${NC}" + exit 0 + fi + + echo "" + echo -e "${BLUE}Installing...${NC}" + + # Installation steps + check_go + create_user + create_directories + install_dependencies + build_server + generate_config + create_systemd_service + + # Ask if user wants to start the service now + echo "" + read -p "Start HMAC File Server service now? (Y/n): " START_SERVICE + START_SERVICE=${START_SERVICE:-Y} + + if [[ $START_SERVICE =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Starting HMAC File Server service...${NC}" + systemctl start hmac-file-server.service + + # Wait a moment and check status + sleep 3 + if systemctl is-active --quiet hmac-file-server.service; then + echo -e "${GREEN}โœ… Service started successfully${NC}" + else + echo -e "${RED}โŒ Service failed to start. 
Check logs with: journalctl -u hmac-file-server.service${NC}" + fi + fi + + print_completion_info +} + +# Function to print completion information +print_completion_info() { + echo "" + echo -e "${GREEN} โ–ˆ Installation Complete! โ–ˆ${NC}" + echo -e "${GREEN}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo -e "${GREEN} HMAC File Server 3.2 Successfully Deployed! ${NC}" + echo -e "${GREEN}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo "" + echo -e "${BLUE}๐Ÿš€ Service Information:${NC}" + echo -e " Status: ${YELLOW}sudo systemctl status hmac-file-server${NC}" + echo -e " Logs: ${YELLOW}sudo journalctl -u hmac-file-server -f${NC}" + echo -e " Config: ${YELLOW}sudo nano $DEFAULT_CONFIG_DIR/config.toml${NC}" + echo -e " Reload: ${YELLOW}sudo systemctl reload hmac-file-server${NC}" + echo "" + echo -e "${BLUE}๐ŸŒ Service Endpoints:${NC}" + if [[ $ENABLE_TLS == "true" ]]; then + echo -e " Server: ${YELLOW}https://$(hostname -I | awk '{print $1}'):$SERVER_PORT${NC}" + else + echo -e " Server: ${YELLOW}http://$(hostname -I | awk '{print $1}'):$SERVER_PORT${NC}" + fi + echo -e " Metrics: ${YELLOW}http://$(hostname -I | awk '{print $1}'):$METRICS_PORT/metrics${NC}" + echo "" + echo -e "${BLUE}๐Ÿ“ File Locations:${NC}" + echo -e " Binary: ${YELLOW}$INSTALL_DIR/hmac-file-server${NC}" + echo -e " Config: ${YELLOW}$DEFAULT_CONFIG_DIR/config.toml${NC}" + echo -e " Uploads: ${YELLOW}$DATA_DIR/uploads${NC}" + echo -e " Logs: ${YELLOW}$DEFAULT_LOG_DIR/hmac-file-server.log${NC}" + echo "" + echo -e "${BLUE}โšก Quick Commands:${NC}" + echo -e " Start: ${YELLOW}sudo systemctl start hmac-file-server${NC}" + echo -e " Stop: ${YELLOW}sudo systemctl stop hmac-file-server${NC}" 
+ echo -e " Restart: ${YELLOW}sudo systemctl restart hmac-file-server${NC}" + echo -e " Status: ${YELLOW}sudo systemctl status hmac-file-server${NC}" + echo "" + echo -e "${BLUE}๐Ÿ”ง Next Steps for XMPP Integration:${NC}" + echo -e "1. ${YELLOW}Configure firewall${NC} to allow ports $SERVER_PORT (server) and $METRICS_PORT (metrics)" + echo -e "2. Configure your reverse proxy (nginx/apache) with SSL" + echo -e "3. Update your Prosody/Ejabberd configuration:" + echo -e " ${YELLOW}http_file_share = \"http://localhost:$SERVER_PORT\"${NC}" + echo -e "4. Set up monitoring and log rotation" + echo -e "5. Test file uploads with your XMPP client" + echo "" + echo -e "${BLUE}๐Ÿ“š Documentation & Support:${NC}" + echo -e " README: https://github.com/PlusOne/hmac-file-server/blob/main/README.MD" + echo -e " Wiki: https://github.com/PlusOne/hmac-file-server/blob/main/WIKI.MD" + echo -e " Issues: https://github.com/PlusOne/hmac-file-server/issues" + echo "" + echo -e "${GREEN}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo -e "${GREEN} Thank you for choosing HMAC File Server for your XMPP setup! 
${NC}" + echo -e "${GREEN}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" +} + +# Helper function to safely preserve a directory +preserve_directory() { + local source_dir="$1" + local backup_path="$2" + + if [[ -d "$source_dir" ]]; then + local parent_dir=$(dirname "$backup_path") + mkdir -p "$parent_dir" + + if mv "$source_dir" "$backup_path" 2>/dev/null; then + echo " โœ“ Preserved: $source_dir โ†’ $backup_path" + else + # Fallback to copy if move fails + if cp -r "$source_dir" "$backup_path" 2>/dev/null; then + echo " โœ“ Copied: $source_dir โ†’ $backup_path" + rm -rf "$source_dir" + echo " โœ“ Removed original: $source_dir" + else + echo " โš  Failed to preserve: $source_dir" + fi + fi + else + echo " โš  Directory not found: $source_dir" + fi +} + +# Custom data selection for option 4 +custom_data_selection() { + echo "" + echo -e "${BLUE}Custom Data Selection:${NC}" + echo "Choose which data directories to preserve:" + echo "" + + CUSTOM_PRESERVE_UPLOADS="" + CUSTOM_PRESERVE_DEDUP="" + CUSTOM_PRESERVE_LOGS="" + + # Ask about uploads + if [[ -d "$UPLOAD_DIR" ]]; then + FILE_COUNT=$(find "$UPLOAD_DIR" -type f 2>/dev/null | wc -l) + DIR_SIZE=$(du -sh "$UPLOAD_DIR" 2>/dev/null | cut -f1) + echo -e "${GREEN}๐Ÿ“ค Upload Directory: ${UPLOAD_DIR}${NC} (Files: $FILE_COUNT, Size: $DIR_SIZE)" + read -p "Preserve upload directory? 
(y/N): " PRESERVE_UPLOADS + if [[ $PRESERVE_UPLOADS =~ ^[Yy]$ ]]; then + CUSTOM_PRESERVE_UPLOADS="yes" + echo " โœ“ Will preserve uploads" + else + echo " โœ— Will delete uploads" + fi + else + echo -e "${YELLOW}๐Ÿ“ค Upload Directory: Not found${NC}" + fi + + echo "" + + # Ask about deduplication + if [[ -d "$DEDUP_DIR" ]]; then + FILE_COUNT=$(find "$DEDUP_DIR" -type f 2>/dev/null | wc -l) + DIR_SIZE=$(du -sh "$DEDUP_DIR" 2>/dev/null | cut -f1) + echo -e "${GREEN}๐Ÿ”— Deduplication Directory: ${DEDUP_DIR}${NC} (Files: $FILE_COUNT, Size: $DIR_SIZE)" + read -p "Preserve deduplication directory? (y/N): " PRESERVE_DEDUP + if [[ $PRESERVE_DEDUP =~ ^[Yy]$ ]]; then + CUSTOM_PRESERVE_DEDUP="yes" + echo " โœ“ Will preserve deduplication data" + else + echo " โœ— Will delete deduplication data" + fi + else + echo -e "${YELLOW}๐Ÿ”— Deduplication Directory: Not found${NC}" + fi + + echo "" + + # Ask about logs + if [[ -d "$LOG_DIR" ]]; then + FILE_COUNT=$(find "$LOG_DIR" -type f 2>/dev/null | wc -l) + DIR_SIZE=$(du -sh "$LOG_DIR" 2>/dev/null | cut -f1) + echo -e "${GREEN}๐Ÿ“„ Log Directory: ${LOG_DIR}${NC} (Files: $FILE_COUNT, Size: $DIR_SIZE)" + read -p "Preserve log directory? 
(y/N): " PRESERVE_LOGS + if [[ $PRESERVE_LOGS =~ ^[Yy]$ ]]; then + CUSTOM_PRESERVE_LOGS="yes" + echo " โœ“ Will preserve logs" + else + echo " โœ— Will delete logs" + fi + else + echo -e "${YELLOW}๐Ÿ“„ Log Directory: Not found${NC}" + fi + + # Store custom selection for later processing + PRESERVE_DATA="custom" + + echo "" + echo -e "${BLUE}Custom selection complete:${NC}" + [[ "$CUSTOM_PRESERVE_UPLOADS" == "yes" ]] && echo " ๐Ÿ“ค Uploads: Preserve" || echo " ๐Ÿ“ค Uploads: Delete" + [[ "$CUSTOM_PRESERVE_DEDUP" == "yes" ]] && echo " ๐Ÿ”— Deduplication: Preserve" || echo " ๐Ÿ”— Deduplication: Delete" + [[ "$CUSTOM_PRESERVE_LOGS" == "yes" ]] && echo " ๐Ÿ“„ Logs: Preserve" || echo " ๐Ÿ“„ Logs: Delete" + echo "" +} + +# Handle custom preservation choices +handle_custom_preservation() { + # Check if any data needs to be preserved + if [[ "$CUSTOM_PRESERVE_UPLOADS" == "yes" || "$CUSTOM_PRESERVE_DEDUP" == "yes" || "$CUSTOM_PRESERVE_LOGS" == "yes" ]]; then + BACKUP_DIR="/var/backups/hmac-file-server-$(date +%Y%m%d-%H%M%S)" + mkdir -p "$BACKUP_DIR" + echo " โœ“ Created backup directory: $BACKUP_DIR" + fi + + # Handle uploads + if [[ "$CUSTOM_PRESERVE_UPLOADS" == "yes" ]]; then + preserve_directory "$UPLOAD_DIR" "$BACKUP_DIR/uploads" + elif [[ -d "$UPLOAD_DIR" ]]; then + rm -rf "$UPLOAD_DIR" + echo " โœ“ Removed uploads: $UPLOAD_DIR" + fi + + # Handle deduplication + if [[ "$CUSTOM_PRESERVE_DEDUP" == "yes" ]]; then + preserve_directory "$DEDUP_DIR" "$BACKUP_DIR/deduplication" + elif [[ -d "$DEDUP_DIR" ]]; then + rm -rf "$DEDUP_DIR" + echo " โœ“ Removed deduplication: $DEDUP_DIR" + fi + + # Handle logs + if [[ "$CUSTOM_PRESERVE_LOGS" == "yes" ]]; then + preserve_directory "$LOG_DIR" "$BACKUP_DIR/logs" + elif [[ -d "$LOG_DIR" ]]; then + rm -rf "$LOG_DIR" + echo " โœ“ Removed logs: $LOG_DIR" + fi + + # Remove the main data directory if it's separate and empty + if [[ -d "$DEFAULT_DATA_DIR" ]]; then + # Only remove if it's different from preserved directories and if it's empty or 
only contains subdirs we've handled + if [[ "$DEFAULT_DATA_DIR" != "$UPLOAD_DIR" && "$DEFAULT_DATA_DIR" != "$DEDUP_DIR" && "$DEFAULT_DATA_DIR" != "$LOG_DIR" ]]; then + # Check if directory is effectively empty (only contains directories we've already handled) + remaining_files=$(find "$DEFAULT_DATA_DIR" -type f 2>/dev/null | wc -l) + if [[ $remaining_files -eq 0 ]]; then + rm -rf "$DEFAULT_DATA_DIR" + echo " โœ“ Removed empty data directory: $DEFAULT_DATA_DIR" + else + echo " โš  Data directory contains additional files: $DEFAULT_DATA_DIR" + fi + fi + fi +} + +# Uninstaller function (can be called with ./installer.sh --uninstall) +uninstall() { + echo "" + echo -e "${RED} โ–ˆ HMAC File Server Uninstaller โ–ˆ${NC}" + echo -e "${RED}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo -e "${RED} Warning: This will remove the server installation! ${NC}" + echo -e "${RED}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo "" + + read -p "Are you sure you want to uninstall HMAC File Server? (y/N): " CONFIRM_UNINSTALL + if [[ ! 
$CONFIRM_UNINSTALL =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Uninstall cancelled${NC}" + exit 0 + fi + + echo "" + echo -e "${BLUE}๐Ÿ“ Data Preservation Options:${NC}" + echo -e "${BLUE}โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€${NC}" + echo "" + echo "The following data directories may contain important files:" + + # Check what data directories exist and show their contents + PRESERVE_DATA="" + UPLOAD_DIR="" + DEDUP_DIR="" + LOG_DIR="" + + # Find upload directory from config if it exists + if [[ -f "$DEFAULT_CONFIG_DIR/config.toml" ]]; then + UPLOAD_DIR=$(grep -E "^storagepath\s*=" "$DEFAULT_CONFIG_DIR/config.toml" 2>/dev/null | sed 's/.*=\s*"*\([^"]*\)"*.*/\1/' | xargs) + DEDUP_DIR=$(grep -E "^directory\s*=" "$DEFAULT_CONFIG_DIR/config.toml" 2>/dev/null | sed 's/.*=\s*"*\([^"]*\)"*.*/\1/' | xargs) + fi + + # Fallback to default locations + [[ -z "$UPLOAD_DIR" ]] && UPLOAD_DIR="$DEFAULT_DATA_DIR/uploads" + [[ -z "$DEDUP_DIR" ]] && DEDUP_DIR="$DEFAULT_DATA_DIR/deduplication" + LOG_DIR="$DEFAULT_LOG_DIR" + + # Show upload directory status + if [[ -d "$UPLOAD_DIR" ]]; then + FILE_COUNT=$(find "$UPLOAD_DIR" -type f 2>/dev/null | wc -l) + DIR_SIZE=$(du -sh "$UPLOAD_DIR" 2>/dev/null | cut -f1) + echo -e "${GREEN} ๐Ÿ“ค Upload Directory: ${UPLOAD_DIR}${NC}" + echo -e " Files: $FILE_COUNT, Size: $DIR_SIZE" + else + echo -e "${YELLOW} ๐Ÿ“ค Upload Directory: Not found or empty${NC}" + fi + + # Show deduplication directory status + if [[ -d "$DEDUP_DIR" ]]; then + FILE_COUNT=$(find "$DEDUP_DIR" -type f 2>/dev/null | wc -l) + DIR_SIZE=$(du -sh "$DEDUP_DIR" 2>/dev/null | cut -f1) + echo -e "${GREEN} ๐Ÿ”— Deduplication Directory: ${DEDUP_DIR}${NC}" + echo -e " Files: $FILE_COUNT, Size: $DIR_SIZE" + else + echo -e "${YELLOW} ๐Ÿ”— Deduplication Directory: Not found or empty${NC}" + fi + + # Show log directory status + if [[ -d 
"$LOG_DIR" ]]; then + FILE_COUNT=$(find "$LOG_DIR" -type f 2>/dev/null | wc -l) + DIR_SIZE=$(du -sh "$LOG_DIR" 2>/dev/null | cut -f1) + echo -e "${GREEN} ๐Ÿ“„ Log Directory: ${LOG_DIR}${NC}" + echo -e " Files: $FILE_COUNT, Size: $DIR_SIZE" + else + echo -e "${YELLOW} ๐Ÿ“„ Log Directory: Not found or empty${NC}" + fi + + echo "" + echo -e "${BLUE}Choose data handling option:${NC}" + echo " 1) ๐Ÿ—‘๏ธ Delete all data (complete removal)" + echo " 2) ๐Ÿ’พ Preserve upload and deduplication data only" + echo " 3) ๐Ÿ“‹ Preserve all data (uploads, deduplication, and logs)" + echo " 4) ๐ŸŽฏ Custom selection (choose what to preserve)" + echo " 5) โŒ Cancel uninstallation" + echo "" + + while true; do + read -p "Select option (1-5): " DATA_OPTION + case $DATA_OPTION in + 1) + echo -e "${RED}Selected: Delete all data${NC}" + PRESERVE_DATA="none" + break + ;; + 2) + echo -e "${GREEN}Selected: Preserve uploads and deduplication data${NC}" + PRESERVE_DATA="uploads_dedup" + break + ;; + 3) + echo -e "${GREEN}Selected: Preserve all data${NC}" + PRESERVE_DATA="all" + break + ;; + 4) + echo -e "${BLUE}Custom selection:${NC}" + custom_data_selection + break + ;; + 5) + echo -e "${YELLOW}Uninstall cancelled${NC}" + exit 0 + ;; + *) + echo -e "${RED}Invalid option. 
Please choose 1-5.${NC}" + ;; + esac + done + + # Final confirmation for complete deletion + if [[ "$PRESERVE_DATA" == "none" ]]; then + echo "" + echo -e "${RED}โš ๏ธ FINAL WARNING: This will permanently delete ALL data!${NC}" + echo -e "${RED} This includes all uploaded files, deduplication data, and logs.${NC}" + echo -e "${RED} This action cannot be undone!${NC}" + echo "" + read -p "Type 'DELETE' to confirm complete data removal: " FINAL_CONFIRM + if [[ "$FINAL_CONFIRM" != "DELETE" ]]; then + echo -e "${YELLOW}Uninstall cancelled - confirmation failed${NC}" + exit 0 + fi + fi + + echo "" + echo -e "${YELLOW}๐Ÿ”„ Starting uninstallation process...${NC}" + echo "" + + echo -e "${YELLOW}Stopping and disabling service...${NC}" + if systemctl is-active --quiet hmac-file-server.service; then + systemctl stop hmac-file-server.service || true + echo " โœ“ Service stopped" + else + echo " โš  Service was not running" + fi + + if systemctl is-enabled --quiet hmac-file-server.service 2>/dev/null; then + systemctl disable hmac-file-server.service || true + echo " โœ“ Service disabled" + else + echo " โš  Service was not enabled" + fi + + if [[ -f /etc/systemd/system/hmac-file-server.service ]]; then + rm -f /etc/systemd/system/hmac-file-server.service + echo " โœ“ Service file removed" + else + echo " โš  Service file not found" + fi + + systemctl daemon-reload + echo " โœ“ Systemd reloaded" + + echo -e "${YELLOW}Removing installation and configuration...${NC}" + + # Always remove installation directory + if [[ -d "$DEFAULT_INSTALL_DIR" ]]; then + rm -rf "$DEFAULT_INSTALL_DIR" + echo " โœ“ Removed installation directory: $DEFAULT_INSTALL_DIR" + else + echo " โš  Installation directory not found: $DEFAULT_INSTALL_DIR" + fi + + # Always remove configuration directory + if [[ -d "$DEFAULT_CONFIG_DIR" ]]; then + rm -rf "$DEFAULT_CONFIG_DIR" + echo " โœ“ Removed configuration directory: $DEFAULT_CONFIG_DIR" + else + echo " โš  Configuration directory not found: 
$DEFAULT_CONFIG_DIR" + fi + + # Handle data directories based on user choice + echo -e "${YELLOW}Processing data directories...${NC}" + + case $PRESERVE_DATA in + "none") + # Delete everything + for dir in "$UPLOAD_DIR" "$DEDUP_DIR" "$LOG_DIR" "$DEFAULT_DATA_DIR"; do + if [[ -d "$dir" ]]; then + rm -rf "$dir" + echo " โœ“ Removed: $dir" + fi + done + ;; + "uploads_dedup") + # Preserve uploads and deduplication, remove logs + if [[ -d "$LOG_DIR" ]]; then + rm -rf "$LOG_DIR" + echo " โœ“ Removed logs: $LOG_DIR" + fi + # Move preserved data to a safe location + BACKUP_DIR="/var/backups/hmac-file-server-$(date +%Y%m%d-%H%M%S)" + mkdir -p "$BACKUP_DIR" + preserve_directory "$UPLOAD_DIR" "$BACKUP_DIR/uploads" + preserve_directory "$DEDUP_DIR" "$BACKUP_DIR/deduplication" + # Remove original data directory structure but keep preserved data + if [[ -d "$DEFAULT_DATA_DIR" && "$DEFAULT_DATA_DIR" != "$UPLOAD_DIR" && "$DEFAULT_DATA_DIR" != "$DEDUP_DIR" ]]; then + rm -rf "$DEFAULT_DATA_DIR" + echo " โœ“ Removed data directory (preserved content moved to $BACKUP_DIR)" + fi + ;; + "all") + # Preserve everything + BACKUP_DIR="/var/backups/hmac-file-server-$(date +%Y%m%d-%H%M%S)" + mkdir -p "$BACKUP_DIR" + preserve_directory "$UPLOAD_DIR" "$BACKUP_DIR/uploads" + preserve_directory "$DEDUP_DIR" "$BACKUP_DIR/deduplication" + preserve_directory "$LOG_DIR" "$BACKUP_DIR/logs" + # Remove original data directory structure but keep preserved data + if [[ -d "$DEFAULT_DATA_DIR" ]]; then + rm -rf "$DEFAULT_DATA_DIR" + echo " โœ“ Removed data directory (all content preserved in $BACKUP_DIR)" + fi + ;; + "custom") + # Handle custom selection + handle_custom_preservation + ;; + esac + + echo -e "${YELLOW}Removing system user...${NC}" + if id "$DEFAULT_USER" &>/dev/null; then + userdel "$DEFAULT_USER" || true + echo " โœ“ User $DEFAULT_USER removed" + else + echo " โš  User $DEFAULT_USER not found" + fi + + # Remove any remaining binary in common locations + echo -e "${YELLOW}Cleaning up any 
remaining files...${NC}" + for location in "/usr/local/bin/hmac-file-server" "/usr/bin/hmac-file-server"; do + if [[ -f "$location" ]]; then + rm -f "$location" + echo " โœ“ Removed $location" + fi + done + + echo "" + if [[ "$PRESERVE_DATA" != "none" ]]; then + echo -e "${GREEN}โœ… HMAC File Server uninstalled successfully with data preservation${NC}" + if [[ -d "$BACKUP_DIR" ]]; then + echo -e "${BLUE}๐Ÿ“ Preserved data location: $BACKUP_DIR${NC}" + echo -e "${BLUE} You can safely delete this directory if you no longer need the data.${NC}" + fi + else + echo -e "${GREEN}โœ… HMAC File Server uninstalled completely${NC}" + echo -e "${BLUE}All files, services, and user accounts have been removed.${NC}" + fi + echo "" +} + +# Check for help flag +if [[ "$1" == "--help" || "$1" == "-h" ]]; then + show_help + exit 0 +fi + +# Check for uninstall flag +if [[ "$1" == "--uninstall" ]]; then + uninstall + exit 0 +fi + +# Run main function +main "$@" diff --git a/test/hmac_test.go b/test/hmac_test.go index 6600f6f..f9bbf37 100644 --- a/test/hmac_test.go +++ b/test/hmac_test.go @@ -16,7 +16,7 @@ import ( const ( serverURL = "http://[::1]:8080" // Replace with your actual server URL - secret = "a-orc-and-a-humans-is-drinking-ale" // Replace with your HMAC secret key + secret = "hmac-file-server-is-the-win" // Replace with your HMAC secret key uploadPath = "hmac_icon.png" // Test file to upload protocolType = "v2" // Use v2, v, or token as needed ) diff --git a/test/server_flags_test.go b/test/server_flags_test.go new file mode 100644 index 0000000..f514674 --- /dev/null +++ b/test/server_flags_test.go @@ -0,0 +1,39 @@ +package main + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +// TestGenConfigFlag runs the server with --genconfig and checks output for expected config keys +func TestGenConfigFlag(t *testing.T) { + cmd := exec.Command("go", "run", "../cmd/server/main.go", "--genconfig") + output, err := cmd.CombinedOutput() + if err != nil && 
!strings.Contains(string(output), "[server]") { + t.Fatalf("Failed to run with --genconfig: %v\nOutput: %s", err, output) + } + if !strings.Contains(string(output), "[server]") || !strings.Contains(string(output), "bind_ip") { + t.Errorf("Example config missing expected keys. Output: %s", output) + } +} + +// TestIPv4IPv6Flag runs the server with forceprotocol=ipv4 and ipv6 and checks for startup errors +func TestIPv4IPv6Flag(t *testing.T) { + for _, proto := range []string{"ipv4", "ipv6", "auto"} { + cmd := exec.Command("go", "run", "../cmd/server/main.go", "--config", "../cmd/server/config.toml") + cmd.Env = append(os.Environ(), "FORCEPROTOCOL="+proto) + // Set Go module cache environment variables if not already set + if os.Getenv("GOMODCACHE") == "" { + cmd.Env = append(cmd.Env, "GOMODCACHE="+os.Getenv("HOME")+"/go/pkg/mod") + } + if os.Getenv("GOPATH") == "" { + cmd.Env = append(cmd.Env, "GOPATH="+os.Getenv("HOME")+"/go") + } + output, err := cmd.CombinedOutput() + if err != nil && !strings.Contains(string(output), "Configuration loaded successfully") { + t.Errorf("Server failed to start with forceprotocol=%s: %v\nOutput: %s", proto, err, output) + } + } +} diff --git a/test/test_installer_config.sh b/test/test_installer_config.sh new file mode 100755 index 0000000..6d51181 --- /dev/null +++ b/test/test_installer_config.sh @@ -0,0 +1,173 @@ +#!/bin/bash + +# Test script to validate installer configuration generation +# Tests that the installer generates config compatible with fixed struct definitions + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo -e "${BLUE}๐Ÿ” Testing Installer Configuration Generation${NC}" +echo "=============================================" +echo "" + +# Test configuration values that simulate installer input +export HMAC_SECRET="test-hmac-secret-32-characters-long-minimum" +export JWT_SECRET="test-jwt-secret-also-32-characters-long-minimum" 
+ +# Create a test directory +TEST_DIR="/tmp/hmac-installer-test-$$" +mkdir -p "$TEST_DIR" +cd "$TEST_DIR" + +echo -e "${YELLOW}๐Ÿ“ Test directory: $TEST_DIR${NC}" +echo "" + +# Copy necessary files for testing +cp /home/renz/source/hmac-file-server-uuxo/go.mod . +cp /home/renz/source/hmac-file-server-uuxo/go.sum . +cp -r /home/renz/source/hmac-file-server-uuxo/cmd . + +# Extract the generate_config function and create a test version +cat > test_config_generation.sh << 'EOF' +#!/bin/bash + +# Simulated installer variables +DEFAULT_CONFIG_DIR="./test-config" +DATA_DIR="./test-data" +DEFAULT_LOG_DIR="./test-logs" +SERVER_PORT="8080" +METRICS_PORT="9090" +ENABLE_TLS="false" +HMAC_SECRET="test-hmac-secret-32-characters-long-minimum" +ENABLE_JWT="false" +ENABLE_CLAMAV="false" +ENABLE_REDIS="false" + +# Create directories +mkdir -p "$DEFAULT_CONFIG_DIR" +mkdir -p "$DATA_DIR/runtime" +mkdir -p "$DEFAULT_LOG_DIR" + +# Generate configuration (extracted from installer) +generate_config() { + echo "Generating test configuration..." 
+ + cat > "$DEFAULT_CONFIG_DIR/config.toml" << EOFCONFIG +# HMAC File Server Configuration +# Generated by installer test on $(date) + +[server] +bind_ip = "0.0.0.0" +listenport = "$SERVER_PORT" +unixsocket = false +storagepath = "$DATA_DIR/uploads" +metricsenabled = true +metricsport = "$METRICS_PORT" +deduplicationenabled = true +deduplicationpath = "$DATA_DIR/deduplication" +filenaming = "HMAC" +force_protocol = "auto" +pidfilepath = "$DATA_DIR/runtime/hmac-file-server.pid" +sslenabled = false + +[security] +secret = "$HMAC_SECRET" +enablejwt = false + +[uploads] +allowedextensions = [".txt", ".pdf", ".jpg", ".jpeg", ".png", ".gif", ".webp", ".zip", ".tar", ".gz", ".7z", ".mp4", ".webm", ".ogg", ".mp3", ".wav", ".flac", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx", ".odt", ".ods", ".odp"] +maxfilesize = "100MB" +chunkeduploadsenabled = true +chunksize = "10MB" +ttlenabled = false +ttl = "168h" + +[downloads] +chunkeddownloadsenabled = true +chunksize = "10MB" + +[logging] +level = "INFO" +file = "$DEFAULT_LOG_DIR/hmac-file-server.log" +max_size = 100 +max_backups = 3 +max_age = 30 +compress = true + +[workers] +numworkers = 10 +uploadqueuesize = 1000 +autoscaling = true + +[timeouts] +readtimeout = "30s" +writetimeout = "30s" +idletimeout = "120s" +shutdown = "30s" + +[clamav] +enabled = false + +[redis] +enabled = false +EOFCONFIG + + echo "Configuration file created: $DEFAULT_CONFIG_DIR/config.toml" +} + +# Call the function +generate_config +EOF + +chmod +x test_config_generation.sh +./test_config_generation.sh + +echo -e "${YELLOW}๐Ÿ“‹ Generated test configuration:${NC}" +echo "" +cat ./test-config/config.toml +echo "" + +# Build a test binary to validate the configuration +echo -e "${YELLOW}๐Ÿ”จ Building test binary...${NC}" +if go build -o hmac-test-server ./cmd/server/*.go; then + echo -e "${GREEN}โœ… Build successful${NC}" +else + echo -e "${RED}โŒ Build failed${NC}" + exit 1 +fi + +echo "" +echo -e "${YELLOW}๐Ÿ” Testing configuration 
validation...${NC}" + +# Test configuration validation +if ./hmac-test-server -config ./test-config/config.toml -validate-config -validate-quiet; then + echo -e "${GREEN}โœ… Configuration validation PASSED!${NC}" + echo "" + echo -e "${GREEN}๐ŸŽ‰ All critical fixes verified:${NC}" + echo -e "${GREEN} โœ“ Workers: numworkers/uploadqueuesize (not initial/max)${NC}" + echo -e "${GREEN} โœ“ Protocol: force_protocol (not forceprotocol)${NC}" + echo -e "${GREEN} โœ“ PID file: pidfilepath configured${NC}" + echo -e "${GREEN} โœ“ Timeouts: read/write/idle/shutdown${NC}" + echo -e "${GREEN} โœ“ Logging: level/file/max_size/max_backups/max_age${NC}" + VALIDATION_RESULT=0 +else + echo -e "${RED}โŒ Configuration validation FAILED!${NC}" + echo "" + echo -e "${YELLOW}Running detailed validation for diagnosis...${NC}" + ./hmac-test-server -config ./test-config/config.toml -validate-config -validate-verbose || true + VALIDATION_RESULT=1 +fi + +echo "" +echo -e "${YELLOW}๐Ÿงน Cleaning up test directory...${NC}" +cd / +rm -rf "$TEST_DIR" + +echo -e "${BLUE}Test completed.${NC}" +exit $VALIDATION_RESULT diff --git a/verify_installation.sh b/verify_installation.sh new file mode 100755 index 0000000..70995a3 --- /dev/null +++ b/verify_installation.sh @@ -0,0 +1,230 @@ +#!/bin/bash + +# HMAC File Server v3.2 - Installation Verification Script +# Run this script on your production server to verify the installation + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +echo -e "${BLUE}๐Ÿ” HMAC File Server v3.2 - Installation Verification${NC}" +echo "======================================================" +echo "" + +# Check if running as root +if [[ $EUID -ne 0 ]]; then + echo -e "${RED}โŒ This script must be run as root (use sudo)${NC}" + exit 1 +fi + +ERRORS=0 +WARNINGS=0 + +# Function to report status +report_status() { + local status=$1 + local message=$2 + local details=$3 + + 
case $status in + "OK") + echo -e "${GREEN}✅ $message${NC}" + [[ -n "$details" ]] && echo -e " ${CYAN}$details${NC}" + ;; + "WARNING") + echo -e "${YELLOW}⚠️ $message${NC}" + [[ -n "$details" ]] && echo -e " ${YELLOW}$details${NC}" + ((WARNINGS++)) || true + ;; + "ERROR") + echo -e "${RED}❌ $message${NC}" + [[ -n "$details" ]] && echo -e " ${RED}$details${NC}" + ((ERRORS++)) || true + ;; + "INFO") + echo -e "${CYAN}ℹ️ $message${NC}" + [[ -n "$details" ]] && echo -e " $details" + ;; + esac +} + + # 1. Check SystemD Service Status + echo -e "${YELLOW}🔧 Checking SystemD Service...${NC}" + if systemctl is-active --quiet hmac-file-server; then + service_status=$(systemctl status hmac-file-server --no-pager -l | head -10) + uptime=$(systemctl show hmac-file-server --property=ActiveEnterTimestamp --value) + report_status "OK" "HMAC File Server service is running" "Active since: $uptime" + else + service_status=$(systemctl status hmac-file-server --no-pager -l | head -10) + report_status "ERROR" "HMAC File Server service is not running" "$service_status" + fi + + if systemctl is-enabled --quiet hmac-file-server; then + report_status "OK" "Service is enabled (will start on boot)" + else + report_status "WARNING" "Service is not enabled for auto-start" + fi + + echo "" + + # 2.
Check Installation Files +echo -e "${YELLOW}๐Ÿ“ Checking Installation Files...${NC}" + +# Binary +if [[ -f "/opt/hmac-file-server/hmac-file-server" ]]; then + binary_info=$(ls -lh /opt/hmac-file-server/hmac-file-server) + report_status "OK" "Binary installed" "$binary_info" + + # Check if binary has version flag (indicates correct build) + if /opt/hmac-file-server/hmac-file-server --version >/dev/null 2>&1; then + version=$(/opt/hmac-file-server/hmac-file-server --version 2>/dev/null || echo "Unknown") + report_status "OK" "Binary supports --version flag" "Version: $version" + else + report_status "WARNING" "Binary doesn't support --version flag (may be old build)" + fi +else + report_status "ERROR" "Binary not found at /opt/hmac-file-server/hmac-file-server" +fi + +# Configuration +if [[ -f "/etc/hmac-file-server/config.toml" ]]; then + config_info=$(ls -lh /etc/hmac-file-server/config.toml) + report_status "OK" "Configuration file exists" "$config_info" +else + report_status "ERROR" "Configuration file not found at /etc/hmac-file-server/config.toml" +fi + +# Data directories +data_dirs=("/var/lib/hmac-file-server" "/var/log/hmac-file-server") +for dir in "${data_dirs[@]}"; do + if [[ -d "$dir" ]]; then + dir_info=$(ls -lhd "$dir") + report_status "OK" "Directory exists: $dir" "$dir_info" + else + report_status "WARNING" "Directory missing: $dir" + fi +done + +echo "" + +# 3. 
Check Configuration Validation +echo -e "${YELLOW}โš™๏ธ Checking Configuration Validation...${NC}" +if [[ -f "/opt/hmac-file-server/hmac-file-server" ]]; then + echo -e "${CYAN}Running configuration validation...${NC}" + + # Run validation with timeout + if timeout 30s /opt/hmac-file-server/hmac-file-server -config /etc/hmac-file-server/config.toml --validate-config >/tmp/hmac_validation.log 2>&1; then + report_status "OK" "Configuration validation passed" + + # Check for warnings in validation output + if grep -q "WARNING\|WARN" /tmp/hmac_validation.log; then + warning_count=$(grep -c "WARNING\|WARN" /tmp/hmac_validation.log) + report_status "WARNING" "Configuration validation has $warning_count warnings" "Check logs for details" + fi + else + validation_error=$(tail -5 /tmp/hmac_validation.log) + report_status "ERROR" "Configuration validation failed" "$validation_error" + fi + + rm -f /tmp/hmac_validation.log +fi + +echo "" + +# 4. Check Network Connectivity +echo -e "${YELLOW}๐ŸŒ Checking Network Connectivity...${NC}" + +# Extract ports from config +if [[ -f "/etc/hmac-file-server/config.toml" ]]; then + server_port=$(grep -E "^listenport\s*=" /etc/hmac-file-server/config.toml | cut -d'"' -f2 | tr -d '"' || echo "8080") + metrics_port=$(grep -E "^metricsport\s*=" /etc/hmac-file-server/config.toml | cut -d'"' -f2 | tr -d '"' || echo "9090") + + # Check if ports are listening + if netstat -tln 2>/dev/null | grep -q ":$server_port "; then + report_status "OK" "Server port $server_port is listening" + else + report_status "ERROR" "Server port $server_port is not listening" + fi + + if netstat -tln 2>/dev/null | grep -q ":$metrics_port "; then + report_status "OK" "Metrics port $metrics_port is listening" + else + report_status "WARNING" "Metrics port $metrics_port is not listening" + fi + + # Test HTTP connectivity + if curl -s --connect-timeout 5 "http://localhost:$server_port" >/dev/null 2>&1; then + report_status "OK" "HTTP server responding on port 
$server_port" + elif curl -s --connect-timeout 5 "http://localhost:$server_port" 2>&1 | grep -q "404\|401\|403"; then + report_status "OK" "HTTP server responding (expected auth required)" + else + report_status "WARNING" "HTTP server not responding on port $server_port" + fi +fi + +echo "" + +# 5. Check System Resources +echo -e "${YELLOW}๐Ÿ’พ Checking System Resources...${NC}" + +# Memory usage +memory_usage=$(ps -o pid,ppid,cmd,%mem,%cpu --sort=-%mem -C hmac-file-server | tail -n +2) +if [[ -n "$memory_usage" ]]; then + report_status "OK" "Process running and using resources" "$memory_usage" +else + report_status "WARNING" "No process information available" +fi + +# Disk space +storage_path=$(grep -E "^storagepath\s*=" /etc/hmac-file-server/config.toml 2>/dev/null | cut -d'"' -f2 | tr -d '"' || echo "/var/lib/hmac-file-server") +if [[ -d "$storage_path" ]]; then + disk_usage=$(df -h "$storage_path" | tail -1) + report_status "INFO" "Storage directory disk usage" "$disk_usage" +fi + +echo "" + +# 6. Check Logs +echo -e "${YELLOW}๐Ÿ“‹ Checking Recent Logs...${NC}" + +# SystemD logs +recent_logs=$(journalctl -u hmac-file-server --since "5 minutes ago" --no-pager -q) +if [[ -n "$recent_logs" ]]; then + report_status "INFO" "Recent SystemD logs available" + echo -e "${CYAN}Last 5 log entries:${NC}" + echo "$recent_logs" | tail -5 +else + report_status "INFO" "No recent SystemD logs (service may be stable)" +fi + +echo "" + +# 7. Final Summary +echo -e "${BLUE}๐Ÿ“Š Verification Summary${NC}" +echo "========================" + +if [[ $ERRORS -eq 0 && $WARNINGS -eq 0 ]]; then + echo -e "${GREEN}๐ŸŽ‰ PERFECT! HMAC File Server installation is working correctly!${NC}" + echo -e "${GREEN} No errors or warnings found.${NC}" +elif [[ $ERRORS -eq 0 ]]; then + echo -e "${YELLOW}โœ… GOOD! HMAC File Server is working with $WARNINGS warning(s).${NC}" + echo -e "${YELLOW} Review warnings above for optimization opportunities.${NC}" +else + echo -e "${RED}โŒ ISSUES FOUND! 
$ERRORS error(s) and $WARNINGS warning(s) detected.${NC}" + echo -e "${RED} Please address the errors above before using in production.${NC}" +fi + +echo "" +echo -e "${CYAN}๐Ÿ’ก Additional Checks You Can Perform:${NC}" +echo " โ€ข Test file upload: curl -X POST -F \"file=@testfile.txt\" http://localhost:$server_port/" +echo " โ€ข Check metrics: curl http://localhost:$metrics_port/metrics" +echo " โ€ข Review full logs: journalctl -u hmac-file-server -f" +echo " โ€ข Test configuration: /opt/hmac-file-server/hmac-file-server --validate-config" + +exit $ERRORS