diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..9d2d0e76
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,89 @@
+name: CI
+
+on:
+  workflow_dispatch: {}
+  push:
+    branches: [ develop, main, 'feat/**' ]
+  pull_request:
+    branches: [ develop, main ]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'
+
+      - name: Upgrade pip, setuptools, wheel
+        run: |
+          python -m pip install --upgrade pip setuptools wheel
+
+      - name: Install build backend (hatchling)
+        run: |
+          python -m pip install hatchling
+
+      - name: Install package (editable) with dev extras using constraints
+        run: |
+          python -m pip install -e '.[dev]' -c constraints.txt
+
+      - name: Run tests
+        run: |
+          python -m pytest -q
+
+  integration:
+    runs-on: ubuntu-latest
+    needs: test
+    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop' || startsWith(github.ref, 'refs/heads/feat/')
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'
+
+      - name: Upgrade pip, setuptools, wheel
+        run: |
+          python -m pip install --upgrade pip setuptools wheel
+
+      - name: Install build backend (hatchling)
+        run: |
+          python -m pip install hatchling
+
+      - name: Install providers extra (constrained)
+        run: |
+          python -m pip install -e '.[providers]' -c constraints.txt
+
+      - name: Check for dependency issues
+        run: |
+          python -m pip check
+
+      - name: Provider smoke check
+        run: |
+          python -c "import sys, pkgutil; print('Providers installed:', [m.name for m in pkgutil.iter_modules() if m.name.startswith(('anthropic','google','e2b'))])"
+
+      - name: Write Google service account file (from secret)
+        if: ${{ secrets.GOOGLE_SERVICE_ACCOUNT_JSON != '' }}
+        env:
+          SA_JSON: ${{ secrets.GOOGLE_SERVICE_ACCOUNT_JSON }}
+        run: |
+          # write the multi-line JSON from the secret into a file
+          printf '%s' "$SA_JSON" > "$GITHUB_WORKSPACE/gcloud_sa.json"
+          chmod 600 "$GITHUB_WORKSPACE/gcloud_sa.json"
+        shell: bash
+
+      - name: Run provider integration tests (guarded)
+        env:
+          # enable guarded integration tests in pytest
+          RUN_PROVIDERS_INTEGRATION: '1'
+          # point Google auth to the written service account file (if present)
+          GOOGLE_APPLICATION_CREDENTIALS: ${{ github.workspace }}/gcloud_sa.json
+          # provider API keys (set these in repository secrets if needed)
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          E2B_API_KEY: ${{ secrets.E2B_API_KEY }}
+        run: |
+          python -m pytest tests/integration -q
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..c9a374ff
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+## Contributing & Provider Integrations
+
+Thanks for contributing! This file contains a short note about the optional provider integrations and how CI handles them.
+
+### Provider integrations (`providers` extra)
+
+Some integrations (Google APIs, Anthropic, e2b code interpreter, and related libraries) are heavy and pull in many transitive dependencies. To keep everyday development, contributor onboarding, and CI runs fast and deterministic, these are provided as an optional extra named `providers`.
+
+When to opt into `providers`:
+- If you are developing or testing code that interacts with those provider APIs (e.g., Google GenAI, Anthropic, e2b), install the `providers` extra locally.
+- If you are working on core features, UI, or small tools that don't call external providers, you do not need to install `providers`.
+
+How to install locally (recommended with constraints):
+
+```powershell
+# Install only providers (use the repo constraints for deterministic resolution)
+python -m pip install -e ".[providers]" -c constraints.txt
+
+# Or install dev and providers when running tests that require providers
+python -m pip install -e ".[dev]" -c constraints.txt
+python -m pip install -e ".[providers]" -c constraints.txt
+```
+
+Notes:
+- `constraints.txt` is included to pin heavy dependencies and avoid pip resolver backtracking. Use it in CI and locally when installing the `providers` extra.
+- Provider integrations may require credentials or API keys. Do NOT add secrets to the repo. Use repository secrets in CI or local environment variables.
+
+### CI behavior
+
+- The default `test` job runs lightweight unit tests and installs the `dev` extras using `constraints.txt` to keep runs fast and deterministic.
+- A separate `integration` job installs the `providers` extra (also using `constraints.txt`) and performs a quick smoke check and `pip check`. This job runs after `test` and is intended to verify provider installs in CI without running long integration suites by default.
+
+If you are adding provider-specific integration tests that require credentials, please:
+
+1. Add tests under `tests/integration/` (or a similar directory). Keep them separate from unit tests.
+2. Guard tests that rely on external credentials with markers or environment checks so they are skipped when credentials are not present (see the example at the end of this file).
+3. Add instructions in this file for any required secrets and which GitHub Actions secrets to set.
+
+Thanks for keeping the repo tidy — small, focused installs make the project easier for everyone to contribute to.
+
+## Repository secrets
+
+If you want the guarded provider integration tests to run in CI, add the following repository secrets (recommended names) in GitHub:
+
+- `GOOGLE_SERVICE_ACCOUNT_JSON` — the full multi-line service account JSON value. The CI job writes this into a file at runtime and sets `GOOGLE_APPLICATION_CREDENTIALS` to point at it.
+- `ANTHROPIC_API_KEY` — your Anthropic API key, used by the Anthropic integration tests.
+- `E2B_API_KEY` — your e2b API key, used by the e2b tests.
+
+How to add secrets (GitHub UI):
+
+1. Go to your repository on GitHub → `Settings` → `Secrets and variables` → `Actions`.
+2. Click `New repository secret`.
+3. Enter the **Name** (one of the names above) and paste the secret value (for `GOOGLE_SERVICE_ACCOUNT_JSON`, paste the full JSON content).
+4. Click `Add secret`.
+
+Notes and safety:
+- Never commit credentials or service account files into the repository.
+- Only enable provider integration runs in CI on protected branches or from trusted PRs, since these tests require secrets and network access.
+- The CI `integration` job only runs provider tests if the secrets are present and `RUN_PROVIDERS_INTEGRATION=1` is set in the job environment.
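+
+### Example: guarding provider tests
+
+A minimal sketch of the guard pattern described above, using the `RUN_PROVIDERS_INTEGRATION` and `ANTHROPIC_API_KEY` variables this repo's CI already sets (the `tests/integration/conftest.py` location is only a suggestion):
+
+```python
+# tests/integration/conftest.py (suggested location, not required)
+import os
+
+import pytest
+
+# Skip unless the CI job (or a developer) explicitly opts in.
+requires_providers = pytest.mark.skipif(
+    os.getenv("RUN_PROVIDERS_INTEGRATION") != "1",
+    reason="set RUN_PROVIDERS_INTEGRATION=1 to enable provider integration tests",
+)
+
+# Skip when the credential for a specific provider is absent.
+requires_anthropic = pytest.mark.skipif(
+    not os.getenv("ANTHROPIC_API_KEY"),
+    reason="ANTHROPIC_API_KEY not set",
+)
+```
+
+Tests then opt in by stacking the markers:
+
+```python
+@requires_providers
+@requires_anthropic
+def test_anthropic_smoke():
+    import anthropic  # heavy import only happens when the guards pass
+
+    assert anthropic is not None
+```
+
+This keeps unit-test runs fast: unguarded tests never import provider SDKs, and guarded tests report as skipped (with a reason) rather than failing when secrets are absent.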
+
diff --git a/README.md b/README.md
index f292fdd2..4e4e3e6a 100644
--- a/README.md
+++ b/README.md
@@ -43,3 +43,22 @@ II-Agent Chat also feature within II-Agent that lets you work across multiple mo
 For the latest installation and deployment instructions, please refer to our [official guide](https://intelligent-internet.github.io/ii-agent-prod/)
 
 [![Installation Guide](https://img.youtube.com/vi/wPpeJMbdGi4/maxresdefault.jpg)](https://www.youtube.com/watch?v=wPpeJMbdGi4)
+
+## Optional provider integrations
+
+Some integrations (Google APIs, Anthropic, e2b code interpreter, etc.) are heavy and have many transitive dependencies. To keep development and CI installs fast and deterministic, these are provided as an optional extra named `providers`.
+
+To install the providers extra locally (recommended when you need provider integrations), use the `constraints.txt` included in the repository to avoid pip resolver backtracking:
+
+```powershell
+python -m pip install -e ".[providers]" -c constraints.txt
+```
+
+Or install both dev and providers when you need tests and provider integrations:
+
+```powershell
+python -m pip install -e ".[dev]" -c constraints.txt
+python -m pip install -e ".[providers]" -c constraints.txt
+```
+
+If you prefer not to install providers, the project will still work for most development tasks without them.
diff --git a/constraints.txt b/constraints.txt
new file mode 100644
index 00000000..f9e6a526
--- /dev/null
+++ b/constraints.txt
@@ -0,0 +1,14 @@
+## Constraints to limit pip resolver backtracking for heavy deps
+# Pin commonly large, interdependent packages to known-compatible versions
+# Adjust versions if CI or local environments require different pins.
+google-api-python-client==2.186.0
+google-api-core==2.28.1
+google-auth==2.42.1
+google-auth-httplib2==0.2.0
+google-auth-oauthlib==1.2.3
+googleapis-common-protos==1.72.0
+proto-plus==1.26.1
+protobuf==4.25.8
+uritemplate==4.2.0
+e2b-code-interpreter==1.2.0b5
+e2b==1.2.0b5
diff --git a/modules/sbll_knowledge_chain/__init__.py b/modules/sbll_knowledge_chain/__init__.py
new file mode 100644
index 00000000..13323d1d
--- /dev/null
+++ b/modules/sbll_knowledge_chain/__init__.py
@@ -0,0 +1,3 @@
+from .tool_wrapper import KnowledgeChainTool
+
+__all__ = ["KnowledgeChainTool"]
diff --git a/modules/sbll_knowledge_chain/advanced_features.py b/modules/sbll_knowledge_chain/advanced_features.py
new file mode 100644
index 00000000..a69dce5d
--- /dev/null
+++ b/modules/sbll_knowledge_chain/advanced_features.py
@@ -0,0 +1,352 @@
+"""
+Advanced features for the SBLL Philosophical Blockchain
+Building on the existing implementation
+"""
+
+import numpy as np
+from typing import Dict, List, Any, Tuple
+from enum import Enum
+
+class AdvancedPhilosophicalMetrics:
+    """Advanced metrics and analytics for philosophical content"""
+
+    @staticmethod
+    def calculate_semantic_density(content: str) -> float:
+        """Calculate semantic density of philosophical content"""
+        words = content.split()
+        philosophical_terms = {
+            'consciousness', 'reality', 'truth', 'meaning', 'ethics',
+            'cosmic', 'optimization', 'constraint', 'emergence', 'verification'
+        }
+
+        if not words:
+            return 0.0
+
+        philosophical_count = sum(1 for word in words if word.lower() in philosophical_terms)
+        return philosophical_count / len(words)
+
+    @staticmethod
+    def calculate_inference_complexity(content: str) -> int:
+        """Calculate inference complexity based on logical connectors"""
+        connectors = {
+            'because': 2, 'therefore': 2, 'thus': 2, 'hence': 2,
+            'if': 1,
'then': 1, 'implies': 2, 'requires': 1, + 'emerges': 3, 'transcends': 3, 'manifests': 2 + } + + complexity = 0 + for connector, weight in connectors.items(): + if connector in content.lower(): + complexity += weight + + return complexity + +class CrossTraditionSynthesizer: + """Synthesize insights across philosophical traditions""" + + @staticmethod + def synthesize_insights(insights: List[Dict]) -> Dict: + """Synthesize multiple insights into unified understanding""" + if not insights: + return {'synthesis': 'No insights to synthesize', 'coherence': 0.0} + + # Group by tradition + by_tradition = {} + for insight in insights: + trad = insight.get('tradition', 'UNIVERSAL') + if trad not in by_tradition: + by_tradition[trad] = [] + by_tradition[trad].append(insight) + + # Find common themes + all_contents = [insight['content'] for insight in insights] + common_words = CrossTraditionSynthesizer._find_common_words(all_contents) + + # Calculate cross-traditional coherence + coherence = CrossTraditionSynthesizer._calculate_coherence(by_tradition) + + # Generate synthesis + synthesis = f"Cross-traditional synthesis ({len(by_tradition)} traditions):\n" + synthesis += f"Common themes: {', '.join(common_words[:3])}\n" + + for tradition, trad_insights in by_tradition.items(): + synthesis += f"\n• {tradition}: {len(trad_insights)} insights" + if trad_insights: + avg_confidence = sum(i.get('truth_confidence', 0) for i in trad_insights) / len(trad_insights) + synthesis += f" (avg confidence: {avg_confidence:.0f})" + + return { + 'synthesis': synthesis, + 'coherence': coherence, + 'traditions_count': len(by_tradition), + 'total_insights': len(insights), + 'common_themes': common_words[:5] + } + + @staticmethod + def _find_common_words(contents: List[str], top_n: int = 10) -> List[str]: + """Find most common words across multiple contents""" + from collections import Counter + + # Common philosophical words to ignore + stop_words = {'the', 'and', 'is', 'in', 'of', 'to', 'a', 'that', 'it', 'as'} + + all_words = [] + for content in contents: + words = [word.lower().strip('.,!?;:\"\'') for word in content.split() if len(word) > 4 and word.lower() not in stop_words] + all_words.extend(words) + + word_counts = Counter(all_words) + return [word for word, _ in word_counts.most_common(top_n)] + + @staticmethod + def _calculate_coherence(by_tradition: Dict) -> float: + """Calculate coherence across traditions""" + if len(by_tradition) < 2: + return 1.0 # Single tradition is perfectly coherent with itself + + # Compare average confidence across traditions + avg_confidences = [] + for trad_insights in by_tradition.values(): + avg_conf = sum(i.get('truth_confidence', 0) for i in trad_insights) / len(trad_insights) + avg_confidences.append(avg_conf) + + # Calculate variance (lower variance = higher coherence) + variance = np.var(avg_confidences) if avg_confidences else 0 + max_variance = 10000 # Arbitrary scaling + coherence = 1.0 - min(1.0, variance / max_variance) + + return coherence + +class BlockchainVisualizer: + """Visualize and analyze the blockchain""" + + @staticmethod + def generate_blockchain_report(chain: List[Dict]) -> Dict: + """Generate comprehensive blockchain report""" + if not chain: + return {'error': 'Empty chain'} + + total_blocks = len(chain) + total_insights = sum(len(block['data'].get('insights', [])) for block in chain) + total_claims = sum(len(block['data'].get('claims', [])) for block in chain) + + # Calculate metrics over time + confidences = [block['truth_confidence'] for block in chain] 
+ densities = [block['philosophical_density'] for block in chain] + + # Find patterns + avg_confidence = np.mean(confidences) + avg_density = np.mean(densities) + + # Calculate chain growth + if len(chain) > 1: + time_span = chain[-1]['timestamp'] - chain[0]['timestamp'] + blocks_per_day = (len(chain) - 1) / (time_span / 86400) if time_span > 0 else 0 + else: + blocks_per_day = 0 + + return { + 'total_blocks': total_blocks, + 'total_insights': total_insights, + 'total_claims': total_claims, + 'avg_confidence': avg_confidence, + 'avg_density': avg_density, + 'blocks_per_day': blocks_per_day, + 'chain_health': BlockchainVisualizer._calculate_chain_health(chain), + 'most_prolific_tradition': BlockchainVisualizer._find_most_prolific_tradition(chain), + 'top_philosophical_themes': BlockchainVisualizer._extract_themes(chain) + } + + @staticmethod + def _calculate_chain_health(chain: List[Dict]) -> float: + """Calculate overall blockchain health""" + if len(chain) < 2: + return 1.0 + + # Factors: consistency, growth, content quality + consistency_score = BlockchainVisualizer._calculate_consistency(chain) + growth_score = min(1.0, len(chain) / 10) # Normalize to 10 blocks + quality_score = np.mean([block['truth_confidence'] for block in chain]) / 255 + + return (consistency_score + growth_score + quality_score) / 3 + + @staticmethod + def _calculate_consistency(chain: List[Dict]) -> float: + """Calculate consistency across blocks""" + if len(chain) < 2: + return 1.0 + + confidences = [block['truth_confidence'] for block in chain] + variance = np.var(confidences) + + # Lower variance = higher consistency + max_variance = 1000 + consistency = 1.0 - min(1.0, variance / max_variance) + + return consistency + + @staticmethod + def _find_most_prolific_tradition(chain: List[Dict]) -> str: + """Find which tradition contributes most content""" + tradition_counts = {} + + for block in chain: + for insight in block['data'].get('insights', []): + trad = insight.get('tradition', 'UNKNOWN') + tradition_counts[trad] = tradition_counts.get(trad, 0) + 1 + + if not tradition_counts: + return 'NONE' + + return max(tradition_counts.items(), key=lambda x: x[1])[0] + + @staticmethod + def _extract_themes(chain: List[Dict], top_n: int = 5) -> List[str]: + """Extract top philosophical themes from blockchain""" + from collections import Counter + + all_content = [] + for block in chain: + for insight in block['data'].get('insights', []): + all_content.append(insight['content']) + for claim in block['data'].get('claims', []): + all_content.append(claim['content']) + + # Simple word frequency analysis + philosophical_terms = { + 'consciousness', 'reality', 'truth', 'meaning', 'ethics', + 'cosmic', 'optimization', 'constraint', 'emergence', 'verification', + 'knowledge', 'wisdom', 'understanding', 'existence', 'being', + 'evolution', 'complexity', 'system', 'network', 'relation' + } + + word_counts = Counter() + for content in all_content: + words = content.lower().split() + for word in words: + if word in philosophical_terms: + word_counts[word] += 1 + + return [word for word, _ in word_counts.most_common(top_n)] + +# Enhanced KnowledgeChainTool with advanced features +class EnhancedKnowledgeChainTool: + """Enhanced version with advanced analytics""" + + def __init__(self): + from .tool_wrapper import KnowledgeChainTool + self.base_tool = KnowledgeChainTool() + self.metrics = AdvancedPhilosophicalMetrics() + self.synthesizer = CrossTraditionSynthesizer() + self.visualizer = BlockchainVisualizer() + + def 
analyze_content(self, content: str) -> Dict: + """Advanced analysis of philosophical content""" + return { + 'semantic_density': self.metrics.calculate_semantic_density(content), + 'inference_complexity': self.metrics.calculate_inference_complexity(content), + 'estimated_confidence': self._estimate_confidence(content), + 'tradition_suggestions': self._suggest_tradition(content) + } + + def synthesize_blockchain_wisdom(self, topic: str) -> Dict: + """Synthesize all wisdom on a topic from blockchain""" + # Query blockchain for topic + results = self.base_tool.oracle.seek_wisdom(topic, min_confidence=100) + + if results['total_found'] == 0: + return {'synthesis': f"No wisdom found on '{topic}'", 'insights': []} + + # Get all insights + all_insights = [] + for block in self.base_tool.blockchain.chain: + for insight in block['data'].get('insights', []): + if topic.lower() in insight['content'].lower(): + all_insights.append(insight) + + # Synthesize + synthesis = self.synthesizer.synthesize_insights(all_insights) + synthesis['topic'] = topic + synthesis['source'] = 'SBLL Blockchain' + + return synthesis + + def get_blockchain_analytics(self) -> Dict: + """Get comprehensive blockchain analytics""" + return self.visualizer.generate_blockchain_report(self.base_tool.blockchain.chain) + + def _estimate_confidence(self, content: str) -> int: + """Estimate truth confidence based on content analysis""" + base_confidence = 180 + + # Boosters + if any(word in content.lower() for word in ['verified', 'evidence', 'proof', 'demonstrated']): + base_confidence += 20 + + if any(word in content.lower() for word in ['universal', 'fundamental', 'necessary', 'essential']): + base_confidence += 15 + + # Penalties + if any(word in content.lower() for word in ['maybe', 'possibly', 'perhaps', 'might']): + base_confidence -= 10 + + return min(255, max(100, base_confidence)) + + def _suggest_tradition(self, content: str) -> List[str]: + """Suggest philosophical traditions based on content""" + tradition_keywords = { + 'WESTERN_ANALYTIC': ['logic', 'analysis', 'empirical', 'verification', 'computation'], + 'EASTERN_BUDDHIST': ['impermanence', 'emptiness', 'mindfulness', 'suffering', 'nirvana'], + 'EASTERN_TAOIST': ['flow', 'harmony', 'natural', 'balance', 'wu wei'], + 'MODERN_SCIENTIFIC': ['experiment', 'data', 'theory', 'evidence', 'hypothesis'] + } + + suggestions = [] + content_lower = content.lower() + + for tradition, keywords in tradition_keywords.items(): + matches = sum(1 for keyword in keywords if keyword in content_lower) + if matches >= 2: # At least 2 keywords match + suggestions.append(tradition) + + return suggestions if suggestions else ['WESTERN_ANALYTIC'] # Default + +# Integration helper to enhance existing tool (returns a subclass) +def enhance_existing_tool(): + """Return an EnhancedTool subclass of KnowledgeChainTool""" + from .tool_wrapper import KnowledgeChainTool as OriginalTool + + class EnhancedTool(OriginalTool): + def __init__(self): + super().__init__() + self.enhanced_features = EnhancedKnowledgeChainTool() + + def advanced_query(self, topic: str) -> str: + """Enhanced query with synthesis and analytics""" + synthesis = self.enhanced_features.synthesize_blockchain_wisdom(topic) + + if synthesis.get('total_insights', 0): + result = f"🔮 ADVANCED SYNTHESIS ON: {topic}\n" + result += f"Found {synthesis['total_insights']} insights across {synthesis['traditions_count']} traditions\n" + result += f"Cross-traditional coherence: {synthesis['coherence']:.1%}\n\n" + result += synthesis['synthesis'] + 
return result + else: + return f"No synthesized wisdom found on '{topic}'" + + def get_analytics(self) -> str: + """Get blockchain analytics""" + analytics = self.enhanced_features.get_blockchain_analytics() + + result = "📊 BLOCKCHAIN ANALYTICS\n" + result += f"• Total Blocks: {analytics['total_blocks']}\n" + result += f"• Total Insights: {analytics['total_insights']}\n" + result += f"• Average Confidence: {analytics['avg_confidence']:.0f}/255\n" + result += f"• Chain Health: {analytics['chain_health']:.1%}\n" + result += f"• Most Prolific Tradition: {analytics['most_prolific_tradition']}\n" + result += f"• Top Themes: {', '.join(analytics['top_philosophical_themes'])}\n" + + return result + + return EnhancedTool diff --git a/modules/sbll_knowledge_chain/chain.py b/modules/sbll_knowledge_chain/chain.py new file mode 100644 index 00000000..4fcf3fdf --- /dev/null +++ b/modules/sbll_knowledge_chain/chain.py @@ -0,0 +1,240 @@ +import hashlib +import time +import json +import os +import logging +from typing import Dict, List, Any, Optional +from enum import Enum + +# Configure module-level logger +logger = logging.getLogger(__name__) + +class PhilosophicalTradition(Enum): + WESTERN_ANALYTIC = 0x01 + EASTERN_BUDDHIST = 0x02 + EASTERN_TAOIST = 0x03 + WESTERN_CONTINENTAL = 0x04 + MODERN_SCIENTIFIC = 0x05 + +class QuantumPhilosophicalEncoder: + """Quantum-inspired encoder for philosophical content""" + + def encode_insight(self, insight_data: Dict) -> Dict: + encoded = { + 'content': insight_data['content'], + 'tradition': insight_data.get('tradition', 'WESTERN_ANALYTIC'), + 'consciousness_level': insight_data.get('consciousness_level', 'SAPIENCE'), + 'truth_confidence': insight_data.get('truth_confidence', 200), + 'cosmic_alignment': insight_data.get('cosmic_alignment', 200), + 'timestamp': insight_data.get('timestamp', int(time.time())), + 'quantum_hash': self._quantum_hash(insight_data['content']), + 'compression_ratio': self._calculate_compression(insight_data['content']) + } + if 'derivation_chain' in insight_data: + encoded['derivation_chain'] = [ + self._quantum_hash(deriv) for deriv in insight_data['derivation_chain'] + ] + return encoded + + def encode_claim(self, claim_data: Dict) -> Dict: + encoded = { + 'content': claim_data['content'], + 'verification_methods': claim_data.get('verification_methods', ['CORRESPONDENCE']), + 'confidence_score': claim_data.get('confidence_score', 180), + 'reality_contact': claim_data.get('reality_contact', 190), + 'timestamp': claim_data.get('timestamp', int(time.time())), + 'quantum_hash': self._quantum_hash(claim_data['content']), + 'evidence_references': claim_data.get('evidence_hashes', []) + } + return encoded + + def _quantum_hash(self, content: str) -> str: + sha3_hash = hashlib.sha3_256(content.encode()).hexdigest() + blake_hash = hashlib.blake2b(content.encode()).hexdigest() + combined = sha3_hash + blake_hash + return hashlib.sha3_256(combined.encode()).hexdigest()[:32] + + def _calculate_compression(self, content: str) -> float: + original_size = len(content.encode('utf-8')) + encoded_size = len(self._quantum_hash(content)) + 100 + return original_size / encoded_size if original_size > 0 else 0.0 + +class SBBLBlockchain: + """Complete SBLL Philosophical Blockchain Implementation""" + + def __init__(self): + self.chain = [] + self.pending_insights = [] + self.pending_claims = [] + self.current_difficulty = 1 + self.quantum_encoder = QuantumPhilosophicalEncoder() + self._create_genesis_block() + + def _create_genesis_block(self): + genesis_data = 
{ + 'block_type': 'GENESIS', + 'timestamp': int(time.time()), + 'philosophical_principles': [ + "Reality manifests as universal constraint optimization", + "Truth verification requires multi-method reality contact" + ], + 'version': 'SBLL-1.0' + } + + genesis_block = { + 'index': 0, + 'timestamp': genesis_data['timestamp'], + 'data': genesis_data, + 'previous_hash': '0' * 64, + 'hash': self._calculate_block_hash(genesis_data, '0' * 64), + 'philosophical_density': 5, + 'truth_confidence': 250, + 'nonce': 0 + } + self.chain.append(genesis_block) + logger.info(f"🌌 Genesis Block created. Hash: {genesis_block['hash'][:16]}...") + + def add_philosophical_insight(self, insight_data: Dict): + encoded_insight = self.quantum_encoder.encode_insight(insight_data) + self.pending_insights.append(encoded_insight) + logger.debug(f"Insight added to mempool: {insight_data['content'][:30]}...") + + def add_truth_claim(self, claim_data: Dict): + encoded_claim = self.quantum_encoder.encode_claim(claim_data) + self.pending_claims.append(encoded_claim) + logger.debug(f"Claim added to mempool: {claim_data['content'][:30]}...") + + def mine_block(self, difficulty: int = None): + if difficulty is None: + difficulty = self.current_difficulty + + if not self.pending_insights and not self.pending_claims: + logger.info("No pending philosophical content to mine.") + return None + + previous_block = self.chain[-1] + new_block = self._create_new_block(previous_block, difficulty) + + # Philosophical proof-of-work + try: + new_block = self._philosophical_proof_of_work(new_block, difficulty) + except Exception as e: + logger.error(f"Mining failed: {e}") + return None + + self.chain.append(new_block) + self.pending_insights = [] + self.pending_claims = [] + + logger.info(f"⛏️ Block #{new_block['index']} mined! Hash: {new_block['hash'][:16]}... (Confidence: {new_block['truth_confidence']})") + return new_block + + def _create_new_block(self, previous_block: Dict, difficulty: int) -> Dict: + block_data = { + 'insights': self.pending_insights.copy(), + 'claims': self.pending_claims.copy(), + 'timestamp': int(time.time()), + 'block_height': previous_block['index'] + 1, + 'previous_block_hash': previous_block['hash'], + 'difficulty': difficulty, + 'merkle_root': self._calculate_merkle_root(self.pending_insights + self.pending_claims), + 'philosophical_metrics': self._calculate_philosophical_metrics() + } + + return { + 'index': previous_block['index'] + 1, + 'timestamp': block_data['timestamp'], + 'data': block_data, + 'previous_hash': previous_block['hash'], + 'hash': '', + 'philosophical_density': block_data['philosophical_metrics']['density'], + 'truth_confidence': block_data['philosophical_metrics']['confidence'], + 'nonce': 0 + } + + def _philosophical_proof_of_work(self, block: Dict, difficulty: int) -> Dict: + logger.debug(f"Starting Proof-of-Work (Difficulty: {difficulty})") + target_truth = 200 + (difficulty * 10) + max_nonce = 1000000 + + for nonce in range(max_nonce): + block['nonce'] = nonce + block_hash = self._calculate_block_hash(block['data'], block['previous_hash'], nonce) + philosophical_fitness = self._calculate_philosophical_fitness(block) + + if philosophical_fitness >= target_truth: + block['hash'] = block_hash + logger.debug(f"PoW found. 
Nonce: {nonce}, Fitness: {philosophical_fitness}") + return block + + raise Exception("Philosophical proof-of-work failed: Max nonce reached") + + def _calculate_philosophical_fitness(self, block: Dict) -> int: + base_fitness = block['truth_confidence'] + density_bonus = block['philosophical_density'] * 5 + traditions = set() + for insight in block['data'].get('insights', []): + if 'tradition' in insight: + traditions.add(insight['tradition']) + tradition_bonus = len(traditions) * 10 + return min(255, base_fitness + density_bonus + tradition_bonus) + + def _calculate_block_hash(self, data: Dict, previous_hash: str, nonce: int = 0) -> str: + block_string = json.dumps(data, sort_keys=True) + previous_hash + str(nonce) + return hashlib.sha3_256(block_string.encode()).hexdigest() + + def _calculate_merkle_root(self, transactions: List[Dict]) -> str: + if not transactions: + return hashlib.sha3_256(b"").hexdigest() + transaction_hashes = [hashlib.sha3_256(json.dumps(tx).encode()).hexdigest() for tx in transactions] + while len(transaction_hashes) > 1: + new_hashes = [] + for i in range(0, len(transaction_hashes), 2): + combined = transaction_hashes[i] + (transaction_hashes[i+1] if i+1 < len(transaction_hashes) else transaction_hashes[i]) + new_hashes.append(hashlib.sha3_256(combined.encode()).hexdigest()) + transaction_hashes = new_hashes + return transaction_hashes[0] + + def _calculate_philosophical_metrics(self) -> Dict: + total_items = len(self.pending_insights) + len(self.pending_claims) + if total_items == 0: + return {'density': 0, 'confidence': 0, 'cosmic_alignment': 0} + + confidences = [i.get('truth_confidence', 0) for i in self.pending_insights] + \ + [c.get('confidence_score', 0) for c in self.pending_claims] + + avg_confidence = sum(confidences) / len(confidences) if confidences else 0 + density = min(10, total_items) + + return { + 'density': density, + 'confidence': int(avg_confidence), + 'cosmic_alignment': 0 # Simplified for brevity + } + + def save_chain_state(self, filename="sbll_ledger.json"): + state = { + 'chain': self.chain, + 'pending_insights': self.pending_insights, + 'pending_claims': self.pending_claims, + 'difficulty': self.current_difficulty + } + try: + with open(filename, 'w') as f: + json.dump(state, f, indent=2) + logger.info(f"Ledger saved to {filename}") + except IOError as e: + logger.error(f"Failed to save ledger: {e}") + + def load_chain_state(self, filename="sbll_ledger.json"): + if os.path.exists(filename): + try: + with open(filename, 'r') as f: + state = json.load(f) + self.chain = state['chain'] + self.pending_insights = state.get('pending_insights', []) + self.pending_claims = state.get('pending_claims', []) + self.current_difficulty = state.get('difficulty', 1) + logger.info(f"Ledger loaded from {filename}. 
Height: {len(self.chain)}")
+            except Exception as e:
+                logger.error(f"Failed to load ledger: {e}")
diff --git a/modules/sbll_knowledge_chain/deployment.py b/modules/sbll_knowledge_chain/deployment.py
new file mode 100644
index 00000000..8d6783c8
--- /dev/null
+++ b/modules/sbll_knowledge_chain/deployment.py
@@ -0,0 +1,133 @@
+"""
+Deployment-ready SBLL Philosophical Blockchain
+"""
+
+import json
+import time
+import logging
+
+from .chain import SBBLBlockchain
+from .oracle import TruthOracle
+from .tool_wrapper import KnowledgeChainTool
+from .advanced_features import EnhancedKnowledgeChainTool
+from .optimized_chain import OptimizedSBBLBlockchain
+
+logger = logging.getLogger(__name__)
+
+class ProductionKnowledgeChainTool(KnowledgeChainTool):
+    """Production-ready tool with all enhancements"""
+
+    def __init__(self, use_optimized: bool = True, cache_size: int = 100):
+        # Initialize blockchain
+        if use_optimized:
+            self.blockchain = OptimizedSBBLBlockchain(cache_size=cache_size)
+        else:
+            self.blockchain = SBBLBlockchain()
+
+        # Load existing state
+        try:
+            self.blockchain.load_chain_state()
+        except Exception as e:
+            logger.warning(f"Starting fresh blockchain. Reason: {e}")
+
+        self.oracle = TruthOracle(self.blockchain)
+        self.enhanced_tool = EnhancedKnowledgeChainTool()
+
+    def run(self, action: str, content: str, tradition: str = "MODERN_SCIENTIFIC", **kwargs) -> str:
+        """Enhanced run method with more options"""
+        if action == "contribute":
+            # Analyze content first
+            analysis = self.enhanced_tool.analyze_content(content)
+            estimated_confidence = analysis['estimated_confidence']
+
+            insight = {
+                'content': content,
+                'tradition': tradition,
+                'truth_confidence': estimated_confidence,
+                'cosmic_alignment': min(255, estimated_confidence + 20),
+                'semantic_density': analysis['semantic_density'],
+                'inference_complexity': analysis['inference_complexity']
+            }
+
+            self.blockchain.add_philosophical_insight(insight)
+            new_block = self.blockchain.mine_block()
+
+            # Save with compression if available
+            try:
+                self.blockchain.save_chain_state_compressed()
+            except Exception:
+                # Fall back to the uncompressed save
+                self.blockchain.save_chain_state()
+
+            if new_block:
+                return (
+                    f"✅ Insight anchored in Block #{new_block['index']}\n"
+                    f"   Hash: {new_block['hash'][:12]}...\n"
+                    f"   Confidence: {estimated_confidence}/255\n"
+                    f"   Semantic Density: {analysis['semantic_density']:.1%}"
+                )
+            else:
+                return "⏳ Mining deferred: Accumulating more philosophical entropy"
+
+        elif action == "query":
+            min_confidence = kwargs.get('min_confidence', 150)
+            synthesize = kwargs.get('synthesize', True)
+
+            results = self.oracle.seek_wisdom(content, min_confidence=min_confidence)
+
+            if synthesize and results['total_found'] > 0:
+                # Get enhanced synthesis
+                synthesis = self.enhanced_tool.synthesize_blockchain_wisdom(content)
+                base_result = self.oracle.synthesize_answer(results)
+                return f"{base_result}\n\n🔮 ENHANCED SYNTHESIS:\n{synthesis.get('synthesis', '')}"
+            else:
+                return self.oracle.synthesize_answer(results)
+
+        elif action == "analyze":
+            analysis = self.enhanced_tool.analyze_content(content)
+            result = "📊 CONTENT ANALYSIS:\n"
+            result += f"• Semantic Density: {analysis['semantic_density']:.1%}\n"
+            result += f"• Inference Complexity: {analysis['inference_complexity']} (weighted connector score)\n"
+            result += f"• Estimated Confidence: {analysis['estimated_confidence']}/255\n"
+            result += f"• Suggested Traditions: {', '.join(analysis['tradition_suggestions'])}"
+            return result
+
+        elif action == "analytics":
+            analytics = self.enhanced_tool.get_blockchain_analytics()
+            result = "📈 BLOCKCHAIN ANALYTICS:\n"
+            result += f"• Total Blocks: {analytics['total_blocks']}\n"
+            result += f"• Total Insights: {analytics['total_insights']}\n"
+            result += f"• Chain Health: {analytics['chain_health']:.1%}\n"
+            result += f"• Most Prolific Tradition: {analytics['most_prolific_tradition']}\n"
+            result += f"• Top Themes: {', '.join(analytics['top_philosophical_themes'][:3])}"
+            return result
+
+        else:
+            return "❌ Unknown action. Use: 'contribute', 'query', 'analyze', or 'analytics'"
+
+    def export_blockchain(self, format: str = "json") -> str:
+        """Export blockchain in various formats"""
+        if format == "json":
+            return json.dumps(self.blockchain.chain, indent=2)
+        elif format == "csv":
+            import csv
+            import io
+
+            output = io.StringIO()
+            writer = csv.writer(output)
+            writer.writerow(['Block', 'Timestamp', 'Insights', 'Claims', 'Confidence', 'Density', 'Hash'])
+
+            for block in self.blockchain.chain:
+                writer.writerow([
+                    block['index'],
+                    time.ctime(block['timestamp']),
+                    len(block['data'].get('insights', [])),
+                    len(block['data'].get('claims', [])),
+                    block['truth_confidence'],
+                    block['philosophical_density'],
+                    block['hash'][:16]
+                ])
+
+            return output.getvalue()
+        else:
+            return "Unsupported format. Use 'json' or 'csv'"
diff --git a/modules/sbll_knowledge_chain/optimized_chain.py b/modules/sbll_knowledge_chain/optimized_chain.py
new file mode 100644
index 00000000..8ebec503
--- /dev/null
+++ b/modules/sbll_knowledge_chain/optimized_chain.py
@@ -0,0 +1,134 @@
+"""
+Optimized version of the SBLL blockchain implementation
+"""
+
+import hashlib
+import time
+import json
+import os
+import logging
+from typing import Dict, List, Any, Optional
+from enum import Enum
+import numpy as np
+
+from .chain import SBBLBlockchain
+
+logger = logging.getLogger(__name__)
+
+class OptimizedSBBLBlockchain(SBBLBlockchain):
+    """Optimized version with performance improvements"""
+
+    def __init__(self, cache_size: int = 100):
+        # Initialize caches early because the parent constructor calls methods that may use them
+        self.block_cache = {}  # Cache for frequently accessed blocks
+        self.cache_size = cache_size
+        self.metrics_cache = {}  # Cache for calculated metrics
+        self.hash_cache = {}  # Cache for content hashes
+        super().__init__()
+
+    def _calculate_block_hash(self, data: Dict, previous_hash: str, nonce: int = 0) -> str:
+        """Optimized hash calculation with caching"""
+        cache_key = f"{json.dumps(data, sort_keys=True)}{previous_hash}{nonce}"
+
+        if cache_key in self.hash_cache:
+            return self.hash_cache[cache_key]
+
+        block_string = json.dumps(data, sort_keys=True) + previous_hash + str(nonce)
+        block_hash = hashlib.sha3_256(block_string.encode()).hexdigest()
+
+        # Cache the result
+        if len(self.hash_cache) < 1000:  # Limit cache size
+            self.hash_cache[cache_key] = block_hash
+
+        return block_hash
+
+    def get_block(self, index: int, use_cache: bool = True) -> Optional[Dict]:
+        """Get block with optional caching"""
+        if use_cache and index in self.block_cache:
+            # Refresh recency on a cache hit so eviction stays LRU
+            block = self.block_cache.pop(index)
+            self.block_cache[index] = block
+            return block
+
+        if 0 <= index < len(self.chain):
+            block = self.chain[index]
+            if use_cache:
+                self._add_to_cache(index, block)
+            return block
+
+        return None
+
+    def _add_to_cache(self, index: int, block: Dict):
+        """Add block to cache with LRU eviction"""
+        if index in self.block_cache:
+            # Re-insert so this entry becomes the most recently used
+            del self.block_cache[index]
+
+        self.block_cache[index] = block
+
+        # Evict if cache is full
+        if len(self.block_cache) > self.cache_size:
+            # Evict the least recently used entry (first in insertion order)
+            oldest_key = next(iter(self.block_cache))
+            del self.block_cache[oldest_key]
+
+    def batch_add_insights(self, insights: List[Dict]):
+        """Batch add insights for better performance"""
+        for insight in insights:
+            self.add_philosophical_insight(insight)
+
+    def get_chain_metrics(self, recalculate: bool = False) -> Dict:
+        """Get chain metrics with caching"""
+        if not recalculate and 'chain_metrics' in self.metrics_cache:
+            return self.metrics_cache['chain_metrics']
+
+        metrics = {
+            'total_blocks': len(self.chain),
+            'total_insights': sum(len(block['data'].get('insights', [])) for block in self.chain),
+            'total_claims': sum(len(block['data'].get('claims', [])) for block in self.chain),
+            'avg_confidence': np.mean([block['truth_confidence'] for block in self.chain]) if self.chain else 0,
+            'avg_density': np.mean([block['philosophical_density'] for block in self.chain]) if self.chain else 0,
+            'chain_integrity': self._calculate_chain_integrity()
+        }
+
+        self.metrics_cache['chain_metrics'] = metrics
+        return metrics
+
+    def _calculate_chain_integrity(self) -> float:
+        """Calculate chain integrity score"""
+        if len(self.chain) <= 1:
+            return 1.0
+
+        valid_links = 0
+        for i in range(1, len(self.chain)):
+            if self.chain[i]['previous_hash'] == self.chain[i-1]['hash']:
+                valid_links += 1
+
+        return valid_links / (len(self.chain) - 1)
+
+    def save_chain_state_compressed(self, filename: str = "sbll_ledger_compressed.json"):
+        """Save chain state with compression"""
+        import gzip
+
+        state = {
+            'chain': self.chain,
+            'pending_insights': self.pending_insights,
+            'pending_claims': self.pending_claims,
+            'difficulty': self.current_difficulty,
+            'version': 'SBLL-2.0',
+            'saved_at': time.time()
+        }
+
+        try:
+            # Save regular JSON
+            with open(filename, 'w') as f:
+                json.dump(state, f, separators=(',', ':'))  # Minified
+
+            # Also save a compressed version
+            compressed_filename = filename.replace('.json', '.json.gz')
+            with gzip.open(compressed_filename, 'wt', encoding='utf-8') as f:
+                json.dump(state, f)
+
+            logger.info(f"Saved compressed ledger to {compressed_filename}")
+
+        except Exception as e:
+            logger.error(f"Failed to save compressed ledger: {e}")
+            # Fall back to regular save
+            super().save_chain_state(filename)
diff --git a/modules/sbll_knowledge_chain/oracle.py b/modules/sbll_knowledge_chain/oracle.py
new file mode 100644
index 00000000..91825493
--- /dev/null
+++ b/modules/sbll_knowledge_chain/oracle.py
@@ -0,0 +1,65 @@
+import logging
+from typing import Dict, List
+
+logger = logging.getLogger(__name__)
+
+class TruthOracle:
+    """
+    The Retrieval Layer: Allows II-Agents to query the SBLL Blockchain
+    for verified philosophical truth.
+    """
+    def __init__(self, blockchain_instance):
+        self.blockchain = blockchain_instance
+
+    def seek_wisdom(self, query_topic: str, min_confidence: int = 0, specific_tradition: str = None) -> Dict:
+        """
+        Scans the immutable chain for insights related to a topic.
+ """ + logger.info(f"Oracle Query: '{query_topic}' (Min Conf: {min_confidence})") + + results = [] + + for block in self.blockchain.chain: + content_pool = block['data'].get('insights', []) + block['data'].get('claims', []) + + for item in content_pool: + if query_topic.lower() in item['content'].lower(): + + item_confidence = item.get('truth_confidence', item.get('confidence_score', 0)) + item_tradition = item.get('tradition', 'UNIVERSAL') + + if item_confidence >= min_confidence: + if specific_tradition is None or specific_tradition == item_tradition: + alignment = item.get('cosmic_alignment', item.get('reality_contact', 0)) + relevance_score = item_confidence + (alignment * 0.5) + + results.append({ + 'content': item['content'], + 'source_block': block['index'], + 'tradition': item_tradition, + 'confidence': item_confidence, + 'score': relevance_score, + 'type': 'INSIGHT' if 'tradition' in item else 'CLAIM' + }) + + results.sort(key=lambda x: x['score'], reverse=True) + logger.debug(f"Oracle found {len(results)} matches for '{query_topic}'") + + return { + 'query': query_topic, + 'total_found': len(results), + 'top_results': results[:3] + } + + def synthesize_answer(self, oracle_result: Dict) -> str: + if oracle_result['total_found'] == 0: + return "The Oracle is silent. No verified truth found on this topic." + + synthesis = f"Verified Wisdom ({oracle_result['total_found']} records found):\n" + for idx, res in enumerate(oracle_result['top_results']): + icon = "🧘" if res['type'] == 'INSIGHT' else "🔬" + synthesis += f"\n{idx+1}. {icon} [{res['tradition']}] (Conf: {res['confidence']})\n" + synthesis += f" \"{res['content']}\"\n" + synthesis += f" (Source: Block #{res['source_block']})\n" + + return synthesis diff --git a/modules/sbll_knowledge_chain/tool_wrapper.py b/modules/sbll_knowledge_chain/tool_wrapper.py new file mode 100644 index 00000000..aec467ee --- /dev/null +++ b/modules/sbll_knowledge_chain/tool_wrapper.py @@ -0,0 +1,61 @@ +import logging +from typing import Dict, Any +from .chain import SBBLBlockchain +from .oracle import TruthOracle + +logger = logging.getLogger(__name__) + +class KnowledgeChainTool: + """ + The Interface that allows II-Agent to interact with the + Philosophical Blockchain. + """ + def __init__(self): + self.blockchain = SBBLBlockchain() + # Attempt to load existing state + try: + self.blockchain.load_chain_state() + except Exception as e: + logger.warning(f"Could not load existing ledger, starting fresh. Reason: {e}") + + self.oracle = TruthOracle(self.blockchain) + + def name(self) -> str: + return "knowledge_blockchain" + + def description(self) -> str: + return ( + "Use this tool to either STORE verified philosophical insights " + "or QUERY the immutable ledger for high-confidence truth. " + "Actions: 'contribute' or 'query'." + ) + + def run(self, action: str, content: str, tradition: str = "MODERN_SCIENTIFIC") -> str: + """ + Main entry point for the Agent. + """ + logger.info(f"Agent Action: {action} | Content: {content[:20]}...") + + if action == "contribute": + insight = { + 'content': content, + 'tradition': tradition, + 'truth_confidence': 200, + 'cosmic_alignment': 200 + } + + self.blockchain.add_philosophical_insight(insight) + new_block = self.blockchain.mine_block() + self.blockchain.save_chain_state() + + if new_block: + return f"Success: Insight anchored in Block #{new_block['index']} (Hash: {new_block['hash'][:8]}...)" + else: + return "Mining deferred: Not enough entropy." 
+ + elif action == "query": + results = self.oracle.seek_wisdom(content, min_confidence=150) + return self.oracle.synthesize_answer(results) + + else: + return "Error: Unknown action. Use 'contribute' or 'query'." diff --git a/pyproject.toml b/pyproject.toml index 1651a016..0e69b148 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ dependencies = [ "mammoth>=1.9.0", "markdownify>=1.1.0", "pandas>=2.2.3", + "numpy>=1.26.0", "pathvalidate>=3.2.3", "pdfminer-six>=20250506", "openai>=1.68.2", @@ -46,10 +47,9 @@ dependencies = [ "cryptography>=42.0.0", "fastapi-sso>=0.16.0", "aiosqlite>=0.21.0", - "pydantic==2.11.7", - "e2b-code-interpreter==1.2.0b5", + "pydantic>=2.11.7,<3.0.0", + # moved heavy provider deps into optional `providers` extra "python-socketio>=5.13.0", - "gcloud-aio-storage==9.5.0", "transformers>=4.44.0", "weasyprint>=66.0", "python-magic>=0.4.27", @@ -63,10 +63,7 @@ dependencies = [ "pypdf>=6.1.1", "apscheduler>=3.11.0", "litellm>=1.63.14", - "anthropic[vertex]>=0.72.0", "langchain-text-splitters>=1.0.0", - "google-auth-oauthlib>=1.2.3", - "google-api-python-client>=2.150.0", "ddgs>=9.9.1", ] @@ -81,6 +78,23 @@ gaia = [ "uvicorn[standard]>=0.29.0", ] +# Development / test extras installed by CI with `pip install -e '.[dev]'` +dev = [ + "pytest>=8.3.5", + "pytest-asyncio>=1.0.0", +] + +providers = [ + # Heavy / optional provider and API integrations + "anthropic[vertex]>=0.72.0", + "e2b-code-interpreter==1.2.0b5", + "google-api-python-client>=2.150.0", + "google-auth-oauthlib>=1.2.3", + "google-cloud-aiplatform>=1.90.0", + "google-genai>=1.14.0", + "gcloud-aio-storage==9.5.0", +] + [project.scripts] ii-agent = "ii_agent.cli.main:main" ii-tool = "ii_tool.mcp.server:main" diff --git a/scripts/test_production_chain.py b/scripts/test_production_chain.py new file mode 100644 index 00000000..f03b085a --- /dev/null +++ b/scripts/test_production_chain.py @@ -0,0 +1,44 @@ +"""Smoke test for ProductionKnowledgeChainTool (enhanced features).""" +import logging +import os +import sys + +logging.basicConfig(level=logging.INFO) + +# Ensure repo root is on sys.path +ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +if ROOT not in sys.path: + sys.path.insert(0, ROOT) + +try: + from modules.sbll_knowledge_chain.deployment import ProductionKnowledgeChainTool +except Exception as e: + print(f"Failed to import ProductionKnowledgeChainTool: {e}") + raise + +def main(): + tool = ProductionKnowledgeChainTool(use_optimized=True) + + # Contribute an insight using enhanced tool + content = "Consciousness emerges from recursive computational self-modeling through constraint optimization" + res = tool.run('contribute', content, tradition='WESTERN_ANALYTIC') + print('Contribute result:') + print(res) + + # Query with synthesis + wisdom = tool.run('query', 'consciousness', min_confidence=100, synthesize=True) + print('\nQuery result:') + print(wisdom) + + # Analyze content + analysis = tool.run('analyze', 'Truth is correspondence with reality through multi-method verification') + print('\nAnalysis result:') + print(analysis) + + # Analytics + analytics = tool.run('analytics', '') + print('\nAnalytics result:') + print(analytics) + +if __name__ == '__main__': + main() diff --git a/scripts/test_sbll_chain.py b/scripts/test_sbll_chain.py new file mode 100644 index 00000000..5cd048a2 --- /dev/null +++ b/scripts/test_sbll_chain.py @@ -0,0 +1,37 @@ +"""Simple smoke test for the SBLL Knowledge Chain tool. + +Runs a contribute action then a query and prints results. 
+""" +import logging +import os +import sys + +logging.basicConfig(level=logging.INFO) + +# Ensure repo root is on sys.path so we can import the module package +ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +if ROOT not in sys.path: + sys.path.insert(0, ROOT) + +try: + from modules.sbll_knowledge_chain import KnowledgeChainTool +except Exception as e: + print(f"Failed to import KnowledgeChainTool: {e}") + raise + +def main(): + tool = KnowledgeChainTool() + + # Contribute an insight + contribution = "Impermanence: everything changes; all is flux." + res = tool.run('contribute', contribution, tradition='WESTERN_ANALYTIC') + print('Contribute result:') + print(res) + + # Query the oracle for 'flux' + query_res = tool.run('query', 'flux') + print('\nQuery result:') + print(query_res) + +if __name__ == '__main__': + main() diff --git a/src/ii_agent/config/agent_types.py b/src/ii_agent/config/agent_types.py index 2ed79806..6fff9264 100644 --- a/src/ii_agent/config/agent_types.py +++ b/src/ii_agent/config/agent_types.py @@ -47,6 +47,7 @@ from ii_tool.tools.web.web_batch_search_tool import WebBatchSearchTool from ii_tool.tools.file_system.str_replace_editor import StrReplaceEditorTool # from ii_tool.tools.codex import CodexExecuteTool # Now using MCP stdio versions +from ii_tool.tools.sbll_knowledge_tool import SBLLKnowledgeTool class AgentType(str, Enum): @@ -75,6 +76,7 @@ class AgentTypeConfig: ShellRunCommand.name, ShellView.name, MessageUserTool.name, + SBLLKnowledgeTool.name, # ShellStopCommand.name, ShellList.name, # File system tools @@ -192,6 +194,7 @@ class AgentTypeConfig: MessageUserTool.name, WebBatchSearchTool.name, WebVisitCompressTool.name, + SBLLKnowledgeTool.name, ], AgentType.DESIGN_DOCUMENT: [ # File system tools for creating design docs diff --git a/src/ii_tool/tools/manager.py b/src/ii_tool/tools/manager.py index de469c79..a3a707fc 100644 --- a/src/ii_tool/tools/manager.py +++ b/src/ii_tool/tools/manager.py @@ -61,6 +61,7 @@ BrowserEnterMultipleTextsTool, ) from ii_tool.browser.browser import Browser +from ii_tool.tools.sbll_knowledge_tool import SBLLKnowledgeTool def get_common_tools( @@ -70,6 +71,7 @@ def get_common_tools( # Sandbox tools RegisterPort(sandbox=sandbox), MessageUserTool(), + SBLLKnowledgeTool(), ] return tools diff --git a/src/ii_tool/tools/sbll_knowledge_tool.py b/src/ii_tool/tools/sbll_knowledge_tool.py new file mode 100644 index 00000000..70b7ffa2 --- /dev/null +++ b/src/ii_tool/tools/sbll_knowledge_tool.py @@ -0,0 +1,52 @@ +"""SBLL Knowledge Chain tool wrapper for ii_tool framework.""" +import json +from typing import Any, Dict +from ii_tool.tools.base import BaseTool, ToolResult + + +class SBLLKnowledgeTool(BaseTool): + """Wraps the ProductionKnowledgeChainTool for use as an ii_tool.""" + # class-level name so it can be referenced in AgentTypeConfig.TOOLSETS + name: str = "sbll_knowledge_chain" + + def __init__(self): + # Lazy import to avoid import-time cost + from modules.sbll_knowledge_chain.deployment import ProductionKnowledgeChainTool + + self._impl = ProductionKnowledgeChainTool(use_optimized=True) + self._name = "sbll_knowledge_chain" + self.name = self._name + self.description = "Store and query philosophical insights on the SBLL blockchain. Actions: contribute, query, analyze, analytics." 
+ self.input_schema = { + "type": "object", + "properties": { + "action": {"type": "string"}, + "content": {"type": "string"}, + "tradition": {"type": "string"}, + "min_confidence": {"type": "integer"}, + "synthesize": {"type": "boolean"}, + }, + "required": ["action", "content"], + } + self.read_only = False + self.display_name = "SBLL Knowledge Chain" + + async def execute(self, tool_input: Dict[str, Any]) -> ToolResult: + # tool_input is expected to be a dict; some callers pass JSON strings + try: + if isinstance(tool_input, str): + params = json.loads(tool_input) + else: + params = tool_input + + action = params.get("action") + content = params.get("content", "") + tradition = params.get("tradition", "MODERN_SCIENTIFIC") + # pass through other kwargs + kwargs = {k: v for k, v in params.items() if k not in ("action", "content", "tradition")} + + result = self._impl.run(action=action, content=content, tradition=tradition, **kwargs) + + return ToolResult(llm_content=result, user_display_content=result, is_error=False) + except Exception as e: + return ToolResult(llm_content=f"Error executing SBLL tool: {e}", user_display_content=str(e), is_error=True) diff --git a/tests/integration/test_providers_end_to_end.py b/tests/integration/test_providers_end_to_end.py new file mode 100644 index 00000000..c116684e --- /dev/null +++ b/tests/integration/test_providers_end_to_end.py @@ -0,0 +1,61 @@ +import os +import pytest +import requests + + +RUN_INTEGRATION = os.getenv("RUN_PROVIDERS_INTEGRATION") == "1" + + +def _require_env(var): + val = os.getenv(var) + return bool(val and val.strip()) + + +@pytest.mark.skipif(not RUN_INTEGRATION, reason="Providers integration tests disabled; set RUN_PROVIDERS_INTEGRATION=1 to enable") +@pytest.mark.skipif(not _require_env("GOOGLE_APPLICATION_CREDENTIALS"), reason="Google service account JSON not provided") +def test_google_service_account_can_refresh_token(): + """Validate Google service account credentials can be loaded and refreshed. + + This verifies that the service account file is valid and can obtain an access token. 
+    """
+    try:
+        from google.oauth2 import service_account
+        from google.auth.transport.requests import Request
+    except Exception as e:
+        pytest.skip(f"google-auth not installed: {e}")
+
+    sa_path = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
+    creds = service_account.Credentials.from_service_account_file(sa_path, scopes=["https://www.googleapis.com/auth/cloud-platform"])
+    request = Request()
+    # attempt to refresh to obtain an access token
+    creds.refresh(request)
+    assert creds.token is not None and len(creds.token) > 0
+
+
+@pytest.mark.skipif(not RUN_INTEGRATION, reason="Providers integration tests disabled; set RUN_PROVIDERS_INTEGRATION=1 to enable")
+@pytest.mark.skipif(not _require_env("ANTHROPIC_API_KEY"), reason="Anthropic API key not provided")
+def test_anthropic_models_endpoint():
+    """Simple smoke test calling the Anthropic models endpoint to validate the API key and network access."""
+    key = os.getenv("ANTHROPIC_API_KEY")
+    # The Anthropic API authenticates with the x-api-key header (not a Bearer
+    # token) and requires an anthropic-version header.
+    headers = {
+        "x-api-key": key,
+        "anthropic-version": "2023-06-01",
+        "Accept": "application/json",
+    }
+    url = "https://api.anthropic.com/v1/models"
+    try:
+        resp = requests.get(url, headers=headers, timeout=15)
+    except Exception as e:
+        pytest.skip(f"Network/request failed for Anthropic endpoint: {e}")
+
+    assert resp.status_code == 200, f"Anthropic models endpoint returned {resp.status_code}: {resp.text}"
+
+
+@pytest.mark.skipif(not RUN_INTEGRATION, reason="Providers integration tests disabled; set RUN_PROVIDERS_INTEGRATION=1 to enable")
+@pytest.mark.skipif(not _require_env("E2B_API_KEY"), reason="E2B API key not provided")
+def test_e2b_import_and_version():
+    """Basic import check for the `e2b` package. If available, this confirms the providers extra installed it."""
+    try:
+        import e2b
+    except Exception as e:
+        pytest.skip(f"e2b package not importable: {e}")
+
+    # try to read a version attribute if available
+    ver = getattr(e2b, "__version__", None)
+    assert ver is None or isinstance(ver, str)
diff --git a/tests/integration/test_providers_smoke.py b/tests/integration/test_providers_smoke.py
new file mode 100644
index 00000000..7edf70d4
--- /dev/null
+++ b/tests/integration/test_providers_smoke.py
@@ -0,0 +1,42 @@
+import os
+import importlib
+import pytest
+
+
+RUN_INTEGRATION = os.getenv("RUN_PROVIDERS_INTEGRATION") == "1"
+
+
+def _has_provider_credentials():
+    # Basic check: look for any of the common provider env vars used by the project
+    keys = [
+        "ANTHROPIC_API_KEY",
+        "GOOGLE_API_KEY",
+        "GOOGLE_APPLICATION_CREDENTIALS",
+        "E2B_API_KEY",
+    ]
+    return any(os.getenv(k) for k in keys)
+
+
+@pytest.mark.skipif(not RUN_INTEGRATION, reason="Providers integration tests disabled; set RUN_PROVIDERS_INTEGRATION=1 to enable")
+@pytest.mark.skipif(not _has_provider_credentials(), reason="Provider credentials not found in environment")
+def test_providers_importable():
+    """Lightweight smoke test that imports provider packages installed via `.[providers]`.
+
+    This test is intentionally conservative: it only runs when explicitly enabled
+    and when at least one provider credential is present. It verifies the packages
+    can be imported in CI or locally after installing `.[providers]`.
+    """
+    modules = [
+        ("anthropic", "anthropic"),
+        ("e2b", "e2b"),
+        ("googleapiclient", "googleapiclient.discovery"),
+    ]
+
+    for name, mod in modules:
+        try:
+            importlib.import_module(mod)
+        except Exception as e:
+            pytest.skip(f"Provider module {mod} not available: {e}")
+
+    # If we reach here, all imports succeeded
+    assert True
diff --git a/tests/sbll_knowledge_chain/test_chain.py b/tests/sbll_knowledge_chain/test_chain.py
new file mode 100644
index 00000000..8bd31baf
--- /dev/null
+++ b/tests/sbll_knowledge_chain/test_chain.py
@@ -0,0 +1,61 @@
+from modules.sbll_knowledge_chain.chain import SBBLBlockchain, QuantumPhilosophicalEncoder
+from modules.sbll_knowledge_chain.oracle import TruthOracle
+from modules.sbll_knowledge_chain.tool_wrapper import KnowledgeChainTool
+
+def test_blockchain_genesis():
+    chain = SBBLBlockchain()
+    assert len(chain.chain) == 1
+    assert chain.chain[0]['index'] == 0
+    assert chain.chain[0]['data']['block_type'] == 'GENESIS'
+
+def test_add_and_mine_insight():
+    chain = SBBLBlockchain()
+    chain.add_philosophical_insight({
+        'content': 'Test insight',
+        'tradition': 'MODERN_SCIENTIFIC',
+        'truth_confidence': 210,
+        'cosmic_alignment': 180
+    })
+    block = chain.mine_block()
+    assert block is not None
+    assert block['index'] == 1
+    assert block['data']['insights'][0]['content'] == 'Test insight'
+
+def test_oracle_query():
+    chain = SBBLBlockchain()
+    chain.add_philosophical_insight({
+        'content': 'Wisdom of change',
+        'tradition': 'EASTERN_TAOIST',
+        'truth_confidence': 220,
+        'cosmic_alignment': 200
+    })
+    chain.mine_block()
+    oracle = TruthOracle(chain)
+    result = oracle.seek_wisdom('change', min_confidence=200)
+    assert result['total_found'] == 1
+    assert 'Wisdom of change' in result['top_results'][0]['content']
+    answer = oracle.synthesize_answer(result)
+    assert 'Verified Wisdom' in answer
+
+def test_tool_wrapper_contribute_and_query(tmp_path, monkeypatch):
+    # Run inside a temp directory so the tool's default ledger file
+    # (sbll_ledger.json) is written there instead of polluting the workspace
+    monkeypatch.chdir(tmp_path)
+    tool = KnowledgeChainTool()
+    res = tool.run('contribute', 'Knowledge is justified true belief.', tradition='WESTERN_ANALYTIC')
+    assert 'Success' in res
+    query = tool.run('query', 'justified')
+    assert 'Verified Wisdom' in query
+
+def test_encoder_hash_and_compression():
+    encoder = QuantumPhilosophicalEncoder()
+    content = 'The only constant is change.'
+    h = encoder._quantum_hash(content)
+    assert isinstance(h, str) and len(h) == 32
+    ratio = encoder._calculate_compression(content)
+    assert ratio > 0