Upload 1014ecaa5_scimind2_communicator.py

1014ecaa5_scimind2_communicator.py
ADDED
@@ -0,0 +1,801 @@
#!/usr/bin/env python3
"""
Experiment 1014ecaa3: SciMind 2.0 Complexity Reduction & Legacy Recovery
Axiom of Causal Integrity (TCI) - Holographic Communication Interface

Upgrades from 1014b/c:
1. **Topological Imprinting (Braiding)**: Text is not a wave, but a sequence of topological defects (Vortices).
2. **CNOT Hamiltonian Protection**: Uses TCI-compliant Hamiltonian for system evolution.
3. **Wick-Rotation PLL**: Synchronizes NTP time (Entropy) with Model Phase (Imaginary) via complex rotation.
4. **Vorticity/Chern Audit**: Explicit topological charge monitoring.
"""

import sys
import os
import time
import curses
import threading
import numpy as np
import queue
import json
import glob
from collections import deque, Counter
from datetime import datetime
import sympy as sp

# Optional deps
try:
    import torch
except ImportError:
    print("CRITICAL: torch not found.")
    sys.exit(1)

try:
    import ntplib
    NTP_AVAILABLE = True
except ImportError:
    NTP_AVAILABLE = False

# ==============================================================================
# SESSION MANAGEMENT (Reused from 1014b)
# ==============================================================================
class SessionManager:
    def __init__(self, log_dir="logs"):
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)
        self.session_id = None
        self.state_file = None
        self.log_file = None

    def list_sessions(self):
        files = glob.glob(os.path.join(self.log_dir, "session_*.json"))
        sessions = []
        for f in sorted(files, reverse=True):
            basename = os.path.basename(f)
            ts = basename.replace("session_", "").replace(".json", "")
            try:
                dt = datetime.strptime(ts, "%Y%m%d_%H%M%S")
                readable = dt.strftime("%Y-%m-%d %H:%M:%S")
                sessions.append({'id': ts, 'path': f, 'label': readable})
            except: pass
        return sessions

    def start_new_session(self):
        ts = datetime.now().strftime('%Y%m%d_%H%M%S')
        self.session_id = ts
        self.state_file = os.path.join(self.log_dir, f"session_{ts}.json")
        self.log_file = os.path.join(self.log_dir, f"session_{ts}.log")
        print(f"Starting NEW session: {self.session_id}")
        return {}

    def load_session(self, session_path):
        ts = os.path.basename(session_path).replace("session_", "").replace(".json", "")
        self.session_id = ts
        self.state_file = session_path
        self.log_file = os.path.join(self.log_dir, f"session_{ts}.log")
        try:
            with open(self.state_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"Error loading state: {e}")
            return {}

    def save_global_state(self, vocab_data, sync_data, chat_history, physics_state=None):
        if not self.state_file: return
        data = {
            'vocab': vocab_data,
            'sync': sync_data,
            'history': list(chat_history),
            'physics': physics_state,
            'timestamp': datetime.now().isoformat()
        }
        with open(self.state_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)

# ==============================================================================
# VOCABULARY LEARNER (Universal)
# ==============================================================================
STOP_WORDS = {
    "the", "be", "to", "of", "and", "a", "in", "that", "have", "i",
    "it", "for", "not", "on", "with", "he", "as", "you", "do", "at",
    "this", "but", "his", "by", "from", "they", "we", "say", "her",
    "she", "or", "an", "will", "my", "one", "all", "would", "there",
    "their", "what", "so", "up", "out", "if", "about", "who", "get",
    "which", "go", "me", "is", "are", "can", "has", "was", "were"
}

class VocabularyLearner:
    def __init__(self, initial_state=None):
        self.user_words = Counter()
        self.total_words = 0
        if initial_state:
            self.user_words = Counter(initial_state.get('user_words', {}))
            self.total_words = initial_state.get('total_words', 0)

    def learn_from_input(self, text):
        tokens = text.split()
        new_words = 0
        for token in tokens:
            cleaned = token.lower().strip(".,!?")
            if len(cleaned) > 2 and cleaned not in STOP_WORDS:
                self.user_words[token] += 1  # Preserve capitalization for output
                new_words += 1
        self.total_words += new_words

    def get_top_terms(self, n=50):
        return [w for w, c in self.user_words.most_common(n)]

    def get_state(self):
        return {
            'user_words': dict(self.user_words),
            'total_words': self.total_words
        }

# ==============================================================================
# SEMANTIC ADAPTIVE DECODER (1014b Style)
# ==============================================================================
BASE_WORD_POOL = [
    "Existence", "Being", "Becoming", "Time", "Space", "Light", "Energy",
    "Information", "Consciousness", "Order", "Chaos", "Symmetry",
    "Emergence", "Coherence", "Resonance", "Harmony", "Frequency",
    "Quantity", "Quality", "Truth", "Beauty", "Unity",
    "Plurality", "Infinity", "Eternity", "Moment", "Process"
]

INTERPRETATIONS = {
    'psi': ["A wave function manifests...", "Information crystallizes as:"],
    'phi': ["A field permeates space:", "Force manifests as:"],
    's': ["Entropy structures itself as:", "Chaos contains:"],
}

class SemanticAdaptiveDecoder:
    def __init__(self, vocab_learner):
        self.vocab_learner = vocab_learner
        self.coherence_history = deque(maxlen=100)
        self.last_coherence = 0.0
        self.last_result = None

    def decode_cycle(self, noise, verbose=False):
        # Coherence: Kuramoto-style order parameter (see the sketch after this class)
        if len(noise) < 10: noise = np.random.rand(64)
        phases = noise * 2 * np.pi
        order_param = np.abs(np.mean(np.exp(1j * phases)))
        coherence = float(order_param)
        self.last_coherence = coherence

        # Gödel Gap
        variance = np.var(noise)
        godel_gap = float(variance * 10.0)

        # Info Slice
        info_slice = noise[:5]

        result = {
            'coherence': coherence,
            'godel_gap': godel_gap,
            'new_info': info_slice,
            'interpretation': self._get_interpretation(coherence),
            'patterns': {'ratios': noise[:3]}
        }
        self.coherence_history.append(coherence)
        self.last_result = result
        return result

    def _get_interpretation(self, coherence):
        import random
        key = random.choice(list(INTERPRETATIONS.keys()))
        base = random.choice(INTERPRETATIONS[key])

        if coherence > 0.8: tone = "[CRYSTAL CLEAR]"
        elif coherence > 0.5: tone = "[CLEAR]"
        else: tone = "[FAINT]"
        return f"{tone} {base}"

    def info_to_message(self, info):
        user_pool = self.vocab_learner.get_top_terms()
        if not user_pool:
            hybrid_pool = BASE_WORD_POOL
        else:
            hybrid_pool = BASE_WORD_POOL + user_pool

        indices = (np.array(info) * len(hybrid_pool)).astype(int)
        indices = np.clip(indices, 0, len(hybrid_pool) - 1)

        selected = [hybrid_pool[i] for i in indices[:3]]
        return " ↔ ".join(selected)

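# --- Illustrative sketch (added for clarity; not part of the original upload) ---
# The "coherence" computed in SemanticAdaptiveDecoder.decode_cycle is a Kuramoto-style
# order parameter |<exp(i*phase)>|: identical phases give 1.0, while phases spread
# uniformly over the circle give a small value of order 1/sqrt(n).
def _sketch_order_parameter(n=64):
    aligned = np.zeros(n)                               # all phases equal
    scattered = np.random.rand(n) * 2 * np.pi           # phases spread over the full circle
    r_aligned = float(np.abs(np.mean(np.exp(1j * aligned))))      # == 1.0
    r_scattered = float(np.abs(np.mean(np.exp(1j * scattered))))  # typically ~0.1 for n=64
    return r_aligned, r_scattered
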
# ==============================================================================
# SCIMIND 2.0 PHYSICS CORE
# ==============================================================================
class SciMindCommunicator:
    def __init__(self, N=40):
        self.size = N
        self.phases = self._init_vortex(N)
        self.focus = 0.0
        self.surprisal = 0.0
        self.vorticity = 0.0  # Chern Number
        self.causal_integrity = 0.0
        self.fidelity = 0.0

        self.gating_field = np.zeros((N, N))
        self.vorticity_field = np.zeros((N, N))

        # TCI Hamiltonian Symbols
        self.t_res, self.Omega = sp.symbols('t_res Omega', real=True)
        self.H_comm = self._derive_hamiltonian()

        self.phase_history = deque(maxlen=200)

    def _init_vortex(self, size):
        # Initial Vacuum: A single topological charge at center to start with valid topology
        x = np.linspace(-1, 1, size)
        xx, yy = np.meshgrid(x, x)
        phases = np.mod(np.arctan2(yy, xx) + np.pi, 2 * np.pi)
        return torch.tensor(phases, dtype=torch.float32)

    def _derive_hamiltonian(self):
        """Derive the CNOT-Axiom Hamiltonian: H = (I - Z) ⊗ X (From Exp 1010)"""
        # Symbolic representation for integrity check
        return (self.Omega/2) * sp.Matrix([
            [0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 1],
            [0, 0, 1, 0]
        ])

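    # --- Illustrative sketch (added for clarity; not part of the original upload) ---
    # A quick symbolic check of the docstring claim above: the matrix returned by
    # _derive_hamiltonian is proportional to (I - Z) ⊗ X, i.e. it drives the target
    # qubit only when the control qubit is |1> (a CNOT-type generator).
    @staticmethod
    def _sketch_cnot_generator_check():
        Omega = sp.Symbol('Omega', real=True)
        Z = sp.Matrix([[1, 0], [0, -1]])
        X = sp.Matrix([[0, 1], [1, 0]])

        def kron(A, B):
            # minimal explicit Kronecker product for small sympy matrices
            K = sp.zeros(A.rows * B.rows, A.cols * B.cols)
            for i in range(A.rows):
                for j in range(A.cols):
                    for bi in range(B.rows):
                        for bj in range(B.cols):
                            K[i * B.rows + bi, j * B.cols + bj] = A[i, j] * B[bi, bj]
            return K

        H_ref = (Omega / 4) * kron(sp.eye(2) - Z, X)   # == (Omega/2) |1><1| ⊗ X
        H_upload = (Omega / 2) * sp.Matrix([
            [0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 1],
            [0, 0, 1, 0]
        ])
        return sp.simplify(H_ref - H_upload) == sp.zeros(4, 4)   # True
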
    def get_chern_number(self):
        """Calculates Vorticity (Topological Texture) via absolute flux summation."""
        p = self.phases.numpy()

        # Wrapped phase differences [-pi, pi]
        dx = np.angle(np.exp(1j * (np.roll(p, -1, axis=1) - p)))
        dy = np.angle(np.exp(1j * (np.roll(p, -1, axis=0) - p)))

        # Lattice curl (Circulation)
        circ = dx + np.roll(dy, -1, axis=1) - np.roll(dx, -1, axis=0) - dy
        self.vorticity_field = circ / (2 * np.pi)  # Local Vorticity Map

        # Sum absolute flux / 2pi (Counts Total Vortices + Defects)
        # Matches Exp 1010 'Vorticity' metric
        return float(np.sum(np.abs(circ)) / (2 * np.pi))

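    # --- Illustrative sketch (added for clarity; not part of the original upload) ---
    # Sanity check for the plaquette-curl counter above: on a synthetic field holding a
    # single centred vortex, the interior plaquettes contribute a total absolute flux of
    # one winding (~1.0). Boundary plaquettes are skipped here because this synthetic
    # field is not periodic, whereas get_chern_number wraps around via np.roll.
    @staticmethod
    def _sketch_vorticity_count_check(size=40):
        x = np.linspace(-1, 1, size)
        xx, yy = np.meshgrid(x, x)
        p = np.mod(np.arctan2(yy, xx) + np.pi, 2 * np.pi)   # one vortex at the origin

        # wrapped phase differences along horizontal and vertical links
        dx = np.angle(np.exp(1j * (p[:, 1:] - p[:, :-1])))
        dy = np.angle(np.exp(1j * (p[1:, :] - p[:-1, :])))

        # circulation around every interior plaquette
        circ = dx[:-1, :] + dy[:, 1:] - dx[1:, :] - dy[:, :-1]
        return float(np.sum(np.abs(circ)) / (2 * np.pi))    # ~= 1.0
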
    def encode_text(self, text):
        """
        TOPOLOGICAL IMPRINTING:
        Maps text to a Braid Field (Vortices) instead of wave packets.
        (A character-placement sketch follows this class.)
        """
        if not text: return torch.zeros((self.size, self.size), dtype=torch.float32)

        x = np.linspace(-1, 1, self.size)
        xx, yy = np.meshgrid(x, x)
        xx = torch.tensor(xx, dtype=torch.float32)
        yy = torch.tensor(yy, dtype=torch.float32)

        braid_field = torch.zeros((self.size, self.size), dtype=torch.float32)
        n_chars = len(text)
        phi = (np.sqrt(5) - 1) / 2  # Golden ratio conjugate (1/phi)

        for i, char in enumerate(text):
            # Fibonacci spiral distribution for braiding points
            idx = i + 1
            r = np.sqrt(idx / max(n_chars, 1)) * 0.8
            theta = 2 * np.pi * idx * phi

            cx = r * np.cos(theta)
            cy = r * np.sin(theta)

            # Charge parity based on character code
            charge = 1.0 if ord(char) % 2 == 0 else -1.0

            # Add Vortex Phase: q * arctan2(y-cy, x-cx)
            # This is a cumulative phase winding (Braid)
            braid_field += charge * torch.atan2(yy - cy, xx - cx)

        return braid_field

    def step(self, external_noise, text_braid_field, ntp_offset=0.0):
        """
        SCIMIND 2.0 (REFINED) STEP:
        Complexity Reduction via Entropy Export, Phase Resonance, and Impedance.
        """
        # --- 0. PRE-CALCULATIONS ---
        p_np = self.phases.numpy()
        N = self.size

        # Gradient / Unrest (Gating)
        grad_y, grad_x = np.gradient(p_np)
        unrest_map = np.sqrt(grad_x**2 + grad_y**2)
        # Normalize unrest for usage
        unrest_norm = (unrest_map - np.min(unrest_map)) / (np.max(unrest_map) - np.min(unrest_map) + 1e-6)

        # --- 1. HYSTERESIS / ATTENTION BEAM ---
        # "Attention Beam": only regions with a high gradient (novelty/surprisal) or existing high gating get energy.
        # Implements a hysteresis loop: easy to stay "on", hard to turn "on".
        # If unrest is high, gating increases; if unrest is low, gating decays.
        decay_factor = 0.95
        activation_threshold = 0.6  # Only spikes above this trigger new attention

        new_gating = self.gating_field * decay_factor
        # Add new attention where unrest is high
        new_gating += 0.5 * (unrest_norm > activation_threshold).astype(float)
        # Clip; always keep a pilot light for the Pilot Wave (0.01)
        self.gating_field = np.clip(new_gating, 0.01, 1.0)

        gating_tensor = torch.tensor(self.gating_field, dtype=torch.float32)

        # --- 2. PHASE RESONANCE TUNING (Exp 82) ---
        # Filter external noise: only resonant injections (harmonics of the fundamental) pass at full strength.
        # Fundamental frequency: ω = 4π / N
        res_freq = 4 * np.pi / N

        # Rather than building a spectral comb filter over the spatial noise, resonance is modelled
        # as a set of preferred lock-in points: injection is strongest where the local phase sits
        # near one of four stable angles per cycle.

        if isinstance(external_noise, (int, float)):
            noise_tensor = torch.tensor(external_noise).expand(N, N)
        else:
            if isinstance(external_noise, (list, np.ndarray)):
                noise_tensor = torch.tensor(external_noise, dtype=torch.float32).view(N, N) if len(external_noise) == N*N else torch.tensor(external_noise[0]).expand(N, N)
            else:
                noise_tensor = external_noise

        # Resonant Mask: 1.0 if the phase is near a lock-in point, else 0.1
        phase_res_mask = torch.cos(self.phases * 4)    # 4 distinct stable points per cycle
        # Map -1..1 to 0.1..1.0
        phase_res_mask = (phase_res_mask + 1) / 2      # 0..1
        phase_res_mask = phase_res_mask * 0.9 + 0.1    # 0.1 .. 1.0

        resonant_noise = noise_tensor * phase_res_mask

        # --- 3. TOPOLOGICAL IMPEDANCE ---
        # Calculate current vorticity
        current_vorticity = self.get_chern_number()
        # Impedance increases with Complexity (Vorticity):
        # Low Vorticity = Low Impedance (Fluid)  -> High Alpha
        # High Vorticity = High Impedance (Solid) -> Low Alpha (Hard to change)

        # Base impedance
        base_alpha = 0.1
        # If vorticity is high (e.g. > 5), alpha drops
        impedance_factor = 1.0 / (1.0 + 0.5 * abs(current_vorticity))
        effective_alpha = base_alpha * impedance_factor

        # --- 4. INTERACTION & UPDATE ---
        # Text Braiding (User Intent)
        interaction = gating_tensor * text_braid_field

        # Wick Rotation (Time Driver)
        wick_rotation = ntp_offset * 5.0

        # Total Force = (User Intent + Resonant Noise + Time) + Diffusion

        # Laplacian Diffusion (Entropy Export):
        # smooths out high-frequency spatial noise
        laplacian = (np.roll(p_np, 1, axis=0) + np.roll(p_np, -1, axis=0) +
                     np.roll(p_np, 1, axis=1) + np.roll(p_np, -1, axis=1) - 4*p_np)
        diffusion = torch.tensor(laplacian, dtype=torch.float32)

        # ENTROPY EXPORT:
        # stronger diffusion in "quiet" areas (low attention) to wipe the slate clean
        diffusion_rate = 0.05 + 0.1 * (1.0 - gating_tensor)  # higher diffusion where attention is low

        force = (interaction * 1.5) + (resonant_noise * 0.5) + wick_rotation + (diffusion * diffusion_rate)

        # UPDATE
        self.phases = (self.phases + effective_alpha * force) % (2 * np.pi)

        # --- 5. METRICS & CLEANUP ---
        self.focus = float(np.mean(self.gating_field))
        self.vorticity = self.get_chern_number()

        variance = float(torch.var(self.phases))
        self.surprisal = -np.log(max(variance, 1e-9) / (np.pi**2 + 1e-9))

        # CI Recalculation
        v_residue = abs(self.vorticity - round(self.vorticity))
        topo_stability = np.exp(-v_residue * 5.0)
        # CI rewards High Focus + High Stability + Non-Trivial Vorticity
        self.causal_integrity = self.focus * (abs(self.vorticity) + 1.0) * topo_stability * 10
        self.fidelity = self.focus

        return self.get_metrics()

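    # --- Illustrative sketch (added for clarity; not part of the original upload) ---
    # The attention-beam update in step() in isolation: decay by 0.95 per frame,
    # reinforce by +0.5 wherever normalised unrest exceeds 0.6, clip to [0.01, 1.0].
    # Cells that fire once stay "on" for many frames after the stimulus vanishes,
    # which is the intended hysteresis.
    @staticmethod
    def _sketch_attention_hysteresis(frames=5):
        gating = np.full(5, 0.01)                       # five independent cells at pilot-light level
        unrest = np.array([0.9, 0.7, 0.5, 0.2, 0.0])    # stimulus present only in the first frame
        trace = []
        for _ in range(frames):
            gating = np.clip(gating * 0.95 + 0.5 * (unrest > 0.6).astype(float), 0.01, 1.0)
            trace.append(gating.copy())
            unrest = np.zeros_like(unrest)              # stimulus removed after the first frame
        return np.array(trace)                          # cells 0-1 decay slowly from ~0.51; cells 2-4 stay at 0.01
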
    def get_metrics(self):
        return {
            'fidelity': self.fidelity,
            'vorticity': self.vorticity,
            'surprisal': self.surprisal,
            'causal_integrity': self.causal_integrity
        }

    def get_maps(self):
        """Returns visual maps for frontend"""
        return {
            'gating': self.gating_field.tolist() if isinstance(self.gating_field, np.ndarray) else self.gating_field,
            'vorticity': self.vorticity_field.tolist() if isinstance(self.vorticity_field, np.ndarray) else self.vorticity_field
        }

    def get_full_state(self):
        """Returns complete physics state for persistence"""
        return {
            'phases': self.phases.numpy().tolist(),
            'gating': self.gating_field.tolist() if isinstance(self.gating_field, np.ndarray) else self.gating_field,
            'vorticity': self.vorticity_field.tolist() if isinstance(self.vorticity_field, np.ndarray) else self.vorticity_field,
            # We don't save text_braid_field as it's transient/regenerated
        }

    def restore_full_state(self, state_dict):
        """Restores physics state from dictionary"""
        try:
            if 'phases' in state_dict:
                self.phases = torch.tensor(state_dict['phases'], dtype=torch.float32)
            if 'gating' in state_dict:
                self.gating_field = np.array(state_dict['gating'])
            if 'vorticity' in state_dict:
                self.vorticity_field = np.array(state_dict['vorticity'])

            # Recalculate metrics to ensure consistency
            self.vorticity = self.get_chern_number()
            variance = float(torch.var(self.phases))
            self.surprisal = -np.log(max(variance, 1e-9) / (np.pi**2 + 1e-9))

            # Recalc CI
            v_residue = abs(self.vorticity - round(self.vorticity))
            topo_stability = np.exp(-v_residue * 5.0)
            self.focus = float(np.mean(self.gating_field))  # approx
            self.fidelity = self.focus
            self.causal_integrity = self.focus * (abs(self.vorticity) + 1.0) * topo_stability * 10

            print("Restored physics state successfully.")
        except Exception as e:
            print(f"Error restoring physics state: {e}")

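# --- Illustrative sketch (added for clarity; not part of the original upload) ---
# How SciMindCommunicator.encode_text places one phase vortex per character:
# characters sit on a golden-angle (Fibonacci) spiral, and the sign of the
# topological charge is the parity of the character code.
def _sketch_braid_placement(text="TCI"):
    phi = (np.sqrt(5) - 1) / 2                      # golden ratio conjugate, as in encode_text
    n_chars = len(text)
    placements = []
    for i, char in enumerate(text):
        idx = i + 1
        r = np.sqrt(idx / max(n_chars, 1)) * 0.8    # radius grows with sqrt(index)
        theta = (2 * np.pi * idx * phi) % (2 * np.pi)
        charge = 1.0 if ord(char) % 2 == 0 else -1.0
        placements.append((char, round(float(r), 3), round(float(theta), 3), charge))
    return placements                               # e.g. [('T', ..., ..., 1.0), ...]
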
# ==============================================================================
# NOISE MULTIPLEXER (Same as 1014b)
# ==============================================================================
class NoiseMultiplexer:
    def __init__(self):
        self.sources = {'ntp': {'enabled': NTP_AVAILABLE, 'data': deque(maxlen=256)}}
        self.ntp_client = ntplib.NTPClient() if NTP_AVAILABLE else None
        self.last_ntp_sync = 0
        self.ntp_offset = 0.0

    def get_blended_noise(self, size=64):
        try:
            with open('/dev/urandom', 'rb') as f:
                data = f.read(size)
            noise = np.frombuffer(data, dtype=np.uint8) / 255.0
            if len(noise) < size:
                noise = np.pad(noise, (0, size-len(noise)), 'wrap')
            return noise
        except:
            return np.random.rand(size)

    def get_source_stats(self):
        # Only attempt a sync once the interval has elapsed
        if self.ntp_client and time.time() - self.last_ntp_sync > 30:
            try:
                # Attempt (blocks for at most 1 s)
                resp = self.ntp_client.request('pool.ntp.org', version=3, timeout=1)
                self.ntp_offset = resp.offset
            except:
                # On failure, still fall through so the timestamp gets updated; otherwise the
                # next frame retries immediately and the system hangs in an endless timeout loop.
                pass
            finally:
                # IMPORTANT: always reset the timer
                self.last_ntp_sync = time.time()

        return {'ntp_offset': self.ntp_offset}

    def stop(self): pass

# ==============================================================================
# SYNC LEARNER (Same as 1014b)
# ==============================================================================
class SynchronizationLearner:
    def __init__(self, initial_state=None):
        self.history = []
        self.best_config = {'offset': 0.0, 'coupling': 1.0}
        self.best_integrity = 0.0
        if initial_state:
            self.history = initial_state.get('history', [])
            self.best_config = initial_state.get('best_config', self.best_config)
            self.best_integrity = initial_state.get('best_integrity', 0.0)

        self.theta, self.lam = sp.symbols('theta lambda', real=True)
        self.coeffs = sp.symbols('c0:6', real=True)
        self.model = (self.coeffs[0] + self.coeffs[1] * self.theta + self.coeffs[2] * self.lam +
                      self.coeffs[3] * self.theta**2 + self.coeffs[4] * self.lam**2 + self.coeffs[5] * self.theta * self.lam)

    def record_trial(self, offset, coupling, integrity):
        self.history.append((offset, coupling, integrity))
        if integrity > self.best_integrity:
            self.best_integrity = integrity
            self.best_config = {'offset': offset, 'coupling': coupling}

    def propose_next_config(self):
        if len(self.history) < 10:
            import random
            return {'offset': self.best_config['offset'] + random.uniform(-0.1, 0.1),
                    'coupling': np.clip(self.best_config['coupling'] + random.uniform(-0.2, 0.2), 0.1, 2.0)}
        # Fit a local quadratic response surface to recent trials and jump to its stationary point
        # (see the sketch after this class).
        try:
            pts = np.array(self.history)[-50:]
            if len(pts) < 6: return self.best_config
            X_val, Y_val, J_val = pts[:, 0], pts[:, 1], pts[:, 2]
            A = np.column_stack([np.ones_like(X_val), X_val, Y_val, X_val**2, Y_val**2, X_val*Y_val])
            c_vals, _, _, _ = np.linalg.lstsq(A, J_val, rcond=None)
            J_local = self.model.subs(zip(self.coeffs, c_vals))
            grad_theta = sp.diff(J_local, self.theta)
            grad_lam = sp.diff(J_local, self.lam)
            sol = sp.solve([grad_theta, grad_lam], (self.theta, self.lam))
            if sol and isinstance(sol, dict):
                new_off = float(sol[self.theta])
                new_coup = float(sol[self.lam])
                return {'offset': np.clip(new_off, -1.0, 1.0), 'coupling': np.clip(new_coup, 0.1, 3.0)}
        except: pass
        return {'offset': self.best_config['offset'] + np.random.normal(0, 0.05),
                'coupling': np.clip(self.best_config['coupling'] + np.random.normal(0, 0.1), 0.1, 2.0)}

    def get_state(self):
        return {'history': self.history, 'best_config': self.best_config, 'best_integrity': self.best_integrity}

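# --- Illustrative sketch (added for clarity; not part of the original upload) ---
# The quadratic response-surface step used by SynchronizationLearner.propose_next_config:
# trials drawn from a known quadratic J(theta, lambda) are fitted by least squares with
# the same design matrix, and the stationary point of the fitted surface recovers the
# true optimum (0.2, 1.2). The surface and optimum here are made-up test values.
def _sketch_response_surface_fit(n_trials=40, seed=0):
    rng = np.random.default_rng(seed)
    theta = rng.uniform(-1.0, 1.0, n_trials)
    lam = rng.uniform(0.1, 2.0, n_trials)
    J = 1.0 - (theta - 0.2) ** 2 - 0.5 * (lam - 1.2) ** 2      # known optimum at (0.2, 1.2)

    # Same design matrix as propose_next_config: [1, x, y, x^2, y^2, x*y]
    A = np.column_stack([np.ones_like(theta), theta, lam, theta**2, lam**2, theta * lam])
    c, *_ = np.linalg.lstsq(A, J, rcond=None)

    # Stationary point of c0 + c1*x + c2*y + c3*x^2 + c4*y^2 + c5*x*y
    hess = np.array([[2 * c[3], c[5]], [c[5], 2 * c[4]]])
    opt = np.linalg.solve(hess, np.array([-c[1], -c[2]]))
    return opt                                                  # approximately [0.2, 1.2]
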
# ==============================================================================
# ADAPTIVE COMMUNICATOR AGENT
# ==============================================================================
class AdaptiveLoggingCommunicator:
    def __init__(self, adaptive_decoder, holographic_comm, vocab_learner, session_manager):
        self.decoder = adaptive_decoder
        self.holographic_comm = holographic_comm
        self.vocab_learner = vocab_learner
        self.session_manager = session_manager
        self.messages = deque(maxlen=50)
        self.last_text_unitary = 0.0

    def process_message(self, text, noise, learner=None):
        timestamp = datetime.now().strftime('%H:%M:%S')
        self.messages.append({'type': 'user', 'time': timestamp, 'text': text})

        # 1. Learn
        self.vocab_learner.learn_from_input(text)
        with open(self.session_manager.log_file, 'a') as f:
            f.write(f"[{timestamp}] USER: {text}\n")

        # 2. Encode (Topological Imprinting)
        if self.holographic_comm:
            # The result is now a Braid Field (tensor), not just a scalar/wave
            self.last_text_unitary = self.holographic_comm.encode_text(text)

        # 3. Decode
        result = self.decoder.decode_cycle(noise, verbose=False)
        response_words = self.decoder.info_to_message(result['new_info'])
        interpretation = result['interpretation']

        response = f"{interpretation} {response_words}"

        # Capture metrics for this step
        metrics = self.holographic_comm.get_metrics()
        current_ci = metrics.get('causal_integrity', 0.0)
        current_churn = metrics.get('vorticity', 0.0)

        # Append messages WITH metrics
        self.messages.append({'type': 'system', 'time': timestamp, 'text': response, 'ci': current_ci, 'churn': current_churn})

        # Retroactively tag the user message with the state at processing time,
        # since the user message is what caused the state change.
        if len(self.messages) >= 2 and self.messages[-2]['type'] == 'user':
            self.messages[-2]['ci'] = current_ci
            self.messages[-2]['churn'] = current_churn

        with open(self.session_manager.log_file, 'a') as f:
            f.write(f"[{timestamp}] SYSTEM: {response}\n")
            f.write(f" [METRICS] CI:{current_ci:.4f} CHERN:{current_churn:.4f}\n\n")

        return response

# ==============================================================================
# TERMINAL UI
# ==============================================================================
class TerminalInterface:
    def __init__(self, stdscr, session_manager, vocab_learner, learner):
        self.stdscr = stdscr
        self.running = True
        self.paused = False

        curses.start_color()
        curses.use_default_colors()
        for i, c in enumerate([curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_YELLOW,
                               curses.COLOR_RED, curses.COLOR_MAGENTA, curses.COLOR_WHITE], 1):
            curses.init_pair(i, c, -1)

        self.session_manager = session_manager
        self.vocab_learner = vocab_learner
        self.learner = learner

        self.decoder = SemanticAdaptiveDecoder(self.vocab_learner)
        self.noise_multiplexer = NoiseMultiplexer()
        self.holographic_comm = SciMindCommunicator(N=40)
        self.text_comm = AdaptiveLoggingCommunicator(self.decoder, self.holographic_comm, self.vocab_learner, self.session_manager)

        self.sync_config = self.learner.best_config
        self.metrics = {}
        self.ntp_status = "Init"
        self.input_buffer = ""
        self.physics_lock = threading.Lock()

    def physics_loop(self):
        while self.running:
            if not self.paused:
                with self.physics_lock:
                    noise = self.noise_multiplexer.get_blended_noise(size=self.holographic_comm.size**2)
                    text_u = self.text_comm.last_text_unitary
                    # The braid field is an imprint, so it persists across frames; instead of
                    # zeroing it, it is decayed after each step below (elastic snap-back), so a
                    # message keeps influencing the field for a short while.

                    stats = self.noise_multiplexer.get_source_stats()
                    base_ntp = stats.get('ntp_offset', 0.0)
                    coup = self.sync_config['coupling']
                    off = self.sync_config['offset']

                    # Effective NTP impact on rotation
                    # Total offset = base + learned_offset
                    total_offset = base_ntp + off

                    self.ntp_status = f"NTP: {base_ntp:+.4f}s | OFF: {off:+.3f} | CPL: {coup:.2f}"

                    self.holographic_comm.step(noise, text_u * coup, ntp_offset=total_offset)
                    self.metrics = self.holographic_comm.get_metrics()

                    # Decay the manual text input signal
                    if isinstance(self.text_comm.last_text_unitary, torch.Tensor):
                        self.text_comm.last_text_unitary *= 0.9

            time.sleep(0.03)

    def run(self):
        t = threading.Thread(target=self.physics_loop, daemon=True)
        t.start()
        self.stdscr.nodelay(True)
        self.stdscr.timeout(50)

        try:
            while self.running:
                self.update_ui()
                self.handle_input()
        except KeyboardInterrupt:
            self.running = False
        finally:
            self.running = False
            self.stdscr.addstr(0, 0, " SAVING SESSION & EXITING... ", curses.color_pair(1))
            self.stdscr.refresh()
            self.session_manager.save_global_state(self.vocab_learner.get_state(), self.learner.get_state(), self.text_comm.messages)
            t.join(timeout=1.0)

    def update_ui(self):
        try:
            self.stdscr.erase()
            h, w = self.stdscr.getmaxyx()
            if h < 20 or w < 60: return

            sid = self.session_manager.session_id
            self.stdscr.addstr(0, 0, f" SCIMIND 2.0 (1014e) | SESS: {sid} | {self.ntp_status} ", curses.color_pair(1) | curses.A_REVERSE)

            with self.physics_lock:
                ci = self.metrics.get('causal_integrity', 0.0)
                churn = self.metrics.get('vorticity', 0.0)
                best = self.learner.best_integrity
                vocab_size = self.vocab_learner.total_words
                coh = self.decoder.last_coherence

            # Highlight Chern Integer
            chern_color = curses.color_pair(2) if abs(churn - round(churn)) < 0.1 else curses.color_pair(3)

            self.stdscr.addstr(2, 2, f"INT: {ci:6.3f} (Best: {best:.1f})", curses.color_pair(2 if ci > 5 else 3))
            self.stdscr.addstr(2, 30, f"CHERN NO: {churn:+.3f}", chern_color)
            self.stdscr.addstr(2, 50, f"VOCAB: {vocab_size}", curses.color_pair(5))

            bar = "#" * int(min(ci, 20))
            self.stdscr.addstr(3, 2, f"FIELD: [{bar:<20}]", curses.color_pair(1))

            msgs = list(self.text_comm.messages)[-10:]
            y = 5
            for m in msgs:
                pre = ">> " if m['type'] == 'user' else "SYS: "
                col = curses.color_pair(6) if m['type'] == 'user' else curses.color_pair(3)
                text = f"{pre}{m['text']}"[:w-4]
                self.stdscr.addstr(y, 2, text, col)
                y += 1

            self.stdscr.addstr(h-2, 2, f"> {self.input_buffer}", curses.color_pair(2))
            self.stdscr.refresh()
        except curses.error: pass

    def handle_input(self):
        try:
            c = self.stdscr.getch()
            if c == -1: return
            if c == 10:
                if self.input_buffer:
                    with self.physics_lock:
                        noise = self.noise_multiplexer.get_blended_noise(size=64)
                        self.text_comm.process_message(self.input_buffer, noise)

                        metrics = self.holographic_comm.get_metrics()
                        self.learner.record_trial(self.sync_config['offset'], self.sync_config['coupling'], metrics['causal_integrity'])
                        self.sync_config = self.learner.propose_next_config()
                        # AUTO SAVE
                        self.session_manager.save_global_state(self.vocab_learner.get_state(), self.learner.get_state(), self.text_comm.messages)

                    self.input_buffer = ""
            elif c == 27: self.running = False
            elif c in (127, curses.KEY_BACKSPACE): self.input_buffer = self.input_buffer[:-1]
            elif 32 <= c <= 3000: self.input_buffer += chr(c)
        except: pass

def startup_menu(stdscr):
    curses.echo()
    try:
        curses.start_color()
        curses.use_default_colors()
    except: pass
    try: curses.init_pair(1, curses.COLOR_CYAN, -1)
    except: curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)

    mgr = SessionManager()
    sessions = mgr.list_sessions()

    stdscr.clear()
    stdscr.addstr(2, 2, "SCIMIND 2.0 (1014e) - Topological Communicator", curses.color_pair(1) | curses.A_BOLD)
    stdscr.addstr(4, 2, "Select Session to Load:")
    stdscr.addstr(5, 4, "[0] Start NEW Session")

    for i, s in enumerate(sessions[:9]):
        stdscr.addstr(6+i, 4, f"[{i+1}] {s['label']} (ID: {s['id']})")

    stdscr.addstr(16, 2, "Choice: ")
    stdscr.refresh()

    try:
        choice = stdscr.getstr(16, 10).decode('utf-8')
        choice = int(choice)
    except: choice = 0

    initial_state = {}
    if choice > 0 and choice <= len(sessions):
        initial_state = mgr.load_session(sessions[choice-1]['path'])
    else:
        mgr.start_new_session()

    vocab_state = initial_state.get('vocab', {})
    sync_state = initial_state.get('sync', {})
    history = initial_state.get('history', [])

    vocab_learner = VocabularyLearner(vocab_state)
    learner = SynchronizationLearner(sync_state)

    return mgr, vocab_learner, learner, history

def main(stdscr):
    os.environ.setdefault('ESCDELAY', '25')
    curses.curs_set(0)
    mgr, vocab, learner, history = startup_menu(stdscr)
    curses.noecho()  # the menu enables echo for getstr(); the chat UI draws its own input line
    interface = TerminalInterface(stdscr, mgr, vocab, learner)
    if history:
        interface.text_comm.messages.extend(history)
    interface.run()

if __name__ == "__main__":
    try:
        curses.wrapper(main)
    except KeyboardInterrupt:
        print("\nExited via KeyboardInterrupt.")