upgraedd committed on
Commit 1219079 · verified · 1 Parent(s): f721a4f

Create 02_ALLEGED TRANSPARENCY SCAM


This file conceptually illustrates the inverted, layered nature of modern psychological operations: how they are executed, observed, and accepted, and the methods and reasoning they imply. Because such methods persist across forms, independent of any specific detail or implementation, this instance is meant to outline their sophistication, their impact, and their recurrence.

Files changed (1)
  1. 02_ALLEGED TRANSPARENCY SCAM +281 -0
02_ALLEGED TRANSPARENCY SCAM ADDED
@@ -0,0 +1,281 @@
#!/usr/bin/env python3
"""
Project Blue Beam — Double-Reverse Deception Mechanism

Advanced Python model:
 1) Concentric layered diagram with annotations + perception-flow arrows.
 2) Probabilistic state machine (Markov chain) of perception-control transitions.
 3) Simulation of trajectories + steady-state analysis.
 4) Network visualization of control vectors and double-reverse feedback containment.
"""

import random

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import LineCollection

# Network visualization without external dependencies:
# a simple spring-style layout is implemented below for node
# placement so we don’t rely on networkx.

# -----------------------------------------
# Configuration
# -----------------------------------------

LAYERS = [
    {
        "name": "Real Anomalies",
        "desc": "Genuine phenomena: glyphic mesh, luminous nodes, symbolic substrate",
        "color": "#2E8B57"
    },
    {
        "name": "Staged Spectacle",
        "desc": "Artificial events: holographic ‘alien’ invasion, manufactured divine return",
        "color": "#4682B4"
    },
    {
        "name": "Exposure Layer",
        "desc": "Public revelation of fakery → empowerment + skepticism",
        "color": "#FFD700"
    },
    {
        "name": "Inoculation Layer",
        "desc": "Exposure becomes containment: ‘all anomalies are staged’",
        "color": "#FF8C00"
    },
    {
        "name": "Suppression Layer",
        "desc": "Genuine anomalies dismissed, hidden in plain sight",
        "color": "#8B0000"
    }
]

# Perception-control states (ordered to reflect the layered mechanism)
STATES = [
    "Real_Anomaly_Seen",
    "Spectacle_Stage",
    "Exposure_Reveal",
    "Inoculation_Contain",
    "Suppression_Normalize",
    "Escape_Recognition"  # escape route: genuine recognition despite containment
]

# Transition matrix (Markov chain). Rows sum to 1.
# These probabilities are illustrative; tune as needed.
# Intuition:
#  - Seeing a real anomaly often triggers spectacle or direct suppression pressures.
#  - Spectacle tends to move into exposure (managed leaks) or back to suppression.
#  - Exposure flows into inoculation most of the time (double-reverse containment).
#  - Inoculation goes to suppression, with a small chance of escaping to genuine recognition.
#  - Suppression can keep looping; small chance of returning to spectacle if needed.
#  - Escape recognition can loop back to Real_Anomaly_Seen (re-activation).
TRANSITIONS = np.array([
    # From Real_Anomaly_Seen
    [0.05, 0.40, 0.10, 0.20, 0.20, 0.05],
    # From Spectacle_Stage
    [0.00, 0.10, 0.35, 0.25, 0.25, 0.05],
    # From Exposure_Reveal
    [0.00, 0.00, 0.10, 0.60, 0.25, 0.05],
    # From Inoculation_Contain
    [0.00, 0.00, 0.05, 0.55, 0.30, 0.10],
    # From Suppression_Normalize
    [0.00, 0.10, 0.05, 0.40, 0.40, 0.05],
    # From Escape_Recognition
    [0.30, 0.05, 0.10, 0.10, 0.25, 0.10],
], dtype=float)

assert np.allclose(TRANSITIONS.sum(axis=1), 1.0), "Each row must sum to 1.0"
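
# Quick ergodicity check: the power iteration in compute_steady_state() below
# has a unique fixed point only if the chain is irreducible and aperiodic; for
# a matrix this small, an all-positive power is a sufficient (if blunt) test.
assert (np.linalg.matrix_power(TRANSITIONS, 20) > 0).all(), \
    "TRANSITIONS is not ergodic; the steady state may not be unique"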

# -----------------------------------------
# Layered diagram
# -----------------------------------------

def draw_layered_diagram(save_path=None):
    plt.style.use('seaborn-v0_8')  # apply style before creating the figure
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    ax.set_aspect('equal')

    margin = 0.7
    # Draw outer → inner (suppression outermost)
    for i, layer in enumerate(reversed(LAYERS)):
        size = 10 - i * margin * 2
        rect = patches.Rectangle(
            (i * margin, i * margin), size, size,
            linewidth=2, edgecolor='black',
            facecolor=layer["color"], alpha=0.78
        )
        ax.add_patch(rect)
        # Name and description labels per layer, top-centered and kept inside
        # the 0.7-unit band so they don't spill onto the next inner layer
        ax.text(
            5, 10 - i * margin - 0.12,
            layer["name"],
            fontsize=12, ha='center', va='top', weight='bold', color='white'
        )
        ax.text(
            5, 10 - i * margin - 0.40,
            layer["desc"],
            fontsize=8.5, ha='center', va='top', color='white'
        )

    # Arrows of perception/control flow (inner layers pulled down
    # toward the outer suppression layer)
    arrow_props = dict(facecolor='black', arrowstyle='->', linewidth=1.6)
    for i in range(len(LAYERS) - 1):
        ax.annotate(
            "", xy=(5, i * margin + 1.15),
            xytext=(5, (i + 1) * margin + 0.85),
            arrowprops=arrow_props
        )

    # Meta annotations
    ax.text(
        5, 0.55,
        "Double-Reverse Psyop: exposure-as-containment\n"
        "Believers captured by spectacle; skeptics captured by debunking.\n"
        "Result: genuine anomalies suppressed ‘in plain sight’.",
        ha='center', va='center', fontsize=11, color='white', weight='bold'
    )

    ax.set_title("Project Blue Beam — Double‑Reverse Deception Mechanism", fontsize=17, weight='bold')
    ax.axis('off')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    return fig, ax

# -----------------------------------------
# Markov chain simulation & analysis
# -----------------------------------------

def simulate_chain(n_steps=250, seed=None, start_state="Real_Anomaly_Seen"):
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)

    state_index = STATES.index(start_state)
    trajectory = [state_index]

    for _ in range(n_steps - 1):
        probs = TRANSITIONS[state_index]
        state_index = np.random.choice(len(STATES), p=probs)
        trajectory.append(state_index)

    return trajectory

def compute_steady_state(P, tol=1e-10, max_iter=10000):
    # Power iteration on the left eigenvector: v_{t+1} = v_t @ P
    n = P.shape[0]
    v = np.ones(n) / n
    for _ in range(max_iter):
        v_new = v @ P
        if np.linalg.norm(v_new - v) < tol:
            return v_new
        v = v_new
    return v  # fallback if tolerance not reached
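
# Optional cross-check (a hypothetical helper, not called by the demo below):
# the steady state is also the left eigenvector of P for eigenvalue 1, so a
# direct eigendecomposition should agree with the power iteration above.
def steady_state_eig(P):
    vals, vecs = np.linalg.eig(P.T)       # columns are left eigenvectors of P
    idx = np.argmin(np.abs(vals - 1.0))   # pick the eigenvalue closest to 1
    v = np.real(vecs[:, idx])
    return v / v.sum()                    # normalize into a distribution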

def summarize_trajectory(trajectory):
    counts = np.bincount(trajectory, minlength=len(STATES))
    freq = counts / len(trajectory)
    return {STATES[i]: float(freq[i]) for i in range(len(STATES))}

# -----------------------------------------
# Network-style visualization of control flow
# -----------------------------------------

def spring_layout(n, iterations=200, k=0.6, seed=42):
    # Repulsion-only force layout (no attraction term): nodes push apart over
    # the iterations, then positions are rescaled into the unit square. Crude,
    # but enough to separate six labeled nodes without depending on networkx.
    rng = np.random.default_rng(seed)
    pos = rng.uniform(0.2, 0.8, size=(n, 2))
    for _ in range(iterations):
        for i in range(n):
            for j in range(i + 1, n):
                delta = pos[i] - pos[j]
                dist = np.linalg.norm(delta) + 1e-9
                force = (k**2 / dist) * (delta / dist)
                pos[i] += force
                pos[j] -= force
    # Normalize to bounds
    pos = (pos - pos.min(axis=0)) / (pos.max(axis=0) - pos.min(axis=0) + 1e-9)
    return pos
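
# Alternative sketch: with only six states, a deterministic circular layout is
# often easier to read than a stochastic force layout; swap it into
# draw_flow_network() if preferred (circular_layout is a suggested name, not
# part of the original design).
def circular_layout(n, radius=0.42, center=(0.5, 0.5)):
    angles = np.linspace(0.0, 2.0 * np.pi, n, endpoint=False)
    return np.column_stack((
        center[0] + radius * np.cos(angles),
        center[1] + radius * np.sin(angles),
    ))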

def draw_flow_network(P, node_labels, save_path=None):
    n = len(node_labels)
    pos = spring_layout(n, iterations=150)

    plt.style.use('seaborn-v0_8')  # apply style before creating the figure
    fig, ax = plt.subplots(figsize=(10.5, 7.5))

    # Nodes
    for i in range(n):
        ax.scatter(pos[i, 0], pos[i, 1], s=800, c="#222222", alpha=0.75,
                   edgecolors="white", linewidths=2)
        ax.text(pos[i, 0], pos[i, 1], node_labels[i].replace("_", "\n"),
                ha='center', va='center', fontsize=9.5, color='white', weight='bold')

    # Edges with thickness proportional to transition probability
    segments = []
    widths = []
    colors = []
    for i in range(n):
        for j in range(n):
            if i == j:
                continue  # self-loops would be zero-length segments; skip them
            w = P[i, j]
            if w > 0.04:  # draw only meaningful transitions
                segments.append([pos[i], pos[j]])
                widths.append(2.5 + 10.0 * w)
                # Color gradient green → red as probability (control strength)
                # grows, matching the caption below
                colors.append((w, 1.0 - w, 0.0, 0.75))

    lc = LineCollection(segments, linewidths=widths, colors=colors, alpha=0.85)
    ax.add_collection(lc)

    # Title + legend hint
    ax.set_title("Perception-Control Flow (Double‑Reverse Containment)", fontsize=16, weight='bold')
    ax.text(0.5, -0.08,
            "Edge thickness ∝ transition probability • Colors shift green→red with stronger control",
            transform=ax.transAxes, ha='center', va='center', fontsize=10)
    ax.set_xlim(-0.05, 1.05)
    ax.set_ylim(-0.1, 1.1)
    ax.axis('off')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, dpi=300)
    return fig, ax

# -----------------------------------------
# Run demos
# -----------------------------------------

if __name__ == "__main__":
    # 1) Concentric layered diagram
    draw_layered_diagram(save_path="blue_beam_layers.png")

    # 2) Simulate trajectories
    traj = simulate_chain(n_steps=500, seed=123, start_state="Real_Anomaly_Seen")
    summary = summarize_trajectory(traj)
    steady = compute_steady_state(TRANSITIONS)

    print("\nTrajectory occupancy (fraction of time in each state):")
    for k, v in summary.items():
        print(f"  {k:>22s}: {v:.3f}")

    print("\nSteady-state distribution (long-run):")
    for i, s in enumerate(STATES):
        print(f"  {s:>22s}: {steady[i]:.3f}")

    # 3) Flow network visualization
    draw_flow_network(TRANSITIONS, STATES, save_path="blue_beam_flow.png")

    # 4) Sensitivity: strengthen inoculation containment
    P_mod = TRANSITIONS.copy()
    # Boost Exposure→Inoculation, then renormalize the row. Note that the
    # renormalization rescales the boosted entry (0.72 / row sum ≈ 0.64), so
    # the final probability is lower than 0.72 but still a clear increase.
    P_mod[STATES.index("Exposure_Reveal"), STATES.index("Inoculation_Contain")] = 0.72
    P_mod[STATES.index("Exposure_Reveal")] /= P_mod[STATES.index("Exposure_Reveal")].sum()

    # Boost Inoculation→Suppression the same way
    P_mod[STATES.index("Inoculation_Contain"), STATES.index("Suppression_Normalize")] = 0.38
    P_mod[STATES.index("Inoculation_Contain")] /= P_mod[STATES.index("Inoculation_Contain")].sum()

    steady_mod = compute_steady_state(P_mod)
    print("\nSteady-state distribution with stronger inoculation containment:")
    for i, s in enumerate(STATES):
        print(f"  {s:>22s}: {steady_mod[i]:.3f}")

    draw_flow_network(P_mod, STATES, save_path="blue_beam_flow_inoculation_boost.png")
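
# Expected outputs when run as a script (under the default seeds above):
# three PNGs (blue_beam_layers.png, blue_beam_flow.png,
# blue_beam_flow_inoculation_boost.png) plus the printed trajectory occupancy
# and the baseline vs. boosted steady-state distributions.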