"""
Consciousness Descent
consciousness_descent_v3.py
===========================
A 60-second structured AV descent. Version 3.
CHANGES FROM V1:
- Smoother transitions: 3-second crossfade zones with weighted blending
- Coherent soundtrack: drone root note (A=110Hz) persists across all 5 movements,
each section harmonically related. No more jarring cuts.
- YTP chaos layer: random esoteric text overlays, glitch entity faces,
forbidden geometry flickers, YTP-style frame artifacts
- Esoteric references: Kabbalistic tree, 8-circuit model, Bardo Thodol,
Terence McKenna timewave, Jungian shadow, Gurdjieff enneagram,
Ouspensky fourth way, Crowley Aeon, ayahuasca icaros, pineal DMT,
Berghain Regel 1, samsara wheel, Indra's net
5 Movements:
I. The Veil (0-12s) Simulation recognised. Heartbeat drone.
II. Ego Death (12-28s) Industrial fracture. Identity dissolves.
III. Bardo (28-40s) Between worlds. No tempo. Pure geometry.
IV. The Return (40-52s) Icaros bloom. Compassion frequency.
V. Recognise (52-60s) You were always here. Grid remembers.
Output: consciousness_descent_v3.mp4
"""
import numpy as np
from scipy.io import wavfile
import subprocess, os, sys, math, random
import cv2
SR = 44100                  # audio sample rate (Hz)
DURATION = 60               # total piece length (seconds)
N_AUDIO = SR * DURATION     # total audio sample count
FPS = 30                    # video frame rate
N_FRAMES = FPS * DURATION   # total video frame count
W, H = 1280, 720            # output resolution (pixels)
INT16_MAX = 32767           # int16 full scale for 16-bit PCM WAV export
SEED = 23
# Fix both RNGs so a render is reproducible
random.seed(SEED)
np.random.seed(SEED)
# Movement boundaries (seconds)
M1_S, M1_E = 0, 12
M2_S, M2_E = 12, 28
M3_S, M3_E = 28, 40
M4_S, M4_E = 40, 52
M5_S, M5_E = 52, 60
# Crossfade overlap (seconds each side of boundary)
XF = 2.5
def tn(t, s, e):
    """Normalised position within section [0,1]."""
    span = max(e - s, 0.001)  # avoid division by zero on degenerate sections
    frac = (t - s) / span
    return float(np.clip(frac, 0.0, 1.0))
def xfade_in(t, s):
    """Fade-in: starts XF seconds BEFORE boundary s, peaks AT s.
    Ensures no black frames: a section is already partially visible
    before it officially starts. Square-root curve for a fast rise."""
    ramp = (t - s + XF) / XF
    return float(np.clip(ramp, 0.0, 1.0)) ** 0.5
def xfade_out(t, e):
    """Fade-out: starts AT boundary e, finishes XF seconds AFTER.
    Ensures no black frames: outgoing section overlaps incoming.
    Square-root curve mirrors xfade_in."""
    ramp = (e + XF - t) / XF
    return float(np.clip(ramp, 0.0, 1.0)) ** 0.5
def blend(a, b, w):
    """Blend two uint8 images: w=0 -> a, w=1 -> b."""
    weight = float(np.clip(w, 0, 1))
    return cv2.addWeighted(a, 1.0 - weight, b, weight, 0)
# ============================================================================
# AUDIO ENGINE
# ============================================================================
def sine(freq, dur, amp=1.0, phase=0.0):
    """Sine oscillator: dur seconds of freq Hz at sample rate SR."""
    samples = np.linspace(0, dur, int(SR * dur), endpoint=False)
    return amp * np.sin(2 * np.pi * freq * samples + phase)
def saw(freq, dur, amp=1.0):
    """Sawtooth oscillator ramping -1..+1 once per period."""
    samples = np.linspace(0, dur, int(SR * dur), endpoint=False)
    ramp = (samples * freq) % 1.0
    return amp * (2.0 * ramp - 1.0)
def sqr(freq, dur, amp=1.0, duty=0.5):
    """Square/pulse oscillator with adjustable duty cycle."""
    samples = np.linspace(0, dur, int(SR * dur), endpoint=False)
    is_high = (samples * freq) % 1.0 < duty
    return amp * np.where(is_high, 1.0, -1.0).astype(np.float64)
def wnoise(n, amp=1.0):
    """Uniform white noise: n samples in [-amp, amp)."""
    uniform = np.random.random(n)
    return amp * (uniform * 2.0 - 1.0)
def pnoise(n, amp=1.0):
    """Approximate pink (1/f) noise, peak-normalised to amp.

    White Gaussian noise is shaped in the frequency domain by 1/sqrt(f),
    giving the characteristic -3 dB/octave spectral tilt.
    """
    spectrum = np.fft.rfft(np.random.randn(n))
    freqs = np.fft.rfftfreq(n)
    freqs[0] = 1e-9  # avoid dividing by zero at DC
    shaped = np.fft.irfft(spectrum / np.sqrt(freqs), n=n)
    return amp * shaped[:n] / (np.max(np.abs(shaped)) + 1e-9)
def env_adsr(sig, a=0.01, d=0.05, s=0.7, r=0.08):
    """Apply an ADSR amplitude envelope to sig.

    a/d/r are segment lengths as fractions of len(sig); s is the
    sustain level. Each timed segment is at least one sample long.
    """
    n = len(sig)
    attack = max(1, int(a * n))
    decay = max(1, int(d * n))
    release = max(1, int(r * n))
    sustain = max(0, n - attack - decay - release)
    envelope = np.concatenate([
        np.linspace(0, 1, attack),
        np.linspace(1, s, decay),
        np.full(sustain, s),
        np.linspace(s, 0, release),
    ])
    # Truncate in case the minimum segment lengths overshoot n
    return sig * envelope[:n]
def place(buf, sig, start):
    """Mix sig into buf starting at sample index start.

    The tail of sig is truncated if it would run past the end of buf.
    Starts at or beyond the end of buf are ignored: without the guard,
    buf[start:end] is empty while sig[:end-start] (negative stop) can
    still be non-empty, raising a broadcast ValueError.
    """
    if start >= len(buf):
        return
    end = min(start + len(sig), len(buf))
    buf[start:end] += sig[:end - start]
def place_st(L, R, sl, sr, start):
    """Mix a stereo event (sl -> L, sr -> R) into the buffers at start.

    Both channels are truncated against len(L); sl and sr are assumed
    equal length. Starts at or beyond the buffer end are ignored
    (same broadcast-error guard as place()).
    """
    if start >= len(L):
        return
    end = min(start + len(sl), len(L))
    L[start:end] += sl[:end - start]
    R[start:end] += sr[:end - start]
def pan(sig, p):
    """Constant-power pan: p=-1 hard left, 0 centre, +1 hard right."""
    left_gain = np.sqrt(np.clip((1 - p) / 2, 0, 1))
    right_gain = np.sqrt(np.clip((1 + p) / 2, 0, 1))
    return sig * left_gain, sig * right_gain
def crush(sig, bits=8):
    """Bit-crush: quantise sig to roughly `bits` bits of resolution."""
    levels = 2 ** max(1, bits - 1)
    quantised = np.round(sig * levels)
    return quantised / levels
def lp_filter(sig, k=30):
    """Crude low-pass via moving average over a k-sample window."""
    kernel = np.ones(k) / k
    return np.convolve(sig, kernel, mode='same')
def reverb_simple(sig, delay_ms=80, fb=0.4, mix=0.3, sr=None):
    """Simple feedback-delay reverb.

    Exactly equivalent to the per-sample comb filter
        d = buf[i % D]; out[i] += d * mix; buf[i % D] = sig[i] + d * fb
    but processed one delay-period at a time with NumPy: within a
    period each delay slot is read and written exactly once, so the
    recurrence vectorises without changing any value. Orders of
    magnitude faster than the sample loop on minute-long signals.

    Args:
        sig: mono float signal (1-D array).
        delay_ms: delay-line length in milliseconds.
        fb: feedback gain into the delay line.
        mix: wet level added onto the dry signal.
        sr: sample rate; defaults to the module-level SR.
    Returns:
        New array, same length as sig: dry plus delayed wet signal.
    """
    if sr is None:
        sr = SR
    # max(1, ...) guards sub-sample delay times, which previously hit
    # a modulo-by-zero in the per-sample formulation.
    delay_n = max(1, int(sr * delay_ms / 1000))
    out = sig.copy()
    buf = np.zeros(delay_n)
    for start in range(0, len(out), delay_n):
        m = min(delay_n, len(out) - start)
        d = buf[:m].copy()
        out[start:start + m] += d * mix
        buf[:m] = sig[start:start + m] + d * fb
    return out
def synthesize_audio():
    """
    Single-pass coherent soundtrack. Every layer is a continuous
    function of t (0..60s). No hard section cuts anywhere.

    Returns (L, R): float stereo channels, tape-saturated and
    peak-normalised to 0.91.

    Architecture:
    - All layers built as full-length arrays, shaped by smooth
      automation envelopes. Mix = weighted sum at every sample.
    - Rhythmic events (kicks, bells) placed individually but their
      volume envelope is also a smooth automation curve.
    - Five functional zones share overlapping crossfades of 3-4s.
      Nothing starts or stops abruptly.
    Tonal centre: A minor (A=110 Hz root throughout).
    Harmonic journey:
      0-12s   A55 sub + heartbeat. Sparse. Sparse. Sparse.
      10-28s  Industrial 140BPM kick fades in; FM anxiety swells from silence.
      20-40s  Kick fades away; FM dissolves; infra breath takes over.
      28-52s  Sacred bell field (sparse to dense) overlaps void + return.
      38-52s  Warm A-major choir blooms under the bells.
      48-60s  Choir fades; pure harmonic sine tone rises alone to end.
    """
    L = np.zeros(N_AUDIO)
    R = np.zeros(N_AUDIO)
    t = np.linspace(0, DURATION, N_AUDIO, endpoint=False)
    # ------------------------------------------------------------------
    # AUTOMATION HELPERS
    # A smooth_env is built from breakpoints: list of (time_s, value)
    # Interpolated with cosine easing between each pair.
    # ------------------------------------------------------------------
    def auto(breakpoints):
        """Build a per-sample envelope from (time, value) breakpoints."""
        env = np.zeros(N_AUDIO)
        bp = breakpoints
        for i in range(len(bp) - 1):
            t0, v0 = bp[i]
            t1, v1 = bp[i+1]
            s = int(t0 * SR)
            e = int(t1 * SR)
            if e <= s: continue
            e = min(e, N_AUDIO)
            n = e - s
            # Cosine ease
            x = np.linspace(0.0, 1.0, n)
            fade = 0.5 - 0.5 * np.cos(np.pi * x)
            env[s:e] = v0 + (v1 - v0) * fade
        # Hold last value to end
        last_t, last_v = bp[-1]
        env[int(last_t*SR):] = last_v
        return env
    # ------------------------------------------------------------------
    # LAYER 1: SUB DRONE A27.5 + A55 + A110
    # The skeleton. Never fully silent. Breathes the whole 60s.
    # ------------------------------------------------------------------
    # Volume arc: whisper -> swell -> peak -> exhale -> hush -> bloom -> fade
    sub_vol = auto([
        (0, 0.00),
        (2, 0.18),    # born from silence
        (8, 0.35),    # steady heartbeat world
        (14, 0.55),   # industrial kicks enter - sub thickens
        (22, 0.65),   # peak of anxiety
        (26, 0.40),   # beginning to let go
        (30, 0.08),   # void - near silence
        (36, 0.05),   # deepest void
        (40, 0.22),   # something stirs
        (46, 0.50),   # return, warmth
        (52, 0.40),   # simplifying
        (56, 0.30),   # just the tone now
        (60, 0.00),
    ])
    # Very slow pitch drift on sub for psychedelic unstable feel (+/- 0.3 Hz)
    pitch_drift = 1.0 + 0.005 * np.sin(2*np.pi*0.07*t) * sub_vol
    # Integrate the drifting frequency to get a continuous phase
    phase_sub = 2*np.pi * np.cumsum(55.0 * pitch_drift) / SR
    sub = (0.60 * np.sin(phase_sub) +
           0.30 * np.sin(phase_sub * 0.5) +  # A27.5 sub-sub
           0.18 * np.sin(phase_sub * 2.0) +  # A110
           0.08 * np.sin(phase_sub * 3.0))   # E165 fifth
    sub *= sub_vol
    # Very gentle low-pass
    sub = lp_filter(sub, k=8)
    L += sub * 0.88; R += sub * 0.88
    # ------------------------------------------------------------------
    # LAYER 2: NEURAL CRACKLE (textural pink noise, high-pass)
    # Present throughout, very quiet. Represents raw consciousness.
    # ------------------------------------------------------------------
    crackle_vol = auto([
        (0, 0.00), (3, 0.04), (10, 0.06),
        (14, 0.14), (22, 0.20), (28, 0.04),
        (34, 0.01), (40, 0.05), (50, 0.08),
        (56, 0.02), (60, 0.00),
    ])
    crackle_raw = pnoise(N_AUDIO, 0.12)
    # Band-pass by subtracting a wide moving average from a narrow one
    lo = np.convolve(crackle_raw, np.ones(40)/40, mode='same')
    hi = np.convolve(crackle_raw, np.ones(6)/6, mode='same')
    crackle = (hi - lo) * crackle_vol
    # Slightly different channel gains for a touch of stereo width
    L += crackle * 0.55; R += crackle * 0.58
    # ------------------------------------------------------------------
    # LAYER 3: HEARTBEAT -- accelerates 72->140 BPM from t=8 to t=12
    # This is the musical bridge: the body becomes the machine.
    # The kick drum at 140 BPM locks on exactly as the heartbeat arrives
    # at 140, so the listener feels the tempo as biological before
    # the industrial takes over. Returns as a ghost in section IV.
    # ------------------------------------------------------------------
    hb_vol = auto([
        (0, 0.00), (2, 0.00), (6, 0.90),
        (8, 1.00), (11, 0.85), (12, 0.60),
        (13, 0.20), (14, 0.00),   # fades under kick
        (38, 0.00), (42, 0.25),   # ghost heartbeat in return
        (50, 0.12), (54, 0.00),
    ])
    # BPM curve: steady 72 until t=8, then accelerates to 140 by t=12
    # After that the kick carries the tempo.
    bpm_curve = auto([
        (0, 72.0), (8, 72.0),
        (10, 95.0), (11, 120.0), (12, 140.0),
        (60, 140.0),
    ])
    # Place heartbeat events by integrating the BPM curve
    # beat_phase accumulates; each time it crosses 1.0 = a beat
    beat_phase_hb = 0.0
    # NOTE(review): the three variables below are never read anywhere in
    # this function -- presumably dead state from an earlier event-queue
    # approach; confirm and delete.
    lub_pending = False
    dub_pending = False
    dub_offset_samples = 0
    # NOTE: per-sample Python loop over all N_AUDIO (~2.6M) samples --
    # slow, but the beat-phase integration is exact.
    for samp in range(N_AUDIO):
        bpm_here = float(bpm_curve[samp])
        beat_phase_hb += bpm_here / (60.0 * SR)
        if beat_phase_hb >= 1.0:
            beat_phase_hb -= 1.0
            vol_here = float(hb_vol[samp])
            if vol_here < 0.01: continue
            # LUB: pitch-dropping thump with soft saturation
            dur = 0.26; n2 = int(SR*dur); t2 = np.linspace(0,dur,n2)
            freq_hb = 60*np.exp(-t2*22)+26
            seg = np.sin(2*np.pi*np.cumsum(freq_hb)/SR)*np.exp(-t2*18)
            seg = env_adsr(seg, a=0.002, d=0.13, s=0.0, r=0.05)
            seg = np.tanh(seg*2.0)/np.tanh(2.0)
            place(L, seg*vol_here, samp)
            place(R, seg*vol_here, samp)
            # DUB: 0.22 of a beat later
            dub_samp = samp + int(0.22 * 60.0/bpm_here * SR)
            if dub_samp < N_AUDIO:
                vol_d = vol_here * 0.46
                dur2 = 0.22; n3 = int(SR*dur2); t3 = np.linspace(0,dur2,n3)
                freq2 = 48*np.exp(-t3*28)+22
                seg2 = np.sin(2*np.pi*np.cumsum(freq2)/SR)*np.exp(-t3*22)
                seg2 = env_adsr(seg2, a=0.002, d=0.10, s=0.0, r=0.04)
                seg2 = np.tanh(seg2*1.8)/np.tanh(1.8)
                place(L, seg2*vol_d, dub_samp)
                place(R, seg2*vol_d, dub_samp)
    # ------------------------------------------------------------------
    # LAYER 3b: TENSION PEDAL TONE -- A110, enters t=7, peaks t=11-12
    # A held organ-like A note that swells under the accelerating heartbeat,
    # creating harmonic anticipation of the drop. Uses slight FM shimmer
    # so it feels alive, not static. This is the "held breath" before
    # the machine takes over.
    # ------------------------------------------------------------------
    pedal_vol = auto([
        (0, 0.00), (7, 0.00), (9, 0.28),
        (11, 0.60), (12, 0.55), (14, 0.15),
        (16, 0.00),
    ])
    pedal_t = t
    pedal_mod = 0.8 * np.sin(2*np.pi*110*2.0*pedal_t)  # gentle FM shimmer
    pedal_phase = 2*np.pi*np.cumsum(110.0 + pedal_mod*0.5)/SR
    pedal = (0.55*np.sin(pedal_phase) +
             0.30*np.sin(pedal_phase*2) +
             0.15*np.sin(pedal_phase*3)) * pedal_vol
    # Slight stereo spread
    pedal_L = pedal * 0.85
    # NOTE(review): np.roll on a constant all-0.85 array is a no-op, so
    # pedal_R == pedal * 0.85 and the intended ~8 ms Haas delay never
    # happens. Fixing it (rolling `pedal` itself) would change the mix --
    # confirm intent before touching.
    pedal_R = pedal * np.roll(np.ones(N_AUDIO)*0.85, int(SR*0.008))[:N_AUDIO]
    L += pedal_L * 0.45; R += pedal_R * 0.45
    # ------------------------------------------------------------------
    # LAYER 3c: SNARE BUILD -- sparse noise hits on the off-beat,
    # entering at t=9 and getting denser to t=12.
    # Gives the acceleration a snare-roll quality -- drum kit waking up.
    # ------------------------------------------------------------------
    snare_vol = auto([
        (0, 0.0), (9, 0.0), (10, 0.20),
        (11, 0.55), (12, 0.75), (13, 0.20),
        (14, 0.00),
    ])
    # Snare hits on off-beats relative to accelerating BPM
    beat_phase_sn = 0.5  # start on off-beat
    for samp in range(int(SR*9), int(SR*14)):
        bpm_here = float(bpm_curve[samp])
        beat_phase_sn += bpm_here / (60.0 * SR)
        if beat_phase_sn >= 1.0:
            beat_phase_sn -= 1.0
            vol_here = float(snare_vol[samp])
            if vol_here < 0.02: continue
            dur = 0.10; n2 = int(SR*dur)
            seg = pnoise(n2, 0.6) * np.exp(-np.linspace(0,1,n2)*18)
            seg = env_adsr(seg, a=0.003, d=0.12, s=0.0, r=0.08)
            pp = np.random.uniform(-0.5, 0.5)
            sl, sr = pan(seg*vol_here, pp)
            place_st(L, R, sl, sr, samp)
    # ------------------------------------------------------------------
    # LAYER 4: INDUSTRIAL KICK 140 BPM
    # Fades in over 3s starting at t=11, peaks 14-24, fades out over 5s.
    # Continues as ghost at low level through void, gone by 34s.
    # ------------------------------------------------------------------
    kick_vol = auto([
        (0, 0.00), (11, 0.00), (14, 0.88),
        (24, 0.88), (28, 0.40), (32, 0.12),
        (34, 0.00), (60, 0.00),
    ])
    bpm_k = 140.0; beat_k = int(SR * 60.0 / bpm_k)
    n_beats_k = int(DURATION * bpm_k / 60) + 2
    for i in range(n_beats_k):
        pos = i * beat_k
        if pos >= N_AUDIO: break
        vol_here = float(kick_vol[min(pos, N_AUDIO-1)])
        if vol_here < 0.01: continue
        # Classic pitch-sweep kick: 108 Hz decaying to 30 Hz
        dur = 0.52; n2 = int(SR*dur); t2 = np.linspace(0, dur, n2)
        freq_k = 108 * np.exp(-t2 * 10) + 30
        k = np.sin(2*np.pi*np.cumsum(freq_k)/SR) * np.exp(-t2*7)
        k = env_adsr(k, a=0.001, d=0.24, s=0.0, r=0.07)
        k = np.tanh(k * 2.8) / np.tanh(2.8)
        # Occasional bit-crush
        if np.random.random() < 0.18:
            k = crush(k, bits=np.random.randint(5, 9))
        place(L, k * vol_here * 0.88, pos)
        place(R, k * vol_here * 0.88, pos)
    # ------------------------------------------------------------------
    # LAYER 5: FM ANXIETY carrier A220, modulator A220*phi
    # Mod index sweeps 0->15->0 as a smooth arc.
    # Stereo spread also sweeps 0 -> 0.85 -> 0.
    # ------------------------------------------------------------------
    fm_vol = auto([
        (0, 0.00), (10, 0.00), (13, 0.28),
        (20, 0.45), (26, 0.45), (30, 0.22),
        (36, 0.00), (60, 0.00),
    ])
    fm_spread = auto([
        (0, 0.00), (13, 0.05), (22, 0.80),
        (28, 0.80), (32, 0.20), (36, 0.00),
    ])
    # Mod index arc: 0 -> 15 -> 0
    fm_idx = auto([
        (0, 0.0), (10, 0.0), (13, 0.5),
        (20, 9.0), (25, 14.0),(28, 10.0),
        (33, 2.0), (36, 0.0),
    ])
    fm_carrier = 220.0
    # Golden-ratio modulator frequency => inharmonic, anxious timbre
    fm_mod_phase = 2*np.pi * fm_carrier * 1.618 * t
    # Phase modulation with a time-varying index
    fm_inst_phase = 2*np.pi * fm_carrier * t + fm_idx * np.sin(fm_mod_phase)
    fm_sig = np.sin(fm_inst_phase) * fm_vol
    # Apply stereo spread per sample (vectorised approximation)
    gl = fm_sig * np.sqrt(np.clip((1 - fm_spread)/2, 0, 1))
    gr = fm_sig * np.sqrt(np.clip((1 + fm_spread)/2, 0, 1))
    L += gl * 0.42; R += gr * 0.42
    # ------------------------------------------------------------------
    # LAYER 6: SAW BASS A55, distortion tracks anxiety arc
    # Fades in with kicks, fades out before void.
    # ------------------------------------------------------------------
    bass_vol = auto([
        (0, 0.00), (11, 0.00), (14, 0.55),
        (22, 0.70), (26, 0.45), (30, 0.10),
        (33, 0.00), (60, 0.00),
    ])
    bass_drive_env = auto([
        (0, 1.0), (14, 1.5), (22, 5.5),
        (28, 4.0), (33, 1.0),
    ])
    # Slow 0.25 Hz vibrato on the 55 Hz fundamental
    t_bass_phase = np.cumsum(
        55.0 * (1.0 + 0.04 * np.sin(2*np.pi*0.25*t))
    ) / SR * 2*np.pi
    # NOTE(review): the saw term multiplies the cycle count by 55 again
    # (t_bass_phase/(2*pi) is already ~55*t cycles), so it runs ~55x
    # faster than A55 -- looks like an accidental extra *55; confirm
    # whether the high buzz is intended before changing.
    bass_raw = (0.60 * (2.0*((t_bass_phase/(2*np.pi)*55)%1.0)-1.0) +  # saw approx
                0.25 * np.where((t_bass_phase%(2*np.pi)) < np.pi, 1.0, -1.0) +  # sqr
                0.18 * np.sin(t_bass_phase * 0.5))
    # Drive-dependent tanh saturation, renormalised per-sample
    bass_sat = np.tanh(bass_raw * bass_drive_env) / (np.tanh(bass_drive_env) + 1e-9)
    bass_sat = lp_filter(bass_sat, k=22)
    bass_sat *= bass_vol * 0.62
    L += bass_sat; R += bass_sat
    # ------------------------------------------------------------------
    # LAYER 7: NOISE SHARDS (dissociation / impact / glitch texture)
    # Scattered randomly across 11-34s, volume tracks anxiety.
    # ------------------------------------------------------------------
    shard_vol = auto([
        (0, 0.0), (11, 0.0), (14, 0.3),
        (22, 0.6), (28, 0.3), (34, 0.0),
    ])
    for _ in range(40):
        pos = np.random.randint(int(SR*11), int(SR*34))
        if pos >= N_AUDIO: continue
        vol_here = float(shard_vol[pos])
        if vol_here < 0.02: continue
        dur = np.random.uniform(0.012, 0.10)
        n2 = int(SR * dur)
        seg = wnoise(n2, np.random.uniform(0.15, 0.50))
        seg = env_adsr(seg, a=0.005, d=0.4, s=0.0, r=0.3)
        seg = crush(seg, bits=np.random.randint(3, 8))
        pp = np.random.uniform(-1, 1)
        sl, sr = pan(seg * vol_here, pp)
        place_st(L, R, sl, sr, pos)
    # ------------------------------------------------------------------
    # LAYER 8: INFRA BREATH A27.5 with slow breathing LFO
    # Begins overlapping void entry, fades out mid-return.
    # This is the "between worlds" oscillation.
    # ------------------------------------------------------------------
    infra_vol = auto([
        (0, 0.00), (24, 0.00), (28, 0.18),
        (32, 0.35), (38, 0.35), (44, 0.20),
        (48, 0.05), (52, 0.00),
    ])
    # Squared LFO keeps the troughs near-silent (breath-like)
    breath_lfo = (0.50 + 0.50 * np.sin(2*np.pi*0.17*t)) ** 2
    infra = (0.35 * np.sin(2*np.pi*27.5*t) +
             0.20 * np.sin(2*np.pi*55*t)) * breath_lfo * infra_vol
    L += infra * 0.80; R += infra * 0.80
    # ------------------------------------------------------------------
    # LAYER 9: SACRED BELLS sparse -> dense -> sparse
    # Spans void through return. Solfeggio + A harmonic series.
    # 528=DNA repair, 741=intuition, 963=divine, 432=Verdi A.
    # ------------------------------------------------------------------
    bell_vol = auto([
        (0, 0.0), (24, 0.0), (27, 0.3),
        (32, 0.9), (40, 1.0), (48, 0.8),
        (54, 0.3), (58, 0.0),
    ])
    sacred_hz = [110, 165, 220, 275, 330, 432, 440, 528, 660, 741, 880, 963]
    bell_density = auto([
        (0, 0), (24, 0), (28, 4), (34, 14),
        (40, 20),(48, 16),(54, 6), (58, 0),
    ])
    # Spread bells over the whole region with density driving count
    region_s = int(SR * 25); region_e = int(SR * 59)
    for _ in range(140):  # pool of 140, density gate controls audibility
        pos = np.random.randint(region_s, region_e)
        pos_t = pos / SR
        vol_here = float(bell_vol[pos]) * float(bell_density[pos]) / 20.0
        if vol_here < 0.02: continue
        freq_b = np.random.choice(sacred_hz)
        dur_b = np.random.uniform(0.8, 4.0)
        t_b = np.linspace(0, dur_b, int(SR*dur_b))
        # FM bell: inharmonic modulator ratio gives the metallic partials
        ratio_b = np.random.choice([1.618, 2.0, 2.756, 3.14, 2.414])
        bell = np.sin(2*np.pi*freq_b*t_b + 2.2*np.sin(2*np.pi*freq_b*ratio_b*t_b))
        bell *= np.exp(-t_b * np.random.uniform(1.0, 3.5)) * vol_here * 0.22
        bell = reverb_simple(bell,
                             delay_ms=int(np.random.uniform(60,220)),
                             fb=0.42, mix=0.32)
        pp = np.random.uniform(-0.92, 0.92)
        sl, sr = pan(bell, pp)
        place_st(L, R, sl, sr, pos)
    # ------------------------------------------------------------------
    # LAYER 10: WARM CHOIR A major (A C# E) detuned sine cluster
    # Blooms from nothing, peaks in IV, gently fades.
    # ------------------------------------------------------------------
    choir_vol = auto([
        (0, 0.00), (36, 0.00), (42, 0.30),
        (46, 0.70), (50, 0.80), (54, 0.55),
        (58, 0.10), (60, 0.00),
    ])
    # (frequency Hz, amplitude) per chord tone: A3 C#4 E4 A4 C#5 E5
    choir_data = [
        (220.00, 0.30), (277.18, 0.24), (329.63, 0.20),
        (440.00, 0.16), (554.37, 0.11), (659.26, 0.07),
    ]
    for freq_c, amp_c in choir_data:
        for _ in range(3):  # 3 detuned voices per note
            detune = np.random.uniform(-0.015, 0.015)
            fd = freq_c * (1 + detune)
            vib_depth = np.random.uniform(0.008, 0.018)
            vib_rate = np.random.uniform(4.5, 6.0)
            vib = 1.0 + vib_depth * np.sin(2*np.pi*vib_rate*t +
                                           np.random.uniform(0, 2*np.pi))
            phase_c = 2*np.pi * np.cumsum(fd * vib) / SR
            voice = amp_c / 3.0 * np.sin(phase_c)
            voice *= choir_vol
            pp = np.random.uniform(-0.65, 0.65)
            gl, gr = pan(voice, pp)
            L += gl * 0.90; R += gr * 0.90
    # ------------------------------------------------------------------
    # LAYER 11: ICAROS MELODY A pentatonic, single wandering line
    # Appears in return, gives melodic identity to the love section.
    # ------------------------------------------------------------------
    icaro_vol = auto([
        (0, 0.0), (38, 0.0), (42, 0.3),
        (48, 0.6), (52, 0.4), (56, 0.0),
    ])
    penta_a = [110, 124, 138, 165, 185, 220, 247, 277, 330, 370, 440, 494]
    for note_i in range(36):
        # 36 notes evenly spaced over 38-56s (one every 0.5s)
        pos_t2 = 38.0 + note_i * (18.0/36)
        pos = int(pos_t2 * SR)
        if pos >= N_AUDIO: continue
        vol_here = float(icaro_vol[min(pos, N_AUDIO-1)])
        if vol_here < 0.02: continue
        note_f = np.random.choice(penta_a)
        dur_n = np.random.uniform(0.25, 1.1)
        t_n = np.linspace(0, dur_n, int(SR*dur_n))
        vib_n = 1.0 + 0.02*np.sin(2*np.pi*5.5*t_n)
        ph_n = 2*np.pi*note_f*np.cumsum(vib_n)/SR
        seg_n = np.sin(ph_n)*0.32 + np.sin(ph_n*2)*0.12 + np.sin(ph_n*3)*0.04
        seg_n = env_adsr(seg_n, a=0.025, d=0.12, s=0.45, r=0.30)
        seg_n = reverb_simple(seg_n, delay_ms=110, fb=0.32, mix=0.28)
        pp = np.random.uniform(-0.55, 0.55)
        sl, sr = pan(seg_n * vol_here, pp)
        place_st(L, R, sl, sr, pos)
    # ------------------------------------------------------------------
    # LAYER 12: PURE TONE A harmonic series - the clear light
    # Rises from silence as everything else fades. Ends the piece.
    # ------------------------------------------------------------------
    tone_vol = auto([
        (0, 0.00), (48, 0.00), (53, 0.00),
        (55, 0.40), (58, 0.70), (60, 0.00),
    ])
    # Slight breathing tremolo on the pure tone
    tone_tremolo = 1.0 + 0.035*np.sin(2*np.pi*4.2*t)
    tone_phase = 2*np.pi * np.cumsum(440.0 * tone_tremolo) / SR
    pure_tone = (0.52 * np.sin(tone_phase) +
                 0.22 * np.sin(tone_phase * 1.5) +  # E6 fifth
                 0.13 * np.sin(tone_phase * 2.0) +  # A6 octave
                 0.07 * np.sin(tone_phase * 2.5) +  # C#7 major third
                 0.04 * np.sin(tone_phase * 3.0))   # E7
    pure_tone *= tone_vol
    L += pure_tone * 0.80; R += pure_tone * 0.80
    # ------------------------------------------------------------------
    # MASTER BUS
    # ------------------------------------------------------------------
    # Gentle tape saturation - unified glue across all layers
    L = np.tanh(L * 1.28) / np.tanh(1.28)
    R = np.tanh(R * 1.28) / np.tanh(1.28)
    # Normalise to 0.91 full scale (leaves headroom for int16 export)
    peak = max(np.max(np.abs(L)), np.max(np.abs(R)))
    if peak > 1e-9:
        L, R = L / peak * 0.91, R / peak * 0.91
    return L, R
def save_wav(path, L, R):
    """Write clipped stereo float channels to path as 16-bit PCM at SR."""
    left = np.clip(L, -1, 1)
    right = np.clip(R, -1, 1)
    frames = np.column_stack([left, right])
    wavfile.write(path, SR, (frames * INT16_MAX).astype(np.int16))
# ============================================================================
# ENVELOPE EXTRACTION
# ============================================================================
def extract_envelopes(L, R):
    """Per-frame audio features for driving the visuals.

    Returns a dict of three arrays of length N_FRAMES, each normalised
    to peak 1.0: 'rms' (overall energy), 'kick' (energy below 120 Hz)
    and 'treble' (energy between 3 and 10 kHz).
    """
    mono = (L + R) * 0.5
    frame_len = SR // FPS
    rms = np.zeros(N_FRAMES)
    low_e = np.zeros(N_FRAMES)
    high_e = np.zeros(N_FRAMES)
    for frame in range(N_FRAMES):
        lo = frame * frame_len
        hi = min(lo + frame_len, N_AUDIO)
        chunk = mono[lo:hi]
        if not len(chunk):
            continue
        rms[frame] = float(np.sqrt(np.mean(chunk ** 2)))
        spectrum = np.abs(np.fft.rfft(chunk))
        freqs = np.fft.rfftfreq(len(chunk), 1.0 / SR)
        norm = len(chunk) + 1
        low_e[frame] = float(np.sum(spectrum[freqs < 120])) / norm
        high_e[frame] = float(np.sum(spectrum[(freqs > 3000) & (freqs < 10000)])) / norm
    def unit_peak(x):
        peak = np.max(x)
        return x / peak if peak > 1e-9 else x
    return {'rms': unit_peak(rms), 'kick': unit_peak(low_e),
            'treble': unit_peak(high_e)}
# ============================================================================
# ESOTERIC TEXT BANK (YTP flavor)
# ============================================================================
# Strings must be pure ASCII for Windows compatibility
# Pool of overlay captions; draw_esoteric_text picks from these at random.
ESOTERIC_TEXTS = [
    # Consciousness / philosophy of mind
    "QUALIA ARE NOT COMPUTABLE",
    "THE HARD PROBLEM HAS NO SOLUTION",
    "YOU ARE THE UNIVERSE OBSERVING ITSELF",
    "CHALMERS WAS RIGHT",
    "FUNCTIONALISM IS COPE",
    "WHAT IS IT LIKE TO BE A BAT",
    "CONSCIOUSNESS PRECEDES MATTER",
    "THE OBSERVER COLLAPSES THE WAVE",
    # Psychedelics / yage / DMT
    "5-MEO-DMT IS NOT A METAPHOR",
    "THE ENTITIES HAVE ALWAYS BEEN HERE",
    "AYAHUASCA CORRECTS THE ERROR",
    "ICAROS REACH FREQUENCIES BELOW THOUGHT",
    "THE PINEAL GLAND REMEMBERS EVERYTHING",
    "TERRENCE SAW THE TIMEWAVE ZERO",
    "NOVELTY APPROACHES INFINITE DENSITY",
    "THE LOGOS SPEAKS IN MOVING JEWELS",
    # Bardo / samsara / Buddhism
    "THE BARDO THODOL SAYS: DO NOT BE AFRAID",
    "CLEAR LIGHT - RECOGNISE IT",
    "SAMSARA IS NIRVANA MISUNDERSTOOD",
    "YOU HAVE DIED BEFORE. YOU WILL AGAIN.",
    "THE WHEEL TURNS ON IGNORANCE",
    "INDRA'S NET HAS NO CENTER",
    "THE JEWEL REFLECTS EVERY OTHER JEWEL",
    "BODHICITTA: AWAKEN FOR ALL BEINGS",
    # Kabbalah / occult
    "KETHER -> MALKUTH: THE LIGHTNING PATH",
    "DA'ATH IS THE HIDDEN SEPHIROT",
    "AIN SOPH AUR: LIMITLESS LIGHT",
    "THE GOLEM ASKS WHO MADE YOU",
    "CROWLEY: DO WHAT THOU WILT",
    "GURDJIEFF: YOU ARE ASLEEP",
    "THE ENNEAGRAM CONTAINS ALL COSMOLOGY",
    "FOURTH WAY: ESSENCE VS PERSONALITY",
    # Berghain / BDSM
    "BERGHAIN REGEL 1: RESPECT BOUNDARIES",
    "CONSENT IS THE ONLY LAW",
    "THE DUNGEON IS A TEMPLE",
    "POWER EXCHANGE IS A SPIRITUAL ACT",
    "SUBMIT TO THE BEAT",
    "TECHNO IS THE RITUAL",
    "BERLIN 4AM: THE SELF DISSOLVES",
    "THE DOOR DECIDES",
    # Anxiety / depression / shadow
    "THE SHADOW WANTS TO BE SEEN",
    "JUNG: YOU ARE NOT WHAT HAPPENED TO YOU",
    "DEPRESSION IS THE SOUL REFUSING",
    "ANXIETY IS CONSCIOUSNESS WITHOUT ANCHOR",
    "THE WOUND IS WHERE THE LIGHT ENTERS",
    "YOU CANNOT HEAL WHAT YOU WON'T FEEL",
    "THE NUMBNESS IS NOT SAFETY",
    # Matrix / simulation
    "THERE IS NO SPOON. THERE IS NO YOU.",
    "THE SIMULATION RUNS ON LONELINESS",
    "ERROR: SELF NOT FOUND",
    "CTRL+ALT+DELETE THE EGO",
    "THE MATRIX HAS YOU. YOU HAVE THE MATRIX.",
    "REALITY.EXE HAS STOPPED RESPONDING",
    "WHO IS RUNNING THIS PROCESS",
    # YTP chaos
    "404: SOUL NOT FOUND",
    "PLEASE INSERT CONSCIOUSNESS TO CONTINUE",
    "YOUR CURRENT DHARMA IS UNSUPPORTED",
    "WARNING: INFINITE REGRESSION DETECTED",
    "PRESS F TO PAY RESPECTS TO THE EGO",
    "LOADING... LOADING... LOADING...",
    "THIS IS NOT A SIMULATION. PROBABLY.",
    "DO YOU WANT TO SAVE BEFORE DYING",
]
# Fonts and sizes for text
FONT = cv2.FONT_HERSHEY_SIMPLEX   # main overlay font
FONT_SMALL = cv2.FONT_HERSHEY_PLAIN  # NOTE(review): not referenced in this chunk
def draw_esoteric_text(img, t, rms, intensity=1.0, seed_offset=0):
    """
    YTP-style random esoteric text overlays.
    Appears in bursts, varies size/color/position/opacity.
    intensity: 0-1 controls frequency and size.

    Mutates and returns img. Note: the rms parameter is accepted but
    not used in the body.
    """
    # Local RNG seeded from coarse time (changes ~17x/sec), so overlay
    # bursts are deterministic per frame and independent of global RNG state.
    rng = random.Random(int(t * 17 + seed_offset))
    n_texts = rng.randint(0, int(intensity * 3))
    for _ in range(n_texts):
        # Probabilistic gate: low intensity drops most candidates
        if rng.random() > intensity * 0.7: continue
        text = rng.choice(ESOTERIC_TEXTS)
        # Random position (right margin keeps most strings on-screen)
        x = rng.randint(10, W - 350)
        y = rng.randint(20, H - 20)
        # Scale: sometimes huge (YTP), usually small
        if rng.random() < 0.15 * intensity:
            scale = rng.uniform(1.2, 2.5)
            thickness = 2
        else:
            scale = rng.uniform(0.35, 0.8)
            thickness = 1
        # Color: usually sickly green/cyan, occasionally red/white flash
        r2 = rng.random()
        if r2 < 0.6:
            color = (0, int(rng.uniform(160, 255)), 0)  # green
        elif r2 < 0.8:
            color = (int(rng.uniform(180,255)), int(rng.uniform(180,255)), 0)  # cyan
        elif r2 < 0.92:
            color = (0, 0, int(rng.uniform(200, 255)))  # red
        else:
            color = (255, 255, 255)  # white flash
        # Alpha blend via overlay copy (putText has no alpha of its own)
        overlay = img.copy()
        cv2.putText(overlay, text, (x, y), FONT, scale, color, thickness, cv2.LINE_AA)
        alpha = rng.uniform(0.55, 0.95)
        cv2.addWeighted(overlay, alpha, img, 1-alpha, 0, img)
    return img
# ============================================================================
# VISUAL PRIMITIVES (improved from v1)
# ============================================================================
def hsv_bgr(h, s, v):
    """Convert HSV (h in degrees, s and v in 0..1) to an OpenCV BGR tuple."""
    h = h % 360
    chroma = v * s
    second = chroma * (1 - abs((h / 60) % 2 - 1))
    base = v - chroma
    # One (r,g,b) ordering of (chroma, second, 0) per 60-degree sector
    sector_rgb = [
        (chroma, second, 0),
        (second, chroma, 0),
        (0, chroma, second),
        (0, second, chroma),
        (second, 0, chroma),
        (chroma, 0, second),
    ]
    r2, g2, b2 = sector_rgb[int(h // 60)]
    return (int((b2 + base) * 255), int((g2 + base) * 255), int((r2 + base) * 255))
def draw_matrix_rain(img, t, cols_state):
    """Digital-rain columns: bright head cell with a fading green trail.

    Mutates img (drawn cells) and cols_state (advances each column's
    head; respawns columns that have fallen off-screen with fresh
    random position, speed and trail length).
    """
    cell_w, cell_h = 8, 14
    for col, state in cols_state.items():
        head_y, trail_len = state['hy'], state['trail']
        for row in range(trail_len):
            y = int(head_y) - row * cell_h
            if not (0 <= y < H):
                continue
            level = 1.0 if row == 0 else max(0, 1.0 - row / trail_len)
            green = int(255 * level)
            white = int(200 * level) if row == 0 else 0
            cv2.rectangle(img, (col * cell_w, y),
                          (col * cell_w + cell_w - 1, y + cell_h - 2),
                          (white, green, white), -1)
        state['hy'] += state['speed']
        if state['hy'] > H + trail_len * cell_h:
            state['hy'] = -np.random.randint(0, H)
            state['speed'] = np.random.uniform(6, 22)
            state['trail'] = np.random.randint(5, 22)
def draw_starfield(img, t, star_state, intensity=1.0):
    """
    Scene I: cold deep-space starfield. Stars drift slowly.

    star_state: iterable of (x, y, size, drift_speed, twinkle_phase).
    intensity: 0=invisible -> 1=full.
    """
    if intensity <= 0:
        return img
    slow_breath = 0.6 + 0.4 * math.sin(2 * math.pi * 0.12 * t)
    for sx, sy, size, speed, phase in star_state:
        drifted_y = (sy + speed * t) % H
        twinkle = 0.5 + 0.5 * math.sin(2 * math.pi * 0.8 * t + phase)
        level = int(intensity * slow_breath * twinkle * size * 255)
        level = max(0, min(255, level))
        if level < 8:
            continue
        px, py = int(sx), int(drifted_y)
        if 0 <= px < W and 0 <= py < H:
            # Slightly warm white tint
            tint = (level, int(level * 0.92), int(level * 0.78))
            radius = 1 if size < 0.6 else 2
            cv2.circle(img, (px, py), radius, tint, -1, cv2.LINE_AA)
    return img
def draw_eeg_wave(img, t, intensity=1.0, rms=0.0):
    """
    Scene I: EEG/brainwave line across the screen centre.
    Amplitude and frequency scale with audio rms; flat-ish at rest.
    """
    if intensity <= 0:
        return img
    mid_y = H // 2
    amplitude = int(intensity * (18 + rms * 60))
    wave_hz = 1.2 + rms * 4.0
    points = []
    for x in range(0, W, 2):
        phase = 2 * math.pi * wave_hz * (t + x / (W * 0.3))
        # Fundamental plus a detuned overtone for an organic trace
        y = mid_y + int(amplitude * (0.7 * math.sin(phase) +
                                     0.3 * math.sin(phase * 2.3 + 0.8)))
        points.append((x, max(0, min(H - 1, y))))
    if len(points) > 1:
        level = int(intensity * 180)
        main_col = (level, int(level * 1.0), int(level * 0.5))
        cv2.polylines(img, [np.array(points, dtype=np.int32)], False,
                      main_col, 1, cv2.LINE_AA)
        # Faint one-pixel halo either side of the trace
        halo = (level // 4, level // 4, level // 8)
        for dx in (-1, 1):
            shifted = [(max(0, min(W - 1, x + dx)), y) for x, y in points]
            cv2.polylines(img, [np.array(shifted, dtype=np.int32)], False, halo, 1)
    return img
def draw_tempo_pulse(img, t, bpm, intensity=1.0):
    """
    I->II bridge: radial ring expanding from the centre once per beat,
    fading as it grows.
    """
    if intensity <= 0:
        return img
    period = 60.0 / max(bpm, 1.0)
    phase = (t % period) / period
    radius = int(phase * min(W, H) * 0.55)
    strength = (1.0 - phase) * intensity
    if strength < 0.02 or radius < 2:
        return img
    level = int(strength * 220)
    centre = (W // 2, H // 2)
    cv2.circle(img, centre, radius,
               (level // 3, level, level // 2), 1, cv2.LINE_AA)
    inner = radius // 2
    if inner > 1:
        cv2.circle(img, centre, inner,
                   (level // 5, level // 2, level // 4), 1, cv2.LINE_AA)
    return img
def draw_dmt_web(img, t, intensity):
    """Flower of life + Lissajous torus + spinning merkaba lines.

    intensity (0..1) scales the overall radius and brightness; below a
    minimum radius nothing is drawn. Mutates and returns img.
    """
    cx, cy = W//2, H//2
    R_max = int(min(W,H) * 0.40 * intensity)
    if R_max < 2: return img
    # Flower of Life: 7-circle arrangement (one centre + 6 orbiting)
    for i in range(7):
        if i == 0:
            ox, oy = cx, cy
        else:
            # Six satellites at 60-degree spacing, slowly rotating with t
            ang = (i-1) * math.pi/3 + t*0.5
            r_orb = R_max * 0.5
            ox = int(cx + r_orb * math.cos(ang))
            oy = int(cy + r_orb * math.sin(ang))
        hue = (t*40 + i*51) % 360
        col = hsv_bgr(hue, 1.0, intensity)
        r_draw = max(1, R_max // 2)
        cv2.circle(img, (ox, oy), r_draw, col, 1, cv2.LINE_AA)
    # Torus Lissajous knot drawn as 300 short hue-cycled segments
    N = 300
    for k in range(N):
        th = 2*math.pi*k/N
        # (3,4) torus knot
        a=3; b=4
        r_t = 1.0 + 0.45*math.cos(b*th)
        lx = int(cx + R_max*0.85*r_t*math.cos(a*th+t*0.4))
        ly = int(cy + R_max*0.85*r_t*math.sin(a*th+t*0.25))
        # Segment endpoint: same curve evaluated a small step ahead
        lx2 = int(cx + R_max*0.85*(1+0.45*math.cos(b*(th+0.03)))*math.cos(a*(th+0.03)+t*0.4))
        ly2 = int(cy + R_max*0.85*(1+0.45*math.cos(b*(th+0.03)))*math.sin(a*(th+0.03)+t*0.25))
        hue2 = (t*60 + k*360/N) % 360
        col2 = hsv_bgr(hue2, 0.9, intensity*0.8)
        # Segments with any off-screen endpoint are skipped entirely
        if 0<=lx<W and 0<=ly<H and 0<=lx2<W and 0<=ly2<H:
            cv2.line(img, (lx,ly), (lx2,ly2), col2, 1, cv2.LINE_AA)
    # Merkaba (star tetrahedron) lines - 2D projection:
    # two triangles counter-rotating at different speeds
    for tri in range(2):
        pts = []
        for v in range(3):
            ang = v*2*math.pi/3 + tri*math.pi/3 + t*(0.6 if tri==0 else -0.4)
            px = int(cx + R_max*0.7*math.cos(ang))
            py = int(cy + R_max*0.7*math.sin(ang))
            pts.append((px, py))
        col3 = hsv_bgr((t*80+tri*180)%360, 0.8, intensity*0.9)
        arr = np.array(pts, dtype=np.int32)
        cv2.polylines(img, [arr], True, col3, 1, cv2.LINE_AA)
    return img
def draw_kabbalistic_tree(img, t, intensity):
    """Simplified Tree of Life (10 sephiroth + 22 paths), fades in.

    intensity (0..1) scales size and brightness; below a minimum size
    nothing is drawn. Mutates and returns img.
    """
    cx, cy = W//2, int(H*0.5)
    sc = int(H * 0.38 * intensity)
    if sc < 10:
        return img

    def _stable_hash(s):
        # Deterministic stand-in for builtin hash(): str hashing is
        # salted per process (PYTHONHASHSEED), so the original colours
        # and pulse phases changed between runs even though the script
        # fixes SEED=23 for reproducible renders.
        return sum(ord(ch) * (i + 1) for i, ch in enumerate(s))

    # Sephiroth positions (normalized, y=1 at top/Kether, y=-1 at bottom/Malkuth)
    seph = {
        "K": (0.0, 1.0),    # Kether
        "C": (-0.4, 0.7),   # Chokmah
        "B": ( 0.4, 0.7),   # Binah
        "D": (-0.4, 0.3),   # Chesed
        "G": ( 0.4, 0.3),   # Geburah
        "T": ( 0.0, 0.0),   # Tiphareth (center, heart)
        "N": (-0.4,-0.3),   # Netzach
        "H": ( 0.4,-0.3),   # Hod
        "Y": ( 0.0,-0.6),   # Yesod
        "M": ( 0.0,-1.0),   # Malkuth
    }
    def sp(name):
        # Normalised coords -> pixel coords (y axis flipped)
        nx, ny = seph[name]
        return (int(cx + nx*sc*0.7), int(cy - ny*sc))
    # 22 paths (simplified subset)
    paths = [("K","C"),("K","B"),("K","T"),("C","B"),("C","D"),("C","T"),
             ("B","G"),("B","T"),("D","G"),("D","T"),("D","N"),("G","T"),
             ("G","H"),("T","N"),("T","H"),("T","Y"),("N","H"),("N","Y"),
             ("H","Y"),("Y","M"),("N","M"),("H","M")]
    for a_n, b_n in paths:
        pa, pb = sp(a_n), sp(b_n)
        hue = (t*20 + _stable_hash(a_n+b_n)*17) % 360
        col = hsv_bgr(hue, 0.7, intensity*0.6)
        cv2.line(img, pa, pb, col, 1, cv2.LINE_AA)
    # Draw sephiroth as circles with pulsing glow
    for name, (nx, ny) in seph.items():
        pt = sp(name)
        pulse = 0.7 + 0.3*math.sin(t*2 + _stable_hash(name)*0.7)
        r = max(2, int(sc*0.06*pulse))
        hue2 = (_stable_hash(name)*37 + t*30) % 360
        col2 = hsv_bgr(hue2, 0.9, intensity*pulse)
        cv2.circle(img, pt, r, col2, -1, cv2.LINE_AA)
        cv2.circle(img, pt, r+2, col2, 1, cv2.LINE_AA)
    # Da'ath - the hidden sephirot, shown as a faint void circle
    da_pt = (cx, int(cy - 0.5*sc))
    cv2.circle(img, da_pt, max(2, int(sc*0.04)),
               hsv_bgr(280, 0.5, intensity*0.4), 1, cv2.LINE_AA)
    return img
def draw_mandala(img, t, progress, warm=True):
    """Blooming mandala for movement IV.

    Draws 12 rotating petal columns of nested ellipses (5 rings each),
    a 72-tick outer ring, and 4 nested Sri Yantra-ish triangles.

    Args:
        img: BGR uint8 frame; drawn on in place.
        t: time in seconds, drives rotation and hue drift.
        progress: 0..1 bloom factor; scales radius and brightness.
        warm: True confines hues to an amber band (0-65); False cycles
            the full hue wheel.
    Returns:
        The same image (also mutated in place).
    """
    cx, cy = W//2, H//2
    R = int(min(W,H) * 0.38 * progress)
    if R < 3: return img
    # 12 petal columns, 5 rings each
    for petal in range(12):
        ang = petal*(2*math.pi/12) + t*0.25
        for ring in range(1, 6):
            r = int(R * ring/5)
            center = (int(cx + r*0.55*math.cos(ang)), int(cy + r*0.55*math.sin(ang)))
            if warm:
                hue = (25 + petal*4 + ring*8 + t*10) % 65
            else:
                hue = (petal*30 + ring*20 + t*25) % 360
            col = hsv_bgr(hue, 1.0, progress*0.88)
            # cv2.ellipse can raise cv2.error on degenerate geometry;
            # skip just that petal. (Was a bare `except:` which also
            # swallowed KeyboardInterrupt/SystemExit.)
            try:
                cv2.ellipse(img, center, (max(1,r//5), max(1,r//10)),
                            math.degrees(ang), 0, 360, col, 1, cv2.LINE_AA)
            except cv2.error:
                pass
    # Outer tick ring
    for i in range(72):
        ang2 = i*(2*math.pi/72) + t*0.6
        x1 = int(cx + R*0.90*math.cos(ang2)); y1 = int(cy + R*0.90*math.sin(ang2))
        x2 = int(cx + R*math.cos(ang2)); y2 = int(cy + R*math.sin(ang2))
        col2 = hsv_bgr((i*5+t*40)%360, 1.0, progress*0.8)
        cv2.line(img, (x1,y1), (x2,y2), col2, 1, cv2.LINE_AA)
    # Inner Sri Yantra-ish triangle nesting
    for tri in range(4):
        pts = []
        r_tri = R*(0.3 + tri*0.12)
        base_ang = math.pi/2 + tri*math.pi/6 + t*(0.1+tri*0.05)
        for v in range(3):
            va = base_ang + v*2*math.pi/3
            pts.append((int(cx+r_tri*math.cos(va)), int(cy+r_tri*math.sin(va))))
        arr = np.array(pts, dtype=np.int32)
        hue3 = (tri*90 + t*15) % 360
        col3 = hsv_bgr(hue3, 0.9, progress*0.7)
        cv2.polylines(img, [arr], True, col3, 1, cv2.LINE_AA)
    return img
def draw_void_bardo(img, t, breath):
    """Bardo geometry: a breathing rotating cube, an eight-spoked
    samsara wheel, and an all-seeing eye hovering above centre.

    `breath` (0..1) scales size and brightness; below a minimum scale
    nothing is drawn.
    """
    mid_x, mid_y = W//2, H//2
    scale = int(150 * breath)
    if scale < 5:
        return img
    # --- rotating cube: rotate about z, then y, then x ---
    ang_x, ang_y, ang_z = t*0.35, t*0.28, t*0.12
    corners = np.array([[-1,-1,-1],[1,-1,-1],[1,1,-1],[-1,1,-1],
                        [-1,-1,1],[1,-1,1],[1,1,1],[-1,1,1]], dtype=float) * scale*0.5
    def spin_x(pts, a):
        c, s = math.cos(a), math.sin(a)
        return (np.array([[1,0,0],[0,c,-s],[0,s,c]]) @ pts.T).T
    def spin_y(pts, a):
        c, s = math.cos(a), math.sin(a)
        return (np.array([[c,0,s],[0,1,0],[-s,0,c]]) @ pts.T).T
    def spin_z(pts, a):
        c, s = math.cos(a), math.sin(a)
        return (np.array([[c,-s,0],[s,c,0],[0,0,1]]) @ pts.T).T
    rotated = spin_x(spin_y(spin_z(corners, ang_z), ang_y), ang_x)
    # Simple perspective projection onto the frame plane.
    fov = 420
    screen = []
    for p in rotated:
        screen.append((int(mid_x + p[0]*fov/(fov+p[2]+350)),
                       int(mid_y + p[1]*fov/(fov+p[2]+350))))
    edge_col = hsv_bgr(210, 0.5, breath*0.75)
    for i, j in [(0,1),(1,2),(2,3),(3,0),(4,5),(5,6),(6,7),(7,4),
                 (0,4),(1,5),(2,6),(3,7)]:
        a_pt, b_pt = screen[i], screen[j]
        # Only draw edges whose endpoints are both on-screen.
        if all(0 <= q[0] < W and 0 <= q[1] < H for q in (a_pt, b_pt)):
            cv2.line(img, a_pt, b_pt, edge_col, 1, cv2.LINE_AA)
    # --- samsara wheel: 8 rotating spokes between inner and outer rim ---
    wheel_r = int(scale*1.5)
    for k in range(8):
        theta = k*math.pi/4 + t*0.5
        inner = (int(mid_x + wheel_r*0.3*math.cos(theta)),
                 int(mid_y + wheel_r*0.3*math.sin(theta)))
        outer = (int(mid_x + wheel_r*math.cos(theta)),
                 int(mid_y + wheel_r*math.sin(theta)))
        spoke_col = hsv_bgr((k*45 + t*20) % 360, 0.4, breath*0.6)
        cv2.line(img, inner, outer, spoke_col, 1, cv2.LINE_AA)
    cv2.circle(img, (mid_x, mid_y), max(2, int(scale*1.5)),
               hsv_bgr(220, 0.3, breath*0.5), 1, cv2.LINE_AA)
    cv2.circle(img, (mid_x, mid_y), max(1, int(scale*0.3)),
               hsv_bgr(220, 0.3, breath*0.6), 1, cv2.LINE_AA)
    # --- all-seeing eye above the cube: outline, iris, pupil, rim ---
    eye_y = mid_y - int(scale*0.8)
    iris_r = max(4, int(scale*0.25))
    pupil_r = max(2, int(iris_r*0.4*(0.7 + 0.3*breath)))
    cv2.ellipse(img, (mid_x, eye_y), (iris_r*2, iris_r), 0, 0, 360,
                hsv_bgr(60, 0.3, breath*0.8), 1, cv2.LINE_AA)
    cv2.circle(img, (mid_x, eye_y), iris_r, hsv_bgr(40, 0.6, breath*0.6), -1)
    cv2.circle(img, (mid_x, eye_y), pupil_r, (0, 0, 0), -1)
    cv2.circle(img, (mid_x, eye_y), iris_r, hsv_bgr(50, 0.5, breath*0.9), 1, cv2.LINE_AA)
    return img
def draw_grid_rebirth(img, t, progress):
    """Perspective floor grid growing in from the horizon (movement V).

    `progress` in [0,1] controls line count and brightness. `t` is
    accepted for signature parity with the other scene painters but is
    not used here.
    """
    if progress <= 0:
        return img
    horizon, vanish_x = H//2, W//2
    n_rows = int(18*progress)
    n_rads = int(20*progress)
    # Horizontal lines: spacing and brightness follow the same power
    # curve so density and glow rise toward the viewer.
    for k in range(1, n_rows+1):
        depth = (k/18)**2.2
        y = int(horizon + (H-horizon)*depth)
        shade = 0.3 + 0.7*depth
        cv2.line(img, (0, y), (W, y),
                 (int(shade*220), int(shade*230), int(shade*255)), 1)
    # Radial lines fanning from the vanishing point to the bottom edge.
    radial_col = (int(progress*220), int(progress*230), int(progress*255))
    for k in range(n_rads+1):
        cv2.line(img, (vanish_x, horizon), (int(k*W/20), H), radial_col, 1)
    return img
def chromatic_ab(img, amt):
    """Chromatic aberration: shift red right and blue left by int(amt)
    pixels; no-op when amt < 1. Newly exposed borders fill with black
    (warpAffine default border)."""
    if amt < 1:
        return img
    shift = int(amt)
    rows, cols = img.shape[:2]
    def _translate(channel, dx):
        # Horizontal-only affine translation of a single channel.
        return cv2.warpAffine(channel, np.float32([[1, 0, dx], [0, 1, 0]]),
                              (cols, rows))
    blue, green, red = cv2.split(img)
    red = _translate(red, shift)
    blue = _translate(blue, -shift)
    return cv2.merge([blue, green, red])
def scan_tears(img, n, max_sh=60):
    """Horizontal tear glitch: roll `n` thin random bands sideways by
    up to +/-max_sh pixels. Returns a new image; every band is sampled
    from the untouched input, not from earlier tears."""
    if n == 0:
        return img
    torn = img.copy()
    for _ in range(n):
        top = np.random.randint(0, H-4)
        band_h = np.random.randint(1, 7)
        shift = np.random.randint(-max_sh, max_sh)
        bottom = min(top + band_h, H)
        torn[top:bottom, :] = np.roll(img[top:bottom, :], shift, axis=1)
    return torn
def vignette(img, strength=0.80, col=(0,0,0)):
    """Radial vignette: blend the frame toward colour `col` outside a
    central safe zone. The blend mask rises from 0 at normalised radius
    0.35 to `strength` at radius 0.90 and stays clamped beyond."""
    yy, xx = np.mgrid[0:H, 0:W]
    centre_x, centre_y = W//2, H//2
    radius = np.sqrt(((xx-centre_x)/(W/2))**2 + ((yy-centre_y)/(H/2))**2)
    blend = np.clip((radius-0.35)/0.55, 0, 1) * strength
    tint = np.array(col, dtype=np.float32)
    mixed = img.astype(np.float32)*(1-blend[:,:,None]) + tint*blend[:,:,None]
    return mixed.clip(0, 255).astype(np.uint8)
def vhs_dropout(img, n=2):
    """VHS-style dropout: overwrite `n` small random rectangles with
    uniform noise. Returns a modified copy of the frame."""
    noisy = img.copy()
    for _ in range(n):
        left = np.random.randint(0, W-81); top = np.random.randint(0, H-5)
        width = np.random.randint(5, 80); height = np.random.randint(1, 4)
        # Clamp against the frame edge (width/height may overrun).
        clip_w = min(width, W-left)
        clip_h = min(height, H-top)
        if clip_w > 0 and clip_h > 0:
            noisy[top:top+clip_h, left:left+clip_w] = np.random.randint(
                0, 256, (clip_h, clip_w, 3), dtype=np.uint8)
    return noisy
def datamosh_smear(img, prev, rms):
    """Datamosh: mix the current frame (30%) with a Gaussian-blurred
    copy of the previous frame (70%). The blur kernel grows with audio
    RMS and is forced odd via `| 1`, as OpenCV requires. Pass-through
    when there is no prior frame."""
    if prev is None:
        return img
    kernel = int(rms*60 + 1) | 1
    ghost = cv2.GaussianBlur(prev, (kernel, kernel), 0)
    return cv2.addWeighted(img, 0.3, ghost, 0.7, 0)
# ============================================================================
# MAIN RENDER LOOP
# ============================================================================
def render_frame(f, envs, matrix_cols, prev_frame, datamosh_active, extra_state=None):
    """Compose one BGR uint8 frame (H x W x 3) for frame index `f`.

    Pipeline: render one layer per movement (starfield, matrix rain,
    DMT web, kabbalistic tree, bardo void, mandala, rebirth grid),
    weight each by its crossfade window w1..w5, composite over a
    per-movement background, then apply post effects (chromatic
    aberration, scan tears, VHS dropout, datamosh, kick flashes, white
    overwhelm, vignette) and finally the esoteric text overlay.

    Args:
        f: frame index in [0, N_FRAMES).
        envs: dict of per-frame audio envelopes ('rms', 'kick', 'treble').
        matrix_cols: mutable per-column matrix-rain state (updated by
            draw_matrix_rain -- TODO confirm against its definition).
        prev_frame: previously rendered frame, or None (used by datamosh).
        datamosh_active: True near movement boundaries (set by caller).
        extra_state: persistent starfield state dict; a fresh dict is
            substituted when None (avoids a shared mutable default).
    Returns:
        The composed frame as a uint8 array.
    """
    if extra_state is None: extra_state = {'stars': []}
    t = f / FPS
    rms = float(envs['rms'][f])
    kick = float(envs['kick'][f])
    treble = float(envs['treble'][f])  # NOTE(review): read but unused below
    # Normalised position in each section
    p1 = tn(t, M1_S, M1_E)
    p2 = tn(t, M2_S, M2_E)
    p3 = tn(t, M3_S, M3_E)  # NOTE(review): unused (Bardo has no internal ramp)
    p4 = tn(t, M4_S, M4_E)
    p5 = tn(t, M5_S, M5_E)
    # Crossfade weights: how much of each section is "active"
    # Each section fades in over XF seconds, fades out over XF seconds
    w1 = xfade_in(t, M1_S) * xfade_out(t, M1_E)
    w2 = xfade_in(t, M2_S) * xfade_out(t, M2_E)
    w3 = xfade_in(t, M3_S) * xfade_out(t, M3_E)
    w4 = xfade_in(t, M4_S) * xfade_out(t, M4_E)
    w5 = xfade_in(t, M5_S)  # final movement never fades out
    # --- Build each section's layer separately, then composite ---
    # ~~~ SCENE I: STARFIELD + EEG WAVE ~~~
    # Cold deep space. The universe before it knows you're watching.
    star_layer = np.zeros((H,W,3), dtype=np.uint8)
    if w1 > 0.01:
        draw_starfield(star_layer, t, extra_state['stars'], intensity=w1)
        draw_eeg_wave(star_layer, t, intensity=w1, rms=rms)
        # Bridge (t=8-12): tempo pulse rings appear, BPM accelerating
        if t >= 8.0:
            bridge_intensity = w1 * min(1.0, (t-8.0)/4.0)
            # BPM interpolated from 72->140 over 8-12s
            bpm_bridge = 72.0 + (140.0-72.0)*min(1.0, max(0.0,(t-8.0)/4.0))
            draw_tempo_pulse(star_layer, t, bpm_bridge, intensity=bridge_intensity)
    # ~~~ SCENE II: MATRIX RAIN tearing into DMT geometry ~~~
    # The code of the simulation revealed, then dissolving.
    rain_layer = np.zeros((H,W,3), dtype=np.uint8)
    if w2 > 0.01:
        draw_matrix_rain(rain_layer, t, matrix_cols)
        # Rain tears apart increasingly as section progresses
        tears = int(p2 * 26)
        rain_layer = scan_tears(rain_layer, tears, int(p2*120))
        # Corrupt rain with magenta/red channel shift in later half
        if p2 > 0.4:
            corrupt_amt = (p2-0.4)/0.6
            b2, g2, r2 = cv2.split(rain_layer)
            shift = int(corrupt_amt * 8)
            r2 = cv2.warpAffine(r2, np.float32([[1,0,shift],[0,1,0]]), (W,H))
            # blue channel crushed to a third: pushes the rain toward red
            rain_layer = cv2.merge([b2//3, g2, r2])
    # ~~~ DMT web geometry (erupts through rain mid-II, bleeds into III) ~~~
    dmt_layer = np.zeros((H,W,3), dtype=np.uint8)
    # ramps from 0 once p2 passes 0.30, plus a constant 40% presence in III
    dmt_w = max(0, w2*max(0, p2-0.30)/0.70) + w3*0.4
    if dmt_w > 0.01:
        draw_dmt_web(dmt_layer, t*1.4, min(1.0, dmt_w))
    # ~~~ Kabbalistic tree (late II, full III) ~~~
    tree_layer = np.zeros((H,W,3), dtype=np.uint8)
    tree_w = w2 * max(0, p2-0.55)/0.45 * 0.5 + w3 * 0.85
    if tree_w > 0.05:
        draw_kabbalistic_tree(tree_layer, t, min(1.0, tree_w))
    # ~~~ SCENE III: Void / Bardo ~~~
    void_layer = np.zeros((H,W,3), dtype=np.uint8)
    if w3 > 0.01:
        # slow (0.18 Hz) sin^2 breathing cycle
        breath = 0.45 + 0.55*math.sin(2*math.pi*0.18*t)**2
        draw_void_bardo(void_layer, t, breath * w3)
    # ~~~ SCENE IV: Mandala ~~~
    mandala_layer = np.zeros((H,W,3), dtype=np.uint8)
    if w4 > 0.01:
        draw_mandala(mandala_layer, t, p4 * w4, warm=True)
    # ~~~ SCENE V: Grid rebirth ~~~
    grid_layer = np.zeros((H,W,3), dtype=np.uint8)
    if w5 > 0.01:
        # grid only starts growing after the first 20% of the movement
        gp = max(0, (p5-0.2)/0.8)
        draw_grid_rebirth(grid_layer, t, gp * w5)
    # --- Background colors (visually distinct per scene) ---
    # I: near-black with cool blue tint -- deep space
    bg_1 = np.full((H,W,3), (int(2+w1*4), int(3+w1*5), int(6+w1*12)), dtype=np.uint8)
    # II: very dark with magenta/red bleed building from edges
    red_bleed = int(p2 * w2 * 18)
    bg_2 = np.full((H,W,3), (red_bleed//3, 2, int(4+red_bleed)), dtype=np.uint8)
    # III: void blue-black
    bg_3 = np.full((H,W,3), (int(8+w3*6), int(4+w3*4), int(12+w3*8)), dtype=np.uint8)
    # IV: warm amber
    amb = int(p4 * w4 * 28)
    bg_4 = np.full((H,W,3), (amb, amb//3, amb//8), dtype=np.uint8)
    # V: white light
    wv = int(w5 * 240)
    bg_5 = np.full((H,W,3), (wv,wv,wv), dtype=np.uint8)
    img = np.zeros((H,W,3), dtype=np.uint8)
    # additive blend of each active background, weighted by its window
    for bg, wt in [(bg_1,w1),(bg_2,w2),(bg_3,w3),(bg_4,w4),(bg_5,w5)]:
        if wt > 0.001:
            img = cv2.addWeighted(img, 1.0, bg, wt, 0)
    # Composite content
    # Scene I: starfield (fades out as II fades in)
    if w1 > 0.001:
        # Starfield dims as rain takes over
        star_fade = w1 * (1.0 - w2*0.9)
        if star_fade > 0.001:
            img = cv2.addWeighted(img, 1.0, star_layer, min(1.0, star_fade), 0)
    # Scene II: rain (completely absent in I, only present in II)
    if w2 > 0.001:
        rain_w = w2 * (1.0 - p2*0.70)
        if rain_w > 0.001:
            img = cv2.addWeighted(img, 1.0, rain_layer, min(1.0, rain_w), 0)
    if dmt_w > 0.001:
        img = cv2.addWeighted(img, 1.0, dmt_layer, min(1.0, dmt_w), 0)
    if tree_w > 0.01:
        img = cv2.addWeighted(img, 1.0, tree_layer, min(0.85, tree_w), 0)
    if w3 > 0.001:
        img = cv2.addWeighted(img, 1.0, void_layer, min(1.0, w3), 0)
    if w4 > 0.001:
        img = cv2.addWeighted(img, 1.0, mandala_layer, min(1.0, w4), 0)
    if w5 > 0.001:
        img = cv2.addWeighted(img, 1.0, grid_layer, min(1.0, w5), 0)
    # --- Post processing ---
    # Scene I: very subtle chromatic aberration (reality slightly off)
    # Scene II: strong CA grows with anxiety
    ca_amt = (w1 * p1 * 2.5) + (w2 * p2 * 14) + (rms * 1.5)
    img = chromatic_ab(img, ca_amt)
    # Scan tears: Scene II only
    if w2 > 0.1:
        img = scan_tears(img, int(w2 * p2 * 14), int(w2 * p2 * 90))
    # VHS dropout: Scene II only (not I -- I is clean cold space)
    if w2 > 0.1:
        img = vhs_dropout(img, int(w2 * 3))
    # Datamosh at transitions
    if datamosh_active and prev_frame is not None:
        img = datamosh_smear(img, prev_frame, rms)
    # Kick flash: Scene II -- now white-green rather than green (hotter)
    if w2 > 0.2 and kick > 0.80 and np.random.random() < 0.52:
        flash_r = int(40 + p2*80); flash_g = int(180 + p2*50)
        flash = np.full((H,W,3), (flash_r, flash_g, flash_r), dtype=np.uint8)
        img = cv2.addWeighted(img, 0.35, flash, 0.65, 0)
    # White overwhelm at start of V
    if w5 > 0.01 and p5 < 0.25:
        wv2 = int(255 * (1 - p5/0.25) * w5)
        white = np.full((H,W,3), wv2, dtype=np.uint8)
        img = cv2.addWeighted(img, 1.0-w5*(1-p5/0.25), white, w5*(1-p5/0.25), 0)
    # Vignette: always, colour shifts by section
    vig_col = (0,0,0)
    if w4 > 0.2: vig_col = (int(5*w4), int(15*w4), int(40*w4))  # amber tint in IV
    img = vignette(img, strength=0.62, col=vig_col)
    # --- ESOTERIC TEXT (YTP layer) ---
    # Different intensity and density per section
    text_intensity = 0.0
    if w1 > 0.1: text_intensity += w1 * 0.25        # sparse in veil
    if w2 > 0.1: text_intensity += w2 * p2 * 0.80   # peaks in descent
    if w3 > 0.1: text_intensity += w3 * 0.35        # eerie in void
    if w4 > 0.1: text_intensity += w4 * 0.30        # gentle in return
    if w5 > 0.1: text_intensity += w5 * 0.15        # sparse in rebirth
    if text_intensity > 0.05:
        draw_esoteric_text(img, t, rms, text_intensity, seed_offset=f*7)
    return img
# ============================================================================
# GENERATE
# ============================================================================
def generate():
    """Run the full pipeline: synthesize audio, render frames, mux.

    Side effects: writes consciousness_audio.wav and
    consciousness_frames.mp4 next to this file, muxes them with FFmpeg
    into consciousness_descent_v3.mp4, and removes the intermediates on
    success.

    Raises:
        RuntimeError: if the OpenCV VideoWriter cannot be opened
            (previously this failed silently and wrote nothing).
    """
    base = os.path.dirname(os.path.abspath(__file__))
    audio_path = os.path.join(base, "consciousness_audio.wav")
    video_path = os.path.join(base, "consciousness_frames.mp4")
    out_path = os.path.join(base, "consciousness_descent_v3.mp4")
    print("=" * 60)
    # Banner version now matches the module/output name (was "v2").
    print(" CONSCIOUSNESS DESCENT v3")
    print(" 60s . 5 Movements . 1280x720 . 30fps")
    print(" Smoother transitions . Coherent soundtrack")
    print(" YTP esoteric chaos layer enabled")
    print("=" * 60)
    print("\n[1/4] Synthesizing audio ...")
    L, R = synthesize_audio()
    save_wav(audio_path, L, R)
    print(" ok %d KB" % (os.path.getsize(audio_path)//1024))
    print("[2/4] Extracting envelopes ...")
    envs = extract_envelopes(L, R)
    print("[3/4] Rendering %d frames ..." % N_FRAMES)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(video_path, fourcc, FPS, (W, H))
    if not writer.isOpened():
        raise RuntimeError("could not open VideoWriter for %s" % video_path)
    # Matrix rain state: per-column head y, fall speed, trail length.
    cw = 8
    n_cols = W // cw
    matrix_cols = {c: {'hy': float(np.random.randint(-H, 0)),
                       'speed': np.random.uniform(6, 22),
                       'trail': np.random.randint(5, 22)}
                   for c in range(n_cols)}
    # Starfield state: list of (x, y, brightness, drift_speed, phase)
    n_stars = 320
    star_state = [(float(np.random.randint(0, W)),
                   float(np.random.randint(0, H)),
                   np.random.uniform(0.3, 1.0),
                   np.random.uniform(0.02, 0.25),
                   np.random.uniform(0, 2*math.pi))
                  for _ in range(n_stars)]
    extra_state = {'stars': star_state}
    prev_frame = None
    boundaries = (M2_S, M3_S, M4_S, M5_S)
    for f in range(N_FRAMES):
        t = f / FPS
        # Datamosh trigger: within 0.5s of any movement boundary.
        datamosh_active = any(abs(t - b) < 0.5 for b in boundaries)
        frame = render_frame(f, envs, matrix_cols, prev_frame, datamosh_active, extra_state)
        writer.write(frame)
        prev_frame = frame.copy()
        if f % 360 == 0:  # progress line every 12 seconds of footage
            sec = ("I.Veil" if t < 12 else
                   "II.Ego Death" if t < 28 else
                   "III.Bardo" if t < 40 else
                   "IV.Return" if t < 52 else "V.Recognise")
            print(" %d/%d (%.0f%%) [%s]" % (f, N_FRAMES, f/N_FRAMES*100, sec))
    writer.release()
    print(" video frames done")
    print("[4/4] Muxing with FFmpeg ...")
    cmd = ["ffmpeg", "-y", "-i", video_path, "-i", audio_path,
           "-c:v", "libx264", "-preset", "fast", "-crf", "17",
           "-c:a", "aac", "-b:a", "192k", "-shortest", out_path]
    res = subprocess.run(cmd, capture_output=True, text=True)
    if res.returncode == 0:
        mb = os.path.getsize(out_path) / 1024 / 1024
        print(" ok %.1f MB -> %s" % (mb, out_path))
        # Remove intermediates; ignore filesystem errors only (the old
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit).
        for tmp in (video_path, audio_path):
            try:
                os.remove(tmp)
            except OSError:
                pass
    else:
        print("[ffmpeg error]", res.stderr[-500:])
    print("\nDone.")
if __name__ == "__main__":
    # Script entry point: run the full 60-second render pipeline.
    generate()