Add the sampler and the input

2025-10-11 22:48:09 +02:00
parent 7f150e8bb4
commit 00e8b4a3a5
23 changed files with 1048 additions and 46 deletions

View File

@@ -4,6 +4,7 @@
import VUMeter from "./lib/components/VUMeter.svelte";
import { engines } from "./lib/audio/engines/registry";
import type { SynthEngine } from "./lib/audio/engines/SynthEngine";
import type { EngineType } from "./lib/audio/engines/SynthEngine";
import { AudioService } from "./lib/audio/services/AudioService";
import { downloadWAV } from "./lib/audio/utils/WAVEncoder";
import {
@@ -18,9 +19,12 @@
getAllProcessors,
} from "./lib/audio/processors/registry";
import type { AudioProcessor } from "./lib/audio/processors/AudioProcessor";
import { Sample } from "./lib/audio/engines/Sample";
import { Input } from "./lib/audio/engines/Input";
let currentEngineIndex = 0;
let engine = engines[currentEngineIndex];
let engineType: EngineType = engine.getType();
const audioService = new AudioService();
@@ -34,9 +38,17 @@
let isProcessed = false;
let showProcessorPopup = false;
let popupTimeout: ReturnType<typeof setTimeout> | null = null;
let isRecording = false;
let isDragOver = false;
const allProcessors = getAllProcessors();
$: showDuration = engineType !== 'sample';
$: showRandomButton = engineType === 'generative';
$: showRecordButton = engineType === 'input';
$: showFileDropZone = engineType === 'sample' && !currentBuffer;
$: showMutateButton = engineType === 'generative' && !isProcessed && currentBuffer;
onMount(() => {
audioService.setVolume(volume);
audioService.setPlaybackUpdateCallback((position) => {
@@ -157,7 +169,83 @@
function switchEngine(index: number) {
currentEngineIndex = index;
engine = engines[index];
generateRandom();
engineType = engine.getType();
currentBuffer = null;
currentParams = null;
isProcessed = false;
if (engineType === 'generative') {
generateRandom();
}
}
async function handleFileInput(event: Event) {
const input = event.target as HTMLInputElement;
if (!input.files || input.files.length === 0) return;
const file = input.files[0];
await loadAudioFile(file);
}
async function loadAudioFile(file: File) {
if (!(engine instanceof Sample)) return;
try {
await engine.loadFile(file);
currentParams = engine.randomParams();
waveformColor = generateRandomColor();
isProcessed = false;
regenerateBuffer();
} catch (error) {
console.error('Failed to load audio file:', error);
alert(`Failed to load audio file: ${error}`);
}
}
async function recordAudio() {
if (!(engine instanceof Input)) return;
if (isRecording) return;
try {
isRecording = true;
await engine.record(duration);
currentParams = engine.randomParams();
waveformColor = generateRandomColor();
isProcessed = false;
regenerateBuffer();
} catch (error) {
console.error('Failed to record audio:', error);
alert(`Failed to record audio: ${error}`);
} finally {
isRecording = false;
}
}
function handleDrop(event: DragEvent) {
event.preventDefault();
isDragOver = false;
if (!event.dataTransfer) return;
const files = event.dataTransfer.files;
if (files.length === 0) return;
const file = files[0];
if (!file.type.startsWith('audio/')) {
alert('Please drop an audio file');
return;
}
loadAudioFile(file);
}
function handleDragOver(event: DragEvent) {
event.preventDefault();
isDragOver = true;
}
function handleDragLeave(event: DragEvent) {
event.preventDefault();
isDragOver = false;
}
async function closeModal() {
@@ -199,7 +287,7 @@
case "arrowright":
event.preventDefault();
const durationIncrement = event.shiftKey ? 1 : 0.05;
duration = Math.min(8, duration + durationIncrement);
duration = Math.min(32, duration + durationIncrement);
saveDuration(duration);
break;
case "arrowdown":
@@ -237,18 +325,20 @@
{/each}
</div>
<div class="controls-group">
<div class="slider-control duration-slider">
<label for="duration">Duration: {duration.toFixed(2)}s</label>
<input
id="duration"
type="range"
min="0.05"
max="8"
step="0.01"
value={duration}
oninput={handleDurationChange}
/>
</div>
{#if showDuration}
<div class="slider-control duration-slider">
<label for="duration">Duration: {duration.toFixed(2)}s</label>
<input
id="duration"
type="range"
min="0.05"
max="32"
step="0.01"
value={duration}
oninput={handleDurationChange}
/>
</div>
{/if}
<div class="slider-control">
<label for="volume">Volume</label>
<input
@@ -266,39 +356,75 @@
<div class="main-area">
<div class="waveform-container">
<WaveformDisplay
buffer={currentBuffer}
color={waveformColor}
{playbackPosition}
onclick={replaySound}
/>
{#if showFileDropZone}
<div
class="file-drop-zone"
class:drag-over={isDragOver}
role="button"
tabindex="0"
ondrop={handleDrop}
ondragover={handleDragOver}
ondragleave={handleDragLeave}
>
<div class="drop-zone-content">
<h2>Drop an audio file here</h2>
<label for="file-input" class="file-input-label">
<input
id="file-input"
type="file"
accept="audio/*"
onchange={handleFileInput}
style="display: none;"
/>
Choose a file
</label>
</div>
</div>
{:else}
<WaveformDisplay
buffer={currentBuffer}
color={waveformColor}
{playbackPosition}
onclick={replaySound}
/>
{/if}
<div class="bottom-controls">
<button onclick={generateRandom}>Random (R)</button>
{#if !isProcessed}
{#if showRandomButton}
<button onclick={generateRandom}>Random (R)</button>
{/if}
{#if showRecordButton}
<button onclick={recordAudio} disabled={isRecording}>
{isRecording ? 'Recording...' : 'Record'}
</button>
{/if}
{#if showMutateButton}
<button onclick={mutate}>Mutate (M)</button>
{/if}
<div
class="process-button-container"
role="group"
onmouseenter={handlePopupMouseEnter}
onmouseleave={handlePopupMouseLeave}
>
<button onclick={processSound}>Process (P)</button>
{#if showProcessorPopup}
<div class="processor-popup">
{#each allProcessors as processor}
<button
class="processor-tile"
data-description={processor.getDescription()}
onclick={() => processWithSpecificProcessor(processor)}
>
{processor.getName()}
</button>
{/each}
</div>
{/if}
</div>
<button onclick={download}>Download (D)</button>
{#if currentBuffer}
<div
class="process-button-container"
role="group"
onmouseenter={handlePopupMouseEnter}
onmouseleave={handlePopupMouseLeave}
>
<button onclick={processSound}>Process (P)</button>
{#if showProcessorPopup}
<div class="processor-popup">
{#each allProcessors as processor}
<button
class="processor-tile"
data-description={processor.getDescription()}
onclick={() => processWithSpecificProcessor(processor)}
>
{processor.getName()}
</button>
{/each}
</div>
{/if}
</div>
<button onclick={download}>Download (D)</button>
{/if}
</div>
</div>
<div class="vu-meter-container">
@@ -601,9 +727,9 @@
padding: 0.75rem;
z-index: 1000;
display: grid;
grid-template-columns: repeat(3, 1fr);
grid-template-columns: repeat(4, 1fr);
gap: 0.5rem;
width: 450px;
width: 600px;
margin-bottom: 0.5rem;
}
@@ -652,4 +778,56 @@
.processor-tile:hover::after {
opacity: 1;
}
.file-drop-zone {
width: 100%;
height: 100%;
display: flex;
align-items: center;
justify-content: center;
border: 2px dashed #444;
background-color: #0a0a0a;
transition: all 0.2s;
}
.file-drop-zone.drag-over {
border-color: #646cff;
background-color: #1a1a1a;
}
.drop-zone-content {
text-align: center;
color: #ccc;
}
.drop-zone-content h2 {
font-size: 1.5rem;
margin-bottom: 1rem;
color: #fff;
}
.drop-zone-content p {
margin: 1rem 0;
font-size: 1rem;
}
.file-input-label {
display: inline-block;
padding: 0.75rem 1.5rem;
background-color: #646cff;
color: #fff;
border: 1px solid #646cff;
cursor: pointer;
transition: background-color 0.2s;
font-size: 1rem;
}
.file-input-label:hover {
background-color: #535bf2;
}
button:disabled {
opacity: 0.5;
cursor: not-allowed;
}
</style>

View File

@@ -69,6 +69,10 @@ export class Benjolin implements SynthEngine<BenjolinParams> {
return 'Some kind of rungler/benjolin inspired generator';
}
getType() {
return 'generative' as const;
}
generate(params: BenjolinParams, sampleRate: number, duration: number): [Float32Array, Float32Array] {
const numSamples = Math.floor(duration * sampleRate);
const left = new Float32Array(numSamples);

View File

@@ -63,6 +63,10 @@ export class DubSiren implements SynthEngine<DubSirenParams> {
return 'Siren generator with pitch sweeps, anti-aliased oscillators and stable filtering';
}
getType() {
return 'generative' as const;
}
generate(params: DubSirenParams, sampleRate: number, duration: number): [Float32Array, Float32Array] {
const numSamples = Math.floor(sampleRate * duration);
const leftBuffer = new Float32Array(numSamples);

View File

@@ -71,6 +71,10 @@ export class FourOpFM implements SynthEngine<FourOpFMParams> {
return 'Four-operator FM synthesis with multiple algorithms, envelope curves, and LFO waveforms';
}
getType() {
return 'generative' as const;
}
generate(params: FourOpFMParams, sampleRate: number, duration: number): [Float32Array, Float32Array] {
const numSamples = Math.floor(sampleRate * duration);
const leftBuffer = new Float32Array(numSamples);

View File

@@ -0,0 +1,120 @@
import type { SynthEngine } from './SynthEngine';
interface InputParams {
recorded: boolean;
}
export class Input implements SynthEngine<InputParams> {
private leftChannel: Float32Array | null = null;
private rightChannel: Float32Array | null = null;
private recordedSampleRate: number = 44100;
getName(): string {
return 'Input';
}
getDescription(): string {
return 'Record audio from your microphone or default input device';
}
getType() {
return 'input' as const;
}
async record(duration: number): Promise<void> {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: false,
noiseSuppression: false,
autoGainControl: false
}
});
const audioContext = new AudioContext();
this.recordedSampleRate = audioContext.sampleRate;
const source = audioContext.createMediaStreamSource(stream);
const destination = audioContext.createMediaStreamDestination();
source.connect(destination);
const mediaRecorder = new MediaRecorder(destination.stream);
const chunks: Blob[] = [];
mediaRecorder.ondataavailable = (e) => {
if (e.data.size > 0) {
chunks.push(e.data);
}
};
const recordingComplete = new Promise<void>((resolve, reject) => {
mediaRecorder.onstop = async () => {
const blob = new Blob(chunks, { type: 'audio/webm' });
const arrayBuffer = await blob.arrayBuffer();
try {
const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
if (audioBuffer.numberOfChannels === 1) {
this.leftChannel = audioBuffer.getChannelData(0);
this.rightChannel = audioBuffer.getChannelData(0);
} else {
this.leftChannel = audioBuffer.getChannelData(0);
this.rightChannel = audioBuffer.getChannelData(1);
}
} catch (error) {
// Throwing here would only reject the detached onstop callback and leave
// record() awaiting forever; reject the outer promise instead.
reject(new Error(`Failed to decode recorded audio: ${error}`));
return;
} finally {
stream.getTracks().forEach(track => track.stop());
await audioContext.close();
}
resolve();
};
});
mediaRecorder.start();
setTimeout(() => {
if (mediaRecorder.state === 'recording') {
mediaRecorder.stop();
}
}, duration * 1000);
await recordingComplete;
}
generate(_params: InputParams, sampleRate: number, _duration: number): [Float32Array, Float32Array] {
if (!this.leftChannel || !this.rightChannel) {
throw new Error('No audio recorded. Please record audio first.');
}
if (this.recordedSampleRate === sampleRate) {
return [this.leftChannel.slice(), this.rightChannel.slice()];
}
const resampleRatio = sampleRate / this.recordedSampleRate;
const newLength = Math.floor(this.leftChannel.length * resampleRatio);
const leftResampled = new Float32Array(newLength);
const rightResampled = new Float32Array(newLength);
for (let i = 0; i < newLength; i++) {
const sourceIndex = i / resampleRatio;
const index0 = Math.floor(sourceIndex);
const index1 = Math.min(index0 + 1, this.leftChannel.length - 1);
const fraction = sourceIndex - index0;
leftResampled[i] = this.leftChannel[index0] * (1 - fraction) + this.leftChannel[index1] * fraction;
rightResampled[i] = this.rightChannel[index0] * (1 - fraction) + this.rightChannel[index1] * fraction;
}
return [leftResampled, rightResampled];
}
randomParams(): InputParams {
return { recorded: this.leftChannel !== null && this.rightChannel !== null };
}
mutateParams(params: InputParams): InputParams {
return params;
}
}
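
A minimal standalone usage sketch for the new engine (hypothetical; in the app this flow is driven by recordAudio() and regenerateBuffer() in App.svelte):

// Inside an async context: capture 2 s from the default device,
// then fetch stereo buffers resampled to 48 kHz.
const input = new Input();
await input.record(2);
// The duration argument is ignored by this engine; the recording's own (resampled) length is returned.
const [left, right] = input.generate(input.randomParams(), 48000, 2);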

View File

@@ -46,6 +46,10 @@ export class NoiseDrum implements SynthEngine {
return 'Versatile noise-based percussion synthesizer inspired by classic drum machines';
}
getType() {
return 'generative' as const;
}
randomParams(): NoiseDrumParams {
// Intelligently bias parameter ranges to create diverse percussion types
const filterBias = Math.random();

View File

@@ -78,6 +78,10 @@ export class Ring implements SynthEngine<RingParams> {
return 'Complex ring modulator with dual modulators, multiple LFOs, feedback, and evolving timbres';
}
getType() {
return 'generative' as const;
}
generate(params: RingParams, sampleRate: number, duration: number): [Float32Array, Float32Array] {
const numSamples = Math.floor(sampleRate * duration);
const leftBuffer = new Float32Array(numSamples);

View File

@@ -0,0 +1,76 @@
import type { SynthEngine } from './SynthEngine';
interface SampleParams {
loaded: boolean;
}
export class Sample implements SynthEngine<SampleParams> {
private leftChannel: Float32Array | null = null;
private rightChannel: Float32Array | null = null;
private sampleRate: number = 44100;
getName(): string {
return 'Sample';
}
getDescription(): string {
return 'Load audio files from disk and process them';
}
getType() {
return 'sample' as const;
}
async loadFile(file: File): Promise<void> {
const arrayBuffer = await file.arrayBuffer();
const audioContext = new AudioContext();
try {
const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
this.sampleRate = audioBuffer.sampleRate;
if (audioBuffer.numberOfChannels === 1) {
this.leftChannel = audioBuffer.getChannelData(0);
this.rightChannel = audioBuffer.getChannelData(0);
} else {
this.leftChannel = audioBuffer.getChannelData(0);
this.rightChannel = audioBuffer.getChannelData(1);
}
} finally {
// Close the temporary context even when decoding fails, so repeated bad files do not leak AudioContexts.
await audioContext.close();
}
}
generate(_params: SampleParams, sampleRate: number, _duration: number): [Float32Array, Float32Array] {
if (!this.leftChannel || !this.rightChannel) {
throw new Error('No audio file loaded. Please load a file first.');
}
if (this.sampleRate === sampleRate) {
return [this.leftChannel.slice(), this.rightChannel.slice()];
}
const resampleRatio = sampleRate / this.sampleRate;
const newLength = Math.floor(this.leftChannel.length * resampleRatio);
const leftResampled = new Float32Array(newLength);
const rightResampled = new Float32Array(newLength);
for (let i = 0; i < newLength; i++) {
const sourceIndex = i / resampleRatio;
const index0 = Math.floor(sourceIndex);
const index1 = Math.min(index0 + 1, this.leftChannel.length - 1);
const fraction = sourceIndex - index0;
leftResampled[i] = this.leftChannel[index0] * (1 - fraction) + this.leftChannel[index1] * fraction;
rightResampled[i] = this.rightChannel[index0] * (1 - fraction) + this.rightChannel[index1] * fraction;
}
return [leftResampled, rightResampled];
}
randomParams(): SampleParams {
return { loaded: this.leftChannel !== null && this.rightChannel !== null };
}
mutateParams(params: SampleParams): SampleParams {
return params;
}
}
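
Sample and Input now carry essentially the same linear-interpolation resampler inside generate(). If that duplication becomes a maintenance concern, a shared helper is one option; a sketch under that assumption (resampleLinear is hypothetical and not part of this commit):

// Linearly interpolate a channel from fromRate to toRate.
function resampleLinear(source: Float32Array, fromRate: number, toRate: number): Float32Array {
  const ratio = toRate / fromRate;
  const out = new Float32Array(Math.floor(source.length * ratio));
  for (let i = 0; i < out.length; i++) {
    const pos = i / ratio;
    const i0 = Math.floor(pos);
    const i1 = Math.min(i0 + 1, source.length - 1);
    const frac = pos - i0;
    out[i] = source[i0] * (1 - frac) + source[i1] * frac;
  }
  return out;
}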

View File

@@ -2,9 +2,13 @@
// The duration parameter should be used to scale time-based parameters (envelopes, LFOs, etc.)
// Time-based parameters should be stored as ratios (0-1) and scaled by duration during generation
// Engines must generate stereo output: [leftChannel, rightChannel]
export type EngineType = 'generative' | 'sample' | 'input';
export interface SynthEngine<T = any> {
getName(): string;
getDescription(): string;
getType(): EngineType;
generate(params: T, sampleRate: number, duration: number): [Float32Array, Float32Array];
randomParams(): T;
mutateParams(params: T, mutationAmount?: number): T;
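
With the new EngineType union, each engine declares whether it is 'generative', 'sample', or 'input', and App.svelte branches on getType() to decide which controls (duration slider, Random, Record, file drop zone) to render. A minimal conforming engine could look like this sketch (SilenceEngine is hypothetical and purely illustrative):

import type { SynthEngine, EngineType } from './SynthEngine';

interface SilenceParams { gain: number; }

class SilenceEngine implements SynthEngine<SilenceParams> {
  getName() { return 'Silence'; }
  getDescription() { return 'Illustrative placeholder engine that outputs silence'; }
  getType(): EngineType { return 'generative'; }
  generate(_params: SilenceParams, sampleRate: number, duration: number): [Float32Array, Float32Array] {
    // Stereo output, as the engine contract requires.
    const n = Math.floor(sampleRate * duration);
    return [new Float32Array(n), new Float32Array(n)];
  }
  randomParams(): SilenceParams { return { gain: 0 }; }
  mutateParams(params: SilenceParams): SilenceParams { return params; }
}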

View File

@@ -34,6 +34,10 @@ export class ZzfxEngine implements SynthEngine<ZzfxParams> {
return 'Retro 8-bit sound effects generator with pitch bending, noise, and bit crushing';
}
getType() {
return 'generative' as const;
}
generate(params: ZzfxParams, sampleRate: number, duration: number): [Float32Array, Float32Array] {
// ZZFX uses 44100 sample rate internally
const zzfxSampleRate = 44100;

View File

@@ -5,8 +5,12 @@ import { Benjolin } from './Benjolin';
import { ZzfxEngine } from './ZzfxEngine';
import { NoiseDrum } from './NoiseDrum';
import { Ring } from './Ring';
import { Sample } from './Sample';
import { Input } from './Input';
export const engines: SynthEngine[] = [
new Sample(),
new Input(),
new FourOpFM(),
new DubSiren(),
new Benjolin(),

View File

@@ -0,0 +1,29 @@
import type { AudioProcessor } from "./AudioProcessor";
export class BitCrusher implements AudioProcessor {
getName(): string {
return "Bit Crusher";
}
getDescription(): string {
return "Reduces bit depth for lo-fi digital distortion";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const bitDepth = Math.floor(Math.random() * 6) + 3;
const levels = Math.pow(2, bitDepth);
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
for (let i = 0; i < leftChannel.length; i++) {
newLeft[i] = Math.floor(leftChannel[i] * levels) / levels;
newRight[i] = Math.floor(rightChannel[i] * levels) / levels;
}
return [newLeft, newRight];
}
}
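
For a concrete feel of the quantization: with bitDepth = 4 there are 2^4 = 16 levels, so an input sample of 0.37 becomes floor(0.37 * 16) / 16 = 5 / 16 = 0.3125.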

View File

@@ -0,0 +1,68 @@
import type { AudioProcessor } from "./AudioProcessor";
export class Compressor implements AudioProcessor {
getName(): string {
return "Compressor";
}
getDescription(): string {
return "Reduces dynamic range by taming peaks with makeup gain";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const sampleRate = 44100;
const thresholdDb = -12 - Math.random() * 6;
const threshold = Math.pow(10, thresholdDb / 20);
const ratio = 4 + Math.random() * 4;
const attackMs = 1 + Math.random() * 4;
const releaseMs = 50 + Math.random() * 100;
const attackCoeff = Math.exp(-1 / (sampleRate * (attackMs / 1000)));
const releaseCoeff = Math.exp(-1 / (sampleRate * (releaseMs / 1000)));
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
let envelopeL = 0;
let envelopeR = 0;
for (let i = 0; i < leftChannel.length; i++) {
const inputL = Math.abs(leftChannel[i]);
const inputR = Math.abs(rightChannel[i]);
envelopeL = inputL > envelopeL
? attackCoeff * envelopeL + (1 - attackCoeff) * inputL
: releaseCoeff * envelopeL + (1 - releaseCoeff) * inputL;
envelopeR = inputR > envelopeR
? attackCoeff * envelopeR + (1 - attackCoeff) * inputR
: releaseCoeff * envelopeR + (1 - releaseCoeff) * inputR;
const peakEnvelope = Math.max(envelopeL, envelopeR);
let gainReduction = 1;
if (peakEnvelope > threshold) {
const overThresholdDb = 20 * Math.log10(peakEnvelope / threshold);
const compressedDb = overThresholdDb / ratio;
const reductionDb = overThresholdDb - compressedDb;
gainReduction = Math.pow(10, -reductionDb / 20);
}
newLeft[i] = leftChannel[i] * gainReduction;
newRight[i] = rightChannel[i] * gainReduction;
}
const makeupGainDb = Math.abs(thresholdDb) * 0.5;
const makeupGain = Math.pow(10, makeupGainDb / 20);
for (let i = 0; i < newLeft.length; i++) {
newLeft[i] *= makeupGain;
newRight[i] *= makeupGain;
}
return [newLeft, newRight];
}
}
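
To make the gain math concrete with illustrative mid-range draws: at thresholdDb = -15 dB, ratio = 6 and an envelope peak of -3 dB, the signal is 12 dB over threshold; compression brings that down to 12 / 6 = 2 dB over, i.e. 10 dB of gain reduction (a factor of 10^(-10/20) ≈ 0.316). The makeup stage then adds back |-15| * 0.5 = 7.5 dB (≈ 2.37x).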

View File

@@ -0,0 +1,39 @@
import type { AudioProcessor } from "./AudioProcessor";
export class HaasEffect implements AudioProcessor {
getName(): string {
return "Haas Effect";
}
getDescription(): string {
return "Creates stereo width with micro-delay (precedence effect)";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const sampleRate = 44100;
const delayMs = 5 + Math.random() * 25;
const delaySamples = Math.floor((delayMs / 1000) * sampleRate);
const attenuationDb = -1 - Math.random() * 2;
const attenuation = Math.pow(10, attenuationDb / 20);
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
for (let i = 0; i < leftChannel.length; i++) {
newLeft[i] = leftChannel[i];
}
for (let i = 0; i < rightChannel.length; i++) {
if (i >= delaySamples) {
newRight[i] = rightChannel[i - delaySamples] * attenuation;
} else {
newRight[i] = 0;
}
}
return [newLeft, newRight];
}
}

View File

@@ -0,0 +1,39 @@
import type { AudioProcessor } from "./AudioProcessor";
export class Normalize implements AudioProcessor {
getName(): string {
return "Normalize";
}
getDescription(): string {
return "Normalizes audio to maximum amplitude without clipping";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
let maxAmplitude = 0;
for (let i = 0; i < leftChannel.length; i++) {
maxAmplitude = Math.max(maxAmplitude, Math.abs(leftChannel[i]));
maxAmplitude = Math.max(maxAmplitude, Math.abs(rightChannel[i]));
}
if (maxAmplitude === 0) {
return [leftChannel, rightChannel];
}
const gain = 1.0 / maxAmplitude;
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
for (let i = 0; i < leftChannel.length; i++) {
newLeft[i] = leftChannel[i] * gain;
newRight[i] = rightChannel[i] * gain;
}
return [newLeft, newRight];
}
}

View File

@@ -0,0 +1,48 @@
import type { AudioProcessor } from "./AudioProcessor";
export class OctaveDown implements AudioProcessor {
getName(): string {
return "Octave Down";
}
getDescription(): string {
return "Shifts pitch down one octave by halving playback rate";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const inputLength = leftChannel.length;
const outputLength = inputLength * 2;
const newLeft = new Float32Array(outputLength);
const newRight = new Float32Array(outputLength);
for (let i = 0; i < outputLength; i++) {
const sourceIndex = i / 2;
const lowerIndex = Math.floor(sourceIndex);
const upperIndex = Math.min(lowerIndex + 1, inputLength - 1);
const fraction = sourceIndex - lowerIndex;
newLeft[i] = leftChannel[lowerIndex] * (1 - fraction) + leftChannel[upperIndex] * fraction;
newRight[i] = rightChannel[lowerIndex] * (1 - fraction) + rightChannel[upperIndex] * fraction;
}
this.applyFadeOut(newLeft, newRight);
return [newLeft, newRight];
}
private applyFadeOut(left: Float32Array, right: Float32Array): void {
const fadeLength = Math.min(Math.floor(left.length * 0.05), 2205);
const fadeStart = left.length - fadeLength;
for (let i = 0; i < fadeLength; i++) {
const gain = 1 - (i / fadeLength);
const index = fadeStart + i;
left[index] *= gain;
right[index] *= gain;
}
}
}

View File

@@ -0,0 +1,48 @@
import type { AudioProcessor } from "./AudioProcessor";
export class OctaveUp implements AudioProcessor {
getName(): string {
return "Octave Up";
}
getDescription(): string {
return "Shifts pitch up one octave by doubling playback rate";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const inputLength = leftChannel.length;
const outputLength = Math.floor(inputLength / 2);
const newLeft = new Float32Array(outputLength);
const newRight = new Float32Array(outputLength);
for (let i = 0; i < outputLength; i++) {
const sourceIndex = i * 2;
const lowerIndex = Math.floor(sourceIndex);
const upperIndex = Math.min(lowerIndex + 1, inputLength - 1);
const fraction = sourceIndex - lowerIndex;
newLeft[i] = leftChannel[lowerIndex] * (1 - fraction) + leftChannel[upperIndex] * fraction;
newRight[i] = rightChannel[lowerIndex] * (1 - fraction) + rightChannel[upperIndex] * fraction;
}
this.applyFadeOut(newLeft, newRight);
return [newLeft, newRight];
}
private applyFadeOut(left: Float32Array, right: Float32Array): void {
const fadeLength = Math.min(Math.floor(left.length * 0.05), 2205);
const fadeStart = left.length - fadeLength;
for (let i = 0; i < fadeLength; i++) {
const gain = 1 - (i / fadeLength);
const index = fadeStart + i;
left[index] *= gain;
right[index] *= gain;
}
}
}

View File

@@ -0,0 +1,31 @@
import type { AudioProcessor } from "./AudioProcessor";
export class PhaseInverter implements AudioProcessor {
getName(): string {
return "Phase Inverter";
}
getDescription(): string {
return "Inverts polarity of one or both channels";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const mode = Math.floor(Math.random() * 3);
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
const invertLeft = mode === 0 || mode === 2;
const invertRight = mode === 1 || mode === 2;
for (let i = 0; i < leftChannel.length; i++) {
newLeft[i] = invertLeft ? -leftChannel[i] : leftChannel[i];
newRight[i] = invertRight ? -rightChannel[i] : rightChannel[i];
}
return [newLeft, newRight];
}
}

View File

@@ -0,0 +1,141 @@
import type { AudioProcessor } from './AudioProcessor';
export class Phaser implements AudioProcessor {
private readonly sampleRate = 44100;
getName(): string {
return 'Phaser';
}
getDescription(): string {
return 'Classic phaser effect with sweeping all-pass filters for swirling, spacey sounds';
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const length = leftChannel.length;
const numStages = Math.floor(Math.random() * 4) + 4; // 4-7 all-pass filter stages
const lfoRate = Math.random() * 2.5 + 0.2; // 0.2-2.7 Hz LFO rate
const lfoDepth = Math.random() * 0.4 + 0.5; // 50-90% modulation depth
const feedback = Math.random() * 0.7 + 0.2; // 20-90% feedback
const mix = Math.random() * 0.4 + 0.4; // 40-80% wet
const minFreq = Math.random() * 200 + 200; // 200-400 Hz minimum frequency
const maxFreq = Math.random() * 1500 + 1500; // 1500-3000 Hz maximum frequency
const stereoPhaseOffset = Math.random() * Math.PI; // Random stereo offset
const leftOut = new Float32Array(length);
const rightOut = new Float32Array(length);
const leftStates = this.createFilterStates(numStages);
const rightStates = this.createFilterStates(numStages);
let leftFeedback = 0;
let rightFeedback = 0;
for (let i = 0; i < length; i++) {
const t = i / this.sampleRate;
const lfoLeft = Math.sin(2 * Math.PI * lfoRate * t);
const lfoRight = Math.sin(2 * Math.PI * lfoRate * t + stereoPhaseOffset);
const freqLeft = this.mapLfoToFreq(lfoLeft, minFreq, maxFreq, lfoDepth);
const freqRight = this.mapLfoToFreq(lfoRight, minFreq, maxFreq, lfoDepth);
let leftProcessed = leftChannel[i] + leftFeedback * feedback;
let rightProcessed = rightChannel[i] + rightFeedback * feedback;
for (let stage = 0; stage < numStages; stage++) {
leftProcessed = this.allPassFilter(
leftProcessed,
freqLeft,
leftStates[stage]
);
rightProcessed = this.allPassFilter(
rightProcessed,
freqRight,
rightStates[stage]
);
}
leftFeedback = leftProcessed;
rightFeedback = rightProcessed;
const dryGain = Math.sqrt(1 - mix);
const wetGain = Math.sqrt(mix);
leftOut[i] = leftChannel[i] * dryGain + leftProcessed * wetGain;
rightOut[i] = rightChannel[i] * dryGain + rightProcessed * wetGain;
leftOut[i] = this.softClip(leftOut[i]);
rightOut[i] = this.softClip(rightOut[i]);
}
this.normalizeOutput(leftOut, rightOut);
return [leftOut, rightOut];
}
private createFilterStates(numStages: number): Array<{ x1: number; y1: number }> {
const states = [];
for (let i = 0; i < numStages; i++) {
states.push({ x1: 0, y1: 0 });
}
return states;
}
private allPassFilter(
input: number,
frequency: number,
state: { x1: number; y1: number }
): number {
const c = (Math.tan(Math.PI * frequency / this.sampleRate) - 1) /
(Math.tan(Math.PI * frequency / this.sampleRate) + 1);
const output = c * input + state.x1 - c * state.y1;
state.x1 = input;
state.y1 = output;
return output;
}
private mapLfoToFreq(
lfo: number,
minFreq: number,
maxFreq: number,
depth: number
): number {
const normalizedLfo = (lfo + 1) * 0.5;
const modulatedLfo = normalizedLfo * depth + (1 - depth) * 0.5;
return minFreq + (maxFreq - minFreq) * modulatedLfo;
}
private softClip(sample: number): number {
const threshold = 0.95;
if (Math.abs(sample) < threshold) {
return sample;
}
const sign = sample < 0 ? -1 : 1;
const abs = Math.abs(sample);
return sign * (threshold + (1 - threshold) * Math.tanh((abs - threshold) / (1 - threshold)));
}
private normalizeOutput(leftOut: Float32Array, rightOut: Float32Array): void {
let maxPeak = 0;
for (let i = 0; i < leftOut.length; i++) {
maxPeak = Math.max(maxPeak, Math.abs(leftOut[i]), Math.abs(rightOut[i]));
}
if (maxPeak > 0.01) {
const targetPeak = 0.95;
const normalizeGain = Math.min(1.0, targetPeak / maxPeak);
for (let i = 0; i < leftOut.length; i++) {
leftOut[i] *= normalizeGain;
rightOut[i] *= normalizeGain;
}
}
}
}
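
For reference, allPassFilter implements the standard first-order all-pass difference equation y[n] = c*x[n] + x[n-1] - c*y[n-1], with c = (tan(π*f/fs) - 1) / (tan(π*f/fs) + 1). Its magnitude response is flat and only the phase shifts, so cascading 4-7 stages and mixing the result back with the dry signal creates the moving notches heard as phasing, while the feedback term deepens them.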

View File

@@ -0,0 +1,43 @@
import type { AudioProcessor } from "./AudioProcessor";
export class RingModulator implements AudioProcessor {
getName(): string {
return "Ring Modulator";
}
getDescription(): string {
return "Frequency modulation for metallic, bell-like tones";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const sampleRate = 44100;
const carrierFreq = 20 + Math.random() * 400;
const mix = 0.5 + Math.random() * 0.5;
const freqModDepth = Math.random() * 50;
const freqModRate = 0.1 + Math.random() * 2;
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
for (let i = 0; i < leftChannel.length; i++) {
const t = i / sampleRate;
const modulation = Math.sin(2 * Math.PI * freqModRate * t);
const modulatedFreq = carrierFreq + (modulation * freqModDepth);
const carrier = Math.sin(2 * Math.PI * modulatedFreq * t);
const modulatedL = leftChannel[i] * carrier;
const modulatedR = rightChannel[i] * carrier;
newLeft[i] = leftChannel[i] * (1 - mix) + modulatedL * mix;
newRight[i] = rightChannel[i] * (1 - mix) + modulatedR * mix;
}
return [newLeft, newRight];
}
}
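
Multiplying the input by a sine carrier replaces each input partial with sum and difference frequencies (f_in + f_carrier and f_in - f_carrier), which is what gives ring modulation its inharmonic, bell-like character; the slow sweep of modulatedFreq keeps those sidebands drifting over the course of the sound.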

View File

@@ -0,0 +1,31 @@
import type { AudioProcessor } from "./AudioProcessor";
export class StereoWidener implements AudioProcessor {
getName(): string {
return "Stereo Widener";
}
getDescription(): string {
return "Expands stereo field using mid-side processing";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const widthFactor = 1.5 + Math.random() * 1.5;
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
for (let i = 0; i < leftChannel.length; i++) {
const mid = (leftChannel[i] + rightChannel[i]) * 0.5;
const side = (leftChannel[i] - rightChannel[i]) * 0.5 * widthFactor;
newLeft[i] = mid + side;
newRight[i] = mid - side;
}
return [newLeft, newRight];
}
}

View File

@@ -0,0 +1,57 @@
import type { AudioProcessor } from "./AudioProcessor";
export class Waveshaper implements AudioProcessor {
getName(): string {
return "Waveshaper";
}
getDescription(): string {
return "Transfer function distortion with various curve shapes";
}
async process(
leftChannel: Float32Array,
rightChannel: Float32Array
): Promise<[Float32Array, Float32Array]> {
const shapeType = Math.floor(Math.random() * 4);
const drive = 1 + Math.random() * 4;
const mix = 0.5 + Math.random() * 0.5;
const newLeft = new Float32Array(leftChannel.length);
const newRight = new Float32Array(rightChannel.length);
for (let i = 0; i < leftChannel.length; i++) {
const wetL = this.applyShape(leftChannel[i] * drive, shapeType);
const wetR = this.applyShape(rightChannel[i] * drive, shapeType);
newLeft[i] = leftChannel[i] * (1 - mix) + wetL * mix;
newRight[i] = rightChannel[i] * (1 - mix) + wetR * mix;
}
return [newLeft, newRight];
}
private applyShape(x: number, shapeType: number): number {
switch (shapeType) {
case 0:
return Math.tanh(x);
case 1:
return x / (1 + Math.abs(x));
case 2: {
const clipped = Math.max(-1, Math.min(1, x));
return clipped - (clipped * clipped * clipped) / 3;
}
case 3: {
if (x > 1) return 1;
if (x < -1) return -1;
return 1.5 * x - 0.5 * x * x * x;
}
default:
return x;
}
}
}

View File

@@ -11,6 +11,17 @@ import { MicroPitch } from './MicroPitch';
import { SpectralBlur } from './SpectralBlur';
import { SpectralShift } from './SpectralShift';
import { ConvolutionReverb } from './ConvolutionReverb';
import { Phaser } from './Phaser';
import { OctaveDown } from './OctaveDown';
import { OctaveUp } from './OctaveUp';
import { Normalize } from './Normalize';
import { BitCrusher } from './BitCrusher';
import { StereoWidener } from './StereoWidener';
import { HaasEffect } from './HaasEffect';
import { PhaseInverter } from './PhaseInverter';
import { Compressor } from './Compressor';
import { RingModulator } from './RingModulator';
import { Waveshaper } from './Waveshaper';
const processors: AudioProcessor[] = [
new SegmentShuffler(),
@@ -25,6 +36,17 @@ const processors: AudioProcessor[] = [
new SpectralBlur(),
new SpectralShift(),
new ConvolutionReverb(),
new Phaser(),
new OctaveDown(),
new OctaveUp(),
new Normalize(),
new BitCrusher(),
new StereoWidener(),
new HaasEffect(),
new PhaseInverter(),
new Compressor(),
new RingModulator(),
new Waveshaper(),
];
export function getRandomProcessor(): AudioProcessor {