Progress on mic calibration, although it is still broken
This commit is contained in:
parent
38f92736d1
commit
62b14b5f17
4 changed files with 67 additions and 9 deletions
|
@ -4,6 +4,7 @@ import { useSelector, useDispatch } from "react-redux";
|
||||||
import { RootState } from "../rootReducer";
|
import { RootState } from "../rootReducer";
|
||||||
|
|
||||||
import * as MixerState from "./state";
|
import * as MixerState from "./state";
|
||||||
|
import { VUMeter } from "./VUMeter";
|
||||||
|
|
||||||
export function MicCalibrationModal() {
|
export function MicCalibrationModal() {
|
||||||
const state = useSelector(
|
const state = useSelector(
|
||||||
|
@ -18,7 +19,17 @@ export function MicCalibrationModal() {
|
||||||
{state !== null && (
|
{state !== null && (
|
||||||
<>
|
<>
|
||||||
<h3>Peak: {state.peak}</h3>
|
<h3>Peak: {state.peak}</h3>
|
||||||
<h3>Loudness: {state.loudness}</h3>
|
<b>
|
||||||
|
Speak into the microphone at a normal volume. Adjust the
|
||||||
|
gain slider until the bar below is green when you're speaking.
|
||||||
|
</b>
|
||||||
|
<VUMeter
|
||||||
|
width={400}
|
||||||
|
height={40}
|
||||||
|
value={state.peak}
|
||||||
|
range={[-70, 0]}
|
||||||
|
greenRange={[-3.5, -1.5]}
|
||||||
|
/>
|
||||||
<button
|
<button
|
||||||
onClick={() =>
|
onClick={() =>
|
||||||
dispatch(MixerState.stopMicCalibration())
|
dispatch(MixerState.stopMicCalibration())
|
||||||
|
|
47
src/mixer/VUMeter.tsx
Normal file
47
src/mixer/VUMeter.tsx
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
import React, { useRef, useLayoutEffect, useEffect, HTMLProps } from "react";
|
||||||
|
|
||||||
|
interface VUMeterProps extends HTMLProps<HTMLCanvasElement> {
|
||||||
|
value: number;
|
||||||
|
range: [number, number];
|
||||||
|
greenRange: [number, number];
|
||||||
|
}
|
||||||
|
|
||||||
|
export function VUMeter(props: VUMeterProps) {
|
||||||
|
const canvasRef = useRef<HTMLCanvasElement | null>(null);
|
||||||
|
const ctxRef = useRef<CanvasRenderingContext2D | null>(null);
|
||||||
|
|
||||||
|
useLayoutEffect(() => {
|
||||||
|
if (canvasRef.current) {
|
||||||
|
ctxRef.current = canvasRef.current.getContext("2d");
|
||||||
|
}
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (!canvasRef.current || !ctxRef.current) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const valueRange = props.range[1] - props.range[0];
|
||||||
|
const width = canvasRef.current.width;
|
||||||
|
const height = canvasRef.current.height;
|
||||||
|
|
||||||
|
const ctx = ctxRef.current;
|
||||||
|
ctx.fillStyle = "#000";
|
||||||
|
ctx.fillRect(0, 0, width, height);
|
||||||
|
|
||||||
|
if (props.value >= props.greenRange[0] && props.value <= props.greenRange[1]) {
|
||||||
|
ctx.fillStyle = "#00ff00";
|
||||||
|
} else {
|
||||||
|
ctx.fillStyle = "#e8d120";
|
||||||
|
}
|
||||||
|
|
||||||
|
const valueOffset = (props.value - props.range[0]) / (props.range[1] - props.range[0])
|
||||||
|
|
||||||
|
ctx.fillRect(0, 0, valueOffset * width, height);
|
||||||
|
}, [props.value, props.range, props.greenRange]);
|
||||||
|
|
||||||
|
const { value, range, greenRange, ...rest } = props;
|
||||||
|
|
||||||
|
return (
|
||||||
|
<canvas ref={canvasRef} {...rest} />
|
||||||
|
);
|
||||||
|
}
|
|
@ -13,7 +13,7 @@ declare const sampleRate: number;
|
||||||
|
|
||||||
type StereoModeEnum = "M3" | "M6" | "AB";
|
type StereoModeEnum = "M3" | "M6" | "AB";
|
||||||
// @ts-ignore
|
// @ts-ignore
|
||||||
class LoudnessProcessor extends AudioWorkletProcessor {
|
class DBFSPeakProcessor extends AudioWorkletProcessor {
|
||||||
process(
|
process(
|
||||||
inputs: Float32Array[][],
|
inputs: Float32Array[][],
|
||||||
outputs: Float32Array[][],
|
outputs: Float32Array[][],
|
||||||
|
@ -38,7 +38,7 @@ class LoudnessProcessor extends AudioWorkletProcessor {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
class PeakProcessor extends AudioWorkletProcessor {
|
class PPMPeakProcessor extends AudioWorkletProcessor {
|
||||||
intermediateValue: number[] = [0.0, 0.0];
|
intermediateValue: number[] = [0.0, 0.0];
|
||||||
lockonfract = (1.0 - LOCK_ON_FACTOR) ** (1.0 / (sampleRate * LOCK_ON_TIME));
|
lockonfract = (1.0 - LOCK_ON_FACTOR) ** (1.0 / (sampleRate * LOCK_ON_TIME));
|
||||||
drop = DROP_FACTOR ** (1.0 / (sampleRate / DROP_TIME));
|
drop = DROP_FACTOR ** (1.0 / (sampleRate / DROP_TIME));
|
||||||
|
@ -93,4 +93,4 @@ class PeakProcessor extends AudioWorkletProcessor {
|
||||||
}
|
}
|
||||||
|
|
||||||
// @ts-ignore
|
// @ts-ignore
|
||||||
registerProcessor("loudness-processor", PeakProcessor);
|
registerProcessor("loudness-processor", DBFSPeakProcessor);
|
||||||
|
|
|
@ -34,7 +34,6 @@ finalCompressor.ratio.value = 20; //brickwall destination compressor
|
||||||
finalCompressor.threshold.value = -0.5;
|
finalCompressor.threshold.value = -0.5;
|
||||||
finalCompressor.attack.value = 0;
|
finalCompressor.attack.value = 0;
|
||||||
finalCompressor.release.value = 0.2;
|
finalCompressor.release.value = 0.2;
|
||||||
finalCompressor.connect(audioContext.destination);
|
|
||||||
|
|
||||||
export const destination = audioContext.createMediaStreamDestination();
|
export const destination = audioContext.createMediaStreamDestination();
|
||||||
console.log("final destination", destination);
|
console.log("final destination", destination);
|
||||||
|
@ -454,6 +453,7 @@ export const load = (
|
||||||
// THIS IS BAD
|
// THIS IS BAD
|
||||||
(wavesurfer as any).backend.gainNode.disconnect();
|
(wavesurfer as any).backend.gainNode.disconnect();
|
||||||
(wavesurfer as any).backend.gainNode.connect(finalCompressor);
|
(wavesurfer as any).backend.gainNode.connect(finalCompressor);
|
||||||
|
(wavesurfer as any).backend.gainNode.connect(audioContext.destination);
|
||||||
|
|
||||||
// Double-check we haven't been aborted since
|
// Double-check we haven't been aborted since
|
||||||
if (signal.aborted) {
|
if (signal.aborted) {
|
||||||
|
@ -653,12 +653,12 @@ export const openMicrophone = (micID:string): AppThunk => async (dispatch, getSt
|
||||||
micCompressor.threshold.value = -18;
|
micCompressor.threshold.value = -18;
|
||||||
micCompressor.attack.value = 0.01;
|
micCompressor.attack.value = 0.01;
|
||||||
micCompressor.release.value = 0.1;
|
micCompressor.release.value = 0.1;
|
||||||
// TODO: for testing we're connecting mic output to main out
|
|
||||||
// When streaming works we don't want to do this, because the latency is high enough to speech-jam
|
|
||||||
micSource
|
micSource
|
||||||
.connect(micGain)
|
.connect(micGain)
|
||||||
.connect(micCompressor)
|
.connect(micCompressor)
|
||||||
.connect(finalCompressor);
|
.connect(finalCompressor);
|
||||||
|
// TODO remove this
|
||||||
|
micCompressor.connect(audioContext.destination);
|
||||||
dispatch(mixerState.actions.micOpen(micID));
|
dispatch(mixerState.actions.micOpen(micID));
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -674,7 +674,7 @@ export const setMicVolume = (
|
||||||
|
|
||||||
let cancelLoudnessMeasurement: (() => void) | null = null;
|
let cancelLoudnessMeasurement: (() => void) | null = null;
|
||||||
|
|
||||||
const CALIBRATE_THE_CALIBRATOR = true;
|
const CALIBRATE_THE_CALIBRATOR = false;
|
||||||
|
|
||||||
export const startMicCalibration = (): AppThunk => async (dispatch, getState) => {
|
export const startMicCalibration = (): AppThunk => async (dispatch, getState) => {
|
||||||
if (!getState().mixer.mic.open) {
|
if (!getState().mixer.mic.open) {
|
||||||
|
@ -689,7 +689,7 @@ export const startMicCalibration = (): AppThunk => async (dispatch, getState) =
|
||||||
sauce.load();
|
sauce.load();
|
||||||
input = audioContext.createMediaElementSource(sauce);
|
input = audioContext.createMediaElementSource(sauce);
|
||||||
} else {
|
} else {
|
||||||
input = micSource!;
|
input = micCompressor!;
|
||||||
}
|
}
|
||||||
cancelLoudnessMeasurement = await createLoudnessMeasurement(
|
cancelLoudnessMeasurement = await createLoudnessMeasurement(
|
||||||
input,
|
input,
|
||||||
|
|
Loading…
Reference in a new issue