Latest
This commit is contained in:
parent 0b5d9b11fd
commit 408a1cd8d5
19 .vscode/c_cpp_properties.json vendored Normal file
@@ -0,0 +1,19 @@
{
    "configurations": [
        {
            "name": "Mac",
            "includePath": [
                "${workspaceFolder}/**"
            ],
            "defines": [],
            "macFrameworkPath": [
                "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/System/Library/Frameworks"
            ],
            "compilerPath": "/usr/bin/clang",
            "cStandard": "c17",
            "cppStandard": "c++17",
            "intelliSenseMode": "macos-clang-arm64"
        }
    ],
    "version": 4
}
8 .vscode/settings.json vendored Normal file
@@ -0,0 +1,8 @@
{
    "files.associations": {
        "*.sdp": "xml",
        "*.json": "jsonc",
        "vector": "cpp",
        "type_traits": "cpp"
    }
}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1111,16 +1111,79 @@ function dbg(...args) {
 var _emscripten_memcpy_js = (dest, src, num) => HEAPU8.copyWithin(dest, src, src + num);
 
 var getHeapMax = () =>
-    HEAPU8.length;
+    // Stay one Wasm page short of 4GB: while e.g. Chrome is able to allocate
+    // full 4GB Wasm memories, the size will wrap back to 0 bytes in Wasm side
+    // for any code that deals with heap sizes, which would require special
+    // casing all heap size related code to treat 0 specially.
+    2147483648;
 
-var abortOnCannotGrowMemory = (requestedSize) => {
-    abort(`Cannot enlarge memory arrays to size ${requestedSize} bytes (OOM). Either (1) compile with -sINITIAL_MEMORY=X with X higher than the current value ${HEAP8.length}, (2) compile with -sALLOW_MEMORY_GROWTH which allows increasing the size at runtime, or (3) if you want malloc to return NULL (0) instead of this abort, compile with -sABORTING_MALLOC=0`);
+var growMemory = (size) => {
+    var b = wasmMemory.buffer;
+    var pages = (size - b.byteLength + 65535) / 65536;
+    try {
+        // round size grow request up to wasm page size (fixed 64KB per spec)
+        wasmMemory.grow(pages); // .grow() takes a delta compared to the previous size
+        updateMemoryViews();
+        return 1 /*success*/;
+    } catch(e) {
+        err(`growMemory: Attempted to grow heap from ${b.byteLength} bytes to ${size} bytes, but got error: ${e}`);
+    }
+    // implicit 0 return to save code size (caller will cast "undefined" into 0
+    // anyhow)
 };
 
 var _emscripten_resize_heap = (requestedSize) => {
     var oldSize = HEAPU8.length;
     // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
     requestedSize >>>= 0;
-    abortOnCannotGrowMemory(requestedSize);
+    // With multithreaded builds, races can happen (another thread might increase the size
+    // in between), so return a failure, and let the caller retry.
+    assert(requestedSize > oldSize);
+
+    // Memory resize rules:
+    // 1.  Always increase heap size to at least the requested size, rounded up
+    //     to next page multiple.
+    // 2a. If MEMORY_GROWTH_LINEAR_STEP == -1, excessively resize the heap
+    //     geometrically: increase the heap size according to
+    //     MEMORY_GROWTH_GEOMETRIC_STEP factor (default +20%), At most
+    //     overreserve by MEMORY_GROWTH_GEOMETRIC_CAP bytes (default 96MB).
+    // 2b. If MEMORY_GROWTH_LINEAR_STEP != -1, excessively resize the heap
+    //     linearly: increase the heap size by at least
+    //     MEMORY_GROWTH_LINEAR_STEP bytes.
+    // 3.  Max size for the heap is capped at 2048MB-WASM_PAGE_SIZE, or by
+    //     MAXIMUM_MEMORY, or by ASAN limit, depending on which is smallest
+    // 4.  If we were unable to allocate as much memory, it may be due to
+    //     over-eager decision to excessively reserve due to (3) above.
+    //     Hence if an allocation fails, cut down on the amount of excess
+    //     growth, in an attempt to succeed to perform a smaller allocation.
+
+    // A limit is set for how much we can grow. We should not exceed that
+    // (the wasm binary specifies it, so if we tried, we'd fail anyhow).
+    var maxHeapSize = getHeapMax();
+    if (requestedSize > maxHeapSize) {
+        err(`Cannot enlarge memory, requested ${requestedSize} bytes, but the limit is ${maxHeapSize} bytes!`);
+        return false;
+    }
+
+    var alignUp = (x, multiple) => x + (multiple - x % multiple) % multiple;
+
+    // Loop through potential heap size increases. If we attempt a too eager
+    // reservation that fails, cut down on the attempted size and reserve a
+    // smaller bump instead. (max 3 times, chosen somewhat arbitrarily)
+    for (var cutDown = 1; cutDown <= 4; cutDown *= 2) {
+        var overGrownHeapSize = oldSize * (1 + 0.2 / cutDown); // ensure geometric growth
+        // but limit overreserving (default to capping at +96MB overgrowth at most)
+        overGrownHeapSize = Math.min(overGrownHeapSize, requestedSize + 100663296 );
+
+        var newSize = Math.min(maxHeapSize, alignUp(Math.max(requestedSize, overGrownHeapSize), 65536));
+
+        var replacement = growMemory(newSize);
+        if (replacement) {
+            return true;
+        }
+    }
+    err(`Failed to grow the heap from ${oldSize} bytes to ${newSize} bytes, not enough memory!`);
+    return false;
 };
 
 var SYSCALLS = {
@@ -1406,7 +1469,6 @@ var missingLibrarySymbols = [
   'convertU32PairToI53',
   'zeroMemory',
   'exitJS',
-  'growMemory',
   'isLeapYear',
   'ydayFromDate',
   'arraySum',
@@ -1601,7 +1663,7 @@ var unexportedSymbols = [
  'convertI32PairToI53Checked',
  'ptrToString',
  'getHeapMax',
-  'abortOnCannotGrowMemory',
+  'growMemory',
  'ENV',
  'MONTH_DAYS_REGULAR',
  'MONTH_DAYS_LEAP',
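For intuition, the resize policy added above can be reproduced standalone. This is a sketch only; alignUp and the constants mirror the generated code, while the sample sizes fed in at the end are hypothetical, not values from this commit.

// Standalone sketch of the heap-growth policy above.
const alignUp = (x, multiple) => x + (multiple - x % multiple) % multiple;
const maxHeapSize = 2147483648; // getHeapMax() with ALLOW_MEMORY_GROWTH

function candidateSizes(oldSize, requestedSize) {
  const sizes = [];
  for (let cutDown = 1; cutDown <= 4; cutDown *= 2) {
    let overGrown = oldSize * (1 + 0.2 / cutDown);              // geometric step: +20%, +10%, +5%
    overGrown = Math.min(overGrown, requestedSize + 100663296); // cap overgrowth at +96 MiB
    sizes.push(Math.min(maxHeapSize, alignUp(Math.max(requestedSize, overGrown), 65536)));
  }
  return sizes;
}

// e.g. growing a 64 MiB heap to fit 65 MiB tries progressively smaller bumps:
console.log(candidateSizes(64 << 20, 65 << 20).map(s => (s / 1048576).toFixed(1)));
// → ["76.8", "70.4", "67.3"] (MiB)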
Binary file not shown.
File diff suppressed because one or more lines are too long
BIN test/image.bin Normal file
Binary file not shown.
BIN test/image.bmp Normal file
Binary file not shown.
After: Width | Height | Size: 14 MiB
BIN test/image.jpg Normal file
Binary file not shown.
After: Width | Height | Size: 779 KiB
@@ -45,7 +45,7 @@
     return btoa(binary); // use JavaScript's btoa() function
 }
 
-var fileurl = './image.png'
+var fileurl = './image.bin'
 // Initialise the WebAssembly module
 var Module = {
     onRuntimeInitialized: function () {
18 testCOut/a.out.js Normal file
File diff suppressed because one or more lines are too long
79 testCOut/main.cpp Normal file
@@ -0,0 +1,79 @@
#include "./signalsmith-stretch.h"
#include <vector>

#include <emscripten.h>
int main() {}

using Sample = float;
using Stretch = signalsmith::stretch::SignalsmithStretch<Sample>;
Stretch stretch;

// Allocates memory for buffers, and returns it
std::vector<Sample> buffers;
std::vector<Sample *> buffersIn, buffersOut;

extern "C" {
	Sample * EMSCRIPTEN_KEEPALIVE setBuffers(int channels, int length) {
		buffers.resize(length*channels*2);
		Sample *data = buffers.data();
		buffersIn.resize(0);
		buffersOut.resize(0);
		for (int c = 0; c < channels; ++c) {
			buffersIn.push_back(data + length*c);
			buffersOut.push_back(data + length*(c + channels));
		}
		return data;
	}

	int EMSCRIPTEN_KEEPALIVE blockSamples() {
		return stretch.blockSamples();
	}
	int EMSCRIPTEN_KEEPALIVE intervalSamples() {
		return stretch.intervalSamples();
	}
	int EMSCRIPTEN_KEEPALIVE inputLatency() {
		return stretch.inputLatency();
	}
	int EMSCRIPTEN_KEEPALIVE outputLatency() {
		return stretch.outputLatency();
	}
	void EMSCRIPTEN_KEEPALIVE reset() {
		stretch.reset();
	}
	void EMSCRIPTEN_KEEPALIVE presetDefault(int nChannels, Sample sampleRate) {
		stretch.presetDefault(nChannels, sampleRate);
	}
	void EMSCRIPTEN_KEEPALIVE presetCheaper(int nChannels, Sample sampleRate) {
		stretch.presetCheaper(nChannels, sampleRate);
	}
	void EMSCRIPTEN_KEEPALIVE configure(int nChannels, int blockSamples, int intervalSamples, bool splitComputation) {
		stretch.configure(nChannels, blockSamples, intervalSamples, splitComputation);
	}
	void EMSCRIPTEN_KEEPALIVE setTransposeFactor(Sample multiplier, Sample tonalityLimit) {
		stretch.setTransposeFactor(multiplier, tonalityLimit);
	}
	void EMSCRIPTEN_KEEPALIVE setTransposeSemitones(Sample semitones, Sample tonalityLimit) {
		stretch.setTransposeSemitones(semitones, tonalityLimit);
	}
	void EMSCRIPTEN_KEEPALIVE setFormantFactor(Sample multiplier, bool compensate) {
		stretch.setFormantFactor(multiplier, compensate);
	}
	void EMSCRIPTEN_KEEPALIVE setFormantSemitones(Sample semitones, bool compensate) {
		stretch.setFormantSemitones(semitones, compensate);
	}
	void EMSCRIPTEN_KEEPALIVE setFormantBase(Sample freq) {
		stretch.setFormantBase(freq);
	}
	// We can't do setFreqMap()
	void EMSCRIPTEN_KEEPALIVE seek(int inputSamples, double playbackRate) {
		stretch.seek(buffersIn, inputSamples, playbackRate);
	}
	void EMSCRIPTEN_KEEPALIVE process(int inputSamples, int outputSamples) {
		stretch.process(buffersIn, inputSamples, buffersOut, outputSamples);
	}
	void EMSCRIPTEN_KEEPALIVE flush(int outputSamples) {
		stretch.flush(buffersOut, outputSamples);
	}
}
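For orientation, here is a hypothetical JavaScript calling sequence for these exports, assuming the module is built by the testCOut/生成 script below (-sMODULARIZE -sEXPORT_NAME=testWasm, with HEAP8 exported); Emscripten exposes each EMSCRIPTEN_KEEPALIVE function on the module with a leading underscore. The channel count, block length, and semitone values are illustrative.

// Hypothetical usage sketch of the exports above.
testWasm().then((mod) => {
  const channels = 2, length = 4096, bytesPerSample = 4;
  mod._presetDefault(channels, 44100);           // configure for stereo 44.1kHz
  const ptr = mod._setBuffers(channels, length); // byte offset into the wasm heap
  // Layout from setBuffers(): [in ch0, in ch1, out ch0, out ch1]
  // Note: re-create these views after any call that may grow the heap (ALLOW_MEMORY_GROWTH=1).
  const input = new Float32Array(mod.HEAP8.buffer, ptr, length * channels);
  const output = new Float32Array(mod.HEAP8.buffer, ptr + length * channels * bytesPerSample, length * channels);
  // ... fill `input` with audio, one channel after another ...
  mod._setTransposeSemitones(3, 0);              // pitch up 3 semitones
  mod._process(length, length);                  // equal in/out lengths = no time-stretch
  // ... read processed audio back from `output` ...
});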
10 testCOut/pre.js Normal file
@@ -0,0 +1,10 @@
// Adapted from the Emscripten error message when initialising std::random_device
var crypto = globalThis?.crypto || {
	getRandomValues: array => {
		// Cryptographically insecure, but fine for audio
		for (var i = 0; i < array.length; i++) array[i] = (Math.random()*256)|0;
	}
};
var performance = globalThis?.performance || {
	now: _ => Date.now()
};
1380 testCOut/signalsmith-linear/fft.h Normal file
File diff suppressed because it is too large
631 testCOut/signalsmith-linear/stft.h Normal file
@@ -0,0 +1,631 @@
#ifndef SIGNALSMITH_AUDIO_LINEAR_STFT_H
#define SIGNALSMITH_AUDIO_LINEAR_STFT_H

#include "./fft.h"

namespace signalsmith { namespace linear {

enum {
	STFT_SPECTRUM_PACKED=0,
	STFT_SPECTRUM_MODIFIED=1,
	STFT_SPECTRUM_UNPACKED=2,
};

/// A self-normalising STFT, with variable position/window for output blocks
template<typename Sample, bool splitComputation=false, int spectrumType=STFT_SPECTRUM_PACKED>
struct DynamicSTFT {
	static constexpr bool modified = (spectrumType == STFT_SPECTRUM_MODIFIED);
	static constexpr bool unpacked = (spectrumType == STFT_SPECTRUM_UNPACKED);
	RealFFT<Sample, splitComputation, modified> fft;

	using Complex = std::complex<Sample>;

	enum class WindowShape {ignore, acg, kaiser};
	static constexpr WindowShape acg = WindowShape::acg;
	static constexpr WindowShape kaiser = WindowShape::kaiser;

	void configure(size_t inChannels, size_t outChannels, size_t blockSamples, size_t extraInputHistory=0, size_t intervalSamples=0, Sample asymmetry=0) {
		_analysisChannels = inChannels;
		_synthesisChannels = outChannels;
		_blockSamples = blockSamples;
		_fftSamples = fft.fastSizeAbove((blockSamples + 1)/2)*2;
		fft.resize(_fftSamples);
		_fftBins = _fftSamples/2 + (spectrumType == STFT_SPECTRUM_UNPACKED);

		_inputLengthSamples = _blockSamples + extraInputHistory;
		input.buffer.resize(_inputLengthSamples*_analysisChannels);

		output.buffer.resize(_blockSamples*_synthesisChannels);
		output.windowProducts.resize(_blockSamples);
		spectrumBuffer.resize(_fftBins*std::max(_analysisChannels, _synthesisChannels));
		timeBuffer.resize(_fftSamples);

		_analysisWindow.resize(_blockSamples);
		_synthesisWindow.resize(_blockSamples);
		setInterval(intervalSamples ? intervalSamples : blockSamples/4, acg, asymmetry);

		reset();
	}

	size_t blockSamples() const {
		return _blockSamples;
	}
	size_t fftSamples() const {
		return _fftSamples;
	}
	size_t defaultInterval() const {
		return _defaultInterval;
	}
	size_t bands() const {
		return _fftBins;
	}
	size_t analysisLatency() const {
		return _blockSamples - _analysisOffset;
	}
	size_t synthesisLatency() const {
		return _synthesisOffset;
	}
	size_t latency() const {
		return synthesisLatency() + analysisLatency();
	}
	Sample binToFreq(Sample b) const {
		return (modified ? b + Sample(0.5) : b)/_fftSamples;
	}
	Sample freqToBin(Sample f) const {
		return modified ? f*_fftSamples - Sample(0.5) : f*_fftSamples;
	}

	void reset(Sample productWeight=1) {
		input.pos = _blockSamples;
		output.pos = 0;
		for (auto &v : input.buffer) v = 0;
		for (auto &v : output.buffer) v = 0;
		for (auto &v : spectrumBuffer) v = 0;
		for (auto &v : output.windowProducts) v = 0;
		addWindowProduct();
		for (int i = int(_blockSamples) - int(_defaultInterval) - 1; i >= 0; --i) {
			output.windowProducts[i] += output.windowProducts[i + _defaultInterval];
		}
		for (auto &v : output.windowProducts) v = v*productWeight + almostZero;
		moveOutput(_defaultInterval); // ready for first block immediately
	}

	void writeInput(size_t channel, size_t offset, size_t length, const Sample *inputArray) {
		Sample *buffer = input.buffer.data() + channel*_inputLengthSamples;

		size_t offsetPos = (input.pos + offset)%_inputLengthSamples;
		size_t inputWrapIndex = _inputLengthSamples - offsetPos;
		size_t chunk1 = std::min(length, inputWrapIndex);
		for (size_t i = 0; i < chunk1; ++i) {
			size_t i2 = offsetPos + i;
			buffer[i2] = inputArray[i];
		}
		for (size_t i = chunk1; i < length; ++i) {
			size_t i2 = i + offsetPos - _inputLengthSamples;
			buffer[i2] = inputArray[i];
		}
	}
	void writeInput(size_t channel, size_t length, const Sample *inputArray) {
		writeInput(channel, 0, length, inputArray);
	}
	void moveInput(size_t samples, bool clearInput=false) {
		if (clearInput) {
			size_t inputWrapIndex = _inputLengthSamples - input.pos;
			size_t chunk1 = std::min(samples, inputWrapIndex);
			for (size_t c = 0; c < _analysisChannels; ++c) {
				Sample *buffer = input.buffer.data() + c*_inputLengthSamples;
				for (size_t i = 0; i < chunk1; ++i) {
					size_t i2 = input.pos + i;
					buffer[i2] = 0;
				}
				for (size_t i = chunk1; i < samples; ++i) {
					size_t i2 = i + input.pos - _inputLengthSamples;
					buffer[i2] = 0;
				}
			}
		}

		input.pos = (input.pos + samples)%_inputLengthSamples;
		_samplesSinceAnalysis += samples;
	}
	size_t samplesSinceAnalysis() const {
		return _samplesSinceAnalysis;
	}

	/// When no more synthesis is expected, let output taper away to 0 based on windowing. Otherwise, the output will be scaled as if there's just a very long block interval, which can exaggerate artefacts and numerical errors. You still can't read more than `blockSamples()` into the future.
	void finishOutput(Sample strength=1, size_t offset=0) {
		Sample maxWindowProduct = 0;

		size_t chunk1 = std::max(offset, std::min(_blockSamples, _blockSamples - output.pos));

		for (size_t i = offset; i < chunk1; ++i) {
			size_t i2 = output.pos + i;
			Sample &wp = output.windowProducts[i2];
			maxWindowProduct = std::max(wp, maxWindowProduct);
			wp += (maxWindowProduct - wp)*strength;
		}
		for (size_t i = chunk1; i < _blockSamples; ++i) {
			size_t i2 = i + output.pos - _blockSamples;
			Sample &wp = output.windowProducts[i2];
			maxWindowProduct = std::max(wp, maxWindowProduct);
			wp += (maxWindowProduct - wp)*strength;
		}
	}

	void readOutput(size_t channel, size_t offset, size_t length, Sample *outputArray) {
		Sample *buffer = output.buffer.data() + channel*_blockSamples;
		size_t offsetPos = (output.pos + offset)%_blockSamples;
		size_t outputWrapIndex = _blockSamples - offsetPos;
		size_t chunk1 = std::min(length, outputWrapIndex);
		for (size_t i = 0; i < chunk1; ++i) {
			size_t i2 = offsetPos + i;
			outputArray[i] = buffer[i2]/output.windowProducts[i2];
		}
		for (size_t i = chunk1; i < length; ++i) {
			size_t i2 = i + offsetPos - _blockSamples;
			outputArray[i] = buffer[i2]/output.windowProducts[i2];
		}
	}
	void readOutput(size_t channel, size_t length, Sample *outputArray) {
		return readOutput(channel, 0, length, outputArray);
	}
	void addOutput(size_t channel, size_t offset, size_t length, const Sample *newOutputArray) {
		length = std::min(_blockSamples, length);
		Sample *buffer = output.buffer.data() + channel*_blockSamples;
		size_t offsetPos = (output.pos + offset)%_blockSamples;
		size_t outputWrapIndex = _blockSamples - offsetPos;
		size_t chunk1 = std::min(length, outputWrapIndex);
		for (size_t i = 0; i < chunk1; ++i) {
			size_t i2 = offsetPos + i;
			buffer[i2] += newOutputArray[i]*output.windowProducts[i2];
		}
		for (size_t i = chunk1; i < length; ++i) {
			size_t i2 = i + offsetPos - _blockSamples;
			buffer[i2] += newOutputArray[i]*output.windowProducts[i2];
		}
	}
	void addOutput(size_t channel, size_t length, const Sample *newOutputArray) {
		return addOutput(channel, 0, length, newOutputArray);
	}
	void replaceOutput(size_t channel, size_t offset, size_t length, const Sample *newOutputArray) {
		length = std::min(_blockSamples, length);
		Sample *buffer = output.buffer.data() + channel*_blockSamples;
		size_t offsetPos = (output.pos + offset)%_blockSamples;
		size_t outputWrapIndex = _blockSamples - offsetPos;
		size_t chunk1 = std::min(length, outputWrapIndex);
		for (size_t i = 0; i < chunk1; ++i) {
			size_t i2 = offsetPos + i;
			buffer[i2] = newOutputArray[i]*output.windowProducts[i2];
		}
		for (size_t i = chunk1; i < length; ++i) {
			size_t i2 = i + offsetPos - _blockSamples;
			buffer[i2] = newOutputArray[i]*output.windowProducts[i2];
		}
	}
	void replaceOutput(size_t channel, size_t length, const Sample *newOutputArray) {
		return replaceOutput(channel, 0, length, newOutputArray);
	}
	void moveOutput(size_t samples) {
		if (samples == 1) { // avoid all the loops/chunks if we can
			for (size_t c = 0; c < _synthesisChannels; ++c) {
				output.buffer[output.pos + c*_blockSamples] = 0;
			}
			output.windowProducts[output.pos] = almostZero;
			if (++output.pos >= _blockSamples) output.pos = 0;
			return;
		}
		// Zero the output buffer as we cross it
		size_t outputWrapIndex = _blockSamples - output.pos;
		size_t chunk1 = std::min(samples, outputWrapIndex);
		for (size_t c = 0; c < _synthesisChannels; ++c) {
			Sample *buffer = output.buffer.data() + c*_blockSamples;
			for (size_t i = 0; i < chunk1; ++i) {
				size_t i2 = output.pos + i;
				buffer[i2] = 0;
			}
			for (size_t i = chunk1; i < samples; ++i) {
				size_t i2 = i + output.pos - _blockSamples;
				buffer[i2] = 0;
			}
		}
		for (size_t i = 0; i < chunk1; ++i) {
			size_t i2 = output.pos + i;
			output.windowProducts[i2] = almostZero;
		}
		for (size_t i = chunk1; i < samples; ++i) {
			size_t i2 = i + output.pos - _blockSamples;
			output.windowProducts[i2] = almostZero;
		}
		output.pos = (output.pos + samples)%_blockSamples;
		_samplesSinceSynthesis += samples;
	}
	size_t samplesSinceSynthesis() const {
		return _samplesSinceSynthesis;
	}

	Complex * spectrum(size_t channel) {
		return spectrumBuffer.data() + channel*_fftBins;
	}
	const Complex * spectrum(size_t channel) const {
		return spectrumBuffer.data() + channel*_fftBins;
	}

	Sample * analysisWindow() {
		return _analysisWindow.data();
	}
	const Sample * analysisWindow() const {
		return _analysisWindow.data();
	}
	// Sets the centre index of the window
	void analysisOffset(size_t offset) {
		_analysisOffset = offset;
	}
	size_t analysisOffset() const {
		return _analysisOffset;
	}
	Sample * synthesisWindow() {
		return _synthesisWindow.data();
	}
	const Sample * synthesisWindow() const {
		return _synthesisWindow.data();
	}
	// Sets the centre index of the window
	void synthesisOffset(size_t offset) {
		_synthesisOffset = offset;
	}
	size_t synthesisOffset() const {
		return _synthesisOffset;
	}

	void setInterval(size_t defaultInterval, WindowShape windowShape=WindowShape::ignore, Sample asymmetry=0) {
		_defaultInterval = defaultInterval;
		if (windowShape == WindowShape::ignore) return;

		if (windowShape == acg) {
			auto window = ApproximateConfinedGaussian::withBandwidth(double(_blockSamples)/defaultInterval);
			window.fill(_synthesisWindow, _blockSamples, asymmetry, false);
		} else if (windowShape == kaiser) {
			auto window = Kaiser::withBandwidth(double(_blockSamples)/defaultInterval, true);
			window.fill(_synthesisWindow, _blockSamples, asymmetry, true);
		}

		_analysisOffset = _synthesisOffset = _blockSamples/2;
		if (_analysisChannels == 0) {
			for (auto &v : _analysisWindow) v = 1;
		} else if (asymmetry == 0) {
			forcePerfectReconstruction(_synthesisWindow, _blockSamples, _defaultInterval);
			for (size_t i = 0; i < _blockSamples; ++i) {
				_analysisWindow[i] = _synthesisWindow[i];
			}
		} else {
			for (size_t i = 0; i < _blockSamples; ++i) {
				_analysisWindow[i] = _synthesisWindow[_blockSamples - 1 - i];
			}
		}
		// Set offsets to peak's index
		for (size_t i = 0; i < _blockSamples; ++i) {
			if (_analysisWindow[i] > _analysisWindow[_analysisOffset]) _analysisOffset = i;
			if (_synthesisWindow[i] > _synthesisWindow[_synthesisOffset]) _synthesisOffset = i;
		}
	}

	void analyse(size_t samplesInPast=0) {
		for (size_t s = 0; s < analyseSteps(); ++s) {
			analyseStep(s, samplesInPast);
		}
	}
	size_t analyseSteps() const {
		return splitComputation ? _analysisChannels*(fft.steps() + 1) : _analysisChannels;
	}
	void analyseStep(size_t step, std::size_t samplesInPast=0) {
		size_t fftSteps = splitComputation ? fft.steps() : 0;
		size_t channel = step/(fftSteps + 1);
		step -= channel*(fftSteps + 1);

		if (step-- == 0) { // extra step at start of each channel: copy windowed input into buffer
			size_t offsetPos = (_inputLengthSamples*2 + input.pos - _blockSamples - samplesInPast)%_inputLengthSamples;
			size_t inputWrapIndex = _inputLengthSamples - offsetPos;
			size_t chunk1 = std::min(_analysisOffset, inputWrapIndex);
			size_t chunk2 = std::max(_analysisOffset, std::min(_blockSamples, inputWrapIndex));

			_samplesSinceAnalysis = samplesInPast;
			Sample *buffer = input.buffer.data() + channel*_inputLengthSamples;
			for (size_t i = 0; i < chunk1; ++i) {
				Sample w = modified ? -_analysisWindow[i] : _analysisWindow[i];
				size_t ti = i + (_fftSamples - _analysisOffset);
				size_t bi = offsetPos + i;
				timeBuffer[ti] = buffer[bi]*w;
			}
			for (size_t i = chunk1; i < _analysisOffset; ++i) {
				Sample w = modified ? -_analysisWindow[i] : _analysisWindow[i];
				size_t ti = i + (_fftSamples - _analysisOffset);
				size_t bi = i + offsetPos - _inputLengthSamples;
				timeBuffer[ti] = buffer[bi]*w;
			}
			for (size_t i = _analysisOffset; i < chunk2; ++i) {
				Sample w = _analysisWindow[i];
				size_t ti = i - _analysisOffset;
				size_t bi = offsetPos + i;
				timeBuffer[ti] = buffer[bi]*w;
			}
			for (size_t i = chunk2; i < _blockSamples; ++i) {
				Sample w = _analysisWindow[i];
				size_t ti = i - _analysisOffset;
				size_t bi = i + offsetPos - _inputLengthSamples;
				timeBuffer[ti] = buffer[bi]*w;
			}
			for (size_t i = _blockSamples - _analysisOffset; i < _fftSamples - _analysisOffset; ++i) {
				timeBuffer[i] = 0;
			}
			if (splitComputation) return;
		}
		auto *spectrumPtr = spectrum(channel);
		if (splitComputation) {
			fft.fft(step, timeBuffer.data(), spectrumPtr);
			if (unpacked && step == fft.steps() - 1) {
				spectrumPtr[_fftBins - 1] = spectrumPtr[0].imag();
				spectrumPtr[0].imag(0);
			}
		} else {
			fft.fft(timeBuffer.data(), spectrum(channel));
			if (unpacked) {
				spectrumPtr[_fftBins - 1] = spectrumPtr[0].imag();
				spectrumPtr[0].imag(0);
			}
		}
	}

	void synthesise() {
		for (size_t s = 0; s < synthesiseSteps(); ++s) {
			synthesiseStep(s);
		}
	}
	size_t synthesiseSteps() const {
		return splitComputation ? (_synthesisChannels*(fft.steps() + 1) + 1) : _synthesisChannels;
	}
	void synthesiseStep(size_t step) {
		if (step == 0) { // Extra first step which adds in the effective gain for a pure analysis-synthesis cycle
			addWindowProduct();
			if (splitComputation) return;
		}
		if (splitComputation) --step;

		size_t fftSteps = splitComputation ? fft.steps() : 0;
		size_t channel = step/(fftSteps + 1);
		step -= channel*(fftSteps + 1);

		auto *spectrumPtr = spectrum(channel);
		if (unpacked && step == 0) { // re-pack
			spectrumPtr[0].imag(spectrumPtr[_fftBins - 1].real());
		}

		if (splitComputation) {
			if (step < fftSteps) {
				fft.ifft(step, spectrumPtr, timeBuffer.data());
				return;
			}
		} else {
			fft.ifft(spectrumPtr, timeBuffer.data());
		}

		// extra step after each channel's FFT
		Sample *buffer = output.buffer.data() + channel*_blockSamples;
		size_t outputWrapIndex = _blockSamples - output.pos;
		size_t chunk1 = std::min(_synthesisOffset, outputWrapIndex);
		size_t chunk2 = std::min(_blockSamples, std::max(_synthesisOffset, outputWrapIndex));

		for (size_t i = 0; i < chunk1; ++i) {
			Sample w = modified ? -_synthesisWindow[i] : _synthesisWindow[i];
			size_t ti = i + (_fftSamples - _synthesisOffset);
			size_t bi = output.pos + i;
			buffer[bi] += timeBuffer[ti]*w;
		}
		for (size_t i = chunk1; i < _synthesisOffset; ++i) {
			Sample w = modified ? -_synthesisWindow[i] : _synthesisWindow[i];
			size_t ti = i + (_fftSamples - _synthesisOffset);
			size_t bi = i + output.pos - _blockSamples;
			buffer[bi] += timeBuffer[ti]*w;
		}
		for (size_t i = _synthesisOffset; i < chunk2; ++i) {
			Sample w = _synthesisWindow[i];
			size_t ti = i - _synthesisOffset;
			size_t bi = output.pos + i;
			buffer[bi] += timeBuffer[ti]*w;
		}
		for (size_t i = chunk2; i < _blockSamples; ++i) {
			Sample w = _synthesisWindow[i];
			size_t ti = i - _synthesisOffset;
			size_t bi = i + output.pos - _blockSamples;
			buffer[bi] += timeBuffer[ti]*w;
		}
	}

#define COMPAT_SPELLING(name, alt) \
	template<class ...Args> \
	void alt(Args &&...args) { \
		name(std::forward<Args>(args)...); \
	}
	COMPAT_SPELLING(analyse, analyze);
	COMPAT_SPELLING(analyseStep, analyzeStep);
	COMPAT_SPELLING(analyseSteps, analyzeSteps);
	COMPAT_SPELLING(synthesise, synthesize);
	COMPAT_SPELLING(synthesiseStep, synthesizeStep);
	COMPAT_SPELLING(synthesiseSteps, synthesizeSteps);

	/// Input (only available so we can save/restore the input state)
	struct Input {
		void swap(Input &other) {
			std::swap(pos, other.pos);
			std::swap(buffer, other.buffer);
		}

		Input & operator=(const Input &other) {
			pos = other.pos;
			buffer.assign(other.buffer.begin(), other.buffer.end());
			return *this;
		}
	private:
		friend struct DynamicSTFT;
		size_t pos = 0;
		std::vector<Sample> buffer;
	};
	Input input;

	/// Output (only available so we can save/restore the output state)
	struct Output {
		void swap(Output &other) {
			std::swap(pos, other.pos);
			std::swap(buffer, other.buffer);
			std::swap(windowProducts, other.windowProducts);
		}

		Output & operator=(const Output &other) {
			pos = other.pos;
			buffer.assign(other.buffer.begin(), other.buffer.end());
			windowProducts.assign(other.windowProducts.begin(), other.windowProducts.end());
			return *this;
		}
	private:
		friend struct DynamicSTFT;
		size_t pos = 0;
		std::vector<Sample> buffer;
		std::vector<Sample> windowProducts;
	};
	Output output;

private:
	static constexpr Sample almostZero = 1e-30;

	size_t _analysisChannels, _synthesisChannels, _inputLengthSamples, _blockSamples, _fftSamples, _fftBins;
	size_t _defaultInterval = 0;

	std::vector<Sample> _analysisWindow, _synthesisWindow;
	size_t _analysisOffset = 0, _synthesisOffset = 0;

	std::vector<Complex> spectrumBuffer;
	std::vector<Sample> timeBuffer;

	size_t _samplesSinceSynthesis = 0, _samplesSinceAnalysis = 0;

	void addWindowProduct() {
		_samplesSinceSynthesis = 0;

		int windowShift = int(_synthesisOffset) - int(_analysisOffset);
		size_t wMin = std::max<ptrdiff_t>(0, windowShift);
		size_t wMax = std::min<ptrdiff_t>(_blockSamples, int(_blockSamples) + windowShift);

		Sample *windowProduct = output.windowProducts.data();
		size_t outputWrapIndex = _blockSamples - output.pos;
		size_t chunk1 = std::min<size_t>(wMax, std::max<size_t>(wMin, outputWrapIndex));
		for (size_t i = wMin; i < chunk1; ++i) {
			Sample wa = _analysisWindow[i - windowShift];
			Sample ws = _synthesisWindow[i];
			size_t bi = output.pos + i;
			windowProduct[bi] += wa*ws*_fftSamples;
		}
		for (size_t i = chunk1; i < wMax; ++i) {
			Sample wa = _analysisWindow[i - windowShift];
			Sample ws = _synthesisWindow[i];
			size_t bi = i + output.pos - _blockSamples;
			windowProduct[bi] += wa*ws*_fftSamples;
		}
	}

	// Copied from DSP library `windows.h`
	class Kaiser {
		inline static double bessel0(double x) {
			const double significanceLimit = 1e-4;
			double result = 0;
			double term = 1;
			double m = 0;
			while (term > significanceLimit) {
				result += term;
				++m;
				term *= (x*x)/(4*m*m);
			}

			return result;
		}
		double beta;
		double invB0;

		static double heuristicBandwidth(double bandwidth) {
			return bandwidth + 8/((bandwidth + 3)*(bandwidth + 3)) + 0.25*std::max(3 - bandwidth, 0.0);
		}
	public:
		Kaiser(double beta) : beta(beta), invB0(1/bessel0(beta)) {}

		static Kaiser withBandwidth(double bandwidth, bool heuristicOptimal=false) {
			return Kaiser(bandwidthToBeta(bandwidth, heuristicOptimal));
		}
		static double bandwidthToBeta(double bandwidth, bool heuristicOptimal=false) {
			if (heuristicOptimal) { // Heuristic based on numerical search
				bandwidth = heuristicBandwidth(bandwidth);
			}
			bandwidth = std::max(bandwidth, 2.0);
			double alpha = std::sqrt(bandwidth*bandwidth*0.25 - 1);
			return alpha*M_PI;
		}

		template<typename Data>
		void fill(Data &&data, size_t size, double warp, bool isForSynthesis) const {
			double invSize = 1.0/size;
			size_t offsetI = (size&1) ? 1 : (isForSynthesis ? 0 : 2);
			for (size_t i = 0; i < size; ++i) {
				double r = (2*i + offsetI)*invSize - 1;
				r = (r + warp)/(1 + r*warp);
				double arg = std::sqrt(1 - r*r);
				data[i] = bessel0(beta*arg)*invB0;
			}
		}
	};

	class ApproximateConfinedGaussian {
		double gaussianFactor;

		double gaussian(double x) const {
			return std::exp(-x*x*gaussianFactor);
		}
	public:
		static double bandwidthToSigma(double bandwidth) {
			return 0.3/std::sqrt(bandwidth);
		}
		static ApproximateConfinedGaussian withBandwidth(double bandwidth) {
			return ApproximateConfinedGaussian(bandwidthToSigma(bandwidth));
		}

		ApproximateConfinedGaussian(double sigma) : gaussianFactor(0.0625/(sigma*sigma)) {}

		/// Fills an arbitrary container
		template<typename Data>
		void fill(Data &&data, size_t size, double warp, bool isForSynthesis) const {
			double invSize = 1.0/size;
			double offsetScale = gaussian(1)/(gaussian(3) + gaussian(-1));
			double norm = 1/(gaussian(0) - 2*offsetScale*(gaussian(2)));
			size_t offsetI = (size&1) ? 1 : (isForSynthesis ? 0 : 2);
			for (size_t i = 0; i < size; ++i) {
				double r = (2*i + offsetI)*invSize - 1;
				r = (r + warp)/(1 + r*warp);
				data[i] = norm*(gaussian(r) - offsetScale*(gaussian(r - 2) + gaussian(r + 2)));
			}
		}
	};

	template<typename Data>
	void forcePerfectReconstruction(Data &&data, size_t windowLength, size_t interval) {
		for (size_t i = 0; i < interval; ++i) {
			double sum2 = 0;
			for (size_t index = i; index < windowLength; index += interval) {
				sum2 += data[index]*data[index];
			}
			double factor = 1/std::sqrt(sum2);
			for (size_t index = i; index < windowLength; index += interval) {
				data[index] *= factor;
			}
		}
	}
};

}} // namespace

#endif // include guard
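Why `forcePerfectReconstruction` above gives exact reconstruction: for hop size h (`interval`) it normalises each comb of window samples spaced h apart independently,

	\tilde w[i + kh] = \frac{w[i + kh]}{\sqrt{\sum_{k'} w[i + k'h]^2}} \quad\Longrightarrow\quad \sum_{k} \tilde w[i + kh]^2 = 1 \quad (0 \le i < h)

so when analysis and synthesis share the window (the `asymmetry == 0` branch of `setInterval`), the overlap-added window products are constant across the block, which is what lets `readOutput` recover the signal by simply dividing by `output.windowProducts`.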
1060 testCOut/signalsmith-stretch.h Normal file
File diff suppressed because it is too large
11 testCOut/生成 Normal file
@@ -0,0 +1,11 @@
docker run --rm -v $(pwd):/src emscripten/emsdk \
	bash -c "em++ main.cpp \
	-sEXPORT_NAME=testWasm -DEXPORT_NAME=testWasm \
	-I / \
	-std=c++11 -O3 -ffast-math -fno-exceptions -fno-rtti \
	--pre-js pre.js --closure 0 \
	-Wall -Wextra -Wfatal-errors -Wpedantic -pedantic-errors \
	-sSINGLE_FILE=1 -sMODULARIZE -sENVIRONMENT=web,worker,shell -sNO_EXIT_RUNTIME=1 \
	-sFILESYSTEM=0 -sEXPORTED_RUNTIME_METHODS=HEAP8,UTF8ToString \
	-sINITIAL_MEMORY=512kb -sALLOW_MEMORY_GROWTH=1 -sMEMORY_GROWTH_GEOMETRIC_STEP=0.5 -sABORTING_MALLOC=1 \
	-sSTRICT=1 -sDYNAMIC_EXECUTION=0"
131 utils(小程序兼容代码).js Normal file
@@ -0,0 +1,131 @@
// wasm image-compression utility
const wasmMgr = {
  // the loaded wasm module, cached after the first load
  imageCompressModule: null,

  // fetch (and lazily load) the image-compression module
  getCompressImg() {
    if (this.imageCompressModule) {
      return Promise.resolve(this.imageCompressModule);
    }
    return new Promise((resolve, reject) => {
      const wasmImports = {
        __assert_fail: (condition, filename, line, func) => {
          console.log(condition, filename, line, func);
        },
        emscripten_resize_heap: (size, old_size) => {
          console.log(size, old_size);
        },
        fd_close: (fd) => {
          console.log(fd);
        },
        fd_seek: (fd, offset, whence) => {
          console.log(fd, offset, whence);
        },
        fd_write: (fd, buf, len, pos) => {
          console.log(fd, buf, len, pos);
        },
        emscripten_memcpy_js: (dest, src, len) => {
          this.imageCompressModule.HEAPU8.copyWithin(dest, src, src + len);
        },
      };

      // WeChat mini-program environment
      if (typeof WXWebAssembly !== "undefined") {
        WXWebAssembly.instantiate(
          "/convert_image_to_webp.wasm",
          {
            env: wasmImports,
            wasi_snapshot_preview1: wasmImports,
          }
        ).then((result) => {
          this.imageCompressModule = {
            _convert_image_to_webp: result.instance.exports.convert_image_to_webp,
            _malloc: result.instance.exports.malloc,
            _free: result.instance.exports.free,
          };
          this.imageCompressModule.HEAPU8 = new Uint8Array(
            result.instance.exports.memory.buffer
          );
          console.log("convert_image_to_webp loaded successfully");
          resolve(this.imageCompressModule);
        }).catch((err) => {
          console.error("Failed to load wasm script", err);
          reject(err);
        });
      } else {
        // H5 or other environments
        console.error("WebAssembly is not supported in this environment");
        reject(new Error("WebAssembly is not supported in this environment"));
      }
    });
  },

  // image compression
  async compressImg(file, quality = 0.5, w, h, target_w, target_h) {
    const compressImgHandler = (inputData, module, isOrgin = false) => {
      const inputDataPtr = module._malloc(inputData.length);
      module.HEAPU8.set(inputData, inputDataPtr);
      const outputSizePtr = module._malloc(4);
      const webpPtr = module._convert_image_to_webp(
        inputDataPtr,
        inputData.length,
        w,
        h,
        target_w,
        target_h,
        80 * (quality > 1 ? 1 : quality),
        outputSizePtr,
        1,
        isOrgin ? 1 : 0
      );
      // read the little-endian uint32 output size written by the wasm side
      const outputSize =
        module.HEAPU8[outputSizePtr] |
        (module.HEAPU8[outputSizePtr + 1] << 8) |
        (module.HEAPU8[outputSizePtr + 2] << 16) |
        (module.HEAPU8[outputSizePtr + 3] << 24);
      const webpData = new Uint8Array(module.HEAPU8.buffer, webpPtr, outputSize);
      // If only the raw binary data is needed, return webpData directly and skip the base64 conversion
      // return webpData
      // Encode before freeing: webpData is a view into the wasm heap, so its bytes
      // may be overwritten once the pointers are released
      const base64 = 'data:image/webp;base64,' + this.arrayBufferToBase64(webpData);
      module._free(webpPtr);
      module._free(outputSizePtr);
      module._free(inputDataPtr);
      return base64;
    };

    try {
      const module = await this.getCompressImg();

      if (file instanceof Uint8Array) {
        return compressImgHandler(file, module, true);
      } else {
        return new Promise((resolve, reject) => {
          wx.getFileSystemManager().readFile({
            filePath: file,
            success: res => {
              resolve(compressImgHandler(new Uint8Array(res.data), module));
            },
            fail: e => {
              console.error("Failed to read file", e);
              reject(e);
            },
          });
        });
      }
    } catch (error) {
      console.error("Image compression failed", error);
      throw error;
    }
  },

  // convert an ArrayBuffer (or typed-array view) to Base64
  arrayBufferToBase64(buffer) {
    return wx.arrayBufferToBase64(buffer);
  }
};

module.exports = {
  wasmMgr
};
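A hypothetical call site for wasmMgr from a mini-program page, assuming a base library recent enough that wx.chooseMedia and wx.getImageInfo return promises when no callbacks are passed; the file path and target dimensions are illustrative, and the parameters follow compressImg above.

// Hypothetical usage sketch of wasmMgr.compressImg.
const { wasmMgr } = require('./utils(小程序兼容代码).js');

async function pickAndCompress() {
  const { tempFiles } = await wx.chooseMedia({ count: 1, mediaType: ['image'] });
  const filePath = tempFiles[0].tempFilePath;
  const info = await wx.getImageInfo({ src: filePath }); // source width/height
  // quality 0.5, downscaled to half size; resolves to a data:image/webp;base64,... string
  const dataUrl = await wasmMgr.compressImg(
    filePath, 0.5, info.width, info.height,
    Math.round(info.width / 2), Math.round(info.height / 2)
  );
  console.log('compressed data URL length:', dataUrl.length);
}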
BIN wasm_webp_output.zip Normal file
Binary file not shown.
4 生成
@@ -1,6 +1,6 @@
 # debug mode
-docker run --rm -v $(pwd):/src emscripten/emsdk emcc convert_image_to_webp.cpp stb_image.c libwebp.a libsharpyuv.a libwebpdecoder.a libwebpdemux.a libwebpmux.a -o ./test/convert_image_to_webp.js -g -s WASM=1 -s INITIAL_MEMORY=34340864 -s EXPORTED_FUNCTIONS="['_free','_malloc','_convert_image_to_webp']" -s EXTRA_EXPORTED_RUNTIME_METHODS='["cwrap", "getValue"]' -gsource-map
+docker run --rm -v $(pwd):/src emscripten/emsdk emcc convert_image_to_webp.cpp stb_image.c libwebp.a libsharpyuv.a libwebpdecoder.a libwebpdemux.a libwebpmux.a -o ./test/convert_image_to_webp.js -g -s WASM=1 -s INITIAL_MEMORY=20MB -s ALLOW_MEMORY_GROWTH=1 -s EXPORTED_FUNCTIONS="['_free','_malloc','_convert_image_to_webp']" -s EXTRA_EXPORTED_RUNTIME_METHODS='["cwrap", "getValue"]' -gsource-map
 
 
 # optimised command
-docker run --rm -v $(pwd):/src emscripten/emsdk emcc convert_image_to_webp.cpp stb_image.c libwebp.a libsharpyuv.a -o ./output/convert_image_to_webp.js -s WASM=1 -s NO_FILESYSTEM=1 -s INITIAL_MEMORY=34340864 -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=268435456 -s EXPORTED_FUNCTIONS="['_free','_malloc','_convert_image_to_webp']" -s EXTRA_EXPORTED_RUNTIME_METHODS='["cwrap", "getValue"]' -O2
+docker run --rm -v $(pwd):/src emscripten/emsdk emcc convert_image_to_webp.cpp stb_image.c libwebp.a libsharpyuv.a -o ./output/convert_image_to_webp.js -s WASM=1 -s NO_FILESYSTEM=1 -s INITIAL_MEMORY=512MB -s EXPORTED_FUNCTIONS="['_free','_malloc','_convert_image_to_webp']" -s EXTRA_EXPORTED_RUNTIME_METHODS='["cwrap", "getValue"]' -O2