From 5f2a71faf140869a022ec37d9212f5be1fa63f10 Mon Sep 17 00:00:00 2001
From: Saurav Panda
Date: Wed, 5 Feb 2025 22:54:57 -0800
Subject: [PATCH 1/2] release: new version of chrome extension

---
 extensions/chrome/public/background.js | 9 +++++++++
 extensions/chrome/public/manifest.json | 9 ++++++++-
 package.json                           | 2 +-
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/extensions/chrome/public/background.js b/extensions/chrome/public/background.js
index e2d7635..b055a2e 100644
--- a/extensions/chrome/public/background.js
+++ b/extensions/chrome/public/background.js
@@ -121,5 +121,14 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
     });
     return true;
   }
+});
+
+// Toggle side panel when command is triggered
+chrome.commands.onCommand.addListener((command) => {
+  if (command === 'toggle-side-panel') {
+    chrome.windows.getCurrent().then(window => {
+      chrome.sidePanel.open({ windowId: window.id });
+    });
+  }
 });

diff --git a/extensions/chrome/public/manifest.json b/extensions/chrome/public/manifest.json
index c8b8958..d699c8d 100644
--- a/extensions/chrome/public/manifest.json
+++ b/extensions/chrome/public/manifest.json
@@ -1,7 +1,7 @@
 {
   "manifest_version": 3,
   "name": "BrowserAgent - AI Agents in Browser",
-  "version": "1.0.2",
+  "version": "1.0.3",
   "description": "Run private, cost-free AI agents directly in your browser. Automate tasks, enhance browsing, and control your data with local AI.",
   "action": {
     "default_title": "Browser AI",
@@ -38,6 +38,13 @@
       "default": "Ctrl+Shift+Y",
       "mac": "Command+Shift+Y"
     }
+  },
+  "toggle-side-panel": {
+    "suggested_key": {
+      "default": "Ctrl+Shift+Y",
+      "mac": "Command+Shift+Y"
+    },
+    "description": "Toggle the side panel"
   }
 },
 "content_scripts": [{

diff --git a/package.json b/package.json
index 5891063..39a4b6f 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@browserai/browserai",
-  "version": "1.0.22",
+  "version": "1.0.23",
   "private": false,
   "description": "A library for running AI models directly in the browser",
   "main": "dist/index.js",

From 005a2c73d0c351980aac50883110c471478ac178 Mon Sep 17 00:00:00 2001
From: Saurav Panda
Date: Thu, 6 Feb 2025 21:37:20 -0800
Subject: [PATCH 2/2] TTS webgpu tensor processing fixed!
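This commit switches the TTS model to the webgpu device and reworks the tensor reductions in src/libs/transformers/utils/tensor.ts around a shared reduce_helper, so norm, min, max, mean and std_mean use one code path for reducing over a single dimension. The snippet below is a minimal standalone sketch of the flat-index to reduced-index mapping that reduce_helper relies on; the reduceSum name, the [2, 3] shape and the sample data are illustrative only and are not part of the library.

    // Illustrative only: sum a row-major array over one dimension using the
    // same index arithmetic as reduce_helper (skip the reduced dim when
    // rebuilding the output index).
    function reduceSum(data: number[], dims: number[], dim: number): number[] {
      const resultDims = dims.slice();
      resultDims[dim] = 1;
      const result = new Array(data.length / dims[dim]).fill(0);
      for (let i = 0; i < data.length; ++i) {
        let resultIndex = 0;
        for (let j = dims.length - 1, num = i, multiplier = 1; j >= 0; --j) {
          const size = dims[j];
          if (j !== dim) {
            resultIndex += (num % size) * multiplier;
            multiplier *= resultDims[j];
          }
          num = Math.floor(num / size);
        }
        result[resultIndex] += data[i];
      }
      return result;
    }

    // reduceSum([1, 2, 3, 4, 5, 6], [2, 3], 1) -> [6, 15] (row sums of a 2x3 tensor)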
---
 src/engines/tts-engine.ts             |   2 +-
 src/libs/transformers/utils/tensor.ts | 219 +++++++++++---------------
 2 files changed, 97 insertions(+), 124 deletions(-)

diff --git a/src/engines/tts-engine.ts b/src/engines/tts-engine.ts
index 56d6e68..ab54613 100644
--- a/src/engines/tts-engine.ts
+++ b/src/engines/tts-engine.ts
@@ -21,7 +21,7 @@ export class TTSEngine {
     this.model = await StyleTextToSpeech2Model.from_pretrained(modelConfig.repo, {
       progress_callback: options.onProgress,
       dtype: options.dtype || "q4",
-      device: "wasm",
+      device: "webgpu",
     });
 
     this.tokenizer = await AutoTokenizer.from_pretrained(modelConfig.repo, {

diff --git a/src/libs/transformers/utils/tensor.ts b/src/libs/transformers/utils/tensor.ts
index dcc63b3..9844b1f 100644
--- a/src/libs/transformers/utils/tensor.ts
+++ b/src/libs/transformers/utils/tensor.ts
@@ -491,42 +491,16 @@ export class Tensor {
     }
 
     const this_data = this.data;
+    const fn = (a: number, b: number) => a + (b ** p);
 
     if (dim === null) {
       // @ts-ignore
-      let val = this_data.reduce((a, b) => a + b ** p, 0) ** (1 / p);
+      const val = this_data.reduce(fn, 0) ** (1 / p);
       return new Tensor(this.type, [val], []);
     }
 
-    // Negative indexing
-    dim = safeIndex(dim, this.dims.length);
-
-    // Calculate the shape of the resulting array after summation
-    const resultDims = this.dims.slice(); // Copy the original dimensions
-    resultDims[dim] = 1; // Remove the specified axis
-
-    // Create a new array to store the accumulated values
-    // @ts-ignore
-    const result = new this_data.constructor(this_data.length / this.dims[dim]);
+    const [type, result, resultDims] = reduce_helper(fn, this, dim, keepdim);
 
-    // Iterate over the data array
-    for (let i = 0; i < this_data.length; ++i) {
-      // Calculate the index in the resulting array
-      let resultIndex = 0;
-
-      for (let j = this.dims.length - 1, num = i, resultMultiplier = 1; j >= 0; --j) {
-        const size = this.dims[j];
-        if (j !== dim) {
-          const index = num % size;
-          resultIndex += index * resultMultiplier;
-          resultMultiplier *= resultDims[j];
-        }
-        num = Math.floor(num / size);
-      }
-
-      // Accumulate the value at the current index
-      result[resultIndex] += this_data[i] ** p;
-    }
 
     if (p !== 1) {
       for (let i = 0; i < result.length; ++i) {
@@ -534,11 +508,8 @@ export class Tensor {
       }
     }
 
-    if (!keepdim) {
-      resultDims.splice(dim, 1);
-    }
+    return new Tensor(type, result, resultDims);
 
-    return new Tensor(this.type, result, resultDims);
   }
 
   /**
@@ -702,6 +673,34 @@ export class Tensor {
     return this.clone().neg_();
   }
 
+  /**
+   * Computes input > val element-wise.
+   * @param {number} val The value to compare with.
+   * @returns {Tensor} A boolean tensor that is `true` where input is greater than other and `false` elsewhere.
+   */
+  gt(val: number) {
+    const mask = new Uint8Array(this.data.length);
+    const this_data = this.data;
+    for (let i = 0; i < this_data.length; ++i) {
+      mask[i] = this_data[i] > val ? 1 : 0;
+    }
+    return new Tensor('bool', mask, this.dims);
+  }
+
+  /**
+   * Computes input < val element-wise.
+   * @param {number} val The value to compare with.
+   * @returns {Tensor} A boolean tensor that is `true` where input is less than other and `false` elsewhere.
+   */
+  lt(val: number) {
+    const mask = new Uint8Array(this.data.length);
+    const this_data = this.data;
+    for (let i = 0; i < this_data.length; ++i) {
+      mask[i] = this_data[i] < val ? 1 : 0;
+    }
+    return new Tensor('bool', mask, this.dims);
+  }
+
   /**
    * In-place version of @see {@link Tensor.clamp}
    */
@@ -748,17 +747,20 @@ export class Tensor {
   min(dim = null, keepdim = false) {
     if (dim !== null) {
-      throw new Error('`dim !== null` not yet implemented.');
+      const val = min(this.data as any)[0];
+      return new Tensor(this.type, [val], [/* scalar */]);
     }
-    const value = min(this.data as any)[0];
-    return new Tensor(this.type, [value], []);
+    const [type, result, resultDims] = reduce_helper((a, b) => Math.min(a, b), this, dim, keepdim, Infinity);
+    return new Tensor(type, result, resultDims);
   }
 
   max(dim = null, keepdim = false) {
-    if (dim !== null) {
-      throw new Error('`dim !== null` not yet implemented.');
+    if (dim === null) {
+      // None to reduce over all dimensions.
+      const val = max(this.data as any)[0];
+      return new Tensor(this.type, [val], [/* scalar */]);
     }
-    const value = max(this.data as any)[0];
-    return new Tensor(this.type, [value], []);
+    const [type, result, resultDims] = reduce_helper((a, b) => Math.max(a, b), this, dim, keepdim, -Infinity);
+    return new Tensor(type, result, resultDims);
   }
 
   argmin(dim = null, keepdim = false) {
@@ -1250,49 +1252,18 @@ export function stack(tensors: Tensor[], dim: number = 0) {
 }
 
 /**
- * Calculates the standard deviation and mean over the dimensions specified by dim. dim can be a single dimension or `null` to reduce over all dimensions.
- * @param {Tensor} input the input tenso
- * @param {number|null} dim the dimension to reduce. If None, all dimensions are reduced.
- * @param {number} correction difference between the sample size and sample degrees of freedom. Defaults to Bessel's correction, correction=1.
+ * @param {(previousValue: any, currentValue: any, currentIndex?: number, resultIndex?: number) => any} callbackfn
+ * @param {Tensor} input the input tensor.
+ * @param {number|null} dim the dimension to reduce.
  * @param {boolean} keepdim whether the output tensor has dim retained or not.
- * @returns {Tensor[]} A tuple of (std, mean) tensors.
+ * @returns {[DataType, any, number[]]} The reduced tensor data.
  */
-export function std_mean(input: Tensor, dim: number | null = null, correction: number = 1, keepdim: boolean = false) {
-  const inputData = /** @type {Float32Array} */ input.data;
+function reduce_helper(callbackfn: (previousValue: number, currentValue: number, currentIndex?: number| null, resultIndex?: number| null) => number, input: Tensor, dim: number | null = null, keepdim = false, initialValue: number | null = null) {
+  const inputData = input.data;
   const inputDims = input.dims;
 
-  if (dim === null) {
-    // None to reduce over all dimensions.
- const sum = Array.prototype.reduce.call(inputData, ((a: number, b: number) => a + b) as any, 0) as number; - const mean = sum / inputData.length; - const std = Math.sqrt( - (Array.prototype.reduce.call(inputData, ((a: number, b: number) => a + (b - mean) ** 2) as any, 0) as number) / - (inputData.length - correction), - ); - - const meanTensor = new Tensor( - input.type as DataType, - [mean], - [ - /* scalar */ - ], - ); - const stdTensor = new Tensor( - input.type as DataType, - [std], - [ - /* scalar */ - ], - ); - - return [stdTensor, meanTensor]; - } - // Negative indexing - dim = safeIndex(dim, inputDims.length); - - const meanTensor = mean(input, dim, keepdim); - const meanTensorData = meanTensor.data; + dim = safeIndex(dim as number, inputDims.length); // Calculate the shape of the resulting array after summation const resultDims = inputDims.slice(); // Copy the original dimensions @@ -1301,9 +1272,13 @@ export function std_mean(input: Tensor, dim: number | null = null, correction: n // Create a new array to store the accumulated values // @ts-ignore const result = new inputData.constructor(inputData.length / inputDims[dim]); + if (initialValue !== null) { + result.fill(initialValue); + } // Iterate over the data array for (let i = 0; i < inputData.length; ++i) { + // Calculate the index in the resulting array let resultIndex = 0; @@ -1318,22 +1293,55 @@ export function std_mean(input: Tensor, dim: number | null = null, correction: n } // Accumulate the value at the current index - result[resultIndex] += (inputData[i] - meanTensorData[resultIndex]) ** 2; + result[resultIndex] = callbackfn(result[resultIndex], inputData[i], i, resultIndex); } - for (let i = 0; i < result.length; ++i) { - result[i] = Math.sqrt(result[i] / (inputDims[dim] - correction)); + if (!keepdim) resultDims.splice(dim, 1); + + return [input.type, result, resultDims]; +} + +/** + * Calculates the standard deviation and mean over the dimensions specified by dim. dim can be a single dimension or `null` to reduce over all dimensions. + * @param {Tensor} input the input tenso + * @param {number|null} dim the dimension to reduce. If None, all dimensions are reduced. + * @param {number} correction difference between the sample size and sample degrees of freedom. Defaults to Bessel's correction, correction=1. + * @param {boolean} keepdim whether the output tensor has dim retained or not. + * @returns {Tensor[]} A tuple of (std, mean) tensors. + */ +export function std_mean(input: Tensor, dim: number | null = null, correction = 1, keepdim = false) { + const inputData: any = /** @type {Float32Array} */(input.data); + const inputDims = input.dims; + + if (dim === null) { + // None to reduce over all dimensions. 
+ const sum = inputData.reduce((a: number, b: number) => a + b, 0); + const mean = sum / inputData.length; + const std = Math.sqrt(inputData.reduce((a: number, b: number) => a + (b - mean) ** 2, 0) / (inputData.length - correction)); + + const meanTensor = new Tensor(input.type, [mean], [/* scalar */]); + const stdTensor = new Tensor(input.type, [std], [/* scalar */]); + + return [stdTensor, meanTensor]; } + dim = safeIndex(dim, inputDims.length); + const meanTensor = mean(input, dim, keepdim); + const meanTensorData = meanTensor.data; - if (!keepdim) { - resultDims.splice(dim, 1); + // Compute squared sum + const [type, result, resultDims] = reduce_helper((a: number, b: number, i: any, j: any) => a + (b - meanTensorData[j]) ** 2, input, dim, keepdim); + + // Square root of the squared sum + for (let i = 0; i < result.length; ++i) { + result[i] = Math.sqrt(result[i] / (inputDims[dim] - correction)); } - const stdTensor = new Tensor(input.type as DataType, result, resultDims); + const stdTensor = new Tensor(type, result, resultDims); return [stdTensor, meanTensor]; } + /** * Returns the mean value of each row of the input tensor in the given dimension dim. * @param {Tensor} input the input tensor. @@ -1343,11 +1351,12 @@ export function std_mean(input: Tensor, dim: number | null = null, correction: n */ export function mean(input: Tensor, dim: number | null = null, keepdim: boolean = false) { const inputData = /** @type {Float32Array} */ input.data; + const inputDims = input.dims; if (dim === null) { // None to reduce over all dimensions. // @ts-ignore - const val = inputData.reduce((a, b) => a + b, 0); + const val = inputData.reduce((a: number, b: number) => a + b, 0); return new Tensor( input.type, [val / inputData.length], @@ -1356,49 +1365,13 @@ export function mean(input: Tensor, dim: number | null = null, keepdim: boolean ], ); } - const inputDims = input.dims; // Negative indexing dim = safeIndex(dim, inputDims.length); - // Calculate the shape of the resulting array after summation - const resultDims = inputDims.slice(); // Copy the original dimensions - resultDims[dim] = 1; // Remove the specified axis - - // Create a new array to store the accumulated values - // @ts-ignore - const result = new inputData.constructor(inputData.length / inputDims[dim]); - - // Iterate over the data array - for (let i = 0; i < inputData.length; ++i) { - // Calculate the index in the resulting array - let resultIndex = 0; - - for (let j = inputDims.length - 1, num = i, resultMultiplier = 1; j >= 0; --j) { - const size = inputDims[j]; - if (j !== dim) { - const index = num % size; - resultIndex += index * resultMultiplier; - resultMultiplier *= resultDims[j]; - } - num = Math.floor(num / size); - } - - // Accumulate the value at the current index - result[resultIndex] += inputData[i]; - } - - if (inputDims[dim] !== 1) { - for (let i = 0; i < result.length; ++i) { - result[i] = result[i] / inputDims[dim]; - } - } - - if (!keepdim) { - resultDims.splice(dim, 1); - } + const [type, result, resultDims] = reduce_helper((a: number, b: number) => a + b, input, dim, keepdim); - return new Tensor(input.type, result, resultDims); + return new Tensor(type, result, resultDims); } function dimsToStride(dims: number[]) {