Skip to content

Commit

Permalink
Merge pull request #85 from Cloud-Code-AI/release-chrome-1
Browse files Browse the repository at this point in the history
release: new version of chrome extension
  • Loading branch information
sauravpanda authored Feb 7, 2025
2 parents 1d30ae5 + 005a2c7 commit 12f52ec
Show file tree
Hide file tree
Showing 5 changed files with 115 additions and 126 deletions.
9 changes: 9 additions & 0 deletions extensions/chrome/public/background.js
Original file line number Diff line number Diff line change
Expand Up @@ -121,5 +121,14 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
});
return true;
}
});

// Toggle side panel when command is triggered
// Keyboard-shortcut handler for the 'toggle-side-panel' command declared in
// manifest.json "commands". Despite the "toggle" name, this only ever OPENS
// the side panel — no close path is invoked here.
chrome.commands.onCommand.addListener((command) => {
if (command === 'toggle-side-panel') {
// NOTE(review): chrome.sidePanel.open() must be called in response to a user
// gesture; deferring it behind the windows.getCurrent() promise may drop the
// gesture context in some Chrome versions — confirm the panel opens reliably.
chrome.windows.getCurrent().then(window => {
// Open the panel in the currently focused window.
chrome.sidePanel.open({ windowId: window.id });
});
}
});

9 changes: 8 additions & 1 deletion extensions/chrome/public/manifest.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"manifest_version": 3,
"name": "BrowserAgent - AI Agents in Browser",
"version": "1.0.2",
"version": "1.0.3",
"description": "Run private, cost-free AI agents directly in your browser. Automate tasks, enhance browsing, and control your data with local AI.",
"action": {
"default_title": "Browser AI",
Expand Down Expand Up @@ -38,6 +38,13 @@
"default": "Ctrl+Shift+Y",
"mac": "Command+Shift+Y"
}
},
"toggle-side-panel": {
"suggested_key": {
"default": "Ctrl+Shift+Y",
"mac": "Command+Shift+Y"
},
"description": "Toggle the side panel"
}
},
"content_scripts": [{
Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@browserai/browserai",
"version": "1.0.22",
"version": "1.0.23",
"private": false,
"description": "A library for running AI models directly in the browser",
"main": "dist/index.js",
Expand Down
2 changes: 1 addition & 1 deletion src/engines/tts-engine.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ export class TTSEngine {
this.model = await StyleTextToSpeech2Model.from_pretrained(modelConfig.repo, {
progress_callback: options.onProgress,
dtype: options.dtype || "q4",
device: "wasm",
device: "webgpu",
});

this.tokenizer = await AutoTokenizer.from_pretrained(modelConfig.repo, {
Expand Down
219 changes: 96 additions & 123 deletions src/libs/transformers/utils/tensor.ts
Original file line number Diff line number Diff line change
Expand Up @@ -491,54 +491,25 @@ export class Tensor {
}

const this_data = this.data;
const fn = (a: number, b: number) => a + (b ** p);

if (dim === null) {
// @ts-ignore
let val = this_data.reduce((a, b) => a + b ** p, 0) ** (1 / p);
const val = this_data.reduce(fn, 0) ** (1 / p);
return new Tensor(this.type, [val], []);
}

// Negative indexing
dim = safeIndex(dim, this.dims.length);

// Calculate the shape of the resulting array after summation
const resultDims = this.dims.slice(); // Copy the original dimensions
resultDims[dim] = 1; // Remove the specified axis

// Create a new array to store the accumulated values
// @ts-ignore
const result = new this_data.constructor(this_data.length / this.dims[dim]);
const [type, result, resultDims] = reduce_helper(fn, this, dim, keepdim);

// Iterate over the data array
for (let i = 0; i < this_data.length; ++i) {
// Calculate the index in the resulting array
let resultIndex = 0;

for (let j = this.dims.length - 1, num = i, resultMultiplier = 1; j >= 0; --j) {
const size = this.dims[j];
if (j !== dim) {
const index = num % size;
resultIndex += index * resultMultiplier;
resultMultiplier *= resultDims[j];
}
num = Math.floor(num / size);
}

// Accumulate the value at the current index
result[resultIndex] += this_data[i] ** p;
}

if (p !== 1) {
for (let i = 0; i < result.length; ++i) {
result[i] = result[i] ** (1 / p);
}
}

if (!keepdim) {
resultDims.splice(dim, 1);
}
return new Tensor(type, result, resultDims);

return new Tensor(this.type, result, resultDims);
}

/**
Expand Down Expand Up @@ -702,6 +673,34 @@ export class Tensor {
return this.clone().neg_();
}

/**
 * Element-wise greater-than comparison against a scalar.
 * @param {number} val The scalar to compare each element with.
 * @returns {Tensor} A `bool` tensor holding `true` (1) where the element
 * exceeds `val`, and `false` (0) everywhere else. Shape matches `this`.
 */
gt(val: number) {
  const source = this.data;
  // Uint8Array starts zero-filled, so only the "true" slots need writing.
  const flags = new Uint8Array(source.length);
  for (let idx = 0; idx < source.length; ++idx) {
    if (source[idx] > val) {
      flags[idx] = 1;
    }
  }
  return new Tensor('bool', flags, this.dims);
}

/**
 * Element-wise less-than comparison against a scalar.
 * @param {number} val The scalar to compare each element with.
 * @returns {Tensor} A `bool` tensor holding `true` (1) where the element
 * is below `val`, and `false` (0) everywhere else. Shape matches `this`.
 */
lt(val: number) {
  const source = this.data;
  // Uint8Array starts zero-filled, so only the "true" slots need writing.
  const flags = new Uint8Array(source.length);
  for (let idx = 0; idx < source.length; ++idx) {
    if (source[idx] < val) {
      flags[idx] = 1;
    }
  }
  return new Tensor('bool', flags, this.dims);
}

/**
* In-place version of @see {@link Tensor.clamp}
*/
Expand Down Expand Up @@ -748,17 +747,20 @@ export class Tensor {

min(dim = null, keepdim = false) {
if (dim !== null) {
throw new Error('`dim !== null` not yet implemented.');
const val = min(this.data as any)[0];
return new Tensor(this.type, [val], [/* scalar */]);
}
const value = min(this.data as any)[0];
return new Tensor(this.type, [value], []);
const [type, result, resultDims] = reduce_helper((a, b) => Math.min(a, b), this, dim, keepdim, Infinity);
return new Tensor(type, result, resultDims);
}
max(dim = null, keepdim = false) {
if (dim !== null) {
throw new Error('`dim !== null` not yet implemented.');
if (dim === null) {
// None to reduce over all dimensions.
const val = max(this.data as any)[0];
return new Tensor(this.type, [val], [/* scalar */]);
}
const value = max(this.data as any)[0];
return new Tensor(this.type, [value], []);
const [type, result, resultDims] = reduce_helper((a, b) => Math.max(a, b), this, dim, keepdim, -Infinity);
return new Tensor(type, result, resultDims);
}

argmin(dim = null, keepdim = false) {
Expand Down Expand Up @@ -1250,49 +1252,18 @@ export function stack(tensors: Tensor[], dim: number = 0) {
}

/**
* Calculates the standard deviation and mean over the dimensions specified by dim. dim can be a single dimension or `null` to reduce over all dimensions.
* @param {Tensor} input the input tenso
* @param {number|null} dim the dimension to reduce. If None, all dimensions are reduced.
* @param {number} correction difference between the sample size and sample degrees of freedom. Defaults to Bessel's correction, correction=1.
* @param {(previousValue: any, currentValue: any, currentIndex?: number, resultIndex?: number) => any} callbackfn
* @param {Tensor} input the input tensor.
* @param {number|null} dim the dimension to reduce.
* @param {boolean} keepdim whether the output tensor has dim retained or not.
* @returns {Tensor[]} A tuple of (std, mean) tensors.
* @returns {[DataType, any, number[]]} The reduced tensor data.
*/
export function std_mean(input: Tensor, dim: number | null = null, correction: number = 1, keepdim: boolean = false) {
const inputData = /** @type {Float32Array} */ input.data;
function reduce_helper(callbackfn: (previousValue: number, currentValue: number, currentIndex?: number| null, resultIndex?: number| null) => number, input: Tensor, dim: number | null = null, keepdim = false, initialValue: number | null = null) {
const inputData = input.data;
const inputDims = input.dims;

if (dim === null) {
// None to reduce over all dimensions.
const sum = Array.prototype.reduce.call(inputData, ((a: number, b: number) => a + b) as any, 0) as number;
const mean = sum / inputData.length;
const std = Math.sqrt(
(Array.prototype.reduce.call(inputData, ((a: number, b: number) => a + (b - mean) ** 2) as any, 0) as number) /
(inputData.length - correction),
);

const meanTensor = new Tensor(
input.type as DataType,
[mean],
[
/* scalar */
],
);
const stdTensor = new Tensor(
input.type as DataType,
[std],
[
/* scalar */
],
);

return [stdTensor, meanTensor];
}

// Negative indexing
dim = safeIndex(dim, inputDims.length);

const meanTensor = mean(input, dim, keepdim);
const meanTensorData = meanTensor.data;
dim = safeIndex(dim as number, inputDims.length);

// Calculate the shape of the resulting array after summation
const resultDims = inputDims.slice(); // Copy the original dimensions
Expand All @@ -1301,9 +1272,13 @@ export function std_mean(input: Tensor, dim: number | null = null, correction: n
// Create a new array to store the accumulated values
// @ts-ignore
const result = new inputData.constructor(inputData.length / inputDims[dim]);
if (initialValue !== null) {
result.fill(initialValue);
}

// Iterate over the data array
for (let i = 0; i < inputData.length; ++i) {

// Calculate the index in the resulting array
let resultIndex = 0;

Expand All @@ -1318,22 +1293,55 @@ export function std_mean(input: Tensor, dim: number | null = null, correction: n
}

// Accumulate the value at the current index
result[resultIndex] += (inputData[i] - meanTensorData[resultIndex]) ** 2;
result[resultIndex] = callbackfn(result[resultIndex], inputData[i], i, resultIndex);
}

for (let i = 0; i < result.length; ++i) {
result[i] = Math.sqrt(result[i] / (inputDims[dim] - correction));
if (!keepdim) resultDims.splice(dim, 1);

return [input.type, result, resultDims];
}

/**
* Calculates the standard deviation and mean over the dimensions specified by dim. dim can be a single dimension or `null` to reduce over all dimensions.
* @param {Tensor} input the input tensor.
* @param {number|null} dim the dimension to reduce. If None, all dimensions are reduced.
* @param {number} correction difference between the sample size and sample degrees of freedom. Defaults to Bessel's correction, correction=1.
* @param {boolean} keepdim whether the output tensor has dim retained or not.
* @returns {Tensor[]} A tuple of (std, mean) tensors.
*/
export function std_mean(input: Tensor, dim: number | null = null, correction = 1, keepdim = false) {
const inputData: any = /** @type {Float32Array} */(input.data);
const inputDims = input.dims;

if (dim === null) {
// None to reduce over all dimensions.
const sum = inputData.reduce((a: number, b: number) => a + b, 0);
const mean = sum / inputData.length;
const std = Math.sqrt(inputData.reduce((a: number, b: number) => a + (b - mean) ** 2, 0) / (inputData.length - correction));

const meanTensor = new Tensor(input.type, [mean], [/* scalar */]);
const stdTensor = new Tensor(input.type, [std], [/* scalar */]);

return [stdTensor, meanTensor];
}
dim = safeIndex(dim, inputDims.length);
const meanTensor = mean(input, dim, keepdim);
const meanTensorData = meanTensor.data;

if (!keepdim) {
resultDims.splice(dim, 1);
// Compute squared sum
const [type, result, resultDims] = reduce_helper((a: number, b: number, i: any, j: any) => a + (b - meanTensorData[j]) ** 2, input, dim, keepdim);

// Square root of the squared sum
for (let i = 0; i < result.length; ++i) {
result[i] = Math.sqrt(result[i] / (inputDims[dim] - correction));
}

const stdTensor = new Tensor(input.type as DataType, result, resultDims);
const stdTensor = new Tensor(type, result, resultDims);

return [stdTensor, meanTensor];
}


/**
* Returns the mean value of each row of the input tensor in the given dimension dim.
* @param {Tensor} input the input tensor.
Expand All @@ -1343,11 +1351,12 @@ export function std_mean(input: Tensor, dim: number | null = null, correction: n
*/
export function mean(input: Tensor, dim: number | null = null, keepdim: boolean = false) {
const inputData = /** @type {Float32Array} */ input.data;
const inputDims = input.dims;

if (dim === null) {
// None to reduce over all dimensions.
// @ts-ignore
const val = inputData.reduce((a, b) => a + b, 0);
const val = inputData.reduce((a: number, b: number) => a + b, 0);
return new Tensor(
input.type,
[val / inputData.length],
Expand All @@ -1356,49 +1365,13 @@ export function mean(input: Tensor, dim: number | null = null, keepdim: boolean
],
);
}
const inputDims = input.dims;

// Negative indexing
dim = safeIndex(dim, inputDims.length);

// Calculate the shape of the resulting array after summation
const resultDims = inputDims.slice(); // Copy the original dimensions
resultDims[dim] = 1; // Remove the specified axis

// Create a new array to store the accumulated values
// @ts-ignore
const result = new inputData.constructor(inputData.length / inputDims[dim]);

// Iterate over the data array
for (let i = 0; i < inputData.length; ++i) {
// Calculate the index in the resulting array
let resultIndex = 0;

for (let j = inputDims.length - 1, num = i, resultMultiplier = 1; j >= 0; --j) {
const size = inputDims[j];
if (j !== dim) {
const index = num % size;
resultIndex += index * resultMultiplier;
resultMultiplier *= resultDims[j];
}
num = Math.floor(num / size);
}

// Accumulate the value at the current index
result[resultIndex] += inputData[i];
}

if (inputDims[dim] !== 1) {
for (let i = 0; i < result.length; ++i) {
result[i] = result[i] / inputDims[dim];
}
}

if (!keepdim) {
resultDims.splice(dim, 1);
}
const [type, result, resultDims] = reduce_helper((a: number, b: number) => a + b, input, dim, keepdim);

return new Tensor(input.type, result, resultDims);
return new Tensor(type, result, resultDims);
}

function dimsToStride(dims: number[]) {
Expand Down

0 comments on commit 12f52ec

Please sign in to comment.