Using JavaScript for Machine Learning with TensorFlow.js

Ramkumar Khubchandani
May 14, 2024 · 7 min read

Machine Learning (ML) has become increasingly popular in recent years, with applications spanning domains such as computer vision, natural language processing, and predictive analytics. While ML models have traditionally been built in languages like Python, modern web technologies now make it possible to run ML models directly in the browser using JavaScript.

Enter TensorFlow.js, a library developed by Google that brings TensorFlow (a popular open-source ML library) to the JavaScript ecosystem. With TensorFlow.js, developers can train, deploy, and run ML models directly in the browser or in Node.js environments, opening up new possibilities for building intelligent and interactive web applications.
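Getting started requires very little setup. As a quick sanity check, here is a minimal sketch, assuming you install the library from npm (or load it from a CDN script tag):

// Install with: npm install @tensorflow/tfjs
// (or load https://cdn.jsdelivr.net/npm/@tensorflow/tfjs via a <script> tag)
import * as tf from '@tensorflow/tfjs';

// Build two small tensors and multiply them; TensorFlow.js picks an
// appropriate backend (WebGL in the browser, CPU or native bindings in Node.js)
const a = tf.tensor2d([[1, 2], [3, 4]]);
const b = tf.tensor2d([[5, 6], [7, 8]]);
a.matMul(b).print(); // logs the 2x2 matrix product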

Real-Life Example: Handwritten Digit Recognition

One common use case for machine learning is image recognition tasks, such as handwritten digit recognition. In this example, we’ll build a simple web application that can recognize handwritten digits drawn on a canvas. We’ll train a convolutional neural network (CNN) model using the MNIST dataset, which consists of 60,000 training examples and 10,000 test examples of handwritten digits.

Before we dive into the code, let’s understand the high-level steps involved in this process:

  1. Load and preprocess the data: We’ll load the MNIST dataset and preprocess the data to make it suitable for training our model.
  2. Define the model architecture: We’ll define the architecture of our convolutional neural network using the TensorFlow.js API.
  3. Train the model: We’ll train our model on the preprocessed MNIST dataset.
  4. Evaluate the model: After training, we’ll evaluate the model’s performance on the test dataset.
  5. Deploy the model: Finally, we’ll deploy the trained model in our web application and use it to recognize handwritten digits drawn on a canvas.

Let’s start with the code:

// Load the required libraries
import * as tf from '@tensorflow/tfjs';
// NOTE: there is no official '@tensorflow/mnist' package; this import stands in
// for whichever MNIST loader you use (for example, the MnistData helper from the
// tfjs-examples repository). It is assumed to return images and labels.
import * as mnist from '@tensorflow/mnist';

// Define some global variables
let model;
let canvas, ctx;

// Load the MNIST data and preprocess it
async function loadData() {
  const mnistData = await mnist.getData();
  const imgs = mnistData.imgs;
  const labels = mnistData.labels;

  // Normalize the pixel values and reshape to [numImages, 28, 28, 1]
  // so the data matches the conv2d inputShape defined below
  const numImages = imgs.shape[0];
  const imgs4D = imgs.reshape([numImages, 28, 28, 1]);
  const imgs4DNorm = imgs4D.div(255);

  // Convert labels to one-hot encoding
  const labelsTensor = tf.tensor1d(labels, 'int32');
  const oneHotLabels = tf.oneHot(labelsTensor, 10);

  return { imgs: imgs4DNorm, labels: oneHotLabels };
}

// Define the model architecture
function defineModel() {
  const model = tf.sequential();

  model.add(tf.layers.conv2d({
    inputShape: [28, 28, 1],
    kernelSize: 5,
    filters: 8,
    strides: 1,
    activation: 'relu',
    kernelInitializer: 'varianceScaling'
  }));

  model.add(tf.layers.maxPooling2d({ poolSize: [2, 2], strides: [2, 2] }));

  model.add(tf.layers.conv2d({
    kernelSize: 5,
    filters: 16,
    strides: 1,
    activation: 'relu',
    kernelInitializer: 'varianceScaling'
  }));

  model.add(tf.layers.maxPooling2d({ poolSize: [2, 2], strides: [2, 2] }));
  model.add(tf.layers.flatten());

  model.add(tf.layers.dense({
    units: 10,
    kernelInitializer: 'varianceScaling',
    activation: 'softmax'
  }));

  return model;
}

// Train the model
async function train(model, imgs, labels) {
  const BATCH_SIZE = 128;
  const EPOCHS = 10;

  model.compile({
    optimizer: tf.train.adam(),
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });

  // The images and labels are already tensors, so model.fit can handle
  // batching and the validation split for us
  await model.fit(imgs, labels, {
    batchSize: BATCH_SIZE,
    epochs: EPOCHS,
    validationSplit: 0.15,
    callbacks: {
      onEpochEnd: async (epoch, logs) => {
        console.log(`Epoch ${epoch + 1} - Loss: ${logs.loss.toFixed(4)} - Acc: ${logs.acc.toFixed(4)}`);
      }
    }
  });

  return model;
}

// Evaluate the model
async function evaluate(model, imgs, labels) {
  const [loss, acc] = await model.evaluate(imgs, labels);
  console.log(`Loss: ${loss.dataSync()[0].toFixed(4)} Acc: ${acc.dataSync()[0].toFixed(4)}`);
}

// Function to draw on the canvas
function draw(e) {
  const rect = canvas.getBoundingClientRect();
  const x = e.clientX - rect.left;
  const y = e.clientY - rect.top;
  ctx.fillRect(x, y, 10, 10);
}

// Classify the drawn digit
async function classify() {
  // Read the canvas, keep a single grayscale channel, downscale to 28x28,
  // and normalize to the [0, 1] range the model was trained on
  // NOTE: MNIST digits are white-on-black, so you may need to invert the
  // canvas colors to match the training data
  const pixels = tf.browser.fromPixels(canvas, 1);
  const resized = tf.image.resizeBilinear(pixels, [28, 28]);
  const imgTensor = resized.toFloat().div(255).expandDims(0); // shape: [1, 28, 28, 1]

  const prediction = model.predict(imgTensor).argMax(1).dataSync()[0];
  console.log(`Predicted digit: ${prediction}`);
}

// Initialize the app
async function init() {
  canvas = document.getElementById('canvas');
  ctx = canvas.getContext('2d');
  // NOTE: this draws on every mouse move; a real app would track
  // mousedown/mouseup so drawing only happens while the button is held
  canvas.addEventListener('mousemove', draw);

  const data = await loadData();
  model = defineModel();
  model = await train(model, data.imgs, data.labels);

  // For simplicity this evaluates on the training data; ideally you would
  // evaluate on a held-out test set
  await evaluate(model, data.imgs, data.labels);

  // Wire classify() to a UI control of your choice, for example:
  // document.getElementById('predict-btn').addEventListener('click', classify);
}

init();

Here’s a step-by-step explanation of the code:

  1. Load and preprocess the data: We start by importing TensorFlow.js (@tensorflow/tfjs) and an MNIST data loader (written here as @tensorflow/mnist; substitute whichever loader your project actually uses, such as the MnistData helper from the tfjs-examples repository). The loadData function fetches the dataset, normalizes the pixel values, reshapes the images to [numImages, 28, 28, 1], and converts the labels to one-hot encoding.
  2. Define the model architecture: The defineModel function creates a sequential model using the TensorFlow.js API. We define a convolutional neural network with two convolutional layers, two max-pooling layers, and a dense output layer with 10 units (one for each digit).
  3. Train the model: The train function compiles the model with the Adam optimizer and the categorical cross-entropy loss, then trains it with model.fit using a batch size of 128, 10 epochs, and a 15% validation split, logging the loss and accuracy after each epoch.
  4. Evaluate the model: The evaluate function measures the model's loss and accuracy with model.evaluate. For simplicity this example evaluates on the training data; in practice you would use the held-out MNIST test set.
  5. Deploy the model: We set up a canvas element and attach a mousemove listener so the user can draw a digit. The classify function reads the canvas pixels with tf.browser.fromPixels, downscales them to 28×28, normalizes them, and passes the resulting tensor through the model to obtain the predicted digit.

Finally, the init function ties everything together: it grabs the canvas, registers the drawing handler, loads and preprocesses the data, builds the model, trains it, and evaluates the result.
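Retraining in the browser on every page load is slow, so in a real deployment you would normally persist the trained model and reload it on later visits. Here is a minimal sketch of how that could look using TensorFlow.js's built-in IndexedDB storage handler; the loadOrTrain helper and the mnist-digit-model storage key are illustrative names, and the sketch reuses the loadData, defineModel, and train functions defined above.

// Reuse a previously saved model if one exists in the browser's IndexedDB,
// otherwise train a new one and persist it for next time
async function loadOrTrain() {
  try {
    // Succeeds only if a model was saved under this key on an earlier visit
    return await tf.loadLayersModel('indexeddb://mnist-digit-model');
  } catch (err) {
    // No saved model yet: train from scratch, then save it
    const data = await loadData();
    const trainedModel = await train(defineModel(), data.imgs, data.labels);
    await trainedModel.save('indexeddb://mnist-digit-model');
    return trainedModel;
  }
}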

Sentiment Analysis

Sentiment analysis is a natural language processing (NLP) task that involves classifying a given piece of text (e.g., a product review, a tweet, or a movie review) as positive, negative, or neutral based on the sentiment expressed in the text. This can be useful in various applications, such as monitoring brand reputation, analyzing customer feedback, or understanding public opinion on social media.

In this example, we’ll use TensorFlow.js to build and train a sentiment analysis model, and then deploy it in a web application. We’ll use a pre-trained model for text embedding (converting text into numerical vectors) and train a dense neural network on top of it for sentiment classification.
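Before walking through the full example, here is roughly what the embedding step looks like on its own (the demoEmbedding function name is just for illustration). The Universal Sentence Encoder maps each input sentence to a 512-dimensional vector:

import * as use from '@tensorflow-models/universal-sentence-encoder';

async function demoEmbedding() {
  const encoder = await use.load();
  // embed() takes an array of strings and returns a 2D tensor of sentence vectors
  const embeddings = await encoder.embed(['This movie was awesome!', 'Terrible product.']);
  console.log(embeddings.shape); // [2, 512] - one 512-dimensional vector per sentence
}

demoEmbedding();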

Here’s the code, followed by a detailed explanation:

// Load the required libraries
import * as tf from '@tensorflow/tfjs';
import * as use from '@tensorflow-models/universal-sentence-encoder';

// Define some global variables
let model, encoder;

// Load the Universal Sentence Encoder model
async function loadEncoder() {
  encoder = await use.load();
  console.log('Universal Sentence Encoder loaded');
}

// Prepare the data
async function prepareData(data) {
  const texts = data.map(d => d.text);
  // embed() returns a 2D tensor of shape [numTexts, 512]
  const embeddings = await encoder.embed(texts);
  const labels = data.map(d => (d.label === 'positive' ? 1 : 0));
  return { xs: embeddings, ys: tf.tensor2d(labels, [labels.length, 1]) };
}

// Define the model architecture
function defineModel() {
  const model = tf.sequential();
  model.add(tf.layers.dense({ units: 16, activation: 'relu', inputShape: [512] }));
  model.add(tf.layers.dense({ units: 8, activation: 'relu' }));
  model.add(tf.layers.dense({ units: 1, activation: 'sigmoid' }));
  return model;
}

// Train the model
async function train(model, data) {
  model.compile({
    optimizer: tf.train.adam(0.001),
    loss: 'binaryCrossentropy',
    metrics: ['accuracy'],
  });

  const { xs, ys } = await prepareData(data);

  await model.fit(xs, ys, {
    epochs: 10,
    batchSize: 32,
    callbacks: {
      onEpochEnd: async (epoch, logs) => {
        console.log(`Epoch ${epoch + 1} - Loss: ${logs.loss.toFixed(4)} - Acc: ${logs.acc.toFixed(4)}`);
      }
    }
  });

  return model;
}

// Classify the input text
async function classify(text) {
  const embedding = await encoder.embed([text]);
  const prediction = model.predict(embedding).dataSync()[0];
  const sentiment = prediction > 0.5 ? 'positive' : 'negative';
  console.log(`Sentiment: ${sentiment}`);
  return sentiment;
}

// Initialize the app
async function init() {
  await loadEncoder();
  model = defineModel();

  // Load your training data here (two examples are only a placeholder;
  // a useful model needs a much larger labelled dataset)
  const trainingData = [
    { text: 'This movie was awesome!', label: 'positive' },
    { text: 'I had a terrible experience with this product.', label: 'negative' },
    // Add more training examples
  ];

  model = await train(model, trainingData);

  // Get user input and classify
  const userInput = prompt('Enter some text to analyze sentiment:');
  await classify(userInput);
}

init();

Here’s a step-by-step explanation:

  1. Load the required libraries: We start by importing the @tensorflow/tfjs library and the @tensorflow-models/universal-sentence-encoder library, which provides a pre-trained model for text embedding.
  2. Load the Universal Sentence Encoder model: The loadEncoder function loads the pre-trained Universal Sentence Encoder model, which will be used to convert text input into numerical vectors.
  3. Prepare the data: The prepareData function takes the training data (an array of objects with text and label properties) and prepares it for training. It encodes the text using the Universal Sentence Encoder and converts the labels to binary values (0 for negative, 1 for positive).
  4. Define the model architecture: The defineModel function defines a simple dense neural network with two hidden layers and a sigmoid output layer for binary classification.
  5. Train the model: The train function compiles the model with an Adam optimizer and binary cross-entropy loss function. It then prepares the training data using the prepareData function and trains the model with the model.fit method, logging the loss and accuracy for each epoch.
  6. Classify the input text: The classify function takes a text input, encodes it using the Universal Sentence Encoder, passes it through the trained model, and classifies the sentiment as positive or negative based on the output probability.
  7. Initialize the app: The init function initializes the application by loading the Universal Sentence Encoder model, defining the model architecture, and loading the training data. In this example, we've provided a simple training dataset with two examples. You can replace it with your own training data. After training the model, the function prompts the user to enter some text and classifies the sentiment of the input text using the classify function.

This example demonstrates how to build and train a sentiment analysis model using TensorFlow.js and the Universal Sentence Encoder model. You can integrate this code into a web application by creating a user interface for text input and displaying the sentiment classification result.
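As a starting point, here is a minimal sketch of that UI wiring; the element IDs review-input, analyze-btn, and result are placeholders for whatever markup your page uses, and classify is the function defined above (which returns the sentiment string):

// Hypothetical page wiring: read text from a textarea, classify it on click,
// and display the result
const input = document.getElementById('review-input');   // <textarea>
const button = document.getElementById('analyze-btn');   // <button>
const output = document.getElementById('result');        // <span> or <div>

button.addEventListener('click', async () => {
  output.textContent = 'Analyzing...';
  const sentiment = await classify(input.value);
  output.textContent = `Sentiment: ${sentiment}`;
});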
