Integrated with tracking.js

Hi,
I want to integrate color detection from this sample project. :point_down:

Could you tell me the proper way to start integrating this into 8th Wall? Is it possible to integrate?
I was trying to upload external JS files and call them, but I got a lot of errors.
Thank you!

Hi Lily! Are you using A-Frame or three.js?

If you’re using the cloud editor, you can load external libraries in head.html via script tags:

<script src="https://cdn.jsdelivr.net/gh/eduardolundgren/tracking.js@latest/build/tracking-min.js"></script>

This will load the library to the window object, so you can access the library at window.tracking.

I haven’t worked with this specific library before, but it looks like to get started you need a reference to the video DOM element. To do that in 8th Wall, you can create a custom camera pipeline module and obtain a reference to the video DOM element in the onCameraStatusChange lifecycle method:

// Custom 8th Wall camera pipeline module: once the camera feed has a
// video element, log the tracking.js library and the video element so
// we can confirm both are available.
const customPipelineModule = {
  name: 'custom',
  onCameraStatusChange: ({status, video}) => {
    // Wait until the camera feed's video element exists
    if (status !== 'hasVideo') {
      return
    }
    console.log(window.tracking)
    console.log(video)
  },
}

That being said, it looks like this library was last updated in 2016 so I’m not sure it will work out-of-the-box.

hi Evan,
Thank you for your reply.
I am using A-Frame.
May I know where I should put the customPipelineModule?

Or do you have other possible resources to do this color detection interaction?

Thank you again!

Since you’re using A-Frame, you should add the custom camera pipeline module in an A-Frame component:

// A-Frame component that registers a custom camera pipeline module
// with 8th Wall once the XR engine (XR8) is available.
const myComponent = {
  init() {
    // Registers the pipeline module; safe to call only once XR8 exists
    const onxrloaded = () => {
      XR8.addCameraPipelineModule({
        name: 'custom',
        onCameraStatusChange: (event) => {
          // 'hasVideo' means the camera feed's video element is ready
          if (event.status !== 'hasVideo') {
            return
          }
          console.log(window.tracking)
          console.log(event.video)
        },
      })
    }
    // XR8 may already be loaded; otherwise wait for its 'xrloaded' event
    if (window.XR8) {
      onxrloaded()
    } else {
      window.addEventListener('xrloaded', onxrloaded)
    }
  },
}

If this particular library doesn’t work, you might consider integrating a computer vision library to handle color detection — see the Camera Pipeline: QR Code sample on 8th Wall.

1 Like

Hi Evan,

Thank you so much for your response again.
I already loaded external library that you provided. (tracking-min.js)

But I still got the error below:

ERROR in ./head.html
Module build failed (from /var/task/node_modules/extract-loader/lib/extractLoader.js):
/tmp/src/readymake.colordetected/tracking-min.js:287
}(window));
^

ReferenceError: window is not defined

This looks like a build error: the code is not able to compile, perhaps because code has been placed in the wrong place. Can you please share your head.html and app.js?

Hi here is my head.html

<meta name="8thwall:renderer" content="aframe:1.3.0">

<meta name="8thwall:package" content="@8thwall.xrextras">

<meta name="8thwall:package" content="@8thwall.landing-page">

<script src="https://cdn.jsdelivr.net/gh/eduardolundgren/tracking.js@latest/build/tracking-min.js"></script>

<script src="script.js"></script>

And app.js

// app.js is the entry point for custom JavaScript in the cloud editor.
import './index.css'

import {myComponent} from './camerapipeline'

// Register the component with A-Frame under the name 'camerapipeline'
AFRAME.registerComponent('camerapipeline', myComponent)

And I got this error

ERROR in ./head.html
Module build failed (from /var/task/node_modules/extract-loader/lib/extractLoader.js):
/tmp/src/readymake.colortest2/script.js:2
window.addEventListener("load", function(e) {
^

ReferenceError: window is not defined

Sorry, I’m a beginner with A-Frame, so I may have imported the script.js file in the wrong way.
Thank you again.

can you also share script.js and camerapipeline.js?

Sure! I just used the JS file from that color detection website.

script.js

// Runs once the page has fully loaded: sets up the tracking.js color
// tracker on the webcam feed and lets the user pick the target color
// by clicking on the video.
// NOTE: the forum paste replaced straight quotes with curly "smart"
// quotes, which are syntax errors in JavaScript — restored here.
window.addEventListener("load", function (e) {

  console.log("Page loaded!");

  // Store the color we will be tracking (selectable by clicking on the webcam feed)
  var color = {r: 255, g: 0, b: 0};

  // Grab references to the DOM elements we will be using
  var slider = document.getElementById("tolerance");
  var canvas = document.getElementById("canvas");
  var context = canvas.getContext("2d");
  var webcam = document.getElementById("webcam");
  var swatch = document.getElementById("color");

  // Register our custom color tracking function: a pixel matches when its
  // distance to the target color is below the slider's tolerance value
  tracking.ColorTracker.registerColor("dynamic", function (r, g, b) {
    return getColorDistance(color, {r: r, g: g, b: b}) < slider.value
  });

  // Create the color tracking object
  var tracker = new tracking.ColorTracker("dynamic");

  // Add callback for the "track" event: outline every detected region
  tracker.on("track", function (e) {

    context.clearRect(0, 0, canvas.width, canvas.height);

    if (e.data.length !== 0) {

      e.data.forEach(function (rect) {
        // console.log(rect);
        drawRect(rect, context, color);
      });

    }

  });

  // Start tracking
  tracking.track(webcam, tracker, { camera: true });

  // Add listener for the click event on the video
  webcam.addEventListener("click", function (e) {

    // Grab color from the video feed where the click occurred
    var c = getColorAt(webcam, e.offsetX, e.offsetY);

    // Update target color
    color.r = c.r;
    color.g = c.g;
    color.b = c.b;

    // Update the div's background so we can see which color was selected
    swatch.style.backgroundColor = "rgb(" + c.r + ", " + c.g + ", " + c.b + ")";

  });

});

// Calculates the Euclidean distance between the target color and the
// actual color, treating {r, g, b} as a point in 3-D color space.
function getColorDistance(target, actual) {
  var dr = target.r - actual.r;
  var dg = target.g - actual.g;
  var db = target.b - actual.b;
  return Math.sqrt(dr * dr + dg * dg + db * db);
}

// Returns the color at the specified x/y location in the webcam video feed.
// NOTE: the forum paste turned the string quotes into curly "smart" quotes,
// which are syntax errors in JavaScript — restored to straight quotes here.
function getColorAt(webcam, x, y) {

  // To be able to access pixel data from the webcam feed, we must first draw
  // the current frame in a temporary canvas.
  var canvas = document.createElement("canvas");
  var context = canvas.getContext("2d");
  canvas.width = webcam.width;
  canvas.height = webcam.height;
  context.drawImage(webcam, 0, 0, webcam.width, webcam.height);

  // Then we grab the pixel information from the temp canvas and return it as an object
  var pixel = context.getImageData(x, y, 1, 1).data;
  return {r: pixel[0], g: pixel[1], b: pixel[2]};

}

// Draw a colored rectangle outline on the canvas, matching the target color.
// NOTE: the forum paste turned the quotes around "rgb(" into curly "smart"
// quotes, which are syntax errors in JavaScript — restored here.
function drawRect(rect, context, color) {
  context.strokeStyle = "rgb(" + color.r + ", " + color.g + ", " + color.b + ")";
  context.strokeRect(rect.x, rect.y, rect.width, rect.height);
}

camerapipeline.js

// A-Frame component that registers a custom 8th Wall camera pipeline
// module once the XR engine (XR8) is available.
// Fixes: (1) the forum paste replaced straight quotes with curly "smart"
// quotes, which are syntax errors; (2) `export` was missing, but app.js
// does `import {myComponent} from './camerapipeline'`, so the named
// export is required for that import to resolve.
export const myComponent = {
  init() {
    const onxrloaded = () => {
      XR8.addCameraPipelineModule({
        name: 'custom',
        onCameraStatusChange: (event) => {
          // 'hasVideo' means the camera feed's video element is ready
          if (event.status === 'hasVideo') {
            console.log(window.tracking)
            console.log(event.video)
          }
        },
      })
    }
    // Run immediately if XR8 is already loaded, otherwise wait for it
    window.XR8 ? onxrloaded() : window.addEventListener('xrloaded', onxrloaded)
  },
}

and index.css

/* Page layout from the tracking.js color-detection sample: stacks the
   video, the overlay canvas, and the tolerance/swatch UI vertically. */
body {
  display: flex;
  flex-direction: column;
  align-items: center;
  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}

/* Positioning context so the canvas can sit on top of the video */
.display {
  position: relative;
}

video {
  display: block;
  /*transform: scaleX(-1);*/
}

/* Overlay canvas drawn over the video feed */
canvas {
  display: block;
  position: absolute;
  top: 0;
  pointer-events: none; /* Prevents canvas from intercepting clicks */
  /*transform: scaleX(-1);*/
}

/* Row holding the tolerance controls and the color swatch */
.ui {
  margin-top: 10px;
  width: 640px;
  display: flex;
  justify-content: space-between;
}

  .ui .controls {
    display: flex;
    flex-direction: column;
    justify-content: center;
    width: 70%;
    border: 1px solid gray;
    box-sizing: border-box;
    padding: 0 15px;
    flex-grow: 1;
  }

  .ui .controls p {
    margin-top: 0;
  }

  /* Swatch showing the currently selected target color */
  .ui #color {
    box-sizing: border-box;
    background-color: red;
    width: 100px;
    height: 100px;
    border: 1px solid gray;
    margin-left: 10px;
  }

.controls div {
  display: flex;
}

.controls div label {
  width: 25%;
}

.controls div input {
  width: 75%;
}

Thank you!

Sorry for the bad formatting, but the whole code is above. Thank you!

The first problem is the way that script.js is being loaded. In the cloud editor, app.js is the entry point for custom javascript.

So, instead of loading a custom javascript file in head.html with a script tag:

<script src="script.js"></script>

You should simply import that file in app.js:

import './script'

But this isn’t the right approach, anyways. The next issue you’d encounter with this approach is that the window load event would not consistently emit, so the code in script.js wouldn’t be executed. See here for more information.

The right approach would be to convert script.js into an 8th Wall camera pipeline module, which you have already started in camerapipeline.js.

1 Like

This topic was automatically closed 4 days after the last reply. New replies are no longer allowed.