The first is:
Colour detection in Processing from James Alliban on Vimeo.
A colour detection code that I found on the following website:
http://jamesalliban.wordpress.com
(Specific file can be found at : http://jamesalliban.wordpress.com/2008/11/16/colour-detection-in-processing/)
It involves using a webcam to pick up the object in the far right corner. This might be handy, as I want to develop the idea of picking things up on the stage. It is a very similar idea to the Ford advertisement by Monster Media that I showed in my last post, which lets people move items around the screen — and it works as an advertisement because it makes people want to find out more. I will look into this idea further when I next go into college, as my webcam at home isn't good enough to pick up all the different colours.
This was the code for the project:
import processing.video.*;
Capture video; // live webcam capture (Processing video library)
int numPixels; // number of pixels in the video
int rectDivide = 4; // the stage width/height divided by this number is the video width/height
int vidW; // video width
int vidH; // video height
int[][] colouredPixels; // the different colour references for each pixel
int[][] colourCompareData; // captured r, g and b colours
int currR; // red channel of the pixel currently being tested
int currG; // green channel of the pixel currently being tested
int currB; // blue channel of the pixel currently being tested
int[][] squareCoords; // x, y, w + h of the coloured areas
color[] colours; // captured colours
int colourRange = 25; // colour threshold (+/- per channel)
int[][] centrePoints; // centres of the coloured squares
color[] pixelColours; // highlight colours used to mark matching pixels on screen
boolean isShowPixels = false; // determines whether the square and coloured pixels are displayed
int colourMax = 2; // max amount of colours - also adjust the amount of colours added to pixelColours in setup()
int coloursAssigned = 0; // amount of colours currently assigned
// Sets up the stage and the scaled-down video capture, and allocates
// the per-colour tracking arrays. The video runs at 1/rectDivide of
// the stage size; coordinates are multiplied back up by rectDivide
// when drawing full-size overlays.
void setup()
{
  size(640, 480);
  vidW = width / rectDivide;
  vidH = height / rectDivide;
  video = new Capture(this, vidW, vidH, 30);
  noStroke();
  numPixels = vidW * vidH;
  colouredPixels = new int[vidH][vidW];
  colourCompareData = new int[colourMax][3];
  squareCoords = new int[colourMax][4];
  colours = new color[colourMax];
  centrePoints = new int[colourMax][2];
  // Highlight colours for the pixel overlay: green marks the first
  // tracked colour, red the second. (Removed two unused locals that
  // duplicated these values.)
  pixelColours = new color[colourMax];
  pixelColours[0] = color(0, 255, 0);
  pixelColours[1] = color(255, 0, 0);
}
// Called by the video library whenever a new frame is ready; pulls
// the frame into the capture buffer so draw() can read it.
void captureEvent(Capture cam)
{
  cam.read();
}
// Main render loop: clear the stage to white, draw the camera feed
// and colour swatches, then — only when the overlay is toggled on —
// draw the bounding square for each assigned colour.
void draw()
{
  noStroke();
  fill(255, 255, 255);
  rect(0, 0, width, height);
  drawVideo();
  if (isShowPixels)
  {
    for (int c = 0; c < coloursAssigned; c++)
    {
      drawSquare(c);
    }
  }
}
// Draws one 10x10 swatch per captured colour beneath the video, the
// live camera image at the top-left, and a small red target box in
// the video's bottom-right corner (the pixel sampled by keyPressed()).
void drawVideo()
{
  int swatchSize = 10;
  for (int c = 0; c < coloursAssigned; c++)
  {
    fill(colours[c]);
    rect(c * swatchSize, vidH, swatchSize, swatchSize);
  }
  image(video, 0, 0);
  // red 4x4 sampling target at the feed's bottom-right
  noFill();
  stroke(255, 0, 0);
  strokeWeight(2);
  rect(vidW - 4, vidH - 4, 4, 4);
}
// Draws the bounding box of tracked colour i twice: once at video
// scale (over the camera preview) and once scaled up by rectDivide to
// stage coordinates, with a diagonal cross through the large box.
void drawSquare(int i)
{
  int x = squareCoords[i][0];
  int y = squareCoords[i][1];
  int w = squareCoords[i][2];
  int h = squareCoords[i][3];
  noFill();
  stroke(0, 0, 255);
  strokeWeight(3);
  // small box over the video preview
  rect(x, y, w, h);
  // same box scaled up to stage size
  int sx = x * rectDivide;
  int sy = y * rectDivide;
  int sw = w * rectDivide;
  int sh = h * rectDivide;
  rect(sx, sy, sw, sh);
  // diagonal cross inside the scaled box
  line(sx, sy, sx + sw, sy + sh);
  line(sx + sw, sy, sx, sy + sh);
}
// Keyboard controls:
//   'p' - toggle the coloured-pixel / bounding-box overlay
//   '0' - clear all assigned colours
//   '1' - capture the colour under the sampling target as colour 0
//   '2' - capture colour 1 (only once colour 0 has been assigned)
// Fixes vs. the original: 'p' and '0' are handled before any pixel
// sampling, so '0' is no longer blocked by the colourMax/coloursAssigned
// guard, and video.pixels is not touched before the first frame arrives
// (which previously risked an out-of-bounds read).
void keyPressed()
{
  println("key pressed = " + key);
  if (key == 'p')
  {
    isShowPixels = !isShowPixels;
    return;
  }
  if (key == '0')
  {
    coloursAssigned = 0;
    return;
  }
  // Don't sample until the camera has delivered at least one full frame.
  if (video.pixels.length < numPixels) return;
  // Sample a pixel a few positions in from the video's bottom-right
  // corner — this matches the red target box drawn in drawVideo().
  color currPixColor = video.pixels[numPixels - (vidW * 2) - 3];
  int pixR = (currPixColor >> 16) & 0xFF;
  int pixG = (currPixColor >> 8) & 0xFF;
  int pixB = currPixColor & 0xFF;
  if (key == '1')
  {
    coloursAssigned = 1;
    colourCompareData[0][0] = pixR;
    colourCompareData[0][1] = pixG;
    colourCompareData[0][2] = pixB;
    colours[0] = color(pixR, pixG, pixB);
  }
  // A second colour may only be captured after the first exists.
  if (colourMax < 2 || coloursAssigned < 1) return;
  if (key == '2')
  {
    coloursAssigned = 2;
    colourCompareData[1][0] = pixR;
    colourCompareData[1][1] = pixG;
    colourCompareData[1][2] = pixB;
    colours[1] = color(pixR, pixG, pixB);
  }
}
// Scans the current video frame for pixels matching each captured
// colour and derives, per colour: an axis-aligned bounding box
// (squareCoords, in video coordinates) and its centre (centrePoints,
// in stage coordinates). NOTE(review): update() is never called in the
// code shown here — presumably invoked from draw() in a part of the
// sketch not included in this listing; confirm.
class CoordsCalc
{
CoordsCalc()
{
}
// Recomputes the bounding box of every assigned colour from the raw
// frame pixels, optionally highlights matching pixels, and draws a
// dot at each box's centre.
void update()
{
int currX = vidW; // leftmost matching column seen so far
int currW = 0; // rightmost matching column seen so far
boolean isYAssigned = false; // has the top edge been captured yet?
boolean isWAssigned = false; // has any matching pixel been seen?
for (int j = 0; j < coloursAssigned; j++)
{
// reset per-colour scan state
currX = vidW;
currW = 0;
isYAssigned = false;
isWAssigned = false;
// Walk the frame in row-major order: i / vidW is the row and
// i % vidW the column (abs() is redundant since i is never negative).
for (int i = 0; i < numPixels; i++)
{
// NOTE(review): colouredPixels is zeroed here but never read
// anywhere in this file — possibly vestigial.
colouredPixels[abs(i / vidW)][i % vidW] = 0;
color currColor = video.pixels[i];
// unpack the packed 0xAARRGGBB colour into the channel globals
currR = (currColor >> 16) & 0xFF;
currG = (currColor >> 8) & 0xFF;
currB = currColor & 0xFF;
if(isColourWithinRange(j))
{
noStroke();
if (isShowPixels)
{
// highlight the match both in the small preview and scaled up on stage
fill(pixelColours[j]);
rect((i % vidW), (abs(i / vidW)), 1, 1);
rect((i % vidW) * rectDivide, (abs(i / vidW)) * rectDivide, 1 * rectDivide, 1 * rectDivide);
}
// track the left edge of the bounding box
if ((i % vidW) < currX)
{
currX = i % vidW;
squareCoords[j][0] = currX;
}
// the first matching row becomes the top edge
if (!isYAssigned)
{
isYAssigned = true;
squareCoords[j][1] = abs(i / vidW);
}
// height grows as matches appear on later rows
squareCoords[j][3] = (abs(i / vidW)) - squareCoords[j][1] + 1;
// track the right edge
if((i % vidW) > currW)
{
currW = i % vidW;
isWAssigned = true;
}
}
// after the last pixel, derive box width from the left/right edges
if(i == numPixels - 1 && isWAssigned)
{
squareCoords[j][2] = currW - squareCoords[j][0] + 1;
}
}
}
// Centre of each box, scaled from video up to stage coordinates,
// marked with a small black dot.
for (int i = 0; i < coloursAssigned; i++)
{
centrePoints[i][0] = (squareCoords[i][0] * rectDivide) + ((squareCoords[i][2] * rectDivide) / 2);
centrePoints[i][1] = (squareCoords[i][1] * rectDivide) + ((squareCoords[i][3] * rectDivide) / 2);
fill(0, 0, 0);
ellipse(centrePoints[i][0], centrePoints[i][1], 10, 10);
}
}
// True when the current pixel's channels (currR/currG/currB) all lie
// within +/- colourRange of captured colour j.
boolean isColourWithinRange(int j)
{
if(currR > (colourCompareData[j][0] + colourRange) || currR < (colourCompareData[j][0] - colourRange))
{
return false;
}
if(currG > (colourCompareData[j][1] + colourRange) || currG < (colourCompareData[j][1] - colourRange))
{
return false;
}
if(currB > (colourCompareData[j][2] + colourRange) || currB < (colourCompareData[j][2] - colourRange))
{
return false;
}
return true;
}
}
I have also had a look at Myron camera as mouse. It tracks movement and acts accordingly. Here is the code for the project:
/*
the green oval is an averaged position of all the detected dark movement in the camera's view.
physical setup:
- make sure there is a strong value contrast between your hand and a white background.
- set all camera settings to "manual" for the most stable results.
last tested to work in Processing 0090
JTNIMOY
*/
import JMyron.*;
JMyron m;//a camera object
//variables to maintain the floating green circle
float objx = 160;//current circle position (starts at the 320x240 frame centre)
float objy = 120;
float objdestx = 160;//target position the circle eases toward each frame
float objdesty = 120;
// Initialises the camera and Myron's motion tracking. The call order
// matters: update() then adapt() snapshots the current background so
// later frames can be differenced against it.
void setup(){
size(320,240);
m = new JMyron();//make a new instance of the object
m.start(width,height);//start a capture at 320x240
m.trackColor(255,255,255,256*3-100);//track white
m.update();
m.adaptivity(10);
m.adapt();// immediately take a snapshot of the background for differencing
println("Myron " + m.version());
rectMode(CENTER);
noStroke();
}
// Per-frame loop: draw the difference image, mark every detected glob
// centre in grey, average them into a red dot, and ease the green
// circle toward that average.
// FIX: the published listing was corrupted by HTML entity mangling —
// "&&centers" had collapsed into "&cent;ers" on the condition below;
// the logical AND is restored here so the sketch compiles.
void draw(){
m.update();//update the camera view
drawCamera();
int[][] centers = m.globCenters();//get the center points
//draw all the dots while calculating the average.
float avX=0;
float avY=0;
for(int i=0;i<centers.length;i++){
fill(80);
rect(centers[i][0],centers[i][1],5,5);
avX += centers[i][0];
avY += centers[i][1];
}
if(centers.length-1>0){
// NOTE(review): divides by length-1 rather than length (kept from
// the original example) — this biases the average; confirm intended.
avX/=centers.length-1;
avY/=centers.length-1;
}
//draw the average of all the points in red.
fill(255,0,0);
rect(avX,avY,5,5);
//update the location of the thing on the screen.
if(!(avX==0&&avY==0)&&centers.length>0){
objdestx = avX;
objdesty = avY;
}
//ease the circle a tenth of the way toward its target each frame
objx += (objdestx-objx)/10.0f;
objy += (objdesty-objy)/10.0f;
fill(30,100,0);
ellipseMode(CENTER);
ellipse(objx,objy,30,30);
}
// Copies Myron's difference image straight into the sketch's pixel
// buffer so the camera view fills the window.
void drawCamera(){
  int[] frame = m.differenceImage();
  loadPixels();
  // bulk copy instead of a per-pixel loop — identical result
  System.arraycopy(frame, 0, pixels, 0, width*height);
  updatePixels();
}
// Clicking anywhere in the sketch window opens Myron's camera
// settings dialog.
void mousePressed(){
m.settings();//click the window to get the settings
}
// Called when the sketch shuts down: release the camera first, then
// hand off to the default cleanup.
public void stop(){
m.stop();//stop the object
super.stop();
}
Again it doesn't work very well with my camera. I can't wait to try them out at College!
0 comments:
Post a Comment