PVector has .x and .y properties, and that's all you need to render a PImage at that location using image().
Let's assume imageToDrag is a PImage instance, loaded/generated in setup() and accessible throughout the sketch; you would simply do something like this at the end of draw():

image(imageToDrag, v2.x, v2.y);

In context:
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
class KinectTracker {
  // Depth threshold
  int threshold = 745;
  // Raw location
  PVector loc;
  // Interpolated location
  PVector lerpedLoc;
  // Depth data
  int[] depth;
  // What we'll show the user
  PImage display;

  KinectTracker() {
    // This is an awkward use of a global variable here
    // But doing it this way for simplicity
    kinect.initDepth();
    kinect.enableMirror(true);
    // Make a blank image
    display = createImage(kinect.width, kinect.height, RGB);
    // Set up the vectors
    loc = new PVector(0, 0);
    lerpedLoc = new PVector(0, 0);
  }
  void track() {
    // Get the raw depth as array of integers
    depth = kinect.getRawDepth();
    // Being overly cautious here
    if (depth == null) return;

    float sumX = 0;
    float sumY = 0;
    float count = 0;
    for (int x = 0; x < kinect.width; x++) {
      for (int y = 0; y < kinect.height; y++) {
        int offset = x + y*kinect.width;
        // Grabbing the raw depth
        int rawDepth = depth[offset];
        // Testing against threshold
        if (rawDepth < threshold) {
          sumX += x;
          sumY += y;
          count++;
        }
      }
    }
    // As long as we found something
    if (count != 0) {
      loc = new PVector(sumX/count, sumY/count);
    }
    // Interpolating the location, doing it arbitrarily for now
    lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
    lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
  }

  PVector getLerpedPos() {
    return lerpedLoc;
  }

  PVector getPos() {
    return loc;
  }
  void display() {
    PImage img = kinect.getDepthImage();
    // Being overly cautious here
    if (depth == null || img == null) return;

    // Going to rewrite the depth image to show which pixels are in threshold
    // A lot of this is redundant, but this is just for demonstration purposes
    display.loadPixels();
    for (int x = 0; x < kinect.width; x++) {
      for (int y = 0; y < kinect.height; y++) {
        int offset = x + y * kinect.width;
        // Raw depth
        int rawDepth = depth[offset];
        int pix = x + y * display.width;
        if (rawDepth < threshold) {
          // A red color instead
          display.pixels[pix] = color(150, 50, 50);
        } else {
          display.pixels[pix] = img.pixels[offset];
        }
      }
    }
    display.updatePixels();
    // Draw the image
    image(display, 0, 0);
  }

  int getThreshold() {
    return threshold;
  }

  void setThreshold(int t) {
    threshold = t;
  }
}
KinectTracker tracker;
Kinect kinect;
PImage imageToDrag;
void setup() {
  size(640, 520);
  kinect = new Kinect(this);
  tracker = new KinectTracker();
  // generate test image: can swap this to loadImage("yourAwesomeImageHere.png");
  imageToDrag = getTestImage(128, 128, RGB);
}
void draw() {
  background(255);
  // Run the tracking analysis
  tracker.track();
  // Show the image
  tracker.display();

  // Let's draw the raw location
  PVector v1 = tracker.getPos();
  fill(50, 100, 250, 200);
  noStroke();
  ellipse(v1.x, v1.y, 20, 20);

  // Let's draw the "lerped" location
  PVector v2 = tracker.getLerpedPos();
  fill(100, 250, 50, 200);
  noStroke();
  ellipse(v2.x, v2.y, 20, 20);

  // draw an image at the "lerped" location
  image(imageToDrag, v2.x, v2.y);

  // Display some info
  int t = tracker.getThreshold();
  fill(0);
  text("threshold: " + t + " " + "framerate: " + int(frameRate) + " " +
       "UP increase threshold, DOWN decrease threshold", 10, 500);
}
// Adjust the threshold with key presses
void keyPressed() {
  int t = tracker.getThreshold();
  if (key == CODED) {
    if (keyCode == UP) {
      t += 5;
      tracker.setThreshold(t);
    } else if (keyCode == DOWN) {
      t -= 5;
      tracker.setThreshold(t);
    }
  }
}
PImage getTestImage(int w, int h, int type) {
  PImage out = createImage(w, h, type);
  out.loadPixels();
  for (int i = 0; i < out.pixels.length; i++) {
    int x = i % w;
    int y = i / w;
    int z = (w - (x + y));
    out.pixels[i] = color(map(x, 0, w, 0, 255),
                          map(y, 0, h, 0, 255),
                          map(z, 0, w, 0, 255));
  }
  out.updatePixels();
  return out;
}
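
One small positioning note: image() anchors a PImage by its top-left corner, so the sketch above draws imageToDrag with its corner (not its center) sitting on the lerped point. If you would rather have the image centered on the tracked location, a minimal tweak (reusing the same imageToDrag and v2 names from above) is to either offset by half the image size or switch the image mode:

// option 1: offset by half the image's dimensions
image(imageToDrag, v2.x - imageToDrag.width / 2, v2.y - imageToDrag.height / 2);

// option 2: tell Processing to treat x/y as the center
imageMode(CENTER);
image(imageToDrag, v2.x, v2.y);
imageMode(CORNER); // restore the default so other image() calls behave as before

Either variant goes in the same spot in draw() where the original image() call is.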