Code for a platform moving back and forth - Phaser framework

I'm trying to make a game where the character can jump on a platform that moves horizontally; once the platform reaches a specific point it comes back, and this repeats. However, I have had some trouble finding the correct way to write the code for this. I tried using setVelocityX(); it looks like this:
var movingPlatform = {
    moveRight: function () {
        platforms.setVelocityX(100);
    },
    moveleft: function () {
        platforms.setVelocityX(-100);
    }
};

var move = true;

if (move = true) {
    movingPlatform.moveRight();
}
if (move = false) {
    movingPlatform.moveleft();
}
if (platforms.x <= platformMinX) {
    move = true;
}
if (platforms.x >= platformMaxX) {
    move = false;
}
All this did was make the platform jitter back and forth around the 'platformMaxX' point once it reached it, so 'movingPlatform.moveRight()' is still being called.
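(A side note on the snippet above: if (move = true) and if (move = false) assign to the flag instead of comparing it, so the direction never actually flips. A minimal sketch of the intended flip logic, assuming platforms is a single Arcade Physics sprite and this runs inside the scene's update():)

// Hedged sketch: flip the flag at the bounds, then apply the matching velocity.
if (platforms.x <= platformMinX) {
    move = true;
} else if (platforms.x >= platformMaxX) {
    move = false;
}

if (move === true) {
    movingPlatform.moveRight();
} else {
    movingPlatform.moveleft();
}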

var config = {
    type: Phaser.AUTO,
    parent: 'phaser-example',
    width: 800,
    height: 600,
    loader: {
        baseURL: 'https://raw.githubusercontent.com/nazimboudeffa/assets/master/',
        crossOrigin: 'anonymous'
    },
    scene: {
        preload: preload,
        create: create,
        update: update
    },
    physics: {
        default: 'arcade'
    }
};
var game = new Phaser.Game(config);
var dude;
var alien1, alien2;
var direction = 1
function preload ()
{
    this.load.image('dude', 'sprites/phaser-dude.png');
    this.load.image('alien1', 'sprites/phaser-alien.png');
    this.load.image('alien2', 'sprites/alien2.png');
}

function create ()
{
    dude = this.physics.add.sprite(300, 100, 'dude');

    alien1 = this.physics.add.sprite(400, 100, 'alien1');
    alien1.body.immovable = true;

    alien2 = this.physics.add.sprite(100, 100, 'alien2');
    alien2.body.immovable = true;

    // Register the colliders once here (not every frame in update);
    // flipX reverses the travel direction on contact.
    this.physics.add.collider(dude, alien1, flipX, null, this);
    this.physics.add.collider(dude, alien2, flipX, null, this);
}

function update ()
{
    dude.setVelocityX(direction * 100);
}

function flipX ()
{
    direction = -direction;
}
<script src="//cdn.jsdelivr.net/npm/phaser@3.18.1/dist/phaser.min.js"></script>

var config = {
    type: Phaser.AUTO,
    parent: 'phaser-example',
    width: 800,
    height: 600,
    loader: {
        baseURL: 'https://raw.githubusercontent.com/nazimboudeffa/assets/master/',
        crossOrigin: 'anonymous'
    },
    scene: {
        preload: preload,
        create: create
    },
    physics: {
        default: 'arcade'
    }
};
var game = new Phaser.Game(config);
function preload ()
{
    this.load.image('dude', 'sprites/phaser-dude.png');
}

function create ()
{
    this.dude = this.physics.add.sprite(100, 100, 'dude');

    const tween = this.tweens.add({
        targets: this.dude,
        x: 300,
        ease: 'Power0',
        duration: 3000,
        flipX: true,
        yoyo: true,
        repeat: -1,
    });
}
<script src="//cdn.jsdelivr.net/npm/phaser@3.19.0/dist/phaser.js"></script>

Related

How to run MediaPipe FaceMesh in an ES6 Node.js environment such as React

I am trying to run this HTML example https://codepen.io/mediapipe/details/KKgVaPJ from https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api in a Create React App application. I have already done the following:
npm install of all the FaceMesh MediaPipe packages.
Replaced the jsdelivr tags with node imports, and I got the definitions and functions.
Replaced the video element with react-cam.
I don't know how to replace this jsdelivr call; maybe it is what's affecting things:
const faceMesh = new FaceMesh({
    locateFile: (file) => {
        return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
});
So the question is:
Why is the FaceMesh output not showing? Is there any example of what I am trying to do?
This is my App.js code (sorry for the debugging scaffolding):
import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
FaceMesh,
FACEMESH_TESSELATION,
FACEMESH_RIGHT_EYE,
FACEMESH_LEFT_EYE,
FACEMESH_RIGHT_EYEBROW,
FACEMESH_LEFT_EYEBROW,
FACEMESH_FACE_OVAL,
FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'
const videoConstraints = {
width: 1280,
height: 720,
facingMode: "user"
};
function App() {
const webcamRef = React.useRef(null);
const canvasReference = React.useRef(null);
const [cameraReady, setCameraReady] = useState(false);
let canvasCtx
let camera
const videoElement = document.getElementsByClassName('input_video')[0];
// const canvasElement = document.getElementsByClassName('output_canvas')[0];
const canvasElement = document.createElement('canvas');
console.log('canvasElement', canvasElement)
console.log('canvasCtx', canvasCtx)
useEffect(() => {
camera = new Camera(webcamRef.current, {
onFrame: async () => {
console.log('{send}',await faceMesh.send({ image: webcamRef.current.video }));
},
width: 1280,
height: 720
});
canvasCtx = canvasReference.current.getContext('2d');
camera.start();
console.log('canvasReference', canvasReference)
}, [cameraReady]);
function onResults(results) {
console.log('results')
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
canvasCtx.drawImage(
results.image, 0, 0, canvasElement.width, canvasElement.height);
if (results.multiFaceLandmarks) {
for (const landmarks of results.multiFaceLandmarks) {
drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, { color: '#FF3030' });
drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, { color: '#30FF30' });
drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
}
}
canvasCtx.restore();
}
const faceMesh = new FaceMesh({
locateFile: (file) => {
return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
}
});
faceMesh.setOptions({
selfieMode: true,
maxNumFaces: 1,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
faceMesh.onResults(onResults);
// const camera = new Camera(webcamRef.current, {
// onFrame: async () => {
// await faceMesh.send({ image: videoElement });
// },
// width: 1280,
// height: 720
// });
// camera.start();
return (
<div className="App">
<Webcam
audio={false}
height={720}
ref={webcamRef}
screenshotFormat="image/jpeg"
width={1280}
videoConstraints={videoConstraints}
onUserMedia={() => {
console.log('webcamRef.current', webcamRef.current);
// navigator.mediaDevices
// .getUserMedia({ video: true })
// .then(stream => webcamRef.current.srcObject = stream)
// .catch(console.log);
setCameraReady(true)
}}
/>
<canvas
ref={canvasReference}
style={{
position: "absolute",
marginLeft: "auto",
marginRight: "auto",
left: 0,
right: 0,
textAlign: "center",
zIndex: 9,
width: 1280,
height: 720,
}}
/>
</div >
);
}
export default App;
You don't have to replace the jsdelivr call; that piece of code is fine. I also think you need to reorder your code a little bit:
You should put the FaceMesh initialization inside the useEffect, with [] as the dependency array; that way, the algorithm will start when the page is rendered for the first time.
Also, you don't need to get videoElement and canvasElement with document.*, because you already have refs defined.
An example of the code:
useEffect(() => {
    const faceMesh = new FaceDetection({
        locateFile: (file) => {
            return `https://cdn.jsdelivr.net/npm/@mediapipe/face_detection/${file}`;
        },
    });

    faceMesh.setOptions({
        maxNumFaces: 1,
        minDetectionConfidence: 0.5,
        minTrackingConfidence: 0.5,
    });

    faceMesh.onResults(onResults);

    if (
        typeof webcamRef.current !== "undefined" &&
        webcamRef.current !== null
    ) {
        camera = new Camera(webcamRef.current.video, {
            onFrame: async () => {
                await faceMesh.send({ image: webcamRef.current.video });
            },
            width: 1280,
            height: 720,
        });
        camera.start();
    }
}, []);
Finally, in the onResults callback I would suggest printing the results first, just to check that the MediaPipe implementation is working fine. And don't forget to set the canvas size before drawing anything.
function onResults(results) {
    console.log(results)
    canvasCtx = canvasReference.current.getContext('2d')
    canvasReference.current.width = webcamRef.current.video.videoWidth;
    canvasReference.current.height = webcamRef.current.video.videoHeight;
    ...
}
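Putting those pieces together, a hedged sketch of a complete onResults that only uses the refs (the drawConnectors call and colors are taken from the question's own code, trimmed to one connector set):

function onResults(results) {
    const canvasEl = canvasReference.current;
    const ctx = canvasEl.getContext('2d');

    // Size the canvas to the incoming video frame before drawing.
    canvasEl.width = webcamRef.current.video.videoWidth;
    canvasEl.height = webcamRef.current.video.videoHeight;

    ctx.save();
    ctx.clearRect(0, 0, canvasEl.width, canvasEl.height);
    ctx.drawImage(results.image, 0, 0, canvasEl.width, canvasEl.height);

    if (results.multiFaceLandmarks) {
        for (const landmarks of results.multiFaceLandmarks) {
            drawConnectors(ctx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        }
    }
    ctx.restore();
}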
Good luck! :)

How to attach an Electron window to an application screen and resize dynamically

I am currently writing a program that uses a mix of Electron and React-Redux to create an overlay window on top of screens/applications. I managed to successfully create the transparent overlay window and list all the valid media streams. But I can't figure out how I can have this new overlay window match the size/location of the selected stream and dynamically resize. On top of that, I would like the overlay to be on top of the selected stream alone.
Any tips are welcome :)
// In MainProcess
ipcMain.on(ELECTRON_CREATE_OVERLAY_WINDOW, (event, windowSettings) => {
    if (overlayWindow !== null) {
        console.error('Trying to create an Overlay window when there is already one!')
        return
    }
    console.log('Creating the Overlay window')
    overlayWindow = new BrowserWindow({
        width: windowSettings.width,
        height: windowSettings.height,
        webPreferences: {
            nodeIntegration: true,
            enableRemoteModule: true,
        },
        transparent: true,
        frame: false,
        alwaysOnTop: true,
    });
    overlayWindow.setIgnoreMouseEvents(true);
    overlayWindow.loadURL("http://localhost:3000/overlay");
    overlayWindow.on('closed', () => {
        console.log('Overlay window closed')
        overlayWindow = null
    })
});
// In React page / RendererProcess
React.useEffect(async () => {
    desktopCapturer
        .getSources({
            types: ["window", "screen"],
        })
        .then((inputSources) => {
            for (let i = 0; i < inputSources.length; i++) {
                let source = inputSources[i];
                const constraints = {
                    audio: false,
                    video: {
                        mandatory: {
                            chromeMediaSource: "desktop",
                            chromeMediaSourceId: source.id,
                        },
                    },
                };
                navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
                    inputSources[i].stream = stream;
                    console.log('stream', stream)
                    // When we have all the streams, update the state
                    if (i == inputSources.length - 1) {
                        setSources([...inputSources]);
                    }
                });
            }
        });
}, []);

...

const launchOverlay = (source) => {
    const streamSettings = source.stream.getVideoTracks()[0].getSettings();
    console.log(source)
    console.log(source.stream)
    console.log(streamSettings)
    createOverlayWindow({ width: streamSettings.width, height: streamSettings.height })
};
You can use the electron-overlay-window package for this.
The readme says it supports Windows and Linux.
It tracks the target window by its title and keeps your app window right over it. It also re-attaches itself if you restart the target app/game. The only downside is that it's not well documented, but the basic demo is simple.
// ...
import { overlayWindow as OW } from 'electron-overlay-window'
// ...

const win = new BrowserWindow({
    ...OW.WINDOW_OPTS, // pay attention here
    width: 800,
    height: 600,
    resizable: false,
    webPreferences: {
        nodeIntegration: process.env.ELECTRON_NODE_INTEGRATION,
    },
})

// ... when ready
OW.attachTo(win, 'Untitled - Notepad')

// listen for lifecycle events
OW.on('attach', ev => { console.log('WO: attach', ev) })
OW.on('detach', ev => { console.log('WO: detach', ev) })
OW.on('blur', ev => { console.log('WO: blur', ev) })
OW.on('focus', ev => { console.log('WO: focus', ev) })
OW.on('fullscreen', ev => console.log('WO: fullscreen', ev))
OW.on('moveresize', ev => console.log('WO: moveresize', ev))
You can look up more examples here:
Ender-O for Elite Dangerous written in JS (demo video)
Awaken PoE Trade for Path of Exile written in TS
Simple demo from author written in TS (I recommend starting with this example)
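If pulling in a package is not an option, the resize part can also be done by hand over IPC with plain Electron APIs. A rough sketch under stated assumptions (the 'RESIZE_OVERLAY_WINDOW' channel name and wiring are made up here, and note that getSettings() gives you the stream's dimensions but not its on-screen position):

// In the renderer, after picking a source:
ipcRenderer.send('RESIZE_OVERLAY_WINDOW', {
    width: streamSettings.width,
    height: streamSettings.height,
});

// In the main process:
ipcMain.on('RESIZE_OVERLAY_WINDOW', (event, { width, height }) => {
    if (overlayWindow !== null) {
        overlayWindow.setSize(width, height);
    }
});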

Phaser 3 - How to create smooth zooming effect

Hi there, I'm currently developing a game which will zoom in when a certain condition is true. To do that, I followed this link:
https://rexrainbow.github.io/phaser3-rex-notes/docs/site/camera/
camera = this.cameras.main;
if (condition) {
    camera.setZoom(3);
    camera.zoom = 3;
}
It works, but there's no transition/animation of the zoom; it simply snaps to the new zoom level.
How can I make a zooming effect instead of instant zooming?
You can play with the pan call and replace 'Power2' with:
Elastic
Sine.easeInOut
Or leave it blank.
var config = {
    type: Phaser.AUTO,
    parent: 'phaser-example',
    width: 800,
    height: 600,
    loader: {
        baseURL: 'https://raw.githubusercontent.com/nazimboudeffa/assets/master/',
        crossOrigin: 'anonymous'
    },
    scene: {
        preload: preload,
        create: create
    }
};
var game = new Phaser.Game(config);
function preload ()
{
    this.load.image('map', 'pics/hyrule.png');
}

function create ()
{
    this.cameras.main.setBounds(0, 0, 1024, 1024);
    this.add.image(0, 0, 'map').setOrigin(0);
    this.cameras.main.setZoom(1);
    this.cameras.main.centerOn(0, 0);

    this.input.on('pointerdown', function () {
        var cam = this.cameras.main;
        cam.pan(500, 500, 2000, 'Power2');
        cam.zoomTo(4, 3000);
    }, this);
}
<script src="//cdn.jsdelivr.net/npm/phaser@3.17.0/dist/phaser.min.js"></script>
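The smooth zoom in that example comes from cam.zoomTo(4, 3000). Applied to the condition-driven case from the question, a hedged sketch (the ease is one of Phaser's standard easing names, and the target zoom and duration are just illustrative):

camera = this.cameras.main;
if (condition) {
    // Zoom to 3x over 1.5 seconds with a sine ease instead of setting zoom instantly.
    camera.zoomTo(3, 1500, 'Sine.easeInOut');
}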

Lottie Animation in fabricjs canvas

Is it possible to load a Lottie animation in a Fabric.js canvas?
I have tried the following sample:
bodymovin.loadAnimation({
    wrapper: animateElement, // div element
    loop: true,
    animType: 'canvas', // fabricjs canvas
    animationData: dataValue, // AE json
    rendererSettings: {
        scaleMode: 'noScale',
        clearCanvas: true,
        progressiveLoad: false,
        hideOnTransparent: true,
    }
});

canvas.add(bodymovin);
canvas.renderAll();
I'm not able to add the animation to the Fabric.js canvas. If anyone has overcome this issue, kindly comment on it.
I might be late to answer this, but for anyone else looking, this pen could give you some pointers: https://codepen.io/shkaper/pen/oEKEgG
The idea here, first of all, is to extend the fabric.Image class, overriding its internal render method to render the contents of an arbitrary canvas that you yourself provide:
fabric.AEAnimation = fabric.util.createClass(fabric.Image, {
    drawCacheOnCanvas: function (ctx) {
        ctx.drawImage(this._AECanvas, -this.width / 2, -this.height / 2);
    },
})
You can make this canvas a constructor argument, e.g.
initialize: function (AECanvas, options) {
    options = options || {}
    this.callSuper('initialize', AECanvas, options)
    this._AECanvas = AECanvas
},
Then you'll just need to use lottie's canvas renderer to draw animation on a canvas and pass it to your new fabric.AEAnimation object.
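For completeness, a hedged sketch of that last step, reusing the canvas-renderer pattern that appears in the Lottie class further down (here lottie is lottie-web's export, a.k.a. bodymovin; the animationData variable and the 480x480 size are assumptions for illustration):

// Draw the Lottie animation onto an off-screen canvas...
const aeCanvas = document.createElement('canvas');
aeCanvas.width = 480;
aeCanvas.height = 480;

const anim = lottie.loadAnimation({
    renderer: 'canvas',
    loop: true,
    autoplay: true,
    animationData, // your After Effects JSON
    rendererSettings: { context: aeCanvas.getContext('2d') },
});

// ...and hand that canvas to the custom fabric object.
const aeObject = new fabric.AEAnimation(aeCanvas, { left: 100, top: 100, width: 480, height: 480 });
canvas.add(aeObject);

// Re-render the fabric canvas as frames advance.
anim.addEventListener('enterFrame', () => canvas.requestRenderAll());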
I would assume so, by combining your code with something similar to https://itnext.io/video-element-serialization-and-deserialization-of-canvas-fc5dbf47666d. Depending on your scenario you might be able to get away with using something like http://fabricjs.com/interaction-with-objects-outside-canvas
If it's of any help, I've created this Lottie class with support for exporting toObject/JSON:
import { fabric } from 'fabric'
import lottie from 'lottie-web'

const Lottie = fabric.util.createClass(fabric.Image, {
    type: 'lottie',
    lockRotation: true,
    lockSkewingX: true,
    lockSkewingY: true,
    srcFromAttribute: false,

    initialize: function (path, options) {
        if (!options.width) options.width = 480
        if (!options.height) options.height = 480
        this.path = path

        // Off-screen canvas that lottie-web renders into.
        this.tmpCanvasEl = fabric.util.createCanvasElement()
        this.tmpCanvasEl.width = options.width
        this.tmpCanvasEl.height = options.height

        this.lottieItem = lottie.loadAnimation({
            renderer: 'canvas',
            loop: true,
            autoplay: true,
            path,
            rendererSettings: {
                context: this.tmpCanvasEl.getContext('2d'),
                preserveAspectRatio: 'xMidYMid meet',
            },
        })

        // this.lottieItem.addEventListener('DOMLoaded', () => {
        //     console.log('DOMLoaded')
        // })

        // Keep the fabric canvas in sync with the animation frames.
        this.lottieItem.addEventListener('enterFrame', (e) => {
            this.canvas?.requestRenderAll()
        })

        this.callSuper('initialize', this.tmpCanvasEl, options)
    },

    play: function () {
        this.lottieItem.play()
    },

    stop: function () {
        this.lottieItem.stop()
    },

    getSrc: function () {
        return this.path
    },
})

Lottie.fromObject = function (_object, callback) {
    const object = fabric.util.object.clone(_object)
    fabric.Image.prototype._initFilters.call(object, object.filters, function (filters) {
        object.filters = filters || []
        fabric.Image.prototype._initFilters.call(object, [object.resizeFilter], function (resizeFilters) {
            object.resizeFilter = resizeFilters[0]
            fabric.util.enlivenObjects([object.clipPath], function (enlivedProps) {
                object.clipPath = enlivedProps[0]
                const fabricLottie = new fabric.Lottie(object.src, object)
                callback(fabricLottie, false)
            })
        })
    })
}

Lottie.async = true

export default Lottie
To create a Lottie element, just pass the animation JSON URL:
const fabricImage = new fabric.Lottie('https://assets5.lottiefiles.com/private_files/lf30_rttpmsbc.json', {
    scaleX: 0.5,
})
canvas.add(fabricImage)
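One wiring detail worth noting: fromObject above constructs new fabric.Lottie(...), so the class has to be attached to the fabric namespace before any JSON is loaded back. A hedged usage sketch (the './Lottie' import path and the canvas id 'c' are assumptions):

import { fabric } from 'fabric'
import Lottie from './Lottie'

// Register the custom class so fabric can enliven 'lottie' objects from JSON.
fabric.Lottie = Lottie

const canvas = new fabric.Canvas('c')
canvas.add(new fabric.Lottie('https://assets5.lottiefiles.com/private_files/lf30_rttpmsbc.json', {
    scaleX: 0.5,
}))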

Text area in svg shapes

I created a text area in an SVG shape using JointJS. However, I am not able to enter any text into the text area. How do I make the text area editable?
Code:
var graph = new joint.dia.Graph;
var paper = new joint.dia.Paper({
el: $('#myholder'),
width: 1500,
height: 700,
model: graph
});
// Create a custom element.
// ------------------------
joint.shapes.html = {};
joint.shapes.html.Element = joint.shapes.basic.Generic.extend(_.extend({}, joint.shapes.basic.PortsModelInterface, {
markup: '<g class="rotatable"><g class="scalable"><rect/></g><g class="inPorts"/><g class="outPorts"/></g>',
portMarkup: '<g class="port<%=1%>"><circle/></g>',
defaults: joint.util.deepSupplement({
type: 'html.Element',
size: {width: 100, height: 80},
inPorts: [],
outPorts: [],
attrs: {
'.': {magnet: false},
rect: {
stroke: 'none', 'fill-opacity': 0, width: 150, height: 250,
},
circle: {
r: 6, //circle radius
magnet: true,
stroke: 'black'
},
'.inPorts circle': {fill: 'green', magnet: 'passive', type: 'input'},
'.outPorts circle': {fill: 'red', type: 'output'}
}
}, joint.shapes.basic.Generic.prototype.defaults),
getPortAttrs: function (portName, index, total, selector, type) {
var attrs = {};
var portClass = 'port' + index;
var portSelector = selector + '>.' + portClass;
var portCircleSelector = portSelector + '>circle';
attrs[portCircleSelector] = {port: {id: portName || _.uniqueId(type), type: type}};
attrs[portSelector] = {ref: 'rect', 'ref-y': (index + 1) * (10 / total)};
if (selector === '.outPorts') {
attrs[portSelector]['ref-dx'] = 0;
}
return attrs;
}
}));
// Create a custom view for that element that displays an HTML div above it.
// -------------------------------------------------------------------------
joint.shapes.html.ElementView = joint.dia.ElementView.extend({
template: [
'<div class="html-element">',
'<button class="delete">x</button>',
'<span id="lbl" value="Please write here"></span>',
'<textarea id="txt" type="text" value="Please write here"></textarea>',
'</div>'
].join(''),
initialize: function () {
_.bindAll(this, 'updateBox');
joint.dia.ElementView.prototype.initialize.apply(this, arguments);
this.$box = $(_.template(this.template)());
// Prevent paper from handling pointerdown.
this.$box.find('input,select').on('mousedown click', function (evt) {
evt.stopPropagation();
});
// This is an example of reacting on the input change and storing the input data in the cell model.
this.$box.find('textarea').on('change', _.bind(function (evt) {
this.model.set('textarea', $(evt.target).val());
}, this));
this.$box.find('.delete').on('click', _.bind(this.model.remove, this.model));
// Update the box position whenever the underlying model changes.
this.model.on('change', this.updateBox, this);
// Remove the box when the model gets removed from the graph.
this.model.on('remove', this.removeBox, this);
this.updateBox();
this.listenTo(this.model, 'process:ports', this.update);
joint.dia.ElementView.prototype.initialize.apply(this, arguments);
},
render: function () {
joint.dia.ElementView.prototype.render.apply(this, arguments);
this.paper.$el.prepend(this.$box);
// this.paper.$el.mousemove(this.onMouseMove.bind(this)), this.paper.$el.mouseup(this.onMouseUp.bind(this));
this.updateBox();
return this;
},
renderPorts: function () {
var $inPorts = this.$('.inPorts').empty();
var $outPorts = this.$('.outPorts').empty();
var portTemplate = _.template(this.model.portMarkup);
_.each(_.filter(this.model.ports, function (p) {
return p.type === 'in'
}), function (port, index) {
$inPorts.append(V(portTemplate({id: index, port: port})).node);
});
_.each(_.filter(this.model.ports, function (p) {
return p.type === 'out'
}), function (port, index) {
$outPorts.append(V(portTemplate({id: index, port: port})).node);
});
},
update: function () {
// First render ports so that `attrs` can be applied to those newly created DOM elements
// in `ElementView.prototype.update()`.
this.renderPorts();
joint.dia.ElementView.prototype.update.apply(this, arguments);
},
updateBox: function () {
// Set the position and dimension of the box so that it covers the JointJS element.
var bbox = this.model.getBBox();
// Example of updating the HTML with a data stored in the cell model.
// paper.on('blank:pointerdown', function(evt, x, y) { this.$box.find('textarea').toBack(); });
this.$box.find('span').text(this.model.get('textarea'));
this.model.on('cell:pointerclick', function (evt, x, y) {
this.$box.find('textarea').toFront();
});
this.$box.css({width: bbox.width, height: bbox.height, left: bbox.x, top: bbox.y, transform: 'rotate(' + (this.model.get('angle') || 0) + 'deg)'});
},
removeBox: function (evt) {
this.$box.remove();
}
});
// Create JointJS elements and add them to the graph as usual.
// -----------------------------------------------------------
var el1 = new joint.shapes.html.Element({
position: {x: 600, y: 250},
size: {width: 170, height: 100},
inPorts: ['in'],
outPorts: ['out'],
textarea: 'Start writing'
});
var el2 = new joint.shapes.html.Element({
position: {x: 600, y: 400},
size: {width: 170, height: 100},
inPorts: ['in'],
outPorts: ['out'],
textarea: 'Start writing'
});
graph.addCells([el1, el2]);
Also, is it possible to scale the SVG shape based on the text inside the text area?
I assume that you are using the CSS stylesheet from the JointJS HTML tutorial (http://jointjs.com/tutorial/html-elements).
Note that .html-element has pointer-events set to none. With that set, all events are propagated to the (JointJS) SVG element underneath the HTML element. The paper therefore knows which element was interacted with and can, for example, start dragging it.
.html-element {
pointer-events: none;
}
I suggest creating an exception for the textarea by adding the following CSS rule:
.html-element textarea {
pointer-events: all;
}
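It may also be worth extending the stopPropagation handler in the view's initialize(), since the selector in the question ('input,select') does not cover the textarea; a hedged sketch following the question's own jQuery pattern:

// Keep the paper from starting a drag when the textarea itself is clicked.
this.$box.find('textarea').on('mousedown click', function (evt) {
    evt.stopPropagation();
});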
