I have an mp4 video with an animation cut so that, when looped, it shows no visible break. When I ask jPlayer in Chrome to loop it, it works fine, but in Firefox it pauses itself for about a third of a second before replaying the loop.
So I tried to have the video start itself over again when it reaches its last half second. I added a half second to the end of the video that matches the first half second, and when the jPlayer timeupdate event determined that we were in that last half second, it would call play with a start time that lines it up with the current position (0 + (0.5 - timeLeft)). Once again Chrome did this well, and while the match was a bit better, Firefox still failed to render a perfect loop.
Is this just a limitation of the current state of web browsers, or am I missing something?
function buildJPlayerMotions(video_name) {
  return function readyVideo() {
    $(this).jPlayer("setMedia", {
      m4v: "/assets/videos/" + video_name + ".mp4"
    });
  };
}

function loadMotionsJplayer(id) {
  $('#' + id).jPlayer({
    ready: buildJPlayerMotions(id),
    swfPath: "/assets",
    supplied: "m4v",
    backgroundColor: "#FFFFFF",
    size: { width: '728px', height: '402px' },
    timeupdate: function() {
      var currTime = $('#' + id).data().jPlayer.status.currentTime;
      var movieLength = $('#' + id).data().jPlayer.status.duration;
      var timeRemaining = movieLength - currTime;
      if (timeRemaining < 0.5) {
        $('#' + id).jPlayer("pause");
        $('#' + id).jPlayer("play", 0 + (0.5 - timeRemaining));
      }
    }
  });
}
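For comparison, here is a minimal sketch of the same goal outside jPlayer, relying on the browser's native loop flag on a plain video element (the element id is made up for illustration; whether the loop is truly gapless still varies by browser and encoder settings):

// Hypothetical plain <video id="anim" src="/assets/videos/animation.mp4"> element.
var video = document.getElementById('anim');
video.loop = true;   // let the browser wrap playback around itself
video.muted = true;  // autoplay is generally only allowed for muted video
video.play();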
I'm using chrome.alarms to display a countdown on the extension icon, updating roughly every second.
The problem I'm facing is that if I move the system time forward (manually, or because the computer woke up from sleep), the counter starts racing, updating several times a second in an attempt to catch up with the new system time; if I move the time backwards, the timer stops.
How can I fix this so it simply "jumps" to the current time?
Test extension:
manifest.json
{
  "manifest_version": 3,
  "name": "Test timer",
  "author": "test",
  "description": "Test chrome.alarm",
  "version": "0.0.1",
  "permissions": [
    "alarms"
  ],
  "action": {},
  "background": {
    "service_worker": "service_worker.js"
  }
}
service_worker.js
let i = 0,
    start = new Date().getTime(),
    pad = (n, s = 2) => ("0" + n).slice(-s),
    time = d => pad(d.getHours()) + ":" + pad(d.getMinutes()) + ":" + pad(d.getSeconds()) + "." + pad(d.getMilliseconds(), 3);

chrome.alarms.onAlarm.addListener(loop);
console.log("started");
loop();

function loop()
{
  const now = new Date().getTime(),
        // make sure the timer doesn't drift from the starting point
        next = now - ((now - start) % 1000);
  // repeat after 1 second
  chrome.alarms.create({ when: next + 1000 });
  chrome.action.setBadgeText({ text: "" + (i = ++i % 1000) });
  console.log("Date:", time(new Date(now)), "alarm:", time(new Date(next)));
}
I've tested your code and have some new findings. I ran into some service worker issues, and I think they might have something to do with your "racing" alarm.
If I keep the service worker page open all the time, it runs smoothly and correctly.
If I don't keep the service worker open, it will either "race" or restart after a while, even if I don't change the system time or let my device go to sleep.
Since you're using Manifest V3, I have to tell you that Manifest V3 has some known service worker issues; it breaks sometimes. For more information, you can read this doc and refer to these workarounds.
Patient: "doctor, I have a pain in my back".
Doctor: "and when does it hurt?"
Patient: "only when I breathe"
Doctor: "thus it's all resolved. Don't breathe anymore"
Fiddling with system time and chorme.alarms isn't very wise.
Normally you can move the clock forward (just for testing), but turning it back can create unusual issues.
(unless You refresh\reload the extension first).
Instead, we should investigate the behavior after waking up the PC.
Furthermore, these alarms have been designed for intervals of not less than one minute.
We agree that this limitation doesn't apply in development, but setting one-second alarms is a bit bold, isn't it?
So avoid breathing and the problem will resolve itself :-)
Joke apart, You could open a minimized tab and establish a connection between service worker and that tab making a continous ping-pong.
Till the service worker will be alive you''ll be able to dispay the countdown in extension icon using just setTimeour\ setInterval.
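A minimal sketch of that ping-pong idea, assuming the minimized tab is an extension page (the file name, port name, and 20-second interval are only illustrative):

// keepalive.js, loaded by the minimized extension tab
let port;
function connect() {
  port = chrome.runtime.connect({ name: "keepalive" });
  // if the worker is stopped and restarted, reconnect so it is kept busy again
  port.onDisconnect.addListener(connect);
}
connect();
setInterval(() => {
  try { port.postMessage("ping"); } catch (e) { connect(); }
}, 20000);

// service_worker.js side: answer the pings so there is regular traffic on the port
chrome.runtime.onConnect.addListener(p => {
  if (p.name !== "keepalive") return;
  p.onMessage.addListener(() => p.postMessage("pong"));
});

With the worker kept alive this way, a plain setTimeout/setInterval can drive the badge text, as suggested above.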
Here is my current solution, using a combination of performance.now() and setTimeout:
let i = 0,
    start = new Date().getTime(),
    pad = (n, s = 2) => ("0" + n).slice(-s),
    time = d => pad(d.getHours()) + ":" + pad(d.getMinutes()) + ":" + pad(d.getSeconds()) + "." + pad(d.getMilliseconds(), 3),
    timer,
    perfPrev = 0;

chrome.alarms.onAlarm.addListener(loop);
console.log("started");
loop();

function loop()
{
  const now = new Date().getTime(),
        perfNow = performance.now();
  // make sure the timer doesn't drift from the starting point
  const next = now - ((now - start) % 1000);
  // repeat after 1 second
  chrome.alarms.create("loop", { when: next + 1000 });
  // detect "racing" when the system time was changed forward
  if (perfNow - perfPrev < 800)
    return;

  perfPrev = perfNow;
  clearTimeout(timer);
  // backup plan: when the system time is changed backwards, the alarm won't fire on time
  timer = setTimeout(() =>
  {
    chrome.alarms.clear("loop");
    loop();
  }, 1000);

  chrome.action.setBadgeText({ text: "" + (i = ++i % 1000) });
  console.log("Date:", time(new Date(now)), "alarm:", time(new Date(next)));
}
I want to implement session-expiry functionality: when the user stays on the page for more than 15 minutes, it should show an alert that the session has expired. Meanwhile, if someone copies that URL and pastes it into another tab/browser/incognito window (where they are already logged in), or refreshes the page, the countdown should not restart. For example, if the countdown is partway through, say 5 minutes left of the 15, then after pasting the URL into another tab it should continue from 5 minutes left, then 4 minutes left, and so on, instead of restarting from 15 minutes.
I'm not sure of the best way to implement this in a MERN-stack project (should I use a library, a cookie, or local storage?), and how to do it securely.
I tried a sample implementation, but it doesn't work across browsers or in incognito (the session restarts), and after a refresh the countdown starts again even within the 15 minutes. A better example or a suggestion on how to implement this would be really appreciated. TIA :-) (A rough timestamp-based sketch follows my code below.)
My dummy implementation example -
countdowntimer.tsx file
import React from "react";
import { useHistory } from "react-router-dom";
import { COUNTER_KEY, countDown } from "./countdowntimer.utils";

// NOTE: the component wrapper and imports are assumed here so the fragment is self-contained.
const CountdownTimer = () => {
  const history = useHistory();
  const [countdownSessionExpired, setCountdownSessionExpired] = React.useState(false);

  React.useEffect(() => {
    const countDownTime = localStorage.getItem(COUNTER_KEY) || 10;
    countDown(Number(countDownTime), () => {
      console.log("countDownTime:", countDownTime);
      setCountdownSessionExpired(true);
    });
  }, [history]);

  return (
    <>
      {countdownSessionExpired ? (
        <div>session is expired</div>
      ) : (
        <div>view the page</div>
      )}
    </>
  );
};
==================================================================================================
countdowntimer.utils.tsx file
export const COUNTER_KEY = "myCounter";

export function countDown(i: number, callback: Function) {
  const timer = setInterval(() => {
    // format the remaining time as mm:ss (pad with a leading zero as strings, not numbers)
    let minutes: number | string = Math.floor(i / 60);
    let seconds: number | string = i % 60;
    minutes = minutes < 10 ? "0" + minutes : minutes;
    seconds = seconds < 10 ? "0" + seconds : seconds;
    // document.getElementById("displayDiv")!.innerHTML = "Time (h:min:sec) left for this station is " + "0:" + minutes + ":" + seconds;
    console.log("Time (h:min:sec) left for this station is:", "0:" + minutes + ":" + seconds);
    // decrement only once per tick and persist the remaining seconds for reloads
    if (i-- > 0) {
      localStorage.setItem(COUNTER_KEY, String(i));
    } else {
      localStorage.removeItem(COUNTER_KEY);
      clearInterval(timer);
      callback();
    }
  }, 1000);
}
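A minimal sketch of a timestamp-based alternative to the counter above (the storage key and helper names are hypothetical): storing an absolute expiry time means a refresh or a second tab of the same browser profile computes the same remaining time, while true cross-browser/incognito behaviour would still need the expiry to come from the server, e.g. with the login response.

const EXPIRY_KEY = "sessionExpiresAt"; // hypothetical storage key

// Call once when the session starts (e.g. right after login).
function startSession(minutes) {
  if (!localStorage.getItem(EXPIRY_KEY)) {
    localStorage.setItem(EXPIRY_KEY, String(Date.now() + minutes * 60 * 1000));
  }
}

// Remaining milliseconds; identical after a refresh or in a new tab of the same profile,
// because the stored value is an absolute point in time rather than a ticking counter.
function remainingMs() {
  const expiresAt = Number(localStorage.getItem(EXPIRY_KEY) || 0);
  return Math.max(0, expiresAt - Date.now());
}

// Poll once per second and fire the "session expired" UI exactly once.
function watchSession(onExpired) {
  const id = setInterval(() => {
    if (localStorage.getItem(EXPIRY_KEY) && remainingMs() === 0) {
      clearInterval(id);
      localStorage.removeItem(EXPIRY_KEY);
      onExpired();
    }
  }, 1000);
}

Usage would be startSession(15) after login and watchSession(() => setCountdownSessionExpired(true)) inside the effect.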
I was able to print raw ZPL commands from PHP directly to the printer, except that I can't print more than one label at a time on the TLP 2844-Z since the Windows update to Windows 10, which was also my first time installing WebClientPrint Processor (WCPP) on Windows 10. The same thing happened when I tried emulating a ZPL printer in the ZPL Printer app. The only exception is Safari on the Mac, where it works fine.
Working request script (still working in Safari, and previously in all other browsers):
for (var i = 0; i < rows.length; i++) {
  javascript:jsWebClientPrint.print('useDefaultPrinter=' + $('#useDefaultPrinter').attr('checked') + '&printerName=' + $('#installedPrinterName').val() + '&param=' + rows[i].value);
}
What's blocking me is the permission prompt: in Chrome the prompts weren't generated as many times as there were requests (which isn't a problem in Safari). For example, when there were 2 requests it only asked for permission once, resulting in only 1 label being printed when it should have been 2.
I was able to reproduce the above by using the following script:
for (var i = 0; i < rows.length; i++) {
  var url = ('useDefaultPrinter=' + $('#useDefaultPrinter').attr('checked') + '&printerName=' + $('#installedPrinterName').val() + '&param=' + rows[i].value);
  window.open('webclientprint:' + domain + url);
}
This isn't ideal, since many tabs get opened just to print, whereas previously no new tab was needed for the same task.
Any idea how to solve this so that it prints as many labels as requested?
What I did to solve this was to make each request in a separate tab and close that tab once it has executed. To keep it simple, I put it into a separate function.
The request script changed to:
for (var i = 0; i < rows.length; i++) {
  if (i > 0)
    delayPrint(rows[i], i); // separate function
  else
    javascript: jsWebClientPrint.print('useDefaultPrinter=' + $('#useDefaultPrinter').attr('checked') + '&printerName=' + $('#installedPrinterName').val() + '&param=' + rows[i].value);
}
The separate function delays each request, opens it in a separate tab, and closes that tab once it has executed:
function delayPrint(data, interval) {
  setTimeout(function() {
    // use the row that was passed in (data), not the loop variable, which is out of scope here
    var wnd = window.open('webclientprint:' + domain + ('useDefaultPrinter=' + $('#useDefaultPrinter').attr('checked') + '&printerName=' + $('#installedPrinterName').val() + '&param=' + data.value));
    setTimeout(function() {
      wnd.close(); // close once it's done
    }, 1000);
  }, interval * 3000);
}
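For reference, a small variation on the same idea (a sketch, not the code I actually used): chain the prints one after another instead of scheduling them at fixed 3-second offsets, so each helper tab is opened and closed before the next label starts.

// Hypothetical helper: prints the rows strictly one after another.
function printSequentially(rows, index) {
  if (index >= rows.length) return;
  var url = 'useDefaultPrinter=' + $('#useDefaultPrinter').attr('checked') +
            '&printerName=' + $('#installedPrinterName').val() +
            '&param=' + rows[index].value;
  var wnd = window.open('webclientprint:' + domain + url);
  setTimeout(function() {
    wnd.close();                          // close the helper tab once the request has been handed off
    printSequentially(rows, index + 1);   // then move on to the next label
  }, 1000);
}
// usage: printSequentially(rows, 0);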
I have a simple Node.js app with a JavaScript file and a Pug template file.
In the JavaScript file I require iohook, and the code logic itself works: it detects pressed keys and I can trigger actions based on that.
But here is my problem: I have a Pug template for my layout with a few simple buttons and divs. If you click a button, the div starts a timer - imagine a 60-second timer starting onClick. This also works.
Now I want to combine the two: if I press a certain key like "1", it should start the timer for the first div. That's where I'm stuck: I haven't managed to combine them.
So my question is: what do you recommend to solve this? I don't really know how to continue; any tips are appreciated! (One possible bridge is sketched after the template below.)
I tried to require iohook inside the Pug template, but that doesn't work because the template's scope doesn't know the require function.
If I try to pass ioHook to the template, it always throws an error saying it is undefined.
index.js
const ioHook = require('iohook');

ioHook.on('keydown', event => {
  if (event['keycode'] == 55) {
    // do something
  }
});

ioHook.start(true);
index.pug
html
  body
    .button
      .top-div(onclick="startTimer(60, document.querySelector('#top-span'));")
        | TOP
        span#top-span
    script.
      function startTimer(duration, display) {
        let timer = duration, minutes, seconds;
        let IntervalId = setInterval(function () {
          minutes = parseInt(timer / 60, 10);
          seconds = parseInt(timer % 60, 10);
          minutes = minutes < 10 ? "0" + minutes : minutes;
          seconds = seconds < 10 ? "0" + seconds : seconds;
          display.textContent = minutes + ":" + seconds;
          display.style.backgroundColor = "grey";
          display.parentNode.style.backgroundColor = "grey";
          if (timer > 0) {
            --timer;
          } else {
            display.style.backgroundColor = "limegreen";
            display.parentNode.style.backgroundColor = "limegreen";
            clearInterval(IntervalId);
          }
        }, 1000);
      }
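Not something from the files above, just a minimal sketch of one way to bridge the two, assuming a WebSocket via the ws package (package choice, port, and keycode are illustrative): the Node process forwards iohook keydown events to the page, and the page maps them to startTimer calls.

// server side, next to the existing iohook code (assumes `npm install ws`)
const WebSocket = require('ws');
const ioHook = require('iohook');

const wss = new WebSocket.Server({ port: 8081 }); // port chosen arbitrarily

ioHook.on('keydown', event => {
  // forward every keycode to all connected pages
  wss.clients.forEach(client => {
    if (client.readyState === WebSocket.OPEN) {
      client.send(JSON.stringify({ keycode: event.keycode }));
    }
  });
});
ioHook.start();

And on the page, inside the template's script. block after startTimer is defined:

const socket = new WebSocket('ws://localhost:8081');
socket.onmessage = msg => {
  const { keycode } = JSON.parse(msg.data);
  if (keycode === 2) // log event.keycode on the server to confirm which code the "1" key reports
    startTimer(60, document.querySelector('#top-span'));
};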
I am trying to run PhantomJS from within a Scala application using Akka actors:
val process = Process("phantomjs --ignore-ssl-errors=yes " + myrenderscript.js + args ...)

val result = process.run(processLogger, true).exitValue() match {
  case ExitCode.SUCCESS     => Left(Success)
  case ExitCode.TIMEOUT     => Right(TimeoutError)
  case ExitCode.OPEN_FAILED => Right(NetworkError)
  case _                    => Right(UnknownError)
}
The myrenderscript.js looks like this:
var version = "1.1";

var TIMEOUT = 30000,
    EXIT_SUCCESS = 0,
    EXIT_TIMEOUT = 2,
    EXIT_OPEN_FAILED = 3;

if (phantom.args.length < 2) {
  console.log("Usage: phantomjs render.js parentUrl output [width height]");
  phantom.exit(1);
}

var url = phantom.args[0];
var output = phantom.args[1];
var width = parseInt(phantom.args[2] || 1024);
var height = parseInt(phantom.args[3] || 1024);
var clipwidth = parseInt(phantom.args[4] || 1024);
var clipheight = parseInt(phantom.args[5] || 1024);
var zoom = parseFloat(phantom.args[6] || 1.0);

var phantom_version = phantom.version.major + "." + phantom.version.minor + "." + phantom.version.patch;
var userAgentString = "PhantomJS/" + phantom_version + " screenshot-webservice/" + version;

renderUrlToFile(url, output, width, height, clipwidth, clipheight, zoom, userAgentString, function (url, file) {
  console.log("Rendered '" + url + "' at size (" + width + "," + height + ") into '" + output + "'");
  phantom.exit(EXIT_SUCCESS);
  phantom = null;
});

setTimeout(function () {
  console.error("Timeout reached (" + TIMEOUT + "ms): " + url);
  phantom.exit(EXIT_TIMEOUT);
}, TIMEOUT);

function renderUrlToFile(url, file, width, height, clipwidth, clipheight, zoom, userAgentString, callback) {
  console.log("renderUrlToFile start: " + url);
  var page = new WebPage();
  page.viewportSize = { width: width, height: height };
  page.clipRect = { top: 0, left: 0, width: clipwidth, height: clipheight };
  page.settings.userAgent = userAgentString;
  page.zoomFactor = zoom;
  page.open(url, function (status) {
    console.log("renderUrlToFile open page: " + url);
    if (status !== "success") {
      console.log("Unable to render '" + url + "' (" + status + ")");
      page.release();
      page.close();
      page = null;
      phantom.exit(EXIT_OPEN_FAILED);
    } else {
      console.log("renderUrlToFile open page success and pre-render: " + url);
      page.render(file);
      console.log("renderUrlToFile open page post-render: " + url);
      page.release();
      page.close();
      page = null;
      callback(url, file);
    }
  });
}
Before the process is created and after it finishes running, about 4 new threads are created.
Each time the method that creates the process is called, new threads are created and started. After the process is done, the threads go back to a monitoring state. Eventually my application climbs upwards of 500+ threads (I'm capturing a large website and its internal links).
How do I get Scala to clean up the threads that are created when running PhantomJS?
Edit:
I've changed the Scala code to do the following:
val process = Process("phantomjs --ignore-ssl-errors=yes " + myrenderscript.js + args ...).run(processLogger, connectInput)

val result = process.exitValue() match {
  case ExitCode.SUCCESS     => Left(Success)
  case ExitCode.TIMEOUT     => Right(TimeoutError)
  case ExitCode.OPEN_FAILED => Right(NetworkError)
  case _                    => Right(UnknownError)
}

process.destroy()
Yet the threads still live on...
I figured out why the threads are not being cleaned up, but I don't fully understand it. So if someone posts the true answer here, I'll vote that answer up.
The problem was that I was setting the connectInput value to true. When I set it to false, the threads get destroyed as expected. I'm not sure why.
When it is set to true, a thread dump reveals that one of the threads is blocking the others:
Thread-3#2830 daemon, prio=5, in group 'main', status: 'RUNNING'
blocks Thread-63#4131
blocks Thread-60#4127
blocks Thread-57#4125
blocks Thread-54#4121
blocks Thread-51#4103
blocks Thread-48#4092
blocks Thread-45#4072
blocks Thread-42#4061
blocks Thread-39#4054
blocks Thread-36#4048
blocks Thread-33#4038
blocks Thread-30#4036
blocks Thread-27#4008
blocks Thread-24#3996
blocks Thread-21#3975
blocks Thread-18#3952
blocks Thread-15#3939
blocks Thread-12#3905
blocks Thread-9#3885
blocks Thread-6#3850
at java.io.FileInputStream.readBytes(FileInputStream.java:-1)
at java.io.FileInputStream.read(FileInputStream.java:220)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
at java.io.FilterInputStream.read(FilterInputStream.java:116)
at java.io.FilterInputStream.read(FilterInputStream.java:90)
at scala.sys.process.BasicIO$.loop$1(BasicIO.scala:225)
at scala.sys.process.BasicIO$.transferFullyImpl(BasicIO.scala:233)
at scala.sys.process.BasicIO$.transferFully(BasicIO.scala:214)
at scala.sys.process.BasicIO$.connectToIn(BasicIO.scala:183)
at scala.sys.process.BasicIO$$anonfun$input$1.apply(BasicIO.scala:190)
at scala.sys.process.BasicIO$$anonfun$input$1.apply(BasicIO.scala:189)
at scala.sys.process.ProcessBuilderImpl$Simple$$anonfun$2.apply$mcV$sp(ProcessBuilderImpl.scala:72)
at scala.sys.process.ProcessImpl$Spawn$$anon$1.run(ProcessImpl.scala:22)
I initially thought it was the process logger, but that wasn't the case.
Can someone explain this to me?