Read media keys from Go program - linux

I am writing a cross-platform distributed media player for use on my own network.
The current version has four parts:
A NAS holding the audio files.
A metadata server holding information about the files.
An HTML/JS client that allows manipulation of the metadata server and queues media for:
A player daemon.
My problem lies with part 4. The player has no UI, nor does it need one. It will be controlled via network commands from the client and by listening to the media keys on its current host.
The player daemon needs to work on both Windows and Linux, but I can't seem to figure out a way (any way) to read these keys on either OS. Most of the ways I know of to read the keyboard will not read these keys at all.

With the help of several commenters, I now have it all figured out.
The Linux version is as follows:
package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "os"
    "os/exec"
    "syscall"
)

// dumpDevices parses the /proc/bus/input/devices file for keyboard devices.
// Copied from `github.com/gearmover/keylogger` with trivial modification.
func dumpDevices() ([]string, error) {
    cmd := exec.Command("/bin/sh", "-c", "/bin/grep -E 'Handlers|EV=' /proc/bus/input/devices | /bin/grep -B1 'EV=120013' | /bin/grep -Eo 'event[0-9]+'")
    output, err := cmd.Output()
    if err != nil {
        return nil, err
    }

    buf := bytes.NewBuffer(output)
    var devices []string
    for line, err := buf.ReadString('\n'); err == nil; {
        devices = append(devices, "/dev/input/"+line[:len(line)-1])
        line, err = buf.ReadString('\n')
    }
    return devices, nil
}

// Using MS names, just because I don't feel like looking up the Linux versions.
var keys = map[uint16]string{
    0xa3: "VK_MEDIA_NEXT_TRACK",
    0xa5: "VK_MEDIA_PREV_TRACK",
    0xa6: "VK_MEDIA_STOP",
    0xa4: "VK_MEDIA_PLAY_PAUSE",
}

// Most of the code here comes from `github.com/gearmover/keylogger`.
func main() {
    // Drop privileges when executing other programs.
    syscall.Setgid(65534)
    syscall.Setuid(65534)

    // Dump our keyboard devices from /proc/bus/input/devices.
    devices, err := dumpDevices()
    if err != nil {
        fmt.Println(err)
    }
    if len(devices) == 0 {
        fmt.Println("No input devices found")
        return
    }

    // Bring back our root privs.
    syscall.Setgid(0)
    syscall.Setuid(0)

    // Open the first keyboard device.
    input, err := os.OpenFile(devices[0], os.O_RDONLY, 0600)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer input.Close()

    // Log media keys.
    var buffer = make([]byte, 24)
    for {
        // Read the input events as they come in.
        n, err := input.Read(buffer)
        if err != nil {
            return
        }
        if n != 24 {
            fmt.Println("Weird Input Event Size: ", n)
            continue
        }

        // Parse the input event according to the <linux/input.h> header struct.
        binary.LittleEndian.Uint64(buffer[0:8]) // Time stamp stuff I could care less about
        binary.LittleEndian.Uint64(buffer[8:16])
        etype := binary.LittleEndian.Uint16(buffer[16:18])        // Event type. Always 1 for keyboard events.
        code := binary.LittleEndian.Uint16(buffer[18:20])         // Key scan code.
        value := int32(binary.LittleEndian.Uint32(buffer[20:24])) // Press (1), release (0), or repeat (2).

        if etype == 1 && value == 1 && keys[code] != "" {
            // In a real application I would send a message here.
            fmt.Println(keys[code])
        }
    }
}
And the Windows version:
package main

import (
    "fmt"
    "syscall"
    "time"
)

var user32 = syscall.NewLazyDLL("user32.dll")
var procGAKS = user32.NewProc("GetAsyncKeyState")

// Key codes from MSDN.
var keys = [4]uint{
    0xb0, // VK_MEDIA_NEXT_TRACK
    0xb1, // VK_MEDIA_PREV_TRACK
    0xb2, // VK_MEDIA_STOP
    0xb3, // VK_MEDIA_PLAY_PAUSE
}

var names = [4]string{
    "VK_MEDIA_NEXT_TRACK",
    "VK_MEDIA_PREV_TRACK",
    "VK_MEDIA_STOP",
    "VK_MEDIA_PLAY_PAUSE",
}

func main() {
    fmt.Println("Running…")

    // Since I don't want to trigger dozens of times for each key I need to track state.
    // I could check the bits of GAKS' return value, but that is not reliable.
    down := [4]bool{false, false, false, false}

    for {
        time.Sleep(1 * time.Millisecond)
        for i, key := range keys {
            // val is not a simple boolean!
            // 0 means "not pressed" (also certain errors).
            // If the LSB is set the key was just pressed (this may not be reliable).
            // If the MSB is set the key is currently down.
            val, _, _ := procGAKS.Call(uintptr(key))

            // Turn a press into a transition and track key state.
            goingdown := false
            if int(val) != 0 && !down[i] {
                goingdown = true
                down[i] = true
            }
            if int(val) == 0 && down[i] {
                down[i] = false
            }
            if goingdown {
                // In a real application I would send a message here.
                fmt.Println(names[i])
            }
        }
    }
}
The only "issue" is that the Linux version must be run as root. For me this is not a problem. If running as root is a problem I think there is a way that involves X11...

Related

Is it possible to do append blob restore using multiple threads?

Which version of the SDK was used?
v0.11.0
Which platform are you using? (ex: Windows, Linux, Debian)
Windows
What problem was encountered?
[Approach]
Acquire the lease before the goroutines are started, then call
AppendBlock(ctx, bytes.NewReader(rangeData), azblob.AppendBlobAccessConditions{}, nil)
concurrently inside each goroutine.
We are using azblob.AppendPositionAccessConditions{IfAppendPositionEqual: subRangeSize} in the AppendBlock call.
It works well without goroutines but fails when the calls run concurrently:
===== RESPONSE ERROR (ServiceCode=AppendPositionConditionNotMet) =====
Description=The append position condition specified was not met.
FourMegaByteAsBytes := common.FourMegaByteAsBytes
var strLeaseID string = ""
var respAcquireLease *azblob.BlobAcquireLeaseResponse
subRangeSize := int64(0)

// Restore data to the append blob.
for currpos := int64(0); currpos < SourceBlobLength; {
    subRangeSize = int64(math.Min(float64(SourceBlobLength-currpos), float64(FourMegaByteAsBytes)))
    rangeData := make([]byte, subRangeSize)

    if len(strLeaseID) == 0 {
        // Acquire the lease for the restore blob.
        respAcquireLease, err = blobURL.AcquireLease(ctx, "", -1, azblob.ModifiedAccessConditions{})
        if err != nil {
            _, err = blobURL.AppendBlock(ctx, bytes.NewReader(rangeData),
                azblob.AppendBlobAccessConditions{}, nil)
        } else {
            strLeaseID = respAcquireLease.LeaseID()
            _, err1 := blobURL.AppendBlock(ctx, bytes.NewReader(rangeData),
                azblob.AppendBlobAccessConditions{
                    azblob.ModifiedAccessConditions{},
                    azblob.LeaseAccessConditions{LeaseID: strLeaseID},
                    azblob.AppendPositionAccessConditions{},
                }, nil)
            if err1 != nil {
                log.Fatal(err1)
                return
            }
        }
    } else {
        _, err = blobURL.AppendBlock(ctx, bytes.NewReader(rangeData),
            azblob.AppendBlobAccessConditions{
                azblob.ModifiedAccessConditions{},
                azblob.LeaseAccessConditions{LeaseID: strLeaseID},
                azblob.AppendPositionAccessConditions{},
            }, nil)
    }
    currpos += subRangeSize
}
Have you found a mitigation/solution?
No
Appending to a blob requires that you hold a lease, so only the client (that is, the thread) holding the lease can write to the blob.
So the answer to your question is no, it is not possible to append from multiple threads at the same time.
There are two possible workarounds (a sketch of the first follows below):
Have all your threads write to a queue, and let a single process read from the queue and write to the blob.
Program the threads so that each one waits for the lease to become available. Note that the minimum duration of a lease is 15 seconds.
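As a rough illustration of the first workaround, here is a minimal sketch that follows the same azblob calls the question already uses; the chunk type, channel, function name, and the SDK import path are assumptions for the example, not part of the original code. The reader goroutines only produce chunks, and one goroutine that holds the lease performs every AppendBlock:
package main

import (
    "bytes"
    "context"

    "github.com/Azure/azure-storage-blob-go/azblob" // import path assumed for the v0.x SDK
)

// chunk is a hypothetical unit of work produced by the reader goroutines.
type chunk struct {
    data []byte
}

// appendFromQueue is the single writer: it holds the lease and performs
// every AppendBlock itself, in the order chunks arrive on the channel.
func appendFromQueue(ctx context.Context, blobURL azblob.AppendBlobURL, leaseID string, chunks <-chan chunk) error {
    for c := range chunks {
        _, err := blobURL.AppendBlock(ctx, bytes.NewReader(c.data),
            azblob.AppendBlobAccessConditions{
                LeaseAccessConditions: azblob.LeaseAccessConditions{LeaseID: leaseID},
            }, nil)
        if err != nil {
            return err
        }
    }
    return nil
}
The goroutines that read the source then only build chunks and send them on the channel; because a single goroutine issues all the appends, the append position can never race.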

bufio scanner and handling new lines

I've got 2 processes communicating over TCP sockets. Side A sends a string to Side B, which is sometimes encrypted using the standard crypto/cipher package. The resulting string may include a newline character, but Side B's bufio scanner interprets it as the end of the request. I want Side B to keep accepting lines, appending them, and wait for a known end-of-command character before processing further. Side B will return a response to Side A, so the connection remains open and I therefore cannot use a close-connection event as the command delimiter.
Everything works fine for single-line commands, but the newline characters in the encrypted output cause issues (about 10% of the time).
Side A will send in the following formats (the third is a legitimate example of a problem string I'm trying to process correctly):
callCommand()
callCommand("one","two","three")
callCommand("string","encrypted-data-to-follow","[7b��Cr��l��G���bH�#x��������� �(z�$�a��0��ڢ5Y7+��U�QT�ΐl�K�(�n�U��J����QK�BX�+�l\8H��-g�y.�.�1�f��I�C�Ȓ㳿���o�xz�8?��c�e ��Tb��?4�hDW���
�<���Е�gc�������N�V���ۓP8 �����O3")
We can fairly reliably say the end-of-command marker is a closing parenthesis ")" followed by a newline character.
Side A's function to send to side B:
func writer(text string) string {
    conn, err := net.Dial("tcp", TCPdest)
    t := time.Now()
    if err != nil {
        if _, t := err.(*net.OpError); t {
            fmt.Println("Some problem connecting.\r\n")
        } else {
            fmt.Println("Unknown error: " + err.Error() + "\r\n")
        }
    } else {
        conn.SetWriteDeadline(time.Now().Add(1 * time.Second))
        _, err = conn.Write([]byte(text + "\r\n"))
        if err != nil {
            fmt.Println("Error writing to stream.\r\n")
        } else {
            timeNow := time.Now()
            if timeNow.Sub(t.Add(time.Duration(5*time.Second))).Seconds() > 5 {
                return "timeout"
            }
            scanner := bufio.NewScanner(conn)
            for {
                ok := scanner.Scan()
                if !ok {
                    break
                }
                if strings.HasPrefix(scanner.Text(), "callCommand(") && strings.HasSuffix(scanner.Text(), ")") {
                    conn.Close()
                    return scanner.Text()
                }
            }
        }
    }
    return "unspecified error"
}
Side B's handling of incoming connections:
src := "192.168.68.100:9000"
listener, _ := net.Listen("tcp", src)
defer listener.Close()
for {
conn, err := listener.Accept()
if err != nil {
fmt.Println("Some connection error: %s\r\n", err)
}
go handleConnection(conn)
}
func handleConnection(conn net.Conn) {
remoteAddr := conn.RemoteAddr().String()
fmt.Println("Client connected from " + remoteAddr + "\r\n")
scanner := bufio.NewScanner(conn)
wholeString := ""
for {
ok := scanner.Scan()
if !ok {
break
}
//Trying to find the index of a new-line character, to help me understand how it's being processed
fmt.Println(strings.Index(scanner.Text(), "\n"))
fmt.Println(strings.Index(wholeString, "\n"))
//for the first line received, add it to wholeString
if len(wholeString) == 0 {
wholeString = scanner.Text()
}
re := regexp.MustCompile(`[a-zA-Z]+\(.*\)\r?\n?`)
if re.Match([]byte(wholeString)) {
fmt.Println("Matched command format")
handleRequest(wholeString, conn)
} else if len(wholeString) > 0 && !re.Match([]byte(wholeString)) {
//Since we didn't match regex, we can assume there's a new-line mid string, so append to wholeString
wholeString += "\n"+scanner.Text()
}
}
conn.Close()
fmt.Println("Client at " + remoteAddr + " disconnected.\r\n")
}
func handleRequest(request string, conn net.Conn) {
fmt.Println("Received: "+request)
}
I'm not really sure this approach on Side B is correct, but I've included my code above. I've seen a few implementations, but a lot of them seem to rely on the connection closing before processing the request, which doesn't suit my scenario.
Any pointers appreciated, thanks.
Your communication "protocol" (one line being one message, not quite a protocol) clearly cannot handle binary data. If you want to send text data in your protocol, you could convert your binary data to text, using a Base64 encoding for example. You would also need some semantics to indicate that some text was converted from binary.
Or you could change your protocol to handle binary data natively. You could prepend the length of the binary data to follow, so that you know you have to read this data as binary and not interpret a newline character as the end of the message.
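For the length-prefix idea, a minimal framing sketch could look like this; the function names are illustrative, and it assumes the message is already available as a byte slice:
package main

import (
    "encoding/binary"
    "io"
)

// writeFrame sends a 4-byte big-endian length followed by the raw payload.
func writeFrame(w io.Writer, payload []byte) error {
    var header [4]byte
    binary.BigEndian.PutUint32(header[:], uint32(len(payload)))
    if _, err := w.Write(header[:]); err != nil {
        return err
    }
    _, err := w.Write(payload)
    return err
}

// readFrame reads the length prefix, then exactly that many payload bytes,
// so a newline inside the payload is never mistaken for a delimiter.
func readFrame(r io.Reader) ([]byte, error) {
    var header [4]byte
    if _, err := io.ReadFull(r, header[:]); err != nil {
        return nil, err
    }
    payload := make([]byte, binary.BigEndian.Uint32(header[:]))
    if _, err := io.ReadFull(r, payload); err != nil {
        return nil, err
    }
    return payload, nil
}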
There are many protocols that already do this very well; perhaps you don't need to come up with a custom one. If you want to send text messages, HTTP is very simple to use, and you could format your data as JSON, using Base64 to convert your binary data to text:
{
    "command": "string",
    "args": [
        "binaryDataAsBase64"
    ]
}
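And a minimal sketch of the Base64-over-JSON variant, assuming the encrypted argument is available as a byte slice; the struct and field names are illustrative:
package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// command mirrors the JSON shape above; binary arguments travel as Base64 text.
type command struct {
    Command string   `json:"command"`
    Args    []string `json:"args"`
}

func main() {
    encrypted := []byte{0x7b, 0x00, 0xff, 0x0a, 0x10} // stand-in for crypto/cipher output
    msg := command{
        Command: "string",
        Args:    []string{base64.StdEncoding.EncodeToString(encrypted)},
    }
    payload, _ := json.Marshal(msg)
    // Safe to send as one text line: Base64 and JSON never emit raw newlines here.
    fmt.Println(string(payload))
}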

AT commands exchange with /dev/tty*

I have a device with a GPRS modem onboard. The GPRS modem connects to a third-party application, and that works. I need to know the signal strength of the connection, so I use the ATZ and then the AT+CSQ commands. When I work through some terminal software it works. I also tried https://github.com/ishuah/bifrost as a terminal, and it works as well. But how can I simply communicate with the device from a program, without a terminal, and without reconnecting or aborting the connection, etc.?
I tried simply echo ATZ > /dev/ttyX, but got no answer.
// This writes, but reads only zeros (((
package main

import (
    "fmt"
    "io"
    "log"
    "time"

    "github.com/jacobsa/go-serial/serial"
)

func Sleep(duration int) {
    time.Sleep(time.Second * time.Duration(duration))
}

func printBuf(b []byte) {
    for _, val := range b {
        fmt.Printf("%x ", val)
    }
}

func main() {
    options := serial.OpenOptions{
        PortName:              "/dev/ttyX",
        BaudRate:              115200,
        DataBits:              8,
        StopBits:              1,
        MinimumReadSize:       0,
        InterCharacterTimeout: 50,
    }

    port, err := serial.Open(options)
    if err != nil {
        log.Printf("port.Read: %v", err)
        return
    }
    // Make sure to close it later.
    defer port.Close()

    var s string = `AT+CSQ`
    b := []byte(s)
    n, err := port.Write(b)
    if err != nil {
        log.Printf("port.Write: %v", err)
    }
    log.Println("Written bytes: ", n)

    //Sleep(1)
    res := make([]byte, 64)
    n, err = port.Read(res)
    if err != nil && err != io.EOF {
        log.Printf("port.Read: %v", err)
    }
    log.Println("READ bytes: ", n)
    printBuf(res)
}
/*
I expect (for example):
---------
ATZ
OK
AT+CSQ
+CSQ 22.4
*/
Most serial devices need a termination character before they will react to the commands they receive.
If you add it, your code should work. Note that it has to be an interpreted (double-quoted) string literal so that \r is an actual carriage return, rather than the literal backslash and "r" that a backquoted raw string would send:
var s string = "AT+CSQ\r"
I don't see any other differences between your code and sending a command using a serial terminal. The same should apply when you echo the command directly onto the port's device file.
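For illustration, a minimal sketch of the whole exchange with go-serial might look like this; the timeout values and the read loop are assumptions, not something tested against your modem:
package main

import (
    "bytes"
    "fmt"
    "io"
    "log"

    "github.com/jacobsa/go-serial/serial"
)

func main() {
    port, err := serial.Open(serial.OpenOptions{
        PortName:              "/dev/ttyX",
        BaudRate:              115200,
        DataBits:              8,
        StopBits:              1,
        MinimumReadSize:       0,
        InterCharacterTimeout: 200, // ms of silence before a Read returns
    })
    if err != nil {
        log.Fatalf("serial.Open: %v", err)
    }
    defer port.Close()

    // Terminate the command with a carriage return.
    if _, err := port.Write([]byte("AT+CSQ\r")); err != nil {
        log.Fatalf("port.Write: %v", err)
    }

    // Accumulate the reply until the modem sends its final "OK" or "ERROR".
    var reply bytes.Buffer
    buf := make([]byte, 64)
    for attempts := 0; attempts < 50; attempts++ {
        n, err := port.Read(buf)
        reply.Write(buf[:n])
        if bytes.Contains(reply.Bytes(), []byte("OK")) || bytes.Contains(reply.Bytes(), []byte("ERROR")) {
            break
        }
        if err != nil && err != io.EOF {
            log.Printf("port.Read: %v", err)
            break
        }
    }
    fmt.Printf("%q\n", reply.String())
}
Reading in a loop matters because the echo of the command, the +CSQ line, and the final OK can arrive in separate reads.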

Golang processing images via multipart and streaming to Azure

In the process of learning golang, I'm trying to write a web app with multiple image upload functionality.
I'm using Azure Blob Storage to store images, but I am having trouble streaming the images from the multipart request to Blob Storage.
Here's the handler I've written so far:
func (imgc *ImageController) UploadInstanceImageHandler(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
    reader, err := r.MultipartReader()
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    for {
        part, partErr := reader.NextPart()
        // No more parts to process.
        if partErr == io.EOF {
            break
        }
        // If part.FileName() is empty, skip this iteration.
        if part.FileName() == "" {
            continue
        }
        // Check the file type.
        if part.Header["Content-Type"][0] != "image/jpeg" {
            fmt.Printf("\nNot image/jpeg!")
            break
        }

        var read uint64
        fileName := uuid.NewV4().String() + ".jpg"
        buffer := make([]byte, 100000000)

        // Get the size.
        for {
            cBytes, err := part.Read(buffer)
            if err == io.EOF {
                fmt.Printf("\nLast buffer read!")
                break
            }
            read = read + uint64(cBytes)
        }

        stream := bytes.NewReader(buffer[0:read])
        err = imgc.blobClient.CreateBlockBlobFromReader(imgc.imageContainer, fileName, read, stream, nil)
        if err != nil {
            fmt.Println(err)
            break
        }
    }
    w.WriteHeader(http.StatusOK)
}
In the course of my research, I've read about using r.FormFile and ParseMultipartForm, but decided to try to learn how to use MultipartReader.
I was able to upload an image to the Go backend and save the file to my machine using MultipartReader.
At the moment, I'm able to upload files to Azure but they end up being corrupted. The file sizes seem on point, but clearly something is not working.
Am I misunderstanding how to create an io.Reader for CreateBlockBlobFromReader?
Any help is much appreciated!
As @Mark said, you can use ioutil.ReadAll to read the content into a byte array; the code looks like below.
import (
    "bytes"
    "io/ioutil"
)

partBytes, _ := ioutil.ReadAll(part)
size := uint64(len(partBytes))
blob := bytes.NewReader(partBytes)
err := blobClient.CreateBlockBlobFromReader(container, fileName, size, blob, nil)
According to the godoc for CreateBlockBlobFromReader, as below.
The API rejects requests with size > 64 MiB (but this limit is not checked by the SDK). To write a larger blob, use CreateBlockBlob, PutBlock, and PutBlockList.
So if the size is larger than 64 MB, the code should be like below.
import "encoding/base64"
const BLOB_LENGTH_LIMITS uint64 = 64 * 1024 * 1024
partBytes, _ := ioutil.ReadAll(part)
size := uint64(len(partBytes))
if size <= BLOB_LENGTH_LIMITS {
blob := bytes.NewReader(partBytes)
err := blobClient.CreateBlockBlobFromReader(container, fileName, size, blob, nil)
} else {
// Create an empty blob
blobClient.CreateBlockBlob(container, fileName)
// Create a block list, and upload each block
length := size / BLOB_LENGTH_LIMITS
if length%limits != 0 {
length = length + 1
}
blocks := make([]Block, length)
for i := uint64(0); i < length; i++ {
start := i * BLOB_LENGTH_LIMITS
end := (i+1) * BLOB_LENGTH_LIMITS
if end > size {
end = size
}
chunk := partBytes[start: end]
blockId := base64.StdEncoding.EncodeToString(chunk)
block := Block{blockId, storage.BlockStatusCommitted}
blocks[i] = block
err = blobClient.PutBlock(container, fileName, blockID, chunk)
if err != nil {
.......
}
}
err = blobClient.PutBlockList(container, fileName, blocks)
if err != nil {
.......
}
}
Hope it helps.
A Reader can return both io.EOF and a valid count of final bytes read; it looks like those final bytes (cBytes) are not added to the read total. Also, be careful: if part.Read(buffer) returns an error other than io.EOF, the read loop might not exit. Consider ioutil.ReadAll instead.
CreateBlockBlobFromReader takes a Reader, and part is a Reader, so you may be able to pass the part in directly.
You may also want to consider that the Azure block size limits might be smaller than the image; see Azure blobs.
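For illustration, a corrected version of the question's inner read loop might look like this sketch; the helper name is mine, and it keeps the question's single 100 MB buffer but reads into the unused portion and counts the bytes returned together with io.EOF:
package main

import (
    "bytes"
    "errors"
    "io"
)

// readPart drains r into one pre-allocated buffer, the way the question's
// handler does, but without losing the bytes returned on the final read.
func readPart(r io.Reader) (*bytes.Reader, error) {
    var read uint64
    buffer := make([]byte, 100000000)
    for {
        cBytes, err := r.Read(buffer[read:])
        read += uint64(cBytes) // the final read can return bytes AND io.EOF
        if err == io.EOF {
            break
        }
        if err != nil {
            return nil, err
        }
        if read == uint64(len(buffer)) {
            return nil, errors.New("part larger than buffer")
        }
    }
    return bytes.NewReader(buffer[:read]), nil
}
The returned reader and the byte count can then be handed to CreateBlockBlobFromReader as before.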

go websockets eof

I'm trying to make a simple command forwarder to connect my home computer to a server I own, so that I can push commands to the server and my home PC picks them up. The commands are simple pause/resume instructions for my downloader. My design is that on the server I run a hub instance, which exposes one window for accepting commands and one window for the backend that relays those commands to my PC. I'm bridging those two "windows" with a channel; together they form the server. When a client connects and sends a message to the hub, it is streamed through the channel to the backend window and then to the real backend (on my home PC). When the backend responds to the backend window on the hub, the hub prints the result back to the client.
With this approach, only the first message passes through and works with my downloader. I have to reconnect the backend on my home PC to the hub each time I receive a message to keep things working properly. I don't think that's the proper way to use websockets, so here I am. After one successful request (when the backend finishes its work and replies with the result), it loops forever with an EOF error.
The important parts of the code are:
main executable
hub handlers
backend connector
If you put the source in your GOPATH (I'm developing it against the tip version of Go to support modern websockets), compile it with go build gosab/cmd, then run it with:
./cmd -mode="hub" hub
./cmd -mode="backend" --address="localhost:8082" backend
To pass messages to the hub, use this javascript:
var s = new WebSocket("ws://localhost:8082")
s.send("1 5")
So how do I handle it? Are channels a good way to communicate between two different requests?
I'm surprised you haven't received an answer to this.
What you need to do is something like the code below. When you receive an incoming websocket connection, a new goroutine is spawned for that connection. If you let that goroutine end, it'll disconnect the websocket client.
I'm making an assumption that you're not necessarily going to be running the client and server on the same computer. If you always are, then it'd be better to do the communication internally via channels or such instead of using websockets or a network port. I only mention this because I'm not completely sure what you're using this for. I just hope I answered the right part of your question.
package main

import (
    "code.google.com/p/go.net/websocket"
    "flag"
    "fmt"
    "net/http"
    "os"
    "time"
)

type Message struct {
    RequestID      int
    Command        string
    SomeOtherThing string
    Success        bool
}

var mode *string = flag.String("mode", "<nil>", "Mode: server or client")
var address *string = flag.String("address", "localhost:8080", "Bind address:port")

func main() {
    flag.Parse()
    switch *mode {
    case "server":
        RunServer()
    case "client":
        RunClient()
    default:
        flag.Usage()
    }
}

func RunServer() {
    http.Handle("/", http.FileServer(http.Dir("www")))
    http.Handle("/server", websocket.Handler(WSHandler))

    fmt.Println("Starting Server")
    err := http.ListenAndServe(*address, nil)
    if err != nil {
        fmt.Printf("HTTP failed: %s\n", err.Error())
        os.Exit(1)
    }
}

func WSHandler(ws *websocket.Conn) {
    defer ws.Close()
    fmt.Println("Client Connected")
    for {
        var message Message
        err := websocket.JSON.Receive(ws, &message)
        if err != nil {
            fmt.Printf("Error: %s\n", err.Error())
            return
        }
        fmt.Println(message)

        // do something useful here...

        response := new(Message)
        response.RequestID = message.RequestID
        response.Success = true
        response.SomeOtherThing = "The hot dog left the castle as requested."
        err = websocket.JSON.Send(ws, response)
        if err != nil {
            fmt.Printf("Send failed: %s\n", err.Error())
            os.Exit(1)
        }
    }
}

func RunClient() {
    fmt.Println("Starting Client")
    ws, err := websocket.Dial(fmt.Sprintf("ws://%s/server", *address), "", fmt.Sprintf("http://%s/", *address))
    if err != nil {
        fmt.Printf("Dial failed: %s\n", err.Error())
        os.Exit(1)
    }

    incomingMessages := make(chan Message)
    go readClientMessages(ws, incomingMessages)

    i := 0
    for {
        select {
        case <-time.After(time.Duration(2e9)):
            i++
            response := new(Message)
            response.RequestID = i
            response.Command = "Eject the hot dog."
            err = websocket.JSON.Send(ws, response)
            if err != nil {
                fmt.Printf("Send failed: %s\n", err.Error())
                os.Exit(1)
            }
        case message := <-incomingMessages:
            fmt.Println(message)
        }
    }
}

func readClientMessages(ws *websocket.Conn, incomingMessages chan Message) {
    for {
        var message Message
        err := websocket.JSON.Receive(ws, &message)
        if err != nil {
            fmt.Printf("Error: %s\n", err.Error())
            return
        }
        incomingMessages <- message
    }
}
