Hyperledger fabric metrics - hyperledger-fabric

Just wanted to confirm whether Fabric metrics are working for anyone. I'm using the 1.1.0 release.
metrics:
    # enable or disable metrics server
    enabled: true
    # when enable metrics server, must specific metrics reporter type
    # currently supported type: "statsd","prom"
    reporter: statsd
    # determines frequency of report metrics(unit: second)
    interval: 1s
    statsdReporter:
        # statsd server address to connect
        address: 172.18.19.29:8125
        # determines frequency of push metrics to statsd server(unit: second)
        flushInterval: 2s
        # max size bytes for each push metrics request
        # intranet recommend 1432 and internet recommend 512
        flushBytes: 1432
    promReporter:
        # prometheus http server listen address for pull metrics
        listenAddress: 0.0.0.0:8080
This is my core.yaml configuration. I tried both Prometheus and StatsD but can't see any metrics with either method. I can see the code in the peer that exports the metrics.
Can anyone please help with the configuration?

It's working in my environment with an exporter I added myself.
You need not only to change the configuration in core.yaml, but also to implement your own exporter using the metrics package.
Making your exporter:
1. Initialize viper and load core.yaml to enable the metrics feature on the peer node.
2. Configure the metrics package through viper using the values from core.yaml.
3. Set up your metrics label. (In the sample code below, the label hyperledger_fabric_peer_blocknum is created.)
4. Call the Start() function. It blocks until exporting finishes, so it needs to run in its own goroutine.
5. Update the exported metrics periodically from another goroutine. (In the sample code below, the number of blocks is used as the metric.)
6. In the main goroutine, block on a channel so the process keeps running until it is interrupted.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"time"

	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/metrics"
	"github.com/hyperledger/fabric/peer/common"
)

var logger = flogging.MustGetLogger("fabexporter")
var block_num metrics.Gauge

func main() {
	err := common.InitConfig("core")
	if err != nil {
		logger.Error(err)
	}

	opts := metrics.NewOpts()
	if err := metrics.Init(opts); err != nil {
		logger.Error(err)
		return
	}

	s := metrics.RootScope.SubScope("peer")
	block_num = s.Gauge("blocknum")

	go startMonitor()
	go func() {
		metrics.RootScope.Start()
	}()

	server := make(chan int)
	<-server
}

type Retdata struct {
	Height            int    `json:"height"`
	CurrentBlockHash  string `json:"currentBlockHash"`
	PreviousBlockHash string `json:"previousBlockHash"`
}

func startMonitor() {
	var ret Retdata
	for {
		time.Sleep(5 * time.Second)
		cmd := exec.Command("peer", "channel", "getinfo", "-c", "mychannel")
		cmd.Env = os.Environ()
		cmd.Env = append(cmd.Env, "CORE_LOGGING_LEVEL=CRITICAL")
		out, err := cmd.Output()
		if err != nil {
			logger.Error(err)
			continue
		} else if len(out) == 0 {
			continue
		}
		jsond := ([]byte)(out[17:]) // To trim "Blockchain info: "
		if err := json.Unmarshal(jsond, &ret); err != nil {
			logger.Error(err)
			break
		}
		logger.Info(fmt.Sprintf("num of block : %d\n", ret.Height))
		block_num.Update(float64(ret.Height))
	}
}
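A rough way to try it (this assumes the core.yaml shown above is discoverable by viper, for example by pointing FABRIC_CFG_PATH at its directory, and that the peer binary is on PATH so the getinfo polling works; adjust the paths to your own setup):

go build -o fabexporter .
FABRIC_CFG_PATH=/path/to/config/dir ./fabexporter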
I also wrote an article about it; I hope it helps.
https://medium.com/@nekiaiken/hyperledger-fabric-meets-prometheus-649c452ba26a

Related

How to make work concurrent while sending data on a stream in golang?

I have a golang grpc server which has a streaming endpoint. Earlier I was doing all the work sequentially and sending on the stream, but then I realized I can make the work concurrent and then send on the stream. From the grpc-go docs I understood that I can make the work concurrent, but I can't send on the stream concurrently, so I ended up with the code below, which does the job.
Below is the code from my streaming endpoint, which sends data back to the client in a streaming fashion. It does all the work concurrently.
// get "allCids" from lot of files and load in memory.
allCids := .....
var data = allCids.([]int64)
out := make(chan *custPbV1.CustomerResponse, len(data))
wg := &sync.WaitGroup{}
wg.Add(len(data))
go func() {
wg.Wait()
close(out)
}()
for _, cid := range data {
go func (id int64) {
defer wg.Done()
pd := repo.GetCustomerData(strconv.FormatInt(cid, 10))
if !pd.IsCorrect {
return
}
resources := us.helperCom.GenerateResourceString(pd)
val, err := us.GenerateInfo(clientId, resources, cfg)
if err != nil {
return
}
out <- val
}(cid)
}
for val := range out {
if err := stream.Send(val); err != nil {
log.Printf("send error %v", err)
}
}
Now the problem I have is that the data slice can hold around a million entries, and I don't want to spawn a million goroutines to do the job. How do I handle that scenario here? If I use 100 instead of len(data), will that work for me, or do I need to split data into sub-slices of 100 as well? I'm just confused about the best way to deal with this problem.
I recently started with golang, so pardon me if there are any mistakes in the code above while making it concurrent.
Please check this pseudo code
func main() {
	works := make(chan int64, 100)
	errChan := make(chan error, 100)
	out := make(chan *custPbV1.CustomerResponse, 100)

	// spawn a fixed number of workers
	var workerWg sync.WaitGroup
	for i := 0; i < 100; i++ {
		workerWg.Add(1)
		go worker(&workerWg, works, errChan, out)
	}

	// feed the input
	go func() {
		for _, cid := range data {
			// this blocks if all the workers are busy and there is no space left in the channel.
			works <- cid
		}
		close(works)
	}()

	var analyzeResults sync.WaitGroup
	analyzeResults.Add(2)

	// process errors
	go func() {
		for err := range errChan {
			log.Printf("error %v", err)
		}
		analyzeResults.Done()
	}()

	// process output
	go func() {
		for val := range out {
			if err := stream.Send(val); err != nil {
				log.Printf("send error %v", err)
			}
		}
		analyzeResults.Done()
	}()

	workerWg.Wait()
	close(out)
	close(errChan)
	analyzeResults.Wait()
}

func worker(job *sync.WaitGroup, works chan int64, errChan chan error, out chan *custPbV1.CustomerResponse) {
	defer job.Done()
	// An idle worker takes the next job from this channel.
	for cid := range works {
		pd := repo.GetCustomerData(strconv.FormatInt(cid, 10))
		if !pd.IsCorrect {
			errChan <- fmt.Errorf("pd for cid %d is incorrect", cid)
			// We cannot return here, or the pool would shrink. If every worker did
			// this, there might be no workers left to do the job.
			continue
		}
		resources := us.helperCom.GenerateResourceString(pd)
		val, err := us.GenerateInfo(clientId, resources, cfg)
		if err != nil {
			errChan <- fmt.Errorf("got error: %v", err)
			continue
		}
		out <- val
	}
}
Explanation:
This is a worker pool implementation: we spawn a fixed number of goroutines (100 workers here) to do the same job (GetCustomerData() and GenerateInfo() here) but with different input data (cid here). 100 workers does not mean the work runs in parallel, only concurrently (that depends on GOMAXPROCS). If one worker is waiting on an I/O result (basically any blocking operation), that goroutine is context-switched out and another worker goroutine gets a chance to execute. Increasing the number of workers may not improve performance much, though, and can lead to contention on the channel as more workers wait for jobs on it.
The benefit over splitting the one million items into sub-slices is this: say we have 1000 jobs and 100 workers, and each worker is statically assigned jobs 1-10, 11-20, and so on. If the first 10 jobs take more time than the others, the first worker is overloaded while the other workers finish their tasks and sit idle even though there are pending jobs. With a shared queue, an idle worker simply takes the next job, so no worker ends up more overloaded than the others. A bounded-concurrency variant using a buffered-channel semaphore is sketched below.
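If you would rather not write an explicit worker function, another common way to cap concurrency is a buffered channel used as a semaphore; it directly answers the "use 100 instead of len(data)" idea. The sketch below is self-contained: process() is a hypothetical stand-in for the GetCustomerData()/GenerateInfo() calls from the question, and the println stands in for stream.Send().

package main

import (
	"fmt"
	"sync"
)

// process is a hypothetical stand-in for the real per-item work.
func process(id int64) string {
	return fmt.Sprintf("result for %d", id)
}

func main() {
	data := []int64{1, 2, 3, 4, 5} // imagine ~1 million entries here

	out := make(chan string)
	sem := make(chan struct{}, 100) // at most 100 goroutines in flight
	var wg sync.WaitGroup

	go func() {
		for _, id := range data {
			sem <- struct{}{} // blocks once 100 goroutines are already running
			wg.Add(1)
			go func(id int64) {
				defer wg.Done()
				defer func() { <-sem }() // free the slot for the next item
				out <- process(id)
			}(id)
		}
		wg.Wait()
		close(out)
	}()

	// Single consumer: in the real code this loop would call stream.Send(val).
	for val := range out {
		fmt.Println(val)
	}
}

The semaphore keeps at most 100 goroutines in flight while still letting a freed slot pick up the next item, so it has the same load-balancing property as the worker pool above.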

Linux Allows Only 99 TCP Connections with a single remote IP

When I try to create more than 99 TCP connections in less than a millisecond from my local computer to a TCP listener running on a DigitalOcean droplet with Ubuntu, only 99 of them work and the rest fail with connectex: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond. Sending the same number of connection requests to multiple ports seems to work fine.
I have already tried increasing tcp_max_syn_backlog and somaxconn, but it didn't help. I tried tracking TcpExtListenOverflows and TcpExtListenDrops, but they don't seem to be increasing either. I did the same thing against another server to make sure the problem is not my local computer. I also contacted DigitalOcean support and they said they are not aware of any hard limits imposed by their infrastructure.
Can anyone give me any pointers on creating more than 99 TCP connections in less than a millisecond to a single port on an Ubuntu server from a single remote IP?
Scripts I'm using:
Listener:
package main

import (
	"log"
	"net"
)

const port = "1919"

func main() {
	listener, err := net.Listen("tcp", ":"+port)
	if err != nil {
		log.Fatal(err)
	}
	for {
		_, err := listener.Accept()
		if err != nil {
			log.Fatal(err)
		}
	}
}
Dialer:
package main

import (
	"fmt"
	"net"
	"sync"
)

const ConnectionCount = 250
const raddr = "<IP_ADDRESS_IS_INSERTED_HERE>:1919"

func main() {
	wg := &sync.WaitGroup{}
	mu := &sync.Mutex{}
	var success, fail int
	for i := 0; i < ConnectionCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, err := net.Dial("tcp", raddr)
			mu.Lock()
			defer mu.Unlock()
			if err == nil {
				success++
			} else {
				fail++
			}
			fmt.Print("\033[2K\rSuccess: ", success, ", Fail: ", fail)
		}()
	}
	wg.Wait()
}
The output is always 99 Success and 151 Fail. I tried the same thing with direct HTTP requests as well; still, only 99 of them work if they are sent at the same time.

Sending a GUI/TUI Over a Socket Connection

Recently I have been trying to create a program in golang which runs on a server and accepts telnet connections. I would then like to open a TUI (text user interface), such as a curses menu (in the case of golang, something like termui, gocui, etc.), over that telnet connection. My question is: how exactly could I do this, and would it even be possible? I have played around with starting TUIs when a connection is accepted, but they just open on the server side, not on the telnet client side. From what I can tell, there is no easy way to just send a TUI over a telnet connection, or any other socket I/O connection for that matter.
Any help is appreciated in trying to figure this out. Thanks! :D
First, you should note that the example I give is completely insecure (don't expose it over the Internet!) and also doesn't provide for things like signal handling or resizing of the terminal (you may want to consider using SSH instead).
But to answer your question, here is an example of running a TCP server and connecting remote clients to a termui program running in a local PTY (uses both the https://github.com/gizak/termui and https://github.com/kr/pty packages):
package main

import (
	"flag"
	"io"
	"log"
	"net"
	"os"
	"os/exec"

	ui "github.com/gizak/termui"
	"github.com/kr/pty"
)

var termuiFlag = flag.Bool("termui", false, "run a termui example")

func main() {
	flag.Parse()

	var err error
	if *termuiFlag {
		err = runTermui()
	} else {
		err = runServer()
	}
	if err != nil {
		log.Fatal(err)
	}
}

// runTermui runs the termui "Hello World" example.
func runTermui() error {
	if err := ui.Init(); err != nil {
		return err
	}
	defer ui.Close()
	p := ui.NewParagraph("Hello World!")
	p.Width = 25
	p.Height = 5
	ui.Render(p)
	for e := range ui.PollEvents() {
		if e.Type == ui.KeyboardEvent {
			break
		}
	}
	return nil
}

// runServer listens for TCP connections on a random port and connects
// remote clients to a local PTY running the termui example.
func runServer() error {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	defer ln.Close()
	log.Printf("Listening for requests on %v", ln.Addr())
	for {
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		log.Printf("Connecting remote client %v to termui", conn.RemoteAddr())
		go connectTermui(conn)
	}
}

// connectTermui connects a client connection to a termui process running in a
// PTY.
func connectTermui(conn net.Conn) {
	defer func() {
		log.Printf("Closing remote client %v", conn.RemoteAddr())
		conn.Close()
	}()

	t, err := pty.StartWithSize(
		exec.Command(os.Args[0], "--termui"),
		&pty.Winsize{Cols: 80, Rows: 24},
	)
	if err != nil {
		log.Printf("Error starting termui: %v", err)
		return
	}
	defer t.Close()

	go io.Copy(t, conn)
	io.Copy(conn, t)
}
Example usage is to run this program in one window and connect to it using nc in another:
$ go run server.go
2019/01/18 01:39:37 Listening for requests on 127.0.0.1:56192
$ nc 127.0.0.1 56192
You should see the "Hello world" box (hit enter to disconnect).

Read media keys from Go program

I am writing a cross-platform distributed media player for use on my own network.
The current version has three/four parts:
A NAS holding the audio files.
A metadata server holding information about the files.
An HTML/JS client that allows manipulation of the metadata server and queues media for:
A player daemon.
My problem lies with part 4. The player has no UI, nor does it need one. It will be controlled via network commands from the client and by listening for the media keys on its current host.
The player daemon needs to work on both Windows and Linux, but I can't seem to figure out a way (any way) to read these keys on either OS. Most of the ways I know of to read the keyboard will not read these keys at all.
With the help of several commenters, I now have it all figured out.
The Linux version is as follows:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

// parses through the /proc/bus/input/devices file for keyboard devices.
// Copied from `github.com/gearmover/keylogger` with trivial modification.
func dumpDevices() ([]string, error) {
	cmd := exec.Command("/bin/sh", "-c", "/bin/grep -E 'Handlers|EV=' /proc/bus/input/devices | /bin/grep -B1 'EV=120013' | /bin/grep -Eo 'event[0-9]+'")
	output, err := cmd.Output()
	if err != nil {
		return nil, err
	}

	buf := bytes.NewBuffer(output)

	var devices []string
	for line, err := buf.ReadString('\n'); err == nil; {
		devices = append(devices, "/dev/input/"+line[:len(line)-1])
		line, err = buf.ReadString('\n')
	}

	return devices, nil
}

// Using MS names, just because I don't feel like looking up the Linux versions.
var keys = map[uint16]string{
	0xa3: "VK_MEDIA_NEXT_TRACK",
	0xa5: "VK_MEDIA_PREV_TRACK",
	0xa6: "VK_MEDIA_STOP",
	0xa4: "VK_MEDIA_PLAY_PAUSE",
}

// Most of the code here comes from `github.com/gearmover/keylogger`.
func main() {
	// drop privileges when executing other programs
	syscall.Setgid(65534)
	syscall.Setuid(65534)

	// dump our keyboard devices from /proc/bus/input/devices
	devices, err := dumpDevices()
	if err != nil {
		fmt.Println(err)
	}
	if len(devices) == 0 {
		fmt.Println("No input devices found")
		return
	}

	// bring back our root privs
	syscall.Setgid(0)
	syscall.Setuid(0)

	// Open the first keyboard device.
	input, err := os.OpenFile(devices[0], os.O_RDONLY, 0600)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer input.Close()

	// Log media keys
	var buffer = make([]byte, 24)
	for {
		// read the input events as they come in
		n, err := input.Read(buffer)
		if err != nil {
			return
		}
		if n != 24 {
			fmt.Println("Weird Input Event Size: ", n)
			continue
		}

		// parse the input event according to the <linux/input.h> header struct
		binary.LittleEndian.Uint64(buffer[0:8]) // Time stamp stuff I could care less about
		binary.LittleEndian.Uint64(buffer[8:16])
		etype := binary.LittleEndian.Uint16(buffer[16:18])        // Event Type. Always 1 for keyboard events
		code := binary.LittleEndian.Uint16(buffer[18:20])         // Key scan code
		value := int32(binary.LittleEndian.Uint32(buffer[20:24])) // press(1), release(0), or repeat(2)

		if etype == 1 && value == 1 && keys[code] != "" {
			// In a real application I would send a message here.
			fmt.Println(keys[code])
		}
	}
}
And the Windows version:
package main

import (
	"fmt"
	"syscall"
	"time"
)

var user32 = syscall.NewLazyDLL("user32.dll")
var procGAKS = user32.NewProc("GetAsyncKeyState")

// Key codes from MSDN
var keys = [4]uint{
	0xb0, // VK_MEDIA_NEXT_TRACK
	0xb1, // VK_MEDIA_PREV_TRACK
	0xb2, // VK_MEDIA_STOP
	0xb3, // VK_MEDIA_PLAY_PAUSE
}

var names = [4]string{
	"VK_MEDIA_NEXT_TRACK",
	"VK_MEDIA_PREV_TRACK",
	"VK_MEDIA_STOP",
	"VK_MEDIA_PLAY_PAUSE",
}

func main() {
	fmt.Println("Running...")

	// Since I don't want to trigger dozens of times for each key I need to track state.
	// I could check the bits of GAKS' return value, but that is not reliable.
	down := [4]bool{false, false, false, false}

	for {
		time.Sleep(1 * time.Millisecond)
		for i, key := range keys {
			// val is not a simple boolean!
			// 0 means "not pressed" (also certain errors)
			// If LSB is set the key was just pressed (this may not be reliable)
			// If MSB is set the key is currently down.
			val, _, _ := procGAKS.Call(uintptr(key))

			// Turn a press into a transition and track key state.
			goingdown := false
			if int(val) != 0 && !down[i] {
				goingdown = true
				down[i] = true
			}
			if int(val) == 0 && down[i] {
				down[i] = false
			}
			if goingdown {
				// In a real application I would send a message here.
				fmt.Println(names[i])
			}
		}
	}
}
The only "issue" is that the Linux version must be run as root. For me this is not a problem. If running as root is a problem I think there is a way that involves X11...

go websockets eof

I'm trying to make a simple command forwarder to connect my home computer to a server I own, so that I can push commands to the server and my home PC picks them up. The commands are simple pause/resume for my downloader. My design is that on the server I run a hub instance, which exposes one "window" for receiving commands and another "window" for the backend to pull those commands down to my PC. The two "windows" are connected with a channel and served by the same server. When a client connects and sends a message to the hub, the message is streamed through a channel to the backend window and then to the real backend (on my home PC). When the backend responds to the backend window on the hub, the hub prints the result back to the client.
With this approach, only the first message passes through and works with my downloader. I have to reconnect the backend on my home PC to the hub each time I receive a message to get this working properly. I don't think that's the proper way to use websockets, so here I am. After one successful request (when the backend finishes its work and replies with the result), it loops forever with an EOF error.
The important parts of the code are:
main executable
hub handlers
backend connector
If you put the source in your GOPATH (I'm developing it against the tip version of Go to support modern websockets), compile it with:
go build gosab/cmd
and run it with:
./cmd -mode="hub" hub
./cmd -mode="backend" --address="localhost:8082" backend
To pass messages to the hub, use this javascript:
var s = new WebSocket("ws://localhost:8082")
s.send("1 5")
So how do I handle it? Are channels a good way to communicate between two different requests?
I'm surprised you haven't received an answer to this.
What you need to do is something like the code below. When you receive an incoming websocket connection, a new goroutine is spawned for that connection. If you let that goroutine end, it'll disconnect the websocket client.
I'm making an assumption that you're not necessarily going to be running the client and server on the same computer. If you always are, then it'd be better to do the communication internally via channels or such instead of using websockets or a network port. I only mention this because I'm not completely sure what you're using this for. I just hope I answered the right part of your question.
package main

import (
	"code.google.com/p/go.net/websocket"
	"flag"
	"fmt"
	"net/http"
	"os"
	"time"
)

type Message struct {
	RequestID      int
	Command        string
	SomeOtherThing string
	Success        bool
}

var mode *string = flag.String("mode", "<nil>", "Mode: server or client")
var address *string = flag.String("address", "localhost:8080", "Bind address:port")

func main() {
	flag.Parse()
	switch *mode {
	case "server":
		RunServer()
	case "client":
		RunClient()
	default:
		flag.Usage()
	}
}

func RunServer() {
	http.Handle("/", http.FileServer(http.Dir("www")))
	http.Handle("/server", websocket.Handler(WSHandler))

	fmt.Println("Starting Server")
	err := http.ListenAndServe(*address, nil)
	if err != nil {
		fmt.Printf("HTTP failed: %s\n", err.Error())
		os.Exit(1)
	}
}

func WSHandler(ws *websocket.Conn) {
	defer ws.Close()
	fmt.Println("Client Connected")
	for {
		var message Message
		err := websocket.JSON.Receive(ws, &message)
		if err != nil {
			fmt.Printf("Error: %s\n", err.Error())
			return
		}
		fmt.Println(message)

		// do something useful here...

		response := new(Message)
		response.RequestID = message.RequestID
		response.Success = true
		response.SomeOtherThing = "The hot dog left the castle as requested."
		err = websocket.JSON.Send(ws, response)
		if err != nil {
			fmt.Printf("Send failed: %s\n", err.Error())
			os.Exit(1)
		}
	}
}

func RunClient() {
	fmt.Println("Starting Client")
	ws, err := websocket.Dial(fmt.Sprintf("ws://%s/server", *address), "", fmt.Sprintf("http://%s/", *address))
	if err != nil {
		fmt.Printf("Dial failed: %s\n", err.Error())
		os.Exit(1)
	}

	incomingMessages := make(chan Message)
	go readClientMessages(ws, incomingMessages)

	i := 0
	for {
		select {
		case <-time.After(time.Duration(2e9)):
			i++
			response := new(Message)
			response.RequestID = i
			response.Command = "Eject the hot dog."
			err = websocket.JSON.Send(ws, response)
			if err != nil {
				fmt.Printf("Send failed: %s\n", err.Error())
				os.Exit(1)
			}
		case message := <-incomingMessages:
			fmt.Println(message)
		}
	}
}

func readClientMessages(ws *websocket.Conn, incomingMessages chan Message) {
	for {
		var message Message
		err := websocket.JSON.Receive(ws, &message)
		if err != nil {
			fmt.Printf("Error: %s\n", err.Error())
			return
		}
		incomingMessages <- message
	}
}
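For reference, one way to run the example above (assuming the file is saved as main.go; the "server"/"client" modes and the flag names come straight from the code, and -address defaults to localhost:8080):

go run main.go -mode=server
go run main.go -mode=client

Start the server in one terminal, then the client in another; the client sends a message every two seconds and prints the server's responses.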
