Get client identity fabric-contract-api-go - hyperledger-fabric

There's a method in fabric-contract-api-go for getting transaction initiator's Identity
func (ctx *TransactionContext) GetClientIdentity() cid.ClientIdentity
How would we use it to return the client ID when, for example, Create is invoked in this contract: https://github.com/hyperledger/fabric-contract-api-go/blob/master/tutorials/getting-started.md
// ...
// ...
// Create adds a new key with value to the world state.
// It fails if the key already exists or if the world state cannot be reached.
func (sc *SimpleContract) Create(ctx contractapi.TransactionContextInterface, key string, value string) error {
	existing, err := ctx.GetStub().GetState(key)
	if err != nil {
		// BUG FIX: the original discarded err; wrap it so callers can see the cause.
		return fmt.Errorf("unable to interact with world state: %w", err)
	}
	if existing != nil {
		return fmt.Errorf("Cannot create world state pair with key %s. Already exists", key)
	}
	if err := ctx.GetStub().PutState(key, []byte(value)); err != nil {
		return fmt.Errorf("unable to interact with world state: %w", err)
	}
	return nil
}
// ...
// ...

GetClientIdentity returns you an interface of type ClientIdentity defined here
https://github.com/hyperledger/fabric-chaincode-go/blob/master/pkg/cid/interfaces.go
This shows you the functions you can then invoke to retrieve information about the transaction submitter (ie the client identity)

Related

Hyperledger fabric: Querying implicit data collection

I am seeing this when querying data from implicit private data collection.
Please see code snippet below.
When I query individual key (using QueryBidPrivate/GetPrivateData), I get corresponding data.
But if I query the complete collection (using GetPrivateDataByRange(collection, "", "")), I get nothing from the Iterator.
peer chaincode query -C mychannel -n govtcontract -c '{"function":"QueryBidPrivate","Args":["100", "1035"]}'
{"bidamt":100,"biddate":"2022-05-04","contractid":"1035","salt":"4567ab4567","vendorid":"100"}
peer chaincode query -C mychannel -n govtcontract -c '{"function":"ListAllBids","Args":[]}'
No output
Is there anything I am missing here?
// ListAllBids returns all Bids details from private state.
//
// NOTE(review): entries written under composite keys are NOT returned by
// GetPrivateDataByRange(collection, "", ""); if bids are stored under
// composite keys (as QueryBidPrivate suggests), use
// GetPrivateDataByPartialCompositeKey instead.
func (s *SmartContract) ListAllBids(ctx contractapi.TransactionContextInterface) ([]VendorBid, error) {
	// Get client org id and verify it matches peer org id.
	// In this scenario, client is only authorized to read/write private data from its own peer.
	clientOrgID, err := getClientOrgID(ctx, true)
	if err != nil {
		return nil, fmt.Errorf("failed to get verified OrgID: %s", err.Error())
	}
	// Implicit private data collections are named "_implicit_org_<MSPID>".
	collection := "_implicit_org_" + clientOrgID
	BidIterator, err := ctx.GetStub().GetPrivateDataByRange(collection, "", "")
	if err != nil {
		logger.Infof("ListAllBids error: %s", err.Error())
		return nil, fmt.Errorf("failed to read bid list : %s", err.Error())
	}
	if BidIterator == nil {
		logger.Infof("ListAllBids : null iterator ")
		return nil, fmt.Errorf("bid private details does not exist ")
	}
	defer BidIterator.Close()
	logger.Infof("ListAllBids in govtcontract: no error")
	var allbids []VendorBid
	// BUG FIX: the error returned by GetMSPID was previously ignored.
	myMSPID, err := ctx.GetClientIdentity().GetMSPID()
	if err != nil {
		return nil, fmt.Errorf("failed to get client MSPID: %s", err.Error())
	}
	logger.Infof("myMSPID: %s", myMSPID)
	for BidIterator.HasNext() {
		logger.Infof("Iterator has element: ")
		entrybid, err := BidIterator.Next()
		if err != nil {
			return nil, err
		}
		var bidvar VendorBid
		err = json.Unmarshal(entrybid.Value, &bidvar)
		if err != nil {
			return nil, err
		}
		allbids = append(allbids, bidvar)
		logger.Infof("Iterator element: %s", entrybid.Value)
	}
	return allbids, nil
}
=========================================
// QueryBidPrivate returns the Bid details from owner's private data collection,
// looked up under the composite key (vendorId, contractId).
func (s *SmartContract) QueryBidPrivate(ctx contractapi.TransactionContextInterface, vendorId string, contractId string) (string, error) {
	// Get client org id and verify it matches peer org id.
	// In this scenario, client is only authorized to read/write private data from its own peer.
	clientOrgID, err := getClientOrgID(ctx, true)
	if err != nil {
		return "", fmt.Errorf("failed to get verified OrgID: %s", err.Error())
	}
	collection := "_implicit_org_" + clientOrgID
	// BUG FIX: the error from CreateCompositeKey was previously discarded
	// (immediately overwritten by the next assignment).
	bidconkey, err := ctx.GetStub().CreateCompositeKey(vendorId, []string{contractId})
	if err != nil {
		return "", fmt.Errorf("failed to create composite key: %s", err.Error())
	}
	bidDetails, err := ctx.GetStub().GetPrivateData(collection, bidconkey)
	if err != nil {
		return "", fmt.Errorf("failed to read bid private properties from client org's collection: %s", err.Error())
	}
	if bidDetails == nil {
		return "", fmt.Errorf("bid private details does not exist in client org's collection: %s", contractId)
	}
	return string(bidDetails), nil
}
GetPrivateDataByPartialCompositeKey() is the function for querying a range of private data that was stored using a composite key. GetPrivateDataByRange() won't retrieve the data stored with a composite key. I think in the above code snippet you have to replace the function call GetPrivateDataByRange(collection, "", "") with GetPrivateDataByPartialCompositeKey(collection, vendorId, []string{})
Sample usage can be found here.
I faced the same error in the smart contract. The issue here is because of storing data on the composite key.
Instead of below code :
// Fragment repeated from ListAllBids above: drain the private-data iterator
// and unmarshal each entry's value into a VendorBid.
for BidIterator.HasNext() {
logger.Infof("Iterator has element: ")
entrybid, err := BidIterator.Next()
if err != nil {
return nil, err
}
var bidvar VendorBid
err = json.Unmarshal(entrybid.Value, &bidvar)
if err != nil {
return nil, err
}
allbids = append(allbids, bidvar)
logger.Infof("Iterator element: %s", entrybid.Value)
}
Use the below function
func constructQueryResponseFromIterator(resultsIterator shim.StateQueryIteratorInterface)
(*bytes.Buffer, error)
{
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
bArrayMemberAlreadyWritten := false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return nil, err
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{")
//buffer.WriteString("{\"Key\":")
//buffer.WriteString("\"")
//buffer.WriteString(queryResponse.Key)
//buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
return &buffer, nil
}

Load data from reading files during startup and then process new files and clear old state from the map

I am working on a project where during startup I need to read certain files and store it in memory in a map and then periodically look for new files if there are any and then replace whatever I had in memory in the map earlier during startup with this new data. Basically every time if there is a new file which is a full state then I want to refresh my in memory map objects to this new one instead of appending to it.
Below method loadAtStartupAndProcessNewChanges is called during server startup which reads the file and store data in memory. Also it starts a go-routine detectNewFiles which periodically checks if there are any new files and store it on a deltaChan channel which is later accessed by another go-routine processNewFiles to read that new file again and store data in the same map. If there is any error then we store it on err channel. loadFiles is the function which will read files in memory and store it in map.
// customerConfig holds the state for loading customer files and serving the
// parsed results from an in-memory concurrent map.
type customerConfig struct {
deltaChan chan string // paths of newly detected files, consumed by processNewFiles
err chan error // errors reported by the background goroutines
wg sync.WaitGroup // tracks the background goroutines
data *cmap.ConcurrentMap // "<ProductID>:<Iaz>" -> hasInventory, built by read()
}
// loadAtStartupAndProcessNewChanges is called during server startup. It loads
// the current files synchronously, then starts background goroutines that
// detect new files and process them.
// NOTE(review): wg.Add(1) is called for each goroutine here; confirm that
// detectNewFiles and processNewFiles call wg.Done, otherwise a wg.Wait
// elsewhere would block forever.
func (r *customerConfig) loadAtStartupAndProcessNewChanges() error {
path, err := r.GetPath("...", "....")
if err != nil {
return err
}
r.wg.Add(1)
go r.detectNewFiles(path)
// Initial synchronous load so the map is populated before startup completes.
err = r.loadFiles(4, path)
if err != nil {
return err
}
r.wg.Add(1)
go r.processNewFiles()
return nil
}
This method basically figures out if there are any new files that needs to be consumed and if there is any then it will put it on the deltaChan channel which will be later on consumed by processNewFiles go-routine and read the file in memory. If there is any error then it will add error to the error channel.
// detectNewFiles periodically scans rootPath for new files and pushes their
// paths onto r.deltaChan; errors go to r.err. (Body omitted in the original post.)
func (r *customerConfig) detectNewFiles(rootPath string) {
}
This will read all s3 files and store it in memory and return error. In this method I clear previous state of my map so that it can have fresh state from new files. This method is called during server startup and also called whenever we need to process new files from processNewFiles go-routine.
// loadFiles reads all files under path with the given number of worker
// goroutines and stores the parsed rows in r.data. The map is cleared first
// so it only reflects the newest set of files.
//
// NOTE(review): clearing r.data before reading means a failed load leaves the
// map empty — this is exactly the bug described in the question; see the
// answers below for designs that keep the previous state on failure.
func (r *customerConfig) loadFiles(workers int, path string) error {
	var err error
	...
	var files []string
	files = .....
	// reset the map so that it can have fresh state from new files.
	r.data.Clear()
	g, ctx := errgroup.WithContext(context.Background())
	// sem bounds the number of concurrently running readers.
	sem := make(chan struct{}, workers)
scheduling:
	for _, file := range files {
		select {
		case <-ctx.Done():
			// BUG FIX: a bare "break" only exits the select statement, not
			// the loop; a labeled break is required to stop scheduling once
			// a worker has failed and the group context is cancelled.
			break scheduling
		case sem <- struct{}{}:
		}
		file := file
		g.Go(func() error {
			defer func() { <-sem }()
			// BUG FIX: read is declared as read(file, bucket); the original
			// passed a stray `spn` first argument.
			return r.read(file, bucket)
		})
	}
	if err := g.Wait(); err != nil {
		return err
	}
	return nil
}
This method read the files and add in the data concurrent map.
// read downloads one parquet file from S3, decodes its rows into CompModel
// values, and stores a "<ProductID>:<Iaz>" -> hasInventory entry in r.data
// for each row.
// NOTE(review): `spn` is used below but is neither a parameter nor declared
// here — presumably a logger/span lost in editing; confirm the real signature.
func (r *customerConfig) read(file string, bucket string) error {
// read file and store it in "data" concurrent map
// and if there is any error then return the error
var err error
fr, err := pars3.NewS3FileReader(context.Background(), bucket, file, r.s3Client.GetSession().Config)
if err != nil {
return errs.Wrap(err)
}
defer xio.CloseIgnoringErrors(fr)
pr, err := reader.NewParquetReader(fr, nil, 8)
if err != nil {
return errs.Wrap(err)
}
// An empty file is skipped silently; it is not treated as an error.
if pr.GetNumRows() == 0 {
spn.Infof("Skipping %s due to 0 rows", file)
return nil
}
for {
rows, err := pr.ReadByNumber(r.cfg.RowsToRead)
if err != nil {
return errs.Wrap(err)
}
if len(rows) <= 0 {
break
}
// Round-trip through JSON to convert the reader's row type into CompModel.
byteSlice, err := json.Marshal(rows)
if err != nil {
return errs.Wrap(err)
}
var invMods []CompModel
err = json.Unmarshal(byteSlice, &invMods)
if err != nil {
return errs.Wrap(err)
}
for i := range invMods {
// Key format is "<ProductID>:<Iaz>"; value records stock availability.
key := strconv.FormatInt(invMods[i].ProductID, 10) + ":" + strconv.Itoa(int(invMods[i].Iaz))
hasInventory := false
if invMods[i].Available > 0 {
hasInventory = true
}
r.data.Set(key, hasInventory)
}
}
return nil
}
This method will pick what is there on the delta channel and if there are any new files then it will start reading that new file by calling loadFiles method. If there is any error then it will add error to the error channel.
// processNewFiles - load new files found by detectNewFiles.
// It blocks on r.deltaChan and re-runs loadFiles for each new path,
// reporting failures on the error channel. (Body omitted in the original post.)
func (r *customerConfig) processNewFiles() {
// find new files on delta channel
// and call "loadFiles" method to read it
// if there is any error, then it will add it to the error channel.
}
If there is any error on the error channel then it will log those errors from below method -
// handleError drains r.err and logs every error received.
// (Body omitted in the original post.)
func (r *customerConfig) handleError() {
// read error from error channel if there is any
// then log it
}
Problem Statement
Above logic works for me without any issues but there is one small bug in my code which I am not able to figure out on how to solve it. As you can see I have a concurrent map which I am populating in my read method and also clearing that whole map in loadFiles method. Because whenever there is a new file on delta channel I don't want to keep previous state in the map so that's why I am removing everything from the map and then adding new state from new files to it.
Now, if there is any error in the read method, the bug occurs because I have already cleared all the data in my data map, leaving an empty map, which is not what I want. Basically, if there is any error I would like to preserve the previous state in the data map. How can I resolve this issue in my current design?
Note: I am using golang concurrent map
I think your design is over complicated. It can be solved much simpler, which gives all the benefits you desire:
safe for concurrent access
detected changes are reloaded
accessing the config gives you the most recent, successfully loaded config
the most recent config is always, immediately accessible, even if loading a new config due to detected changes takes long
if loading new config fails, the previous "snapshot" is kept and remains the current
as a bonus, it's much simpler and doesn't even use 3rd party libs
Let's see how to achieve this:
Have a CustomerConfig struct holding everything you want to cache (this is the "snapshot"):
// CustomerConfig holds everything you want to cache — one immutable
// "snapshot" that is swapped in wholesale when a reload succeeds.
type CustomerConfig struct {
Data map[string]bool
// Add other props if you need:
LoadedAt time.Time
}
Provide a function that loads the config you wish to cache. Note: this function is stateless, it does not access / operate on package level variables:
// loadConfig builds and returns a fresh CustomerConfig snapshot. It is
// stateless: it touches no package-level variables, so concurrent callers
// never interfere with each other.
func loadConfig() (*CustomerConfig, error) {
	snapshot := &CustomerConfig{
		LoadedAt: time.Now(),
		Data:     make(map[string]bool),
	}
	// Logic to load files, and populate snapshot.Data
	// If an error occurs, return it
	// If loading succeeds, return the config
	return snapshot, nil
}
Now let's create our "cache manager". The cache manager stores the actual / current config (the snapshot), and provides access to it. For safe concurrent access (and update), we use a sync.RWMutex. Also has means to stop the manager (to stop the concurrent refreshing):
// ConfigCache stores the current config snapshot and guards it for safe
// concurrent access; closeCh stops the background refresher.
type ConfigCache struct {
configMu sync.RWMutex // guards config
config *CustomerConfig // current snapshot; replaced wholesale on reload
closeCh chan struct{} // closed by Stop() to terminate refresher()
}
Creating a cache loads the initial config. Also launches a goroutine that will be responsible to periodically check for changes.
// NewConfigCache loads the initial snapshot and starts the background
// refresher goroutine. Call Stop to shut the refresher down.
func NewConfigCache() (*ConfigCache, error) {
	initial, err := loadConfig()
	if err != nil {
		return nil, fmt.Errorf("loading initial config failed: %w", err)
	}
	cache := &ConfigCache{
		closeCh: make(chan struct{}),
		config:  initial,
	}
	// launch goroutine to periodically check for changes, and load new configs
	go cache.refresher()
	return cache, nil
}
The refresher() periodically checks for changes, and if changes are detected, calls loadConfig() to load new data to be cached, and stores it as the current / actual config (while locking configMu). It also monitors closeCh to stop if that is requested:
// refresher wakes up once a minute, checks for changes, and — when a new
// config loads successfully — swaps it in under the write lock. A failed
// load keeps the previous snapshot current. It exits when closeCh is closed.
func (cc *ConfigCache) refresher() {
	ticker := time.NewTicker(1 * time.Minute) // Every minute
	defer ticker.Stop()
	for {
		select {
		case <-cc.closeCh:
			return
		case <-ticker.C:
			changes := false // logic to detect changes
			if !changes {
				continue // No changes, continue
			}
			// Changes detected: load a new snapshot.
			cfg, err := loadConfig()
			if err != nil {
				log.Printf("Failed to load config: %v", err)
				continue // Keep the previous config
			}
			// Publish the new snapshot under the write lock.
			cc.configMu.Lock()
			cc.config = cfg
			cc.configMu.Unlock()
		}
	}
}
Closing the cache manager (the refresher goroutine) is as easy as:
// Stop terminates the refresher goroutine by closing closeCh.
// Call it at most once (closing a closed channel panics).
func (cc *ConfigCache) Stop() {
close(cc.closeCh)
}
The last missing piece is how you access the current config. That's a simple GetConfig() method (that also uses configMu, but in read-only mode):
// GetConfig returns the current snapshot under a read lock. The returned
// value is shared between callers, so treat it as read-only.
func (cc *ConfigCache) GetConfig() *CustomerConfig {
cc.configMu.RLock()
defer cc.configMu.RUnlock()
return cc.config
}
This is how you can use this:
cc, err := NewConfigCache()
if err != nil {
// Decide what to do: retry, terminate etc.
}
// Where ever, whenever you need the actual (most recent) config in your app:
cfg := cc.GetConfig()
// Use cfg
Before you shut down your app (or you want to stop the refreshing), you may call cc.Stop().
Added RWMutex for collectedData concurrent write protecting by worker goroutine
// customerConfig gains a mutex that guards the collectedData slice appended
// to by the worker goroutines in loadFiles below.
type customerConfig struct {
...
m sync.RWMutex // guards collectedData during concurrent appends
}
Instead of updating map in read method let read method just return the data and error
func (r *customerConfig) read(file string, bucket string) ([]CompModel, error) {
// read file data and return with error if any
var err error
fr, err := pars3.NewS3FileReader(context.Background(), bucket, file, r.s3Client.GetSession().Config)
if err != nil {
return (nil, errs.Wrap(err))
}
defer xio.CloseIgnoringErrors(fr)
pr, err := reader.NewParquetReader(fr, nil, 8)
if err != nil {
return (nil, errs.Wrap(err))
}
if pr.GetNumRows() == 0 {
spn.Infof("Skipping %s due to 0 rows", file)
return (nil, errors.New("No Data"))
}
var invMods = []CompModel{}
for {
rows, err := pr.ReadByNumber(r.cfg.RowsToRead)
if err != nil {
return (nil, errs.Wrap(err))
}
if len(rows) <= 0 {
break
}
byteSlice, err := json.Marshal(rows)
if err != nil {
return (nil, errs.Wrap(err))
}
var jsonData []CompModel
err = json.Unmarshal(byteSlice, &jsonData)
if err != nil {
return (nil, errs.Wrap(err))
}
invMods = append(invMods, jsonData...)
}
return invMods, nil
}
And then loadFiles you can collect the data return by read
method and if no error only then clear and update the map else
leave the old data as it was before
// loadFiles reads every file under path concurrently, collects all parsed
// rows, and only when every read succeeded clears and repopulates r.data.
// On any error the previous map contents are left untouched — this is the
// fix for the "failed load leaves an empty map" bug in the question.
func (r *customerConfig) loadFiles(workers int, path string) error {
	var err error
	...
	var files []string
	files = .....
	// reset the map so that it can have fresh state from new files.
	// r.data.Clear() <- remove the clear from here
	g, ctx := errgroup.WithContext(context.Background())
	sem := make(chan struct{}, workers)
	collectedData := []CompModel{}
scheduling:
	for _, file := range files {
		select {
		case <-ctx.Done():
			// BUG FIX: a bare "break" only exits the select statement; a
			// labeled break is needed to leave the loop on cancellation.
			break scheduling
		case sem <- struct{}{}:
		}
		file := file
		g.Go(func() error {
			defer func() { <-sem }()
			// BUG FIX: read is declared as read(file, bucket); the original
			// passed a stray `spn` first argument.
			data, err := r.read(file, bucket)
			if err != nil {
				return err
			}
			r.m.Lock()
			// BUG FIX: append's result was discarded, which does not even
			// compile in Go; the grown slice must be assigned back.
			collectedData = append(collectedData, data...)
			r.m.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return err
	}
	// All reads succeeded: only now is it safe to replace the map contents.
	r.data.Clear()
	for i := range collectedData {
		key := strconv.FormatInt(collectedData[i].ProductID, 10) + ":" + strconv.Itoa(int(collectedData[i].Iaz))
		hasInventory := collectedData[i].Available > 0
		r.data.Set(key, hasInventory)
	}
	return nil
}
Note: Since the code is not runnable just updated methods for reference and I have not include mutex lock for updating the slice you may need to handle for the case.
The same can be achieved with just 3 functions - detect, read, load, detect will check for new files by interval and push to delta channel if found any, load will get file path to read from delta channel and call read method to get the data and error then checks if no error then clear the map and update with new content else log the error, so you would have 2 go routines and 1 function which would be called by load routine
package main
import (
"fmt"
"time"
"os"
"os/signal"
"math/rand"
)
// main runs the demo: it builds a Config with an initial path and a 3-second
// detect interval, then blocks in start until an interrupt arrives.
func main() {
	fmt.Println(">>>", center("STARTED", 30), "<<<")
	demo := &Config{InitialPath: "Old Path", DetectInterval: 3000}
	demo.start()
	fmt.Println(">>>", center("ENDED", 30), "<<<")
}
// https://stackoverflow.com/questions/41133006/how-to-fmt-printprint-this-on-the-center
// center pads s with spaces on both sides so the result is w characters
// wide: s is first right-aligned into the left half, then the whole string
// is left-justified to the full width.
func center(s string, w int) string {
	leftHalf := fmt.Sprintf("%[1]*s", (w+len(s))/2, s) // right-align s in the left half
	return fmt.Sprintf("%[1]*s", -w, leftHalf)         // negative width pads on the right
}
// Config drives the demo: detect() announces new paths on deltaCh every tick
// and load() consumes them until an interrupt arrives on stopSignal.
type Config struct {
deltaCh chan string // paths of detected files, consumed by load()
ticker *time.Ticker // fires every DetectInterval milliseconds
stopSignal chan os.Signal // receives os.Interrupt
InitialPath string // optional path seeded on startup
DetectInterval time.Duration // NOTE(review): used as a millisecond COUNT (multiplied by time.Millisecond in start), not a ready-made Duration
}
// start wires up the channels, launches the detect and load goroutines,
// seeds the initial path, and blocks until an interrupt signal arrives.
// NOTE(review): main, detect and load all receive from the same stopSignal
// channel, but each os.Interrupt is delivered to only ONE receiver — the
// goroutines are not reliably stopped. Closing a separate "done" channel
// would broadcast shutdown to all of them.
func (c *Config) start() {
c.stopSignal = make(chan os.Signal, 1)
signal.Notify(c.stopSignal, os.Interrupt)
// DetectInterval is a Duration used as a raw count; multiplying by
// time.Millisecond converts e.g. 3000 into 3 seconds.
c.ticker = time.NewTicker(c.DetectInterval * time.Millisecond)
c.deltaCh = make(chan string, 1)
go c.detect()
go c.load()
if c.InitialPath != "" {
c.deltaCh <- c.InitialPath
}
<- c.stopSignal
c.ticker.Stop()
}
// Detect New Files
// detect emits a fake new-file path on deltaCh every tick until the stop
// signal is received (see the caveat on start about signal delivery).
func (c *Config) detect() {
for {
select {
case <- c.stopSignal:
return
case <- c.ticker.C:
fmt.Println(">>>", center("DETECT", 30), "<<<")
// Random suffix simulates a distinct newly-detected file path.
c.deltaCh <- fmt.Sprintf("PATH %f", rand.Float64() * 1.5)
}
}
}
// Read Files

// read simulates reading the file at path: it prints a banner and the path,
// then returns a single-entry map keyed by the path.
func read(path string) (map[string]int, error) {
	fmt.Println(">>>", center("READ", 30), "<<<")
	fmt.Println(path)
	return map[string]int{path: 0}, nil
}
// Load Files

// load consumes paths from the delta channel, reads each one, and either
// logs the failure or reports success. It returns when the stop signal fires
// (subject to the signal-delivery caveat on start).
func (c *Config) load() {
	for {
		select {
		case <-c.stopSignal:
			return
		case path := <-c.deltaCh:
			fmt.Println(">>>", center("LOAD", 30), "<<<")
			if data, err := read(path); err != nil {
				fmt.Println("Log Error")
			} else {
				fmt.Println("Success", data)
			}
			fmt.Println()
		}
	}
}
Note: Not included map in sample code it can be easily updated to include map
Just allocate new one map. Like this:
// Swap in a freshly built map under a mutex instead of clearing in place.
// NOTE(review): readers of `before` must take the same mutex (or the swap
// must use RWMutex/atomic) for this to be race-free; also, this is an
// illustrative fragment — := is only valid inside a function body.
var mu sync.Mutex
before := map[string]string{} // Some map before reading
after := make(map[string]string)
// Read files and fill `after` map
mu.Lock()
before = after
mu.Unlock()
Instead of clearing the map in loadFile method, do something like this in read
// read builds a brand-new map for this file and swaps it into r.data only
// after the whole file parsed successfully, so a failure cannot leave the
// shared map half-cleared.
// NOTE(review): each call replaces r.data with a map containing only THIS
// file's rows — if loadFiles reads several files concurrently, the last
// writer wins and the other files' data is lost. The plain assignment to
// r.data is also an unsynchronized write; confirm readers are guarded.
func (r *customerConfig) read(file string, bucket string) error {
m := cmap.New() // create a new map
// ...
for {
rows, err := pr.ReadByNumber(r.cfg.RowsToRead)
if err != nil {
return errs.Wrap(err)
}
if len(rows) <= 0 {
break
}
// Round-trip through JSON to convert the reader's row type into CompModel.
byteSlice, err := json.Marshal(rows)
if err != nil {
return errs.Wrap(err)
}
var invMods []CompModel
err = json.Unmarshal(byteSlice, &invMods)
if err != nil {
return errs.Wrap(err)
}
for i := range invMods {
key := strconv.FormatInt(invMods[i].ProductID, 10) + ":" + strconv.Itoa(int(invMods[i].Iaz))
hasInventory := false
if invMods[i].Available > 0 {
hasInventory = true
}
m.Set(key, hasInventory)
}
}
r.data = m // Use the new map
return nil
}

Creating a custom error in golang for http responses

I wanted to create custom errors for the authentication service that I am currently working on.
Since I have to create an error for every HTTP response, and I am fairly new to Go, I am facing difficulties.
The below code is the replica of the javascript code code that i wanted to implement here in go.
export abstract class CustomError extends Error {
abstract statusCode: number;
constructor(message: string) {
super(message);
Object.setPrototypeOf(this, CustomError.prototype);
}
abstract serializeErrors(): { message: string; field?: string }[];
}
To create extended classes based on the custom error like this
import { CustomError } from "./custom-error";
export class NotFoundError extends CustomError {
statusCode = 404;
constructor() {
super("Route not found");
Object.setPrototypeOf(this, NotFoundError.prototype);
}
serializeErrors() {
return [{ message: "Not Found" }];
}
}
so that it can be thrown or logged from the main file, i.e.
const existingUser = await User.findOne({ email });
if (existingUser) {
throw new BadRequestError("Email is already in use");
}
So, in simple language, I wanted to create an object/model of CustomError that can help to create more diverse errors like BadRequestError().
I need help creating this one. And this is my first question here.
In Go you create a custom error type simply by implementing the error interface.
The error interface is:
// The built-in error interface: any type with an Error() string method
// satisfies it.
type error interface {
Error() string
}
[if you're completely new to Go, I suggest starting with the Tour of Go on interfaces]
For example:
// SyntaxError is an example custom error type carrying structured context
// alongside the message.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
// Error implements the error interface by returning the description.
func (e *SyntaxError) Error() string { return e.msg }
See this official Go blog post for more details.
This is for error types; if you're specifically looking for HTTP errors, those in a server are done by writing an error status to a http.ResponseWriter along with the error message you need, and you can use the helper http.Error for this. Example:
// myHandler demonstrates replying with an HTTP error: http.Error writes the
// status code and a plain-text message to the ResponseWriter.
func myHandler(w http.ResponseWriter, req *http.Request) {
	// Idiomatic Go omits parentheses around if conditions.
	if somethingIsWrong {
		http.Error(w, "the error message", http.StatusBadRequest)
		// Return so no further handler logic writes to the response.
		return
	}
}
The last param to http.Error is the HTTP status; check out the net/http stdlib package docs for the options you have there.
Now, to connect the two, http.Error would typically use the Error() method of your custom error as the message, and the status is really application specific.
If you want http error responses in plain text, http.Error should be your choice. But if your authentication service needs error response in specific format (JSON/XML) then you need to create custom http errors which can be serialized and written into the response.
To create custom http error responses for JSON format (for XML format modify serialization), first you need to create some types -
type ErrFields map[string]string // Error field-value pair type
// ResponseError is a serializable HTTP error: a message, a status code, and
// any extra fields that are flattened into the same JSON object.
type ResponseError struct {
Msg string `json:"message"` // Error message
Status int `json:"status"` // Http status code
Data ErrFields // For extra error fields e.g. reason, details, etc.
}
type ErrList []ResponseError // Multiple http errors type
Methods for ResponseError type -
// AddErrField adds a new field to the response error with given key and
// value, lazily allocating the underlying map on first use.
func (err *ResponseError) AddErrField(key, value string) {
	if err.Data == nil {
		err.Data = ErrFields{}
	}
	err.Data[key] = value
}
// RemoveErrField removes existing field matching given key from response error.
// Deleting from a nil map is a no-op, so this is safe before any AddErrField.
func (err *ResponseError) RemoveErrField(key string) {
delete(err.Data, key)
}
// MarshalJSON marshals the response error into json, flattening the custom
// Data fields into the same object as the message and status.
//
// BUG FIX: this method previously had a pointer receiver, so elements of an
// ErrList ([]ResponseError) bypassed it and were encoded with the default
// struct rules (including a nested "Data" object). A value receiver is in
// the method set of both ResponseError and *ResponseError, so both forms
// now serialize consistently.
//
// NOTE(review): the status is emitted as a JSON string because the field map
// is map[string]string, even though Status is tagged as an int — confirm
// which representation clients expect.
func (err ResponseError) MarshalJSON() ([]byte, error) {
	// Determine json field name for error message
	errType := reflect.TypeOf(err)
	msgField, ok := errType.FieldByName("Msg")
	msgJsonName := "message"
	if ok {
		msgJsonTag := msgField.Tag.Get("json")
		if msgJsonTag != "" {
			msgJsonName = msgJsonTag
		}
	}
	// Determine json field name for error status code
	statusField, ok := errType.FieldByName("Status")
	statusJsonName := "status"
	if ok {
		statusJsonTag := statusField.Tag.Get("json")
		if statusJsonTag != "" {
			statusJsonName = statusJsonTag
		}
	}
	// Flatten message, status and all Data fields into one object.
	fieldMap := make(map[string]string)
	fieldMap[msgJsonName] = err.Msg
	fieldMap[statusJsonName] = fmt.Sprintf("%d", err.Status)
	for key, value := range err.Data {
		fieldMap[key] = value
	}
	return json.Marshal(fieldMap)
}
// SerializeJSON converts the response error into a serialized JSON string
// using the custom MarshalJSON above.
func (resErr *ResponseError) SerializeJSON() (string, error) {
	encoded, err := json.Marshal(resErr)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
Methods for ErrList type -
// SerializeJSON converts the error list into a serialized JSON array string.
func (errList ErrList) SerializeJSON() (string, error) {
	encoded, err := json.Marshal(errList)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
Now you can create custom http error responses by creating different values of ResponseError type -
// Error returns a general response error with the given message and HTTP
// status and no extra data fields.
func Error(msg string, status int) ResponseError {
	return ResponseError{Msg: msg, Status: status, Data: nil}
}
// Errors returns an error list containing the given response errors.
func Errors(errors ...ResponseError) ErrList {
	return ErrList(errors)
}
// Specific HTTP error responses

// ErrorNotFound returns a 404 response error.
func ErrorNotFound() ResponseError { return Error("not found", http.StatusNotFound) }

// ErrorBadRequest returns a 400 response error.
func ErrorBadRequest() ResponseError { return Error("bad request", http.StatusBadRequest) }

// ErrorInternalServerError returns a 500 response error.
func ErrorInternalServerError() ResponseError {
	return Error("internal server error", http.StatusInternalServerError)
}

// ErrorForbidden returns a 403 response error.
func ErrorForbidden() ResponseError { return Error("forbidden", http.StatusForbidden) }
You can add/remove custom fields to the ResponseError values -
notFoundErr := ErrorNotFound()
notFoundErr.AddErrField("reason", "given 'id' does not exist")
notFoundErr.RemoveErrField("reason")
Since in Go there is no concept of throw, you can only return response error from a function -
// Foo illustrates returning a response error instead of throwing: ok reports
// whether resErr is populated. (Body elided with "..." in the original post.)
func Foo() (resErr ResponseError, ok bool) {
...
if existingUser {
resErr = ErrorBadRequest()
resErr.AddErrField("reason", "Email is already in use")
return resErr, true
}
...
return ResponseError{}, false
}
To serialize response error into JSON -
resErr, ok := Foo()
if !ok {
json, err := resErr.SerializeJSON()
if err != nil {
// Handle serialization error
}
}
See the Go playground example here.

Hyperledger Build error: Error creating new Smart Contract: Error chaincode id not provided

I am learning how to use Hyperledger Fabric DLT framework.
I am building an application that allows a seller of Tuna to transact with a buyer at a discount without compromising the marker price at which the seller sells to other buyers.
Problem: However, each time I execute the code, the following messages are shown:
2018-01-14 18:45:11.292 EST [shim] SetupChaincodeLogging -> INFO 001 Chaincode log level not provided; defaulting to: INFO
2018-01-14 18:45:11.292 EST [shim] SetupChaincodeLogging -> INFO 002 Chaincode (build level: ) starting up ...
This is the error that I constantly get each time I run the code:
Error creating new Smart Contract: Error chaincode id not providedeloiim:fabric_
This is the code I created
package main
import ( "github.com/hyperledger/fabric/core/chaincode/shim"; sc"github.com/hyperledger/fabric/protos/peer"; "bytes"; "encoding/json"; "strconv"; "fmt"; )
// SmartContract is the chaincode receiver; Init and Invoke below make it a
// valid shim chaincode.
type SmartContract struct {
}
// Tuna is the asset stored in the world state, serialized to JSON under a
// numeric string key.
type Tuna struct {
TxID int `json:"txid"`
Container string `json:"container"`
Timestamp string `json:"timestamp"`
Location string `json:"location"`
Owner string `json:"owner"`
}
changeTunaHolder - As the tuna fish is passed to different parties in the supply chain, the data in the world state can be updated with who has possession. The changeTunaHolder method takes in 2 arguments, tuna id and new holder name.
I suspect the error is with the changeTunaHolder method given that it takes the id and Owner fields
// changeTunaHolder transfers ownership of a tuna record. It expects two
// arguments: the tuna key and the new holder's name.
// BUG FIX: the errors from GetState, json.Unmarshal and json.Marshal were
// previously discarded with `_`; they are now checked and reported.
func (S_ *SmartContract) changeTunaHolder(APIstub shim.ChaincodeStubInterface, parameters []string) sc.Response {
	if len(parameters) != 2 {
		return shim.Error("Incorrect number of arguments. Expecting 2")
	}
	tunaAsBytes, err := APIstub.GetState(parameters[0])
	if err != nil {
		return shim.Error(fmt.Sprintf("Failed to read tuna: %s", parameters[0]))
	}
	if tunaAsBytes == nil {
		return shim.Error("Could not locate tuna")
	}
	TUNA := Tuna{}
	if err := json.Unmarshal(tunaAsBytes, &TUNA); err != nil {
		return shim.Error(fmt.Sprintf("Failed to decode tuna: %s", parameters[0]))
	}
	// Normally check that the specified argument is a valid holder of tuna
	// we are skipping this check for this example
	TUNA.Owner = parameters[1]
	tunaAsBytes, err = json.Marshal(TUNA)
	if err != nil {
		return shim.Error(fmt.Sprintf("Failed to encode tuna: %s", parameters[0]))
	}
	if err := APIstub.PutState(parameters[0], tunaAsBytes); err != nil {
		return shim.Error(fmt.Sprintf("Failed to change tuna holder: %s", parameters[0]))
	}
	return shim.Success(nil)
}
Init METHOD - In this the code attempts to retrieve the requested Smart Contract function and arguments
// Init is called when the chaincode is instantiated; there is nothing to
// initialize here, so it simply succeeds.
func (S_ *SmartContract) Init(APIstub shim.ChaincodeStubInterface) sc.Response {
return shim.Success(nil)
}
// Invoke routes an invocation to the matching smart-contract function based
// on the function name supplied by the client.
func (S_ *SmartContract) Invoke(APIstub shim.ChaincodeStubInterface) sc.Response {
	function, parameters := APIstub.GetFunctionAndParameters()
	switch function {
	case "queryTuna":
		return S_.queryTuna(APIstub, parameters)
	case "initLedger":
		return S_.initLedger(APIstub)
	case "recordTuna":
		return S_.recordTuna(APIstub, parameters)
	case "changeTunaHolder":
		return S_.changeTunaHolder(APIstub, parameters)
	default:
		return shim.Error("Invalid Smart Contract function name.")
	}
}
queryTuna - The queryTuna method would be used by a fisherman, regulator, or restaurateur to view the record of one particular tuna. It takes one argument - the key for the tuna in question.
// queryTuna returns the record of one particular tuna. It takes one
// argument: the key of the tuna in question.
func (S_ *SmartContract) queryTuna(APIstub shim.ChaincodeStubInterface, parameters []string) sc.Response {
	if len(parameters) != 1 {
		return shim.Error("Incorrect number of arguments. Expecting 1")
	}
	// BUG FIX: the error from GetState was previously discarded with `_`.
	tunaAsBytes, err := APIstub.GetState(parameters[0])
	if err != nil {
		return shim.Error(fmt.Sprintf("Failed to read tuna: %s", parameters[0]))
	}
	if tunaAsBytes == nil {
		return shim.Error("Could not locate tuna")
	}
	return shim.Success(tunaAsBytes)
}
initLedger - The initLedger method will add test data to our network.
// initLedger seeds the ledger with test data, stored under keys "1".."10".
// BUG FIXES: the builtin println (unspecified, writes to stderr) is replaced
// with fmt.Println, and the PutState/Marshal errors are no longer ignored.
func (S_ *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {
	TUNA := []Tuna{
		{TxID: 1, Container: "923F", Location: "67.0006, -70.5476", Timestamp: "1504054225", Owner: "Miriam"},
		{TxID: 2, Container: "M83T", Location: "91.2395, -49.4594", Timestamp: "1504057825", Owner: "Dave"},
		{TxID: 3, Container: "T012", Location: "58.0148, 59.01391", Timestamp: "1493517025", Owner: "Igor"},
		{TxID: 4, Container: "P490", Location: "-45.0945, 0.7949", Timestamp: "1496105425", Owner: "Amalea"},
		{TxID: 5, Container: "S439", Location: "-107.6043, 19.5003", Timestamp: "1493512301", Owner: "Rafa"},
		{TxID: 6, Container: "J205", Location: "-155.2304, -15.8723", Timestamp: "1494117101", Owner: "Shen"},
		{TxID: 7, Container: "S22L", Location: "103.8842, 22.1277", Timestamp: "1496104301", Owner: "Leila"},
		{TxID: 8, Container: "EI89", Location: "-132.3207, -34.0983", Timestamp: "1485066691", Owner: "Yuan"},
		{TxID: 9, Container: "129R", Location: "153.0054, 12.6429", Timestamp: "1485153091", Owner: "Carlo"},
		{TxID: 10, Container: "49W4", Location: "51.9435, 8.2735", Timestamp: "1487745091", Owner: "Fatima"},
	}
	// Keys are 1-based indices, matching the original manual counter loop.
	for i, tuna := range TUNA {
		fmt.Println("iter is", i)
		tunaAsBytes, err := json.Marshal(tuna)
		if err != nil {
			return shim.Error(fmt.Sprintf("Failed to encode tuna %d", i+1))
		}
		if err := APIstub.PutState(strconv.Itoa(i+1), tunaAsBytes); err != nil {
			return shim.Error(fmt.Sprintf("Failed to add tuna %d", i+1))
		}
		fmt.Println("ADDED", tuna)
	}
	return shim.Success(nil)
}
recordTuna - The recordTuna method is the method a fisherman like Sarah would use to record each of her tuna catches. This method takes in five arguments (attributes to be saved in the ledger).
// recordTuna records a single tuna catch (e.g. by a fisherman like Sarah).
// Expects 5 arguments: ledger key, container, location, timestamp, owner.
func (S_ *SmartContract) recordTuna(APIstub shim.ChaincodeStubInterface, parameters []string) sc.Response {
	if len(parameters) != 5 {
		return shim.Error("Incorrect number of arguments. Expecting 5")
	}
	// NOTE(review): TxID is left at its zero value here — the ledger key in
	// parameters[0] identifies the record; confirm callers rely on the key only.
	TUNA := Tuna{Container: parameters[1], Location: parameters[2], Timestamp: parameters[3], Owner: parameters[4]}
	// Report a marshal failure instead of writing a nil payload.
	tunaAsBytes, err := json.Marshal(TUNA)
	if err != nil {
		return shim.Error(fmt.Sprintf("Failed to record tuna catch: %s", parameters[0]))
	}
	if err := APIstub.PutState(parameters[0], tunaAsBytes); err != nil {
		return shim.Error(fmt.Sprintf("Failed to record tuna catch: %s", parameters[0]))
	}
	return shim.Success(nil)
}
queryAllTuna - The queryAllTuna method allows for accessing all the records; in this case, all the Tuna records added to the ledger. This method does not take any arguments. It will return a JSON string containing the results.
// queryAllTuna returns every tuna record in the ledger as a JSON array of
// {"Key": ..., "Record": ...} objects. Takes no arguments.
func (S_ *SmartContract) queryAllTuna(APIstub shim.ChaincodeStubInterface) sc.Response {
	// Keys were written as decimal strings, so this lexical range covers them.
	iterator, err := APIstub.GetStateByRange("0", "999")
	if err != nil {
		return shim.Error(err.Error())
	}
	defer iterator.Close()

	var out bytes.Buffer
	out.WriteString("[")
	first := true
	for iterator.HasNext() {
		entry, err := iterator.Next()
		if err != nil {
			return shim.Error(err.Error())
		}
		if !first {
			out.WriteString(",")
		}
		// Key is emitted quoted; Value is raw JSON and is embedded as-is.
		out.WriteString("{\"Key\":\"" + entry.Key + "\", \"Record\":" + string(entry.Value) + "}")
		first = false
	}
	out.WriteString("]")
	fmt.Printf("- queryAllTuna:\n%s\n", out.String())
	return shim.Success(out.Bytes())
}
MAIN FUNCTION
// main hands control to the shim, which hosts the chaincode process.
func main() {
	if err := shim.Start(new(SmartContract)); err != nil {
		fmt.Printf("Error creating new Smart Contract: %s", err)
	}
}
have you tried to go with the terminal in chaincode-docker-devmode?
cd fabric-samples/chaincode-docker-devmode
as in Chaincode for Developers
I got the same error when I forgot to change into that directory first.
And make sure to put the version in CORE_CHAINCODE_ID_NAME (e.g: CORE_CHAINCODE_ID_NAME=mycc:0)

Query latest world state using Hyperledger 1.0 Nodejs SDK

Based on hyperledger SDK doc, we can use nodeJS SDK to query for the block and the transaction info. Is it possible to use this SDK to query the latest world state, e.g, query the value for a given key?
To be able to query the latest world state, your chaincode has to provide this capability: you have to implement this logic and incorporate it into your chaincode. Then it will simply require executing the chaincode to get the value for the key you are interested in.
For example you can do something similar to this:
package main
import (
"fmt"
"github.com/hyperledger/fabric/core/chaincode/shim"
"github.com/hyperledger/fabric/protos/peer"
)
// Asset is the JSON-serialized ledger record for a single asset.
type Asset struct {
ID string `json:"id"` // unique identifier; used as the world-state key
Name string `json:"name"` // human-readable asset name
Price string `json:"price"` // NOTE(review): kept as a string — presumably client-formatted; confirm
}
// assetManagement implements the chaincode interface (Init/Invoke) to manage
// the ledger of asset records. It carries no state of its own.
type assetManagement struct {
}
// Init is called on chaincode instantiation or upgrade; no setup is needed.
func (p *assetManagement) Init(stub shim.ChaincodeStubInterface) peer.Response {
return shim.Success(nil)
}
// Invoke routes an incoming transaction to the matching handler by function
// name. Unknown names produce an error response.
func (p *assetManagement) Invoke(stub shim.ChaincodeStubInterface) peer.Response {
	// Parameters are re-read inside each handler, so only the name is kept
	// here (the original declared-but-unused `params` would not compile).
	actionName, _ := stub.GetFunctionAndParameters()
	switch actionName {
	case "addAsset":
		return p.addAsset(stub)
	case "getAsset":
		return p.getAsset(stub)
	default:
		return shim.Error("Unknown function name")
	}
}
// getAsset returns the latest world-state value for the asset whose ID is
// passed as the first chaincode argument.
func (p *assetManagement) getAsset(stub shim.ChaincodeStubInterface) peer.Response {
	_, params := stub.GetFunctionAndParameters()
	// Guard against a missing argument — indexing an empty slice would panic
	// the chaincode container.
	if len(params) < 1 {
		return shim.Error("Incorrect number of arguments. Expecting asset ID")
	}
	assetID := params[0]
	state, err := stub.GetState(assetID)
	if err != nil {
		return shim.Error(fmt.Sprintf("%s", err))
	}
	return shim.Success(state)
}
// addAsset stores a new asset in the world state.
// Not yet implemented; the explicit error return also fixes the original's
// missing-return compile error.
func (p *assetManagement) addAsset(stub shim.ChaincodeStubInterface) peer.Response {
	// TODO add logic adding new asset
	return shim.Error("addAsset not implemented")
}
// main launches the chaincode via the shim runtime.
func main() {
	if err := shim.Start(new(assetManagement)); err != nil {
		fmt.Printf("Error starting Simple chaincode: %s", err)
	}
}
Next, all you need to do is invoke the chaincode, passing the function name getAsset together with an asset ID, and you will get the latest state for that asset. Here is the code based on the Go SDK:
// Skipped initialization.
// Build a proposal invoking "getAsset" on chaincode "assetChaincode" with
// asset ID "42", targeting peer `p` (created during the skipped setup).
txRequest := apitxn.ChaincodeInvokeRequest{
Targets: []apitxn.ProposalProcessor{p},
Fcn: "getAsset",
Args: []string{"42"},
TransientMap: map[string][]byte{},
ChaincodeID: "assetChaincode",
}
// Send the proposal for endorsement over channel `ch`; the endorsement
// response already carries the queried world-state value.
proposalResponse, _, err := ch.SendTransactionProposal(txRequest)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("%v\n", proposalResponse[0].ProposalResponse)
// NOTE(review): for a read-only query the proposal response above suffices;
// the steps below additionally order and commit the transaction.
tx, err := ch.CreateTransaction(proposalResponse)
if err != nil {
fmt.Println(err)
return
}
// Submit the endorsed transaction to the ordering service.
txResponse, err := ch.SendTransaction(tx)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(txResponse[0])

Resources