I've been working on a PIC18F55K42 chip for a while. The PIC is set up as a slave and it's receiving bytes correctly. But I encountered a few problems.
For example, when I do:
i2cset -y 1 0x54 0x80 0x01
It looks correct on the controller side and I can see the address 0x80(data address) and byte value 0x01.
When I send in block mode like:
i2cset -y 1 0x54 0x80 0x01 0x02 0x03 0x04 i
I see spurious bytes appearing on the controller. More precisely, it looks like this:
ADDRESS 80 6c 00 2f 01 02 03 04 STOP
At first I thought this had something to do with my controller and even tried digging into its clock settings. I used a Saleae logic analyser too. There's nothing wrong with the controller or its setup. The only place I can think of is the complex onion driver layering done by Linux.
I'd like to know why Linux is sending the 3 extra bytes (6c 00 2f). Why does i2c_smbus_write_block_data send extra bytes and how can it be avoided?
It's a bug in the i2cset implementation in Busybox. See miscutils/i2c_tools.c:
/* Prepare the value(s) to be written according to current mode. */
switch (mode) {
case I2C_SMBUS_BYTE_DATA:
val = xstrtou_range(argv[3], 0, 0, 0xff);
break;
case I2C_SMBUS_WORD_DATA:
val = xstrtou_range(argv[3], 0, 0, 0xffff);
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
/* BUG: the data bytes start at argv[3], so the destination index must be
 * blen - 3, i.e. block[blen - 3] = ... .  As written, the loop fills
 * block[3..] and leaves block[0..2] uninitialized, so three garbage
 * stack bytes are transmitted before the real data. */
for (blen = 3; blen < (argc - 1); blen++)
block[blen] = xstrtou_range(argv[blen], 0, 0, 0xff);
val = -1;
break;
default:
val = -1;
break;
}
Should be block[blen - 3] = xstrtou_range(argv[blen], 0, 0, 0xff);. The bug results in 3 extra garbage bytes from stack being sent.
Use i2c_smbus_write_i2c_block_data for raw i2c transfers
i2c_smbus_write_block_data makes data transfer using SMBUS protocol
Related
I am writing a kernel module on Linux (Xubuntu x64). The version of the kernel is 5.4.0-52-generic. My kernel module is capturing traffic from an interface and printing it in hex:
Nov 10 14:04:34 ubuntu kernel: [404009.566887] Packet hex dump:
Nov 10 14:04:34 ubuntu kernel: [404009.566889] 000000 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00
Nov 10 14:04:34 ubuntu kernel: [404009.566899] 000010 00 54 49 4C 40 00 40 01 A7 EF C0 A8 64 0E C0 A8
Nov 10 14:04:34 ubuntu kernel: [404009.566907] 000020 64 0E 08 00 9E FE 00 03 00 08 72 0E AB 5F 00 00
Nov 10 14:04:34 ubuntu kernel: [404009.566914] 000030 00 00 7B B5 01 00 00 00 00 00 10 11 12 13 14 15
Nov 10 14:04:34 ubuntu kernel: [404009.566922] 000040 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25
Nov 10 14:04:34 ubuntu kernel: [404009.566929] 000050 26 27 28 29
This output I've got using this command under root: tail -f /var/log/kern.log
The whole problem is that I need to save this output as pcap-file. I know that there is text2pcap but its library (libpcap) is user-mode only so I can't use it in kernel module (or maybe not? Correct me if I'm wrong).
Is it possible to use text2pcap in kernel module? Otherwise, How can I save an output as pcap file while being in kernel module?
Source code:
#include <linux/module.h> // included for all kernel modules
#include <linux/kernel.h> // included for KERN_INFO
#include <linux/init.h> // included for __init and __exit macros
#include <linux/skbuff.h> // included for struct sk_buff
#include <linux/if_packet.h> // include for packet info
#include <linux/ip.h> // include for ip_hdr
#include <linux/netdevice.h> // include for dev_add/remove_pack
#include <linux/if_ether.h> // include for ETH_P_ALL
#include <linux/unistd.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tester");
MODULE_DESCRIPTION("Sample linux kernel module program to capture all network packets");
struct packet_type ji_proto;
/* Hex-dump a packet via printk, starting at the MAC header: rows of 16
 * bytes, each prefixed with the row's byte offset (printed in hex). */
void pkt_hex_dump(struct sk_buff *skb)
{
    size_t len;
    int rowsize = 16;
    int i, l, linelen, remaining;
    int li = 0;
    uint8_t *data, ch;

    printk("Packet hex dump:\n");
    data = (uint8_t *) skb_mac_header(skb);

    if (skb_is_nonlinear(skb)) {
        /* NOTE(review): for nonlinear skbs only the linear head is
         * contiguous at skb_mac_header(), and skb->data_len is the length
         * of the *paged* (non-linear) part — so this dumps data_len bytes
         * of the linear head, which is likely not what was intended.
         * Consider skb_headlen() instead; confirm against the use case. */
        len = skb->data_len;
    } else {
        len = skb->len;
    }

    remaining = len;
    for (i = 0; i < len; i += rowsize) {
        /* Print the offset in hex and advance by rowsize.  The previous
         * code did "li += 10" with "%06d", which only mimics hex offsets
         * up to 0x90 and then prints 000100 where 0000a0 belongs. */
        printk("%06x\t", li);
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        for (l = 0; l < linelen; l++) {
            ch = data[l];
            printk(KERN_CONT "%02X ", (uint32_t) ch);
        }

        data += linelen;
        li += rowsize;
        printk(KERN_CONT "\n");
    }
}
/* Protocol handler registered through dev_add_pack(): logs the packet's
 * delivery type, hex-dumps the frame, then consumes the skb.
 * Returns 0; the skb is freed here with kfree_skb(), so it must not be
 * touched by the caller afterwards. */
int ji_packet_rcv (struct sk_buff *skb, struct net_device *dev,struct packet_type *pt, struct net_device *orig_dev)
{
    printk(KERN_INFO "New packet captured.\n");

    /* Packet delivery types are defined in linux/if_packet.h
       (PACKET_HOST = 0 ... PACKET_KERNEL = 7; PACKET_FASTROUTE and
       PACKET_LOOPBACK are invisible to user space). */
    switch (skb->pkt_type)
    {
        case PACKET_HOST:
            printk(KERN_INFO "PACKET to us − ");
            break;
        case PACKET_BROADCAST:
            printk(KERN_INFO "PACKET to all − ");
            break;
        case PACKET_MULTICAST:
            printk(KERN_INFO "PACKET to group − ");
            break;
        case PACKET_OTHERHOST:
            printk(KERN_INFO "PACKET to someone else − ");
            break;
        case PACKET_OUTGOING:
            printk(KERN_INFO "PACKET outgoing − ");
            break;
        case PACKET_LOOPBACK:
            printk(KERN_INFO "PACKET LOOPBACK − ");
            break;
        case PACKET_FASTROUTE:
            printk(KERN_INFO "PACKET FASTROUTE − ");
            break;
        default:
            /* Unknown/new pkt_type value: log it rather than stay silent. */
            printk(KERN_INFO "PACKET of unknown type %u − ", skb->pkt_type);
            break;
    }

    /* The ethernet header is available via eth_hdr(skb) should source,
       destination or protocol logging be needed again. */
    pkt_hex_dump(skb);

    kfree_skb (skb);
    return 0;
}
/* Module init: register a packet handler for IPv4 frames on enp0s3.
 * Returns 0 on success, -ENODEV if the interface does not exist. */
static int __init ji_init(void)
{
    /* See <linux/if_ether.h>: with htons(ETH_P_ALL) every protocol is
       received; ETH_P_IP (0x0800) limits the handler to IPv4 packets.
       Incoming packets of the chosen type are passed to this handler
       before the kernel's own protocol implementations. */
    ji_proto.type = htons(ETH_P_IP);

    /* dev_get_by_name() takes a reference on the device; it must be
       released with dev_put() when the module is unloaded.
       A NULL .dev would act as a wildcard (all interfaces) — here we
       refuse to load instead of silently capturing everywhere. */
    ji_proto.dev = dev_get_by_name (&init_net, "enp0s3");
    if (!ji_proto.dev) {
        printk(KERN_ERR "Interface enp0s3 not found, module not loaded.\n");
        return -ENODEV; /* non-zero return: module couldn't be loaded */
    }
    ji_proto.func = ji_packet_rcv;

    /* Add the protocol handler to the networking stack.  The passed
       packet_type is linked into kernel lists and may not be freed until
       it has been removed from those lists (see dev_remove_pack()). */
    dev_add_pack (&ji_proto);

    printk(KERN_INFO "Module insertion completed successfully!\n");
    return 0;
}
/* Module exit: unregister the packet handler and drop the device
 * reference taken by dev_get_by_name() in ji_init().  Without dev_put()
 * the interface's refcount leaks and the device can never be released. */
static void __exit ji_cleanup(void)
{
    dev_remove_pack(&ji_proto);
    if (ji_proto.dev) {
        dev_put(ji_proto.dev);
        ji_proto.dev = NULL;
    }
    printk(KERN_INFO "Cleaning up module....\n");
}
module_init(ji_init);
module_exit(ji_cleanup);
The problem was solved using call_usermodehelper() to call user-mode text2pcap with arguments as if text2pcap was called using terminal.
Is it possible to use text2pcap in kernel module?
Not without putting it and the code it uses to write a pcap file (which isn't from libpcap, it's from a small library that's part of Wireshark, also used by dumpcap to write pcap and pcapng files) into the kernel.
How can I save an output as pcap file while being in kernel module?
You could write your own code to open a file and write to it in the kernel module; "Writing to a file from the Kernel" talks about that.
It also says
A "preferred" technique would be to pass the parameters in via IOCTLs and implement a read() function in your module. Then reading the dump from the module and writing into the file from userspace.
so you might want to consider that; the userspace code could just use libpcap to write the file.
I wrote code for Java Card 2.2.1 and I tested it with JCIDE.
I get an error in the method setOutgoingLength().
// Sends the 21-byte (0x15) string "HELLO WORLD JAVA CARD" as the response.
// NOTE(review): the length passed to setOutgoingLength() must match the Le
// of the incoming APDU; if the terminal sends Le = 0x00 the card answers
// 6C15 ("wrong Le, use 0x15"), so the error reported here comes from the
// command APDU, not from this code — confirm the Le used by the reader.
public void getoutput(APDU apdu)
{
byte [] buffer = apdu.getBuffer();
byte hello[] = {'H','E','L','L','O',' ','W','O','R','L','D',' ', 'J','A','V','A',' ','C','A','R','D'};
// setOutgoing() switches to the outgoing-data phase and returns the Le
// requested by the host (unused below — see the note above).
short le = apdu.setOutgoing();
short totalBytes = (short) hello.length;
// Stage the response at offset 0 of the APDU buffer.
Util.arrayCopyNonAtomic(hello, (short)0, buffer, (short)0, (short)totalBytes);
apdu.setOutgoingLength(totalBytes);
apdu.sendBytes((short) 0, (short) hello.length);}
6CXX means that your Le is not equal to the correct length of response data (XX is equal to the length of correct response data). 6C15 specifically means that the correct Le to be used should be 0x15.
What happened is that your Le field is 0x00 (which is actually interpreted by the card as 256 in decimal form) but you used totalBytes, which has a value of 0x15, as parameter to apdu.setOutgoingLength() which is not equal to 256.
The correct APDU to send is 00 40 00 00 15
I wrote a Java Card applet that saves some data into the APDU buffer at offset ISO7816.OFFSET_CDATA and sends those bytes as a response.
Util.arrayCopy(Input_Data, (short)0, buffer, (short) ISO7816.OFFSET_CDATA, (short)Datalength);
apdu.setOutgoing();
apdu.setOutgoingLength((short)(DataLength) );
apdu.sendBytesLong(buffer, ISO7816.OFFSET_CDATA, (short)(DataLength));
I tested this in a simulator without any problem. But when I test this on a real smart card (Java Card v2.2.1 manufactured by Gemalto), I get the status word 0x6180 as response.
My command APDU is 00 40 00 00 80 Data, where data has a length of 128 bytes, so I have 4+128 bytes in the buffer and (260-(4+128)) byte is null.
Your simulator probably uses T=1 transport protocol, but your real card does not. It uses T=0 protocol, which means it can either receive data, or send data in a single APDU.
Status word 0x6180 indicates there are 0x80 bytes to receive from the card. Generally, 61XX means XX bytes to receive.
How to receive them? Well, there is a special APDU command called GET RESPONSE. You should call it each time you get 61XX status word. Use XX as the Le byte of your GET RESPONSE APDU
APDU -> 61 XX
00 C0 00 00 XX -> your data 90 00
A few other notes on your code:
Datalength vs DataLength?
Copy your output data to 0 instead of ISO7816.OFFSET_CDATA
Why do you cast DataLength to short each time? Is it short? Do not cast then. Is it byte? You cast it in a wrong way then, because unsigned byte value > 0x80 will be cast to a negative short. The correct cast from an unsigned byte to a short is (short) (DataLength & 0xFF)
Use setOutgoingAndSend whenever you can. It is much simpler.
Use arrayCopyNonAtomic instead of arrayCopy whenever you are not copying to a persistent array. Performance of arrayCopyNonAtomic is much better.
I'm trying to retrieve my sensor data from my Raspberry Pi using nrf24l01+ network receiver.
I'm sending it from an Arduino nano board. Here is the setting of my Arduino:
STATUS = 0x0e RX_DR=0 TX_DS=0 MAX_RT=0 RX_P_NO=7 TX_FULL=0
RX_ADDR_P0-1 = 0xcccccc3ccc 0xcccccc3c3c
RX_ADDR_P2-5 = 0x33 0xce 0x3e 0xe3
TX_ADDR = 0xcccccccc3c
RX_PW_P0-6 = 0x20 0x20 0x20 0x20 0x20 0x20
EN_AA = 0x3e
EN_RXADDR = 0x3f
RF_CH = 0x5a
RF_SETUP = 0x07
CONFIG = 0x0f
DYNPD/FEATURE = 0x3f 0x04
Data Rate = 1MBPS
Model = nRF24L01+
CRC Length = 16 bits
PA Power = PA_MAX
My Raspberry Pi is plugged with nrf24l01+ through GPIO. I made sure the connection is OK by using the C++ example given on https://github.com/TMRh20/RF24:
RF24 radio(RPI_BPLUS_GPIO_J8_15,RPI_BPLUS_GPIO_J8_24, BCM2835_SPI_SPEED_8MHZ);
The data is OK. Now i want to use a nodeJS program to get this data. I'm using this library: https://github.com/natevw/node-nrf
The code is very simple, but somehow is not working (console is silent):
// Receive-side nRF24L01+ setup: must mirror the Arduino transmitter's
// radio parameters (channel, data rate, CRC length, address) exactly.
var spiDev = "/dev/spidev0.0";
var cePin = 15; //RPI_BPLUS_GPIO_J8_15
var irqPin = null;
var channel = 0x5a; //90 — matches RF_CH = 0x5a on the Arduino
var radio = require('nrf').connect(spiDev, cePin, irqPin);
radio
    .channel(channel)
    .dataRate('1Mbps')
    // The Arduino reports "CRC Length = 16 bits", so two CRC bytes are
    // required here; crcBytes(1) makes every packet fail the CRC check
    // and the receiver stays silent.
    .crcBytes(2)
    // .autoRetransmit({count:15, delay:4000})
    ;
radio.begin(function () {
    // Listen on pipe 0 with the transmitter's TX_ADDR (0xcccccccc3c).
    var rx = radio.openPipe('rx', 0xcccccccc3c);
    rx.pipe(process.stdout);
});
I'm wondering what I'm doing wrong. Hardware is OK and the setting seems pretty good, what do you think?
Thanks
Usually to find out what is wrong with NRF you should start from basics:
Try simpler NRF configs to test if its working, especially with no CRC bytes etc.
Try it w/o dynamic payload and try fixed payload size on both ends.
Auto-acknowledge also can be an issue (note that when auto-ack is enabled, CRC can't be disabled as it is used to ensure transmission acknowledgement in this mode).
Ensure that CRC lengths match on both ends. In your example, on the Arduino you have CRC Length = 16 bits, whereas the Raspberry Pi is configured with radio.crcBytes(1).
Don't rely on default values, always provide same full configuration on both ends.
These steps can considerably reduce time to locate the problem especially when using different libraries and platforms.
I have a C++ program to do a basic TPM_GetCapabilities Through TPM Base Services and the Windows 7 SDK.
I've setup the program below
// Opens a TBS context, submits a raw TPM 1.2 TPM_GetCapability command
// asking for TPM_CAP_VERSION, prints the response buffer, and closes the
// context.  (Quoted as-is; the closing brace/return of main is not shown.)
int _tmain(int argc, _TCHAR* argv[])
{
TBS_CONTEXT_PARAMS pContextParams;
TBS_HCONTEXT hContext;
TBS_RESULT rv;
pContextParams.version = TBS_CONTEXT_VERSION_ONE;
rv = Tbsi_Context_Create(&pContextParams, &hContext);
printf("\n1 RESULT : %x STATUS : %x", rv, hContext);
// Hand-built 18-byte TPM_GetCapability request blob (big-endian fields).
BYTE data[200] =
{0,0xc1, /* TPM_TAG_RQU_COMMAND */
0,0,0,18, /* blob length, bytes */
0,0,0,0x65, /* TPM_ORD_GetCapability */
0,0,0,0x06, /* TPM_CAP_VERSION */
0,0,0,0}; /* 0 bytes subcap */
BYTE buf[4000];
UINT32 len = 4000;
// len is in/out: on return it holds the actual response size (18 here).
rv = Tbsip_Submit_Command(hContext,0,TBS_COMMAND_PRIORITY_NORMAL,data,18,buf,&len);
//CAPABILITY_RETURN* retVal = new CAPABILITY_RETURN(buf);
//printf("\n2 Response Tag: %x Output Bytes: %x",tag,);
printf("\n2 RESULT : %x STATUS : %x\n", rv, hContext);
printBuf(buf,len);
rv = Tbsip_Context_Close(hContext);
printf("\n3 RESULT : %x STATUS : %x", rv, hContext);
My Return Buffer looks like:
00:C4:00:00:00:12:00:00:00:00:00:00:00:04:01:01:00:00
According to this doc, Section 7.1 TPM_GetCapability I should get the following:
Looking at my output buffer, I am getting TPM_TAG_RSP_COMMAND,a value of 18 for my paramSize, 0 for my TPM_RESULT, 0x...04 for ordinal (Not sure what this is supposed to mean.) then 1,1,0,0 for my final bits. I'm at a loss as to how to decipher this.
The answer to your question:
You don't get a nonstandard response.
The response is perfectly fine, there is nothing nonstandard in it. It looks exactly like it is defined in the spec.
The response's content resp you get is also what is to be expected. A standard-conforming TPM has to answer with 01 01 00 00 when asked for TPM_CAP_VERSION.
Why?
First of all: The line stating TPM_COMMAND_CODE ordinal is not part of the response.
It has no PARAM # and no PARAM SZ. It is only relevant for calculating the HMAC of the response.
So the response is the following:
00 C4 tag
00 00 00 12 paramSize
00 00 00 00 returnCode
00 00 00 04 respSize
01 01 00 00 resp
You asked for the capability TPM_CAP_VERSION. Here is what the spec says:
Value: 0x00000006
Capability Name: TPM_CAP_VERSION
Sub cap: Ignored
TPM_STRUCT_VER structure.
The major and minor version MUST indicate 1.1.
The firmware revision MUST indicate 0.0.
The use of this value is deprecated, new software SHOULD
use TPM_CAP_VERSION_VAL to obtain version and revision information
regarding the TPM.
So when you decode resp, which is a TPM_STRUCT_VER, you get the following:
/* TPM 1.2 spec structure returned for TPM_CAP_VERSION; the "==>" values
 * are the bytes 01 01 00 00 from the response decoded field by field. */
typedef struct tdTPM_STRUCT_VER {
BYTE major; // ==> 1
BYTE minor; // ==> 1
BYTE revMajor; // ==> 0
BYTE revMinor; // ==> 0
} TPM_STRUCT_VER;
So 1.1 and 0.0, exactly according to specification.