How to calculate a TCP packet checksum correctly? - linux

I am trying to implement a checksum calculation algorithm using the link in the code comment below.
Here is an example of a byte slice and the checksums for it, and also my implementation code; tell me what I'm doing wrong.
//https://gist.github.com/david-hoze/0c7021434796997a4ca42d7731a7073a
func chksum(iph *ipv4.Header, payload []byte) uint16 {
	// https://github.com/mikioh/-stdyng/blob/master/afpacket.go
	var htons = func(i uint16) uint16 { return (i<<8)&0xff00 | i>>8 }
	tcpLen := iph.TotalLen - iph.Len
	src := iph.Src
	dst := iph.Dst
	var csum uint32
	for i := 0; i < 16; i += 2 {
		csum += uint32(src[i]) << 8
		csum += uint32(src[i+1])
		csum += uint32(dst[i]) << 8
		csum += uint32(dst[i+1])
	}
	csum += uint32(htons(6)) // TCP PROTO
	csum += uint32(htons(uint16(tcpLen)))
	// old chksum
	payload[16] = 0
	payload[17] = 0
	var i int
	for tcpLen > 1 {
		csum += uint32(payload[i]) << 8
		csum += uint32(payload[i+1])
		tcpLen -= 2
		i++
	}
	if tcpLen%2 == 1 {
		csum += uint32(payload[tcpLen-1]) << 8
	}
	for csum > 0xffff {
		csum = (csum >> 16) + (csum & 0xffff)
	}
	return ^uint16(csum)
}
// output
[69 0 0 60 179 32 64 0 64 6 108 0 192 168 77 61 192 168 77 13 203 50
0 22 69 21 146 157 0 0 0 0 160 194 250 240 65 21 0 0 2 4 5 180 4 2 8
10 38 105 38 58 0 0 0 0 1 3 3 7]
src chkSumOriginaltcp = 0x4115 (checksum in the original packet), calc chkSumFaketcp = 0x72e8 (my calculation)
Please tell me where I could be mistaken; it is very important to me.

func chksum(data []byte, srcip, dstip []byte) uint16 {
	data[16], data[17] = 0, 0 // zero out the original checksum field
	// TCP pseudo-header: source IP, destination IP, zero, protocol, TCP length.
	psheader := []byte{
		srcip[0], srcip[1], srcip[2], srcip[3],
		dstip[0], dstip[1], dstip[2], dstip[3],
		0,
		6, // TCP
		byte(len(data) >> 8), byte(len(data)), // TCP length (16 bits, both bytes)
	}
	csum := make([]byte, 0, len(psheader)+len(data))
	csum = append(csum, psheader...)
	csum = append(csum, data...)
	lenSumThis := len(csum)
	var word uint16
	var sum uint32
	for i := 0; i+1 < lenSumThis; i += 2 {
		word = uint16(csum[i])<<8 | uint16(csum[i+1])
		sum += uint32(word)
	}
	if lenSumThis%2 != 0 {
		sum += uint32(csum[len(csum)-1]) << 8 // odd trailing byte is padded with a zero
	}
	sum = (sum >> 16) + (sum & 0xffff)
	sum = sum + (sum >> 16)
	return uint16(^sum)
}
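As a sanity check, here is a self-contained sketch (the helper and variable names are my own, not from the post) that applies the same pseudo-header plus one's-complement sum to the sample packet from the output above; it reproduces the original checksum 0x4115:

package main

import (
	"encoding/binary"
	"fmt"
)

// onesComplementSum16 adds up a byte slice as big-endian 16-bit words,
// returning the unfolded 32-bit running sum used by the IP/TCP checksum.
func onesComplementSum16(b []byte) uint32 {
	var sum uint32
	for i := 0; i+1 < len(b); i += 2 {
		sum += uint32(binary.BigEndian.Uint16(b[i : i+2]))
	}
	if len(b)%2 == 1 {
		sum += uint32(b[len(b)-1]) << 8 // trailing odd byte is padded with a zero
	}
	return sum
}

func main() {
	// The 60-byte packet from the output above: 20-byte IPv4 header + 40-byte TCP segment.
	pkt := []byte{
		69, 0, 0, 60, 179, 32, 64, 0, 64, 6, 108, 0, 192, 168, 77, 61, 192, 168, 77, 13,
		203, 50, 0, 22, 69, 21, 146, 157, 0, 0, 0, 0, 160, 194, 250, 240, 65, 21, 0, 0,
		2, 4, 5, 180, 4, 2, 8, 10, 38, 105, 38, 58, 0, 0, 0, 0, 1, 3, 3, 7,
	}
	ihl := int(pkt[0]&0x0f) * 4              // IPv4 header length (20 here)
	tcp := append([]byte(nil), pkt[ihl:]...) // copy of the TCP segment
	tcp[16], tcp[17] = 0, 0                  // zero the checksum field before summing

	// Pseudo-header: source IP, destination IP, zero, protocol (6 = TCP), TCP length.
	psh := make([]byte, 12)
	copy(psh[0:4], pkt[12:16])
	copy(psh[4:8], pkt[16:20])
	psh[9] = 6
	binary.BigEndian.PutUint16(psh[10:12], uint16(len(tcp)))

	sum := onesComplementSum16(psh) + onesComplementSum16(tcp)
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff) // fold carries back into the low 16 bits
	}
	fmt.Printf("0x%04x\n", ^uint16(sum)) // prints 0x4115, matching chkSumOriginaltcp
}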

Related

Obtain decimal netmask from prefix length python 3.x

I created this code because I was not able to find any function that accomplishes my requirement.
If you can reduce it, even better.
Just enter the prefix length, from 1 to 32, and you will get the decimal mask.
This code helps me with my scripts for Cisco.
import math

# Netmask octets
octet1 = [0, 0, 0, 0, 0, 0, 0, 0]
octet2 = [0, 0, 0, 0, 0, 0, 0, 0]
octet3 = [0, 0, 0, 0, 0, 0, 0, 0]
octet4 = [0, 0, 0, 0, 0, 0, 0, 0]
# Powers of two for each bit position
pow_list = [7, 6, 5, 4, 3, 2, 1, 0]
# Read the prefix length
mask = int(input("Introduce the prefix length: "))
# According to the number of bits, change the array elements from 0 to 1
while mask >= 25 and mask <= 32:
    octet4[mask - 25] = 1
    mask -= 1
while mask >= 17 and mask <= 24:
    octet3[mask - 17] = 1
    mask -= 1
while mask >= 9 and mask <= 16:
    octet2[mask - 9] = 1
    mask -= 1
while mask >= 1 and mask <= 8:
    octet1[mask - 1] = 1
    mask -= 1
# Count the ones in each octet
ones1 = octet1.count(1)
ones2 = octet2.count(1)
ones3 = octet3.count(1)
ones4 = octet4.count(1)
# Sum the bit values of each octet
sum1 = 0
for i in range(0, ones1):
    sum1 = sum1 + math.pow(2, pow_list[i])
sum1 = int(sum1)
sum2 = 0
for i in range(0, ones2):
    sum2 = sum2 + math.pow(2, pow_list[i])
sum2 = int(sum2)
sum3 = 0
for i in range(0, ones3):
    sum3 = sum3 + math.pow(2, pow_list[i])
sum3 = int(sum3)
sum4 = 0
for i in range(0, ones4):
    sum4 = sum4 + math.pow(2, pow_list[i])
sum4 = int(sum4)
# Join the results with a "."
decimal_netmask = str(sum1) + "." + str(sum2) + "." + str(sum3) + "." + str(sum4)
# Result
print("Decimal netmask is: " + decimal_netmask)
Result:
Introduce the prefix length: 23
Decimal netmask is: 255.255.254.0
As you are probably doing more than just converting CIDR to netmask, I recommend checking out the built-in ipaddress library:
from ipaddress import ip_network

cidr = input("Introduce the prefix length: ")
decimal_netmask = str(ip_network(f'0.0.0.0/{cidr}').netmask)
print("Decimal netmask is: " + decimal_netmask)
You can simplify your code by computing the overall mask value as an integer using the formula:
mask = 2**32 - 2**(32-prefix_length)
Then you can compute the four 8-bit parts of the mask (by shifting and masking), append each to a list, and finally join the elements of the list with '.':
def decimal_netmask(prefix_length):
    mask = 2**32 - 2**(32 - prefix_length)
    octets = []
    for _ in range(4):
        octets.append(str(mask & 255))
        mask >>= 8
    return '.'.join(reversed(octets))

for pl in range(33):
    print(f'{pl:3d}\t{decimal_netmask(pl)}')
Output:
0 0.0.0.0
1 128.0.0.0
2 192.0.0.0
3 224.0.0.0
4 240.0.0.0
5 248.0.0.0
6 252.0.0.0
7 254.0.0.0
8 255.0.0.0
9 255.128.0.0
10 255.192.0.0
11 255.224.0.0
12 255.240.0.0
13 255.248.0.0
14 255.252.0.0
15 255.254.0.0
16 255.255.0.0
17 255.255.128.0
18 255.255.192.0
19 255.255.224.0
20 255.255.240.0
21 255.255.248.0
22 255.255.252.0
23 255.255.254.0
24 255.255.255.0
25 255.255.255.128
26 255.255.255.192
27 255.255.255.224
28 255.255.255.240
29 255.255.255.248
30 255.255.255.252
31 255.255.255.254
32 255.255.255.255

Groovy Turkish tax number verification

I need to verify a Turkish tax number via Groovy. Basically it's a 10-digit number.
Here is the math behind it.
The first nine digits are the number (d); the tenth digit is the control value (c).
Tr tax number = d1 d2 d3 d4 d5 d6 d7 d8 d9 c1
For d1..d9, i.e. d[i] with i = 1..9:
p[i] = (d[i] + 10 - i) mod 10
if p[i] = 9 then q[i] = 9
if p[i] != 9 then q[i] = (p[i] * 2^(10 - i)) mod 9
c1 = (10 - (Σ q[i] mod 10)) mod 10
Let's say the tax number is 018273645x:
p[1] = (d[1] + 10 - 1) mod 10 = (0 + 10 - 1) mod 10 = 9
p[2] = (d[2] + 10 - 2) mod 10 = (1 + 10 - 2) mod 10 = 9
p[3] = (d[3] + 10 - 3) mod 10 = (8 + 10 - 3) mod 10 = 5
p[4] = (d[4] + 10 - 4) mod 10 = (2 + 10 - 4) mod 10 = 8
p[5] = (d[5] + 10 - 5) mod 10 = (7 + 10 - 5) mod 10 = 2
p[6] = (d[6] + 10 - 6) mod 10 = (3 + 10 - 6) mod 10 = 7
p[7] = (d[7] + 10 - 7) mod 10 = (6 + 10 - 7) mod 10 = 9
p[8] = (d[8] + 10 - 8) mod 10 = (4 + 10 - 8) mod 10 = 6
p[9] = (d[9] + 10 - 9) mod 10 = (5 + 10 - 9) mod 10 = 6
q[1] = 9
q[2] = 9
q[3] = (p[3] * 2^(10-3)) mod 9 = (5 * 2^7) mod 9 = 1
q[4] = (p[4] * 2^(10-4)) mod 9 = (8 * 2^6) mod 9 = 8
q[5] = (p[5] * 2^(10-5)) mod 9 = (2 * 2^5) mod 9 = 1
q[6] = (p[6] * 2^(10-6)) mod 9 = (7 * 2^4) mod 9 = 4
q[7] = 9
q[8] = (p[8] * 2^(10-8)) mod 9 = (6 * 2^2) mod 9 = 6
q[9] = (p[9] * 2^(10-9)) mod 9 = (6 * 2^1) mod 9 = 3
For i = 1..9: Σ q[i] = 9 + 9 + 1 + 8 + 1 + 4 + 9 + 6 + 3 = 50
c1 = (10 - (Σ q[i] mod 10)) mod 10 = (10 - 50 mod 10) mod 10
c1 = (10 - 0) mod 10 = 0
So it would be >> 0182736450
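For reference, here is the same check-digit formula written out compactly (a sketch in Go rather than Groovy, with a function name of my own choosing); it reproduces the 0 from the worked example above:

package main

import "fmt"

// taxCheckDigit computes the 10th digit of a Turkish tax number from its
// first nine digits, following the p/q formula above.
func taxCheckDigit(first9 string) int {
	sum := 0
	for i := 1; i <= 9; i++ {
		d := int(first9[i-1] - '0')
		p := (d + 10 - i) % 10
		q := 9 // rule: p[i] = 9 => q[i] = 9
		if p != 9 {
			q = (p * (1 << (10 - i))) % 9 // q[i] = (p[i] * 2^(10-i)) mod 9
		}
		sum += q
	}
	return (10 - sum%10) % 10
}

func main() {
	fmt.Println(taxCheckDigit("018273645")) // prints 0, as in the worked example
}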
Here is my code.
boolean isTurkishVID(String idnum) {
    d = idnum;
    int v1 = 0;
    int v2 = 0;
    int v3 = 0;
    int v4 = 0;
    int v5 = 0;
    int v6 = 0;
    int v7 = 0;
    int v8 = 0;
    int v9 = 0;
    int v11 = 0;
    int v22 = 0;
    int v33 = 0;
    int v44 = 0;
    int v55 = 0;
    int v66 = 0;
    int v77 = 0;
    int v88 = 0;
    int v99 = 0;
    int v_last_digit = 0;
    int total = 0;
    if (d.size() == 10) {
        v1 = (d[0].toInteger() + 9) % 10;
        v2 = (d[1].toInteger() + 8) % 10;
        v3 = (d[2].toInteger() + 7) % 10;
        v4 = (d[3].toInteger() + 6) % 10;
        v5 = (d[4].toInteger() + 5) % 10;
        v6 = (d[5].toInteger() + 4) % 10;
        v7 = (d[6].toInteger() + 3) % 10;
        v8 = (d[7].toInteger() + 2) % 10;
        v9 = (d[8].toInteger() + 1) % 10;
        v11 = (v1 * 512) % 9;
        v22 = (v2 * 256) % 9;
        v33 = (v3 * 128) % 9;
        v44 = (v4 * 64) % 9;
        v55 = (v5 * 32) % 9;
        v66 = (v6 * 16) % 9;
        v77 = (v7 * 8) % 9;
        v88 = (v8 * 4) % 9;
        v99 = (v9 * 2) % 9;
        if (v1 != 0 && v11 == 0) v11 = 9;
        if (v2 != 0 && v22 == 0) v22 = 9;
        if (v3 != 0 && v33 == 0) v33 = 9;
        if (v4 != 0 && v44 == 0) v44 = 9;
        if (v5 != 0 && v55 == 0) v55 = 9;
        if (v6 != 0 && v66 == 0) v66 = 9;
        if (v7 != 0 && v77 == 0) v77 = 9;
        if (v8 != 0 && v88 == 0) v88 = 9;
        if (v9 != 0 && v99 == 0) v99 = 9;
        total = v11 + v22 + v33 + v44 + v55 + v66 + v77 + v88 + v99;
        if (total % 10 == 0) total = 0;
        else total = (10 - (total % 10));
        if (total == v_last_digit) {
            return true;
        } else return false;
    } else return false;
}
It returns an error when I call it like this:
isTurkishVI("1234567891");
It returns:
groovy.lang.MissingMethodException: No signature of method: ConsoleScript20.isTurkishVI() is applicable for argument types: (java.lang.Integer) values: [1234567891]
Possible solutions: isTurkishVID(java.lang.String)
at ConsoleScript20.run(ConsoleScript20:74)

sum one variable (X[i][j]) in one constraint with choco

I am using Choco to solve a CSP, and one of my constraints is that the sum of a variable X[i][j] is less than N = 10, with i, j = 1..N.
How do I accomplish this? Thank you for your help.
sum(X[i][j]) = 1 for i, j = 1..N
You need to flatten it into a 1-D array and call model.sum():
import org.chocosolver.solver.Model;
import org.chocosolver.solver.Solver;
import org.chocosolver.solver.variables.BoolVar;

public class SumBooleans {
    private final static int N = 10;

    public static void main(String[] args) {
        Model model = new Model("Boolean sum");
        BoolVar[][] vars = new BoolVar[N][N];
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                vars[i][j] = model.boolVar("vars[" + i + "][" + j + "]");
            }
        }
        BoolVar[] flatArray = new BoolVar[N * N];
        for (int index = 0; index < N * N; index++) {
            int i = index / N;
            int j = index % N;
            flatArray[index] = vars[i][j];
        }
        model.sum(flatArray, "=", 1).post();
        //model.sum(flatArray, "<", N).post();
        //model.sum(flatArray, ">=", 8).post();
        Solver solver = model.getSolver();
        if (solver.solve()) {
            for (int i = 0; i < N; i++) {
                for (int j = 0; j < N; j++) {
                    System.out.print(vars[i][j].getValue() + " ");
                }
                System.out.println();
            }
        }
    }
}
Output:
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 1 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0

How can I find the alphabetic position of the letters in a word and then add the numbers up?

I wrote some code that gives me the total for a word based on the positions of its letters in the English alphabet, but I am looking for something that prints the line like this:
book: 2 + 15 + 15 + 11 = 43
def convert(string):
    sum = 0
    for c in string:
        code_point = ord(c)
        location = code_point - 65 if code_point >= 65 and code_point <= 90 else code_point - 97
        sum += location + 1
    return sum

print(convert('book'))
def convert(string):
    parts = []
    sum = 0
    for c in string:
        code_point = ord(c)
        location = code_point - 65 if code_point >= 65 and code_point <= 90 else code_point - 97
        sum += location + 1
        parts.append(str(location + 1))
    return "{0}: {1} = {2}".format(string, " + ".join(parts), sum)

print(convert('book'))
Here's the output:
book: 2 + 15 + 15 + 11 = 43
More info on string.format and string.join.

Two-bit branch prediction should give higher percentage

I have a small program that is supposed to calculate the percentage of successful predictions of a 2-bit branch predictor. I have it all done, but my output isn't what I expected: the percentage stops at about 91% instead of the 98% or 99% I think it should reach. I think the problem might be with how I apply the mask to the address. Can someone look at my code and verify whether that is the issue?
The program iterates through a file containing the branch history of a run of the gcc compiler, consisting of about 1792 addresses and a single-digit column with 1 for a branch taken and 0 for a branch not taken.
static void twoBitPredictor_v1(StreamWriter sw)
{
    uint hxZero = 0x00000000;
    uint uMask1 = 0x00000000;
    int nCorrectPrediction = 0;
    uint uSize2;
    int nSize;
    int nTotalReads;
    int nTableMin = 2;
    int nTableMax = 16;
    int nTaken = 0;
    uint[] uArrBt1;

    sw.WriteLine("\n\nTwo-Bit Predictor Results Ver. 1\n");
    sw.WriteLine("-------------------------\n");
    sw.WriteLine("Total" + "\t" + "Correct");
    sw.WriteLine("Reads" + "\t" + "Prediction" + "\t" + "Percentage");
    System.Console.WriteLine("\n\nTwo-Bit Predictor Results Ver. 1\n");
    System.Console.WriteLine("-------------------------\n");
    System.Console.WriteLine("Total" + "\t" + "Correct");
    System.Console.WriteLine("Reads" + "\t" + "Prediction" + "\t" + "Percentage");

    for (int _i = nTableMin; _i <= nTableMax; _i++)
    {
        StreamReader sr2 = new StreamReader(@"C:\Temp\gccHist.txt");
        nSize = _i;
        uSize2 = (uint)(Math.Pow(2, nSize));
        uArrBt1 = new uint[2 * uSize2];
        for (int i = 0; i < uSize2; i++)
            uArrBt1[i] = hxZero;
        nCorrectPrediction = 0;
        nTotalReads = 0;
        while (!sr2.EndOfStream)
        {
            String[] strLineRead = sr2.ReadLine().Split(',');
            uint uBRAddress = Convert.ToUInt32(strLineRead[0], 16);
            uint bBranchTaken = Convert.ToUInt32(strLineRead[2]);
            // >>>>> I think the problem lies in the line below, but I'm not sure how to correct it.
            uMask1 = uBRAddress & (0xffffffff >> 32 - nSize);
            int _mask = Convert.ToInt32(uMask1);
            nTaken = Convert.ToInt32(uArrBt1[2 * _mask]);
            switch (Convert.ToInt32(uArrBt1[_mask]))
            {
                case 0:
                    if (bBranchTaken == 0) // Branch Not Taken
                        nCorrectPrediction++;
                    else
                        uArrBt1[_mask] = 1;
                    break;
                case 1:
                    if (bBranchTaken == 0)
                    {
                        uArrBt1[_mask] = 0;
                        nCorrectPrediction++;
                    }
                    else
                        uArrBt1[_mask] = 3;
                    break;
                case 2:
                    if (bBranchTaken == 0)
                    {
                        uArrBt1[_mask] = 3;
                        nCorrectPrediction++;
                    }
                    else
                        uArrBt1[_mask] = 0;
                    break;
                case 3:
                    if (bBranchTaken == 0)
                        uArrBt1[_mask] = 2;
                    else
                        nCorrectPrediction++;
                    break;
            }
            nTotalReads++;
        }
        sr2.Close();
        double percentage = ((double)nCorrectPrediction / (double)nTotalReads) * 100;
        sw.WriteLine(nTotalReads + "\t" + nCorrectPrediction + "\t\t" + Math.Round(percentage, 2) + "%");
        System.Console.WriteLine(nTotalReads + "\t" + nCorrectPrediction + "\t\t" + Math.Round(percentage, 2) + "%");
    }
}
Here is the output:
Two-Bit Predictor Results Ver. 1
-------------------------
Total Correct
Reads Prediction Percentage
1792 997 55.64%
1792 997 55.64%
1792 1520 84.82%
1792 1522 84.93%
1792 1521 84.88%
1792 1639 91.46%
1792 1651 92.13%
1792 1649 92.02%
1792 1649 92.02%
1792 1648 91.96%
1792 1646 91.85%
1792 1646 91.85%
1792 1646 91.85%
1792 1646 91.85%
1792 1646 91.85%
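For comparison, the classic 2-bit saturating counter (one common variant) treats states 0 and 1 as "predict not taken" and states 2 and 3 as "predict taken", and moves one step toward the actual outcome after every branch. A minimal sketch of that update rule, with names of my own choosing and written in Go rather than C#:

package main

import "fmt"

// step applies one branch outcome to a 2-bit saturating counter
// (0,1 predict "not taken"; 2,3 predict "taken") and reports whether
// the prediction made before the update was correct.
func step(counter *uint8, taken bool) bool {
	predictedTaken := *counter >= 2
	if taken && *counter < 3 {
		*counter++
	} else if !taken && *counter > 0 {
		*counter--
	}
	return predictedTaken == taken
}

func main() {
	var c uint8
	history := []bool{true, true, true, false, true, true, true, true}
	correct := 0
	for _, taken := range history {
		if step(&c, taken) {
			correct++
		}
	}
	fmt.Printf("%d/%d predictions correct\n", correct, len(history))
}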
