I am a beginner at TPM programming. Due to legal requirements, I have to use the IBM software TPM. I want to make the TPM functionality of computer A available to computer B.
When I try to connect the two machines through TSS, the TSS on computer A shuts down with tcsd: free(): invalid pointer.
I am sure that I never call free() in my own code, so I suspect that something is wrong with the TSS.
result = Tspi_Context_Create(&hContext);
if (result != TSS_SUCCESS) {
    print_error("Tspi_Context_Create ", result);
    print_error_exit(nameOfFunction, err_string(result));
    exit(result);
}

//Connect Context
result = Tspi_Context_Connect(hContext, NULL);
if (result != TSS_SUCCESS) {
    print_error("Tspi_Context_Connect", result);
    print_error_exit(nameOfFunction, err_string(result));
    Tspi_Context_FreeMemory(hContext, NULL);
    Tspi_Context_Close(hContext);
    exit(result);
}

/* Create TPM NV object */
result = Tspi_Context_CreateObject(hContext, TSS_OBJECT_TYPE_NV, 0, &hNVStore);
if (result != TSS_SUCCESS) {
    print_error("Tspi_Context_CreateObject", result);
    print_error_exit(nameOfFunction, err_string(result));
    Tspi_Context_FreeMemory(hContext, NULL);
    Tspi_Context_Close(hContext);
    exit(result);
}
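For the remote case I ultimately want (computer B using computer A's TPM), my understanding is that the destination host is passed to Tspi_Context_Connect as a TSS_UNICODE string instead of NULL, and that the tcsd on the remote machine has to allow the corresponding operations via remote_ops in tcsd.conf. A rough sketch of what I believe that call looks like, using TrouSerS' Trspi_Native_To_UNICODE helper (declared in <trousers/trousers.h>) and a placeholder hostname:

/* Sketch only: "computer-a.example.com" is a placeholder hostname, and the
 * remote tcsd must be configured (remote_ops in tcsd.conf) to accept this. */
#include <trousers/trousers.h>

unsigned dest_len;
TSS_UNICODE *dest = (TSS_UNICODE *)Trspi_Native_To_UNICODE(
        (BYTE *)"computer-a.example.com", &dest_len);

result = Tspi_Context_Connect(hContext, dest);
if (result != TSS_SUCCESS) {
    print_error("Tspi_Context_Connect (remote)", result);
    Tspi_Context_FreeMemory(hContext, NULL);
    Tspi_Context_Close(hContext);
    exit(result);
}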
I'm trying to create a multithreaded named pipe server as outlined in the MSDN sample here https://learn.microsoft.com/en-us/windows/win32/ipc/multithreaded-pipe-server, but I'm trying to restrict access to the named pipe to members of the Administrators group only.
The example works correctly when no SECURITY_ATTRIBUTES structure is specified, but when an SA is specified the first call succeeds, while subsequent calls to CreateNamedPipe fail as long as the first pipe is listening or communicating with a client. The create call usually fails with ACCESS_DENIED, but sometimes with error 1305, "The revision level is unknown". When the first pipe closes because its client disconnects, the next CreateNamedPipe call succeeds, but it in turn fails once that pipe has a client.
I have tried multiple values for the grfInheritance field to no avail. This is my first adventure into explicitly specifying security, so forgive me if I have missed something obvious. Note that in the function that calls CreateNamedPipe I create a new SA structure with each create attempt, but I have also tried creating one and sharing it outside the create loop.
Relevant code follows:
Function that creates the pipe:
HRESULT DapiSettingsSvr::DapiSettingsListener()
{
    while (m_run)
    {
        // Find an unused control array member. If they are all used we have max connections, so don't create a pipe.
        UINT connectId = 0;
        for (connectId; connectId < MAX_CONNECTIONS; connectId++)
        {
            if (m_controlArray[connectId].inuse == false)
                break;
        }

        SECURITY_ATTRIBUTES sa;
        HRESULT hr = InitializeSecurity(&sa);
        if (FAILED(hr))
        {
            return hr;
        }

        if (connectId < MAX_CONNECTIONS)
        {
            HANDLE hpipe;
            hpipe = CreateNamedPipe(
                lpszPipename,                  // pipe name
                PIPE_ACCESS_DUPLEX,            // read/write access
                PIPE_TYPE_BYTE |               // byte pipe
                PIPE_READMODE_BYTE |           // read as bytes
                PIPE_WAIT |                    // do not return until data is received
                PIPE_REJECT_REMOTE_CLIENTS,    // no remote connections
                MAX_CONNECTIONS,               // max. instances
                OUTPUT_BUFFER_SIZE,            // output buffer size
                INPUT_BUFFER_SIZE,             // input buffer size
                0,                             // client time-out
                &sa);                          // security attributes
            // CleanUpSecurityResources();

            if (hpipe == INVALID_HANDLE_VALUE)
            {
                swprintf(logbuffer, ARRAYSIZE(logbuffer), L"CreateNamedPipe failed, GLE=%d.\n", GetLastError());
                DapiSettingLogger(logbuffer);
            }
            else
            {
                m_controlArray[connectId].inuse = true;
                m_controlArray[connectId].pThis = this;
                m_controlArray[connectId].connectId = connectId;
                m_controlArray[connectId].pipehandle = hpipe;

                swprintf(logbuffer, ARRAYSIZE(logbuffer), L"\nPipe Server: Main thread awaiting client connection on %s\n", lpszPipename);
                DapiSettingLogger(logbuffer);

                // Block until a client tries to connect; success is non-zero. However, a client can connect between the CreateNamedPipe and ConnectNamedPipe calls.
                // In that case ConnectNamedPipe returns zero but GLE == ERROR_PIPE_CONNECTED and a valid connection exists. Check for this case.
                fConnected = ConnectNamedPipe(hpipe, NULL) ? TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);
                if (fConnected)
                {
                    // Create a thread for this client.
                    m_controlArray[connectId].threadHandle = CreateThread(
                        NULL,                                  // no security attribute
                        0,                                     // default stack size
                        WorkerInstance,                        // thread proc
                        (LPVOID)&m_controlArray[connectId],    // thread parameter
                        0,                                     // not suspended
                        &m_controlArray[connectId].threadId);  // returns thread ID
                    if (m_controlArray[connectId].threadHandle == NULL)
                    {
                        swprintf_s(logbuffer, ARRAYSIZE(logbuffer), L"CreateThread failed, GLE=%d.\n", GetLastError());
                        DapiSettingLogger(logbuffer);
                        CloseHandle(m_controlArray[connectId].pipehandle);
                        ZeroMemory(&m_controlArray[connectId], sizeof(WORKER_INFO));
                    }
                }
                else
                {
                    // The client could not connect, so close the pipe.
                    CloseHandle(m_controlArray[connectId].pipehandle);
                    ZeroMemory(&m_controlArray[connectId], sizeof(WORKER_INFO));
                }
            } // else valid connection
        }
        else
        {
            DapiSettingLogger((LPWSTR)L"Max Connections reached\n");
        }
    }
    return S_OK;
}
Function that creates the SA
HRESULT DapiSettingsSvr::InitializeSecurity(SECURITY_ATTRIBUTES* psa)
{
    HRESULT result = S_OK;
    DWORD res, error;
    EXPLICIT_ACCESS ea[1];
    SID_IDENTIFIER_AUTHORITY SIDAuthNT = SECURITY_NT_AUTHORITY;

    // Create a SID for the BUILTIN\Administrators group.
    if (!AllocateAndInitializeSid(&SIDAuthNT, 2,
                                  SECURITY_BUILTIN_DOMAIN_RID,
                                  DOMAIN_ALIAS_RID_ADMINS,
                                  0, 0, 0, 0, 0, 0,
                                  &m_pAdminSID))
    {
        error = GetLastError();
        swprintf(logbuffer, ARRAYSIZE(logbuffer), L"AllocateAndInitializeSid Error %u\n", error);
        DapiSettingLogger(logbuffer);
        result = HRESULT_FROM_WIN32(error);
        goto Cleanup;
    }

    ea[0].grfAccessPermissions = GENERIC_ALL;
    ea[0].grfAccessMode = GRANT_ACCESS;
    ea[0].grfInheritance = SUB_CONTAINERS_AND_OBJECTS_INHERIT; // changing
    ea[0].Trustee.TrusteeForm = TRUSTEE_IS_SID;
    ea[0].Trustee.TrusteeType = TRUSTEE_IS_GROUP;
    ea[0].Trustee.ptstrName = (LPTSTR)m_pAdminSID;

    // Create a new ACL that contains the new ACE.
    res = SetEntriesInAcl(1, ea, NULL, &m_pACL);
    if (ERROR_SUCCESS != res)
    {
        swprintf(logbuffer, ARRAYSIZE(logbuffer), L"SetEntriesInAcl Error %u\n", res);
        DapiSettingLogger(logbuffer);
        result = HRESULT_FROM_WIN32(res);
        goto Cleanup;
    }

    // Initialize a descriptor. Use LocalAlloc as it allows memory moving without changing the handle value.
    m_pSD = (PSECURITY_DESCRIPTOR)LocalAlloc(LPTR, SECURITY_DESCRIPTOR_MIN_LENGTH);
    if (NULL == m_pSD)
    {
        error = GetLastError();
        swprintf(logbuffer, ARRAYSIZE(logbuffer), L"LocalAlloc Error %u\n", error);
        result = HRESULT_FROM_WIN32(error);
        goto Cleanup;
    }

    if (!InitializeSecurityDescriptor(m_pSD, SECURITY_DESCRIPTOR_REVISION))
    {
        error = GetLastError();
        swprintf(logbuffer, ARRAYSIZE(logbuffer), L"InitializeSecurityDescriptor Error %u\n", error);
        result = HRESULT_FROM_WIN32(error);
        goto Cleanup;
    }

    // Add the ACL to the security descriptor.
    if (!SetSecurityDescriptorDacl(m_pSD,
                                   TRUE,     // bDaclPresent flag
                                   m_pACL,
                                   FALSE))   // not a default DACL
    {
        error = GetLastError();
        swprintf(logbuffer, ARRAYSIZE(logbuffer), L"SetSecurityDescriptorDacl Error %u\n", error);
        result = HRESULT_FROM_WIN32(error);
        goto Cleanup;
    }

Cleanup:
    if (FAILED(result))
    {
        CleanUpSecurityResources();
    }
    else
    {
        // Initialize a security attributes structure.
        psa->nLength = sizeof(SECURITY_ATTRIBUTES);
        psa->lpSecurityDescriptor = m_pSD;
        psa->bInheritHandle = TRUE; // NOTE: I have toyed with this value also
    }
    return result;
}
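(For reference, clients open the pipe with a plain CreateFile, which is the call the Administrators-only DACL above gets checked against. A minimal sketch, with a placeholder name since lpszPipename isn't shown here:)

// Minimal test client; the pipe name is a placeholder for whatever lpszPipename holds.
HANDLE hClient = CreateFileW(L"\\\\.\\pipe\\DapiSettings", GENERIC_READ | GENERIC_WRITE,
                             0, NULL, OPEN_EXISTING, 0, NULL);
if (hClient == INVALID_HANDLE_VALUE)
{
    wprintf(L"CreateFile on the pipe failed, GLE=%d\n", GetLastError());
}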
Any input on what I'm doing incorrectly would be greatly appreciated!
Thanks!
According to Named Pipe Security and Access Rights,
In addition to the requested access rights, the DACL must allow the calling thread FILE_CREATE_PIPE_INSTANCE access to the named pipe.
OK, I figured this out. I'm going to mark YangXiaoPo's answer as correct as it pointed me in the right direction, but for clarification, GENERIC_ALL already includes the FILE_CREATE_PIPE_INSTANCE right, or at least that's what my testing indicates. So setting the EXPLICIT_ACCESS structure field to ea[0].grfAccessPermissions = GENERIC_ALL | FILE_CREATE_PIPE_INSTANCE; does not resolve this issue.
The answer lies in the fact that I was running the pipe server program from within Visual Studio (debug), and thus as a regular user. So the first time through the loop a pipe gets created, and the SA with the local Administrators group ACE is then applied to the pipe.
So we get a pipe created in the listening state. As soon as a client connects, the worker thread is created, the while(m_run) loop does another iteration, and it tries to create a new pipe instance. This attempt fails (repeatedly, in fact) because the security descriptor with the Administrators-only ACL is now consulted and the program is not running as an administrator. As soon as the first client disconnects, the worker thread closes the pipe handle (effectively destroying the pipe) and in the next iteration a pipe is again created.
Running the program as Administrator (or starting Visual Studio as admin and then debugging) resolves the issue, though I think a fully correct solution would be to add a second ACE specifying CREATOR OWNER in addition to Administrators in the SA's DACL.
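Something along these lines is what I have in mind for that second ACE. This is only a sketch I haven't verified, and the CREATOR OWNER SID setup is my assumption about the right well-known SID to use:

// Sketch: grant Administrators access to the pipe and also grant CREATOR OWNER
// (the account that created the first instance) the rights needed to create
// further instances. Untested; error handling trimmed for brevity.
EXPLICIT_ACCESS ea[2] = {};
PSID pOwnerSID = NULL;
SID_IDENTIFIER_AUTHORITY SIDAuthCreator = SECURITY_CREATOR_SID_AUTHORITY;

// ACE 1: BUILTIN\Administrators, as before.
ea[0].grfAccessPermissions = GENERIC_ALL;
ea[0].grfAccessMode = GRANT_ACCESS;
ea[0].grfInheritance = NO_INHERITANCE;
ea[0].Trustee.TrusteeForm = TRUSTEE_IS_SID;
ea[0].Trustee.TrusteeType = TRUSTEE_IS_GROUP;
ea[0].Trustee.ptstrName = (LPTSTR)m_pAdminSID;

// ACE 2: CREATOR OWNER, so the creating process can keep calling CreateNamedPipe.
if (AllocateAndInitializeSid(&SIDAuthCreator, 1, SECURITY_CREATOR_OWNER_RID,
                             0, 0, 0, 0, 0, 0, 0, &pOwnerSID))
{
    ea[1].grfAccessPermissions = GENERIC_ALL;
    ea[1].grfAccessMode = GRANT_ACCESS;
    ea[1].grfInheritance = NO_INHERITANCE;
    ea[1].Trustee.TrusteeForm = TRUSTEE_IS_SID;
    ea[1].Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
    ea[1].Trustee.ptstrName = (LPTSTR)pOwnerSID;
}

res = SetEntriesInAcl(2, ea, NULL, &m_pACL);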
Thanks!!
I've been tinkering with the MFRC-522 (RC-522) RFID module for a project.
I'm testing authentication using an old key to check whether the RFID card's key A for a sector (in my case, Sector 2) is different from the original. If it is, I continue; if not, I want to "register" the card by changing the key.
I got caught at the beginning on checking authentication with different keys: if I test a working key and then an incorrect key, it works as expected, but if I test the incorrect key first, it doesn't allow even the correct key to authenticate.
If I run the code below, the serial output repeats
PCD_Authenticate() failed NEW(read): Timeout in communication.
PCD_Authenticate() failed OLD(read): Timeout in communication.
but if I swap old() and neww() I get
OLD WORKS
PCD_Authenticate() failed NEW(read): Timeout in communication.
Why does it behave like this?
#include <SPI.h>
#include <MFRC522.h>

#define RST_PIN 22  // Configurable, see typical pin layout above
#define SS_PIN  21  // Configurable, see typical pin layout above

MFRC522 mfrc522(SS_PIN, RST_PIN);  // Create MFRC522 instance

MFRC522::MIFARE_Key old_key = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
MFRC522::MIFARE_Key new_key = {0x23, 0x54, 0x64, 0x3a, 0x32, 0x66};

void setup() {
  Serial.begin(115200);  // Initialize serial communications with the PC
  while (!Serial);       // Do nothing if no serial port is opened (added for Arduinos based on ATMEGA32U4)
  SPI.begin();           // Init SPI bus
  mfrc522.PCD_Init();    // Init MFRC522
  delay(4);              // Optional delay. Some boards do need more time after init to be ready, see Readme
  mfrc522.PCD_DumpVersionToSerial();  // Show details of PCD - MFRC522 Card Reader details
  Serial.println(F("Scan PICC to see UID, SAK, type, and data blocks..."));
}

void loop() {
  // Reset the loop if no new card is present on the sensor/reader. This saves the entire process when idle.
  if ( ! mfrc522.PICC_IsNewCardPresent()) {
    return;
  }
  // Select one of the cards
  if ( ! mfrc522.PICC_ReadCardSerial()) {
    return;
  }
  neww();
  old();
}

void old() {
  // Authentication of the desired block for access
  byte status = mfrc522.PCD_Authenticate(MFRC522::PICC_CMD_MF_AUTH_KEY_A, 15, &old_key, &(mfrc522.uid));
  if (status != MFRC522::STATUS_OK) {
    Serial.print("PCD_Authenticate() failed OLD(read): ");
    Serial.println(mfrc522.GetStatusCodeName((MFRC522::StatusCode)status));
    return;
  } else {
    Serial.println("OLD WORKS");
  }
  //delay(1000);
  mfrc522.PICC_HaltA();
  mfrc522.PCD_StopCrypto1();
}

void neww() {
  // Authentication of the desired block for access
  byte status_new = mfrc522.PCD_Authenticate(MFRC522::PICC_CMD_MF_AUTH_KEY_A, 15, &new_key, &(mfrc522.uid));
  if (status_new != MFRC522::STATUS_OK) {
    Serial.print("PCD_Authenticate() failed NEW(read): ");
    Serial.println(mfrc522.GetStatusCodeName((MFRC522::StatusCode)status_new));
    return;
  } else {
    Serial.println("NEW WORKS");
  }
  //delay(1000);
  mfrc522.PICC_HaltA();
  mfrc522.PCD_StopCrypto1();
}
So, after reading the datasheet extra hard, I came to the conclusion that the state of the card was not ready for the next read, so I came up with a fire-all-guns solution that helped my case. The Serial prints are for debugging, so if you use the code feel free to comment them out.
bool reselect_Card() {
  //-------------------------------------------------------
  // Can also be used to see if the card is still available:
  // true means it is, false means the card isn't there anymore.
  //-------------------------------------------------------
  byte s;
  byte req_buff[2];
  byte req_buff_size = 2;

  mfrc522.PCD_StopCrypto1();
  s = mfrc522.PICC_HaltA();
  Serial.print("Halt Status: ");
  Serial.println(mfrc522.GetStatusCodeName((MFRC522::StatusCode)s));
  delay(100);

  s = mfrc522.PICC_WakeupA(req_buff, &req_buff_size);
  Serial.print("Request: ");
  Serial.println(mfrc522.GetStatusCodeName((MFRC522::StatusCode)s));
  Serial.print("ATQA : ");
  Serial.println(dump_byte_array_to_string(req_buff, req_buff_size));
  delay(100);

  s = mfrc522.PICC_Select(&(mfrc522.uid), 0);
  Serial.print("Selected : ");
  Serial.println(mfrc522.GetStatusCodeName((MFRC522::StatusCode)s));

  // Compare the status code itself rather than the printable name
  // (comparing two flash-string pointers is not reliable).
  if ((MFRC522::StatusCode)s == MFRC522::STATUS_TIMEOUT) {
    return false;
  }
  return true;
}
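For what it's worth, the way I wire it into the loop (simplified here, and only a sketch of my actual flow) is to re-select the card between the two authentication attempts:

// Sketch: re-select the card after a failed authentication so the next
// attempt doesn't time out. neww()/old() are the functions from the question.
void loop() {
  if (!mfrc522.PICC_IsNewCardPresent()) {
    return;
  }
  if (!mfrc522.PICC_ReadCardSerial()) {
    return;
  }

  neww();                  // may fail on a card that still uses the factory key
  if (!reselect_Card()) {  // wake the card up again after the failed attempt
    return;                // card is no longer in the field
  }
  old();                   // the default key can now authenticate
}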
I am writing a Windows Filtering Platform kernel-mode driver. The goal of the driver is to capture all traffic on a particular layer and communicate that traffic back to user mode so that it can be analyzed further. The driver never needs to block any traffic; the classifyOut is always set to FWP_ACTION_CONTINUE.
The following code is used in my Classify function to queue up the packets that are received.
classifyOut->actionType = FWP_ACTION_CONTINUE;

do
{
    if ((classifyOut->rights & FWPS_RIGHT_ACTION_WRITE) == 0)
    {
        break;
    }

    if (layerData != NULL)
    {
        PNET_BUFFER_LIST netBufferList = (PNET_BUFFER_LIST)layerData;
        PNET_BUFFER netBuffer = NET_BUFFER_LIST_FIRST_NB(netBufferList);

        if (packetQueueSize >= 2048)
        {
            ExInterlockedRemoveHeadList(&packetQueue, &packetQueueLock);
            packetQueueSize--;
        }

        ULONG netBufferSize = NET_BUFFER_DATA_LENGTH(netBuffer);

        PACKET_ITEM* allocatedPacket = InitalizePacketItem(
            netBuffer,
            netBufferSize
        );
        if (allocatedPacket == NULL)
        {
            classifyOut->actionType = FWP_ACTION_BLOCK;
            classifyOut->rights &= ~FWPS_RIGHT_ACTION_WRITE;
            break;
        }

        ExInterlockedInsertTailList(
            &packetQueue,
            &allocatedPacket->listEntry,
            &packetQueueLock
        );
        allocatedPacket = NULL;
        packetQueueSize++;
    }
} while (FALSE);
The PACKET_ITEM struct is defined as follows:
typedef struct _PACKET_ITEM {
    LIST_ENTRY listEntry;
    PVOID data;
    ULONG dataLen;
} PACKET_ITEM;
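InitalizePacketItem isn't shown here; it allocates a PACKET_ITEM and copies the NET_BUFFER contents into it. A simplified sketch of that kind of helper (my real code differs in the details, and the pool tag is just a placeholder):

// Simplified sketch of a per-packet copy helper; 'ITKP' is a placeholder tag.
PACKET_ITEM* InitalizePacketItem(PNET_BUFFER netBuffer, ULONG netBufferSize)
{
    PACKET_ITEM* item = (PACKET_ITEM*)ExAllocatePoolWithTag(
        NonPagedPoolNx, sizeof(PACKET_ITEM), 'ITKP');
    if (item == NULL)
    {
        return NULL;
    }

    item->dataLen = netBufferSize;
    item->data = ExAllocatePoolWithTag(NonPagedPoolNx, netBufferSize, 'ITKP');
    if (item->data == NULL)
    {
        ExFreePoolWithTag(item, 'ITKP');
        return NULL;
    }

    // NdisGetDataBuffer returns a pointer to contiguous packet data, copying it
    // into item->data only when the NET_BUFFER is fragmented across MDLs.
    PVOID contiguous = NdisGetDataBuffer(netBuffer, netBufferSize, item->data, 1, 0);
    if (contiguous == NULL)
    {
        ExFreePoolWithTag(item->data, 'ITKP');
        ExFreePoolWithTag(item, 'ITKP');
        return NULL;
    }
    if (contiguous != item->data)
    {
        RtlCopyMemory(item->data, contiguous, netBufferSize);
    }

    return item;
}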
I am using the inverted call model to communicate this packet data from kernel mode to user mode. The following code is used in the kernel driver once it detects the correct IOCTL has been sent.
status = WdfRequestRetrieveOutputBuffer(request, 0, &buffer, &bufferSize);
if (!NT_SUCCESS(status))
{
    break;
}

PLIST_ENTRY listEntry = ExInterlockedRemoveHeadList(&packetQueue, &packetQueueLock);
if (listEntry == NULL)
{
    break;
}

PACKET_ITEM* packetItem = CONTAINING_RECORD(
    listEntry,
    struct _PACKET_ITEM,
    listEntry
);

RtlCopyMemory(
    buffer,
    packetItem->data,
    packetItem->dataLen);

status = STATUS_SUCCESS;
WdfRequestCompleteWithInformation(
    request,
    status,
    packetItem->dataLen
);

FreePacketItem(packetItem);
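On the user-mode side the inverted call is essentially an IOCTL that I keep re-issuing, roughly like the sketch below, where the device name and IOCTL code are placeholders rather than my actual values:

// Sketch of the user-mode pump. IOCTL_GET_PACKET and the device name are
// placeholders; the real driver defines its own control code and symbolic link.
#include <windows.h>
#include <winioctl.h>
#include <cstdio>

#define IOCTL_GET_PACKET CTL_CODE(FILE_DEVICE_UNKNOWN, 0x800, METHOD_BUFFERED, FILE_ANY_ACCESS)

int main()
{
    HANDLE hDevice = CreateFileW(L"\\\\.\\MyWfpCapture", GENERIC_READ | GENERIC_WRITE,
                                 0, NULL, OPEN_EXISTING, 0, NULL);
    if (hDevice == INVALID_HANDLE_VALUE)
    {
        return 1;
    }

    BYTE packet[65536];
    DWORD bytesReturned = 0;
    for (;;)
    {
        // Each call parks a request in the driver; it completes when a packet
        // has been queued and copied into the output buffer.
        if (!DeviceIoControl(hDevice, IOCTL_GET_PACKET, NULL, 0,
                             packet, sizeof(packet), &bytesReturned, NULL))
        {
            break;
        }
        printf("received %lu bytes\n", bytesReturned);
    }

    CloseHandle(hDevice);
    return 0;
}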
The driver-side code seems to slow the network down greatly after a short while, causing timeouts when trying to load websites in a web browser, for example.
I assume this is being caused by the spinlocks and the sheer volume of packets being transferred across the network that are being captured by this driver.
My questions are the following:
Is the spinlock likely to be causing my problems here? If so:
Is it possible to set classifyOut->actionType immediately and return that value before allocating any memory and copying the data into my queue? I assume this would prevent the slowdown from happening.
What else should I be doing differently to prevent this?
If not:
What is actually causing the slowdown here?
I am working on two completely separate applications that need to use System V shared memory as a means of IPC. After reading the Linux man page, it seems I will have to provide both applications with an address hint in order to guarantee that they attach at exactly the same memory location. I will be able to (almost) guarantee that they both have the same shmid, as described below. So I was wondering: 1. If NULL is passed as the second parameter and 0 as the third, can I be 100% certain that the system will attach both applications at the same starting location in memory, given the same shmid? And 2. If not, is there a way to figure out at runtime which addresses the system is using for shared memory, so that both applications can use an address hint that won't cause shmat to fail?
Example of code being used:
typedef struct
{
    uint8_t dataBuffer[SHARED_MEM_BUFFER_SIZE]; // 8-byte char array
} SharedData;

typedef struct
{
    int32_t dataIndex;
    SharedData data;
} SharedDataStructure;

bool initialize()
{
    // Parse JSON file for key gen file path and char
    auto keyGenFilePath = ... // Parsed file path
    auto keyGenChar = ...     // Parsed char

    // Both applications will be reading the exact same JSON file, to ensure
    // they both receive the same key.
    key_t sharedMemKey = ftok(keyGenFilePath.c_str(), keyGenChar[0]);
    if (sharedMemKey == -1)
    {
        // Log error
        return false;
    }

    // m_shMemId is an int, m_params is a std::vector<SharedDataStructure>
    m_shMemId = shmget(sharedMemKey, m_params.size() * sizeof(SharedData), IPC_CREAT | 0666);
    if (m_shMemId == -1)
    {
        // Log error
        return false;
    }

    // m_attachedSharedMem is a SharedData pointer
    m_attachedSharedMem = (SharedData *)shmat(m_shMemId, NULL, 0);
    if (m_attachedSharedMem == (void *)-1)
    {
        // Log error
        return false;
    }

    // Zero out shared memory
    return true;
}
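For reference, the variant I was considering for question 2 passes an explicit address hint; the address below is purely illustrative, not one I know to be safe on the target system:

// Hypothetical: attach at a fixed address hint in both processes. SHM_RND
// (from <sys/shm.h>) rounds the hint down to the nearest SHMLBA boundary;
// the attach fails if that range is already in use in the process.
void *const kShmHint = (void *)0x7f0000000000UL; // illustrative value only
m_attachedSharedMem = (SharedData *)shmat(m_shMemId, kShmHint, SHM_RND);
if (m_attachedSharedMem == (void *)-1)
{
    // Log error
    return false;
}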
Also, please note that both applications will initialize their shared memory this way (only one will zero the memory). This will also be running on a very bare-bones system, so these two applications WILL be the only applications using shared memory outside of the OS. Also, using POSIX shared memory is not an option, not because of system limitations, but due to other factors.
I apologize for not being able to provide a copy-paste compilable example; the application(s) need to be highly configurable to avoid having to change code in the future.
I am currently in the process of writing an application using OPAL that makes H.323 calls. I have looked online for a number of working examples and have managed to put together something that resembles what it should look like. At present I am able to call an external IP via my application, but when I accept the call it craps out and dies.
Leaving me with:
0:12.949 SetUpCall:8752 as
sert.cxx(259) Assertion fail: Transport not terminated when reattaching thread
, file d:\voip\software\opal\src\opal\transports.cxx, line 1021
PAssertFunc(0xde3f88, 0xffffffffd228226f, 0, 0) + 0x82
PAssertFunc(0x10d6e5f8, 0x3fd, 0, 0x10d6eb98) + 0x15b
OpalTransport::AttachThread(0xde5870, 0xffffffffd22fc76b, 0, 0) + 0x96
H323Connection::SetUpConnection(0xffffffffd22fc713, 0, 0, 0xde6ea8) + 0x196
AsynchCallSetUp(0x10d3572c, 0, 0, 0xdd0108) + 0x7c
PThread1Arg<PSafePtr<OpalConnection,PSafePtrBase> >::Main(0xffffffffd2282faf
From what I have deduced, perhaps incorrectly, there is a thread locking issue (possibly arising from the fact that my application is sending my expected H.323 call, but is also throwing in a call from sip:fred straight after, and I have no idea why).
My current endpoint is set up as follows:
bool OpalManagerLite::Init()
{
    h323EP = new MyH323EndPoint(*this);

    SetAudioJitterDelay(20, 1000);
    DisableDetectInBandDTMF(true);

    if (!InitEndpoint(h323EP))
    {
        return false;
    }

    if (h323EP != NULL) {
        AddRouteEntry("pc:.* = h323:<da>");
        AddRouteEntry("h323:.* = pc:<da>");
        AddRouteEntry("pots:.*\\*.*\\*.* = h323:<dn2ip>");
        AddRouteEntry("h323:.*\\*.*\\*.* = pots:<dn2ip>");
        AddRouteEntry("h323:.* = pots:<da>");
        AddRouteEntry("pots:.* = h323:<da>");
    }

    PString defaultSrcEP = "pc:*";
    srcEP = defaultSrcEP;

    return true;
}

bool OpalManagerLite::InitEndpoint(MyH323EndPoint * h323EP)
{
    h323EP->DisableFastStart(true);
    h323EP->DisableH245Tunneling(true);
    h323EP->ForceSymmetricTCS(true);
    h323EP->SetLocalUserName("Ronnie");
    h323EP->SetDefaultLocalPartyName("Ronnie");

    PStringArray listeners; // Default listeners opened

    // Note: this listener object is created but never handed to StartListeners below.
    OpalListenerTCP *toListen = new OpalListenerTCP(*h323EP,
        PIPSocket::GetDefaultIpAny(), NULL, NULL);

    if (!h323EP->StartListeners(listeners))
    {
        return false;
    }
    return true;
}
The call itself is made as you would expect, e.g. SetUpCall(srcEP, ip, callToken);
Any help as to why this is occurring would be greatly appreciated. I know I must have tripped up somewhere, I just cannot see where.