Generate Text File Upon Button Action and Save to Local Drive Using Acumatica

I am trying to generate a text file from an action button, using the contents of a data view created in a graph class, and save the file to my local drive, but I am unable to do it.
Please help me with the file generation. Thanks.
I am using Acumatica 2019 R2 (v 19.203.0042).
My code goes here:
public PXSelect<MayBankGIRO> Document; //this is my dataview
public PXAction<MayBankGiroFilter> createTextFile;
[PXUIField(DisplayName = "Create Text File")]
[PXButton()]
public virtual IEnumerable CreateTextFile(PXAdapter adapter)
{
string filepath = "C:\\Subhashish Dawn";
System.IO.StreamWriter sw = new System.IO.StreamWriter(filepath);
MayBankGIRO giroObject = this.Document.Current;
List<object> myListObject = new List<object> { };
FixedLengthFile flatFile = new FixedLengthFile();
foreach (MayBankGIRO dacRecord in this.Document.Select())
{
if (giroObject.ReordType == "00")
{
myListObject.Add(dacRecord.ReordType + "|" + dacRecord.CorporateID + "|" + dacRecord.ClientBatchID + "|");
}
else
{
myListObject.Add(dacRecord.ReordType + "|" + dacRecord.CorporateID + "|" + dacRecord.ClientBatchID + "|" + dacRecord.Country + "|");
string data = dacRecord.ReordType;
}
this.Document.Update(dacRecord);
}
flatFile.WriteToFile(myListObject, sw);
sw.Flush();
sw.FlushAsync();
string path = "DAWN" + ".txt";
PX.SM.FileInfo file = new PX.SM.FileInfo(Guid.NewGuid(), path, null, System.Text.Encoding.UTF8.GetBytes(**path**)); // what shall I substitute in place of **path**?
throw new PXRedirectToFileException(file, true);
}
Can anyone please specify what changes I have to make in the above code?

I utilize UploadFileMaintenance to do this. I'm not sure if this will meet your needs, but here is the core of my code that works for me.
byte[] labelBytes = Encoding.ASCII.GetBytes(myLabelData);
if(labelBytes.Length > 0)
{
string filename = "label-" + Guid.NewGuid().ToString() + ".txt";
PX.SM.FileInfo labelFileInfo = new FileInfo(filename, null, labelBytes);
UploadFileMaintenance upload = PXGraph.CreateInstance<UploadFileMaintenance>();
if (upload.SaveFile(labelFileInfo))
{
string targetUrl = PXRedirectToFileException.BuildUrl(labelFileInfo.UID);
throw new PXRedirectToUrlException(targetUrl, "Print Labels");
}
}

Assuming your Document object is the view you need and its Select() method returns all the data records you need inside your file, this should work:
// We need at least these, listing them for reference
using PX.Data;
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
// This is your dataview
public PXSelect<MayBankGIRO> Document;
// Your action delegate
public PXAction<MayBankGiroFilter> createTextFile;
[PXUIField(DisplayName = "Create Text File")]
[PXButton()]
public virtual IEnumerable CreateTextFile(PXAdapter adapter)
{
// You can use this method to print debug information for your customizations
// Just remove when you are done testing
PXTrace.WriteInformation("Generating records");
// We will build the content as a string list first
List<string> myList = new List<string> { };
// If the value of 'ReordType' can change for each record, you don't need this
MayBankGIRO giroObject = this.Document.Current;
foreach (MayBankGIRO dacRecord in this.Document.Select())
{
// Does 'ReordType' change for each record?
// if it does you may need to use 'dacRecord.ReordType' in this if instead
if (giroObject.ReordType == "00")
{
// This only works if all these members are strings or can be cast to strings
myList.Add(dacRecord.ReordType + "|" + dacRecord.CorporateID + "|" + dacRecord.ClientBatchID + "|");
}
else
{
// This only works if all these members are strings or can be cast to strings
myList.Add(dacRecord.ReordType + "|" + dacRecord.CorporateID + "|" + dacRecord.ClientBatchID + "|" + dacRecord.Country + "|");
}
}
PXTrace.WriteInformation("Generating file");
// Set the name
string filename = "DAWN" + ".txt";
// Use our download method
Download(myList, filename);
// The action delegate must return the adapter's rows
return adapter.Get();
}
// We can define a static method to be able to reuse this later for other DACs
public static void Download(List<string> lines, string name)
{
var bytes = default(byte[]);
// Write all lines to stream
using (MemoryStream stream = new MemoryStream())
{
StreamWriter sw = new StreamWriter(stream);
foreach (string line in lines)
{
sw.WriteLine(line);
}
sw.Close();
stream.Position = 0;
bytes = stream.ToArray();
};
// Save content to file object
PX.SM.FileInfo textDoc = new PX.SM.FileInfo(name, null, bytes);
if (textDoc != null)
{
// Trigger file download
throw new PXRedirectToFileException(textDoc, true);
} else {
//TODO: You could raise an exception here also to notify the user
PXTrace.WriteInformation("Could not generate file");
}
}

Hi Markoan, your code helped me create the text file, but the content of the text file is getting repeated with the first record of the data view. My record type has only three values: "00" for the first record, "01" for the 2nd to (n-1)th records, and "99" for the nth record.
I have made a few changes to your code:
// We need at least these, listing them for reference
using PX.Data;
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
// This is your dataview
public PXSelect<MayBankGIRO> Document;
// Your action delegate
public PXAction<MayBankGiroFilter> createTextFile;
[PXUIField(DisplayName = "Create Text File")]
[PXButton()]
public virtual IEnumerable CreateTextFile(PXAdapter adapter)
{
// You can use this method to print debug information for your customizations
// Just remove when you are done testing
PXTrace.WriteInformation("Generating records");
// We will build the content as a string list first
List<string> myList = new List<string> { };
// If the value of 'ReordType' can change for each record, you don't need this
MayBankGIRO giroObject = this.Document.Current;
foreach (MayBankGIRO dacRecord in this.Document.Select())
{
// Does 'ReordType' change for each record?
// if it does you may need to use 'dacRecord.ReordType' in this if instead
myList.Add(dacRecord.ReordType + "|" + dacRecord.CustomerReferenceNumber + "|" + dacRecord.ClientBatchID + "|" + dacRecord.Country + "|");
}
PXTrace.WriteInformation("Generating file");
// Set the name
string filename = "DAWN" + ".txt";
// Use our download method
Download(myList, filename);
// The action delegate must return the adapter's rows
return adapter.Get();
}
// We can define a static method to be able to reuse this later for other DACs
public static void Download(List<string> lines, string name)
{
var bytes = default(byte[]);
// Write all lines to stream
using (MemoryStream stream = new MemoryStream())
{
StreamWriter sw = new StreamWriter(stream);
foreach (string line in lines)
{
sw.WriteLine(line);
}
// Flush the writer before reading the stream; closing it here also closed the
// underlying stream, which caused a "Cannot access a closed stream" error
sw.Flush();
stream.Position = 0;
bytes = stream.ToArray();
sw.Close();
};
// Save content to file object
PX.SM.FileInfo textDoc = new PX.SM.FileInfo(name, null, bytes);
if (textDoc != null)
{
// Trigger file download
throw new PXRedirectToFileException(textDoc, true);
} else {
//TODO: You could raise an exception here also to notify the user
PXTrace.WriteInformation("Could not generate file");
}
}

Related

Azure Cognitive Search, how to iterate over facets to bypass 100K limit

I understand the limitation of the API with the 100K limit, and MS's site says as a workaround "you can work around this limitation by adding code to iterate over, and filter on, a facet with less than 100K documents per facet value."
I'm using the "Back up and restore an Azure Cognitive Search index" sample solution provided by MS (https://github.com/Azure-Samples/azure-search-dotnet-samples).
But can someone tell me where or how to implement this "iterate loop" on a facet? The facetable field I'm trying to use is "tributekey", but I don't know where to place the code in the sample below. Any help would be greatly appreciated.
// This is a prototype tool that allows for extraction of data from a search index
// Since this tool is still under development, it should not be used for production usage
using Azure;
using Azure.Search.Documents;
using Azure.Search.Documents.Indexes;
using Azure.Search.Documents.Models;
using Microsoft.Extensions.Configuration;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Http;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
namespace AzureSearchBackupRestore
{
class Program
{
private static string SourceSearchServiceName;
private static string SourceAdminKey;
private static string SourceIndexName;
private static string TargetSearchServiceName;
private static string TargetAdminKey;
private static string TargetIndexName;
private static string BackupDirectory;
private static SearchIndexClient SourceIndexClient;
private static SearchClient SourceSearchClient;
private static SearchIndexClient TargetIndexClient;
private static SearchClient TargetSearchClient;
private static int MaxBatchSize = 500; // JSON files will contain this many documents / file and can be up to 1000
private static int ParallelizedJobs = 10; // Output content in parallel jobs
static void Main(string[] args)
{
//Get source and target search service info and index names from appsettings.json file
//Set up source and target search service clients
ConfigurationSetup();
//Backup the source index
Console.WriteLine("\nSTART INDEX BACKUP");
BackupIndexAndDocuments();
//Recreate and import content to target index
//Console.WriteLine("\nSTART INDEX RESTORE");
//DeleteIndex();
//CreateTargetIndex();
//ImportFromJSON();
//Console.WriteLine("\r\n Waiting 10 seconds for target to index content...");
//Console.WriteLine(" NOTE: For really large indexes it may take longer to index all content.\r\n");
//Thread.Sleep(10000);
//
//// Validate all content is in target index
//int sourceCount = GetCurrentDocCount(SourceSearchClient);
//int targetCount = GetCurrentDocCount(TargetSearchClient);
//Console.WriteLine("\nSAFEGUARD CHECK: Source and target index counts should match");
//Console.WriteLine(" Source index contains {0} docs", sourceCount);
//Console.WriteLine(" Target index contains {0} docs\r\n", targetCount);
//
//Console.WriteLine("Press any key to continue...");
//Console.ReadLine();
}
static void ConfigurationSetup()
{
IConfigurationBuilder builder = new ConfigurationBuilder().AddJsonFile("appsettings.json");
IConfigurationRoot configuration = builder.Build();
SourceSearchServiceName = configuration["SourceSearchServiceName"];
SourceAdminKey = configuration["SourceAdminKey"];
SourceIndexName = configuration["SourceIndexName"];
TargetSearchServiceName = configuration["TargetSearchServiceName"];
TargetAdminKey = configuration["TargetAdminKey"];
TargetIndexName = configuration["TargetIndexName"];
BackupDirectory = configuration["BackupDirectory"];
Console.WriteLine("CONFIGURATION:");
Console.WriteLine("\n Source service and index {0}, {1}", SourceSearchServiceName, SourceIndexName);
Console.WriteLine("\n Target service and index: {0}, {1}", TargetSearchServiceName, TargetIndexName);
Console.WriteLine("\n Backup directory: " + BackupDirectory);
SourceIndexClient = new SearchIndexClient(new Uri("https://" + SourceSearchServiceName + ".search.windows.net"), new AzureKeyCredential(SourceAdminKey));
SourceSearchClient = SourceIndexClient.GetSearchClient(SourceIndexName);
// TargetIndexClient = new SearchIndexClient(new Uri($"https://" + TargetSearchServiceName + ".search.windows.net"), new AzureKeyCredential(TargetAdminKey));
// TargetSearchClient = TargetIndexClient.GetSearchClient(TargetIndexName);
}
static void BackupIndexAndDocuments()
{
// Backup the index schema to the specified backup directory
Console.WriteLine("\n Backing up source index schema to {0}\r\n", BackupDirectory + "\\" + SourceIndexName + ".schema");
File.WriteAllText(BackupDirectory + "\\" + SourceIndexName + ".schema", GetIndexSchema());
// Extract the content to JSON files
int SourceDocCount = GetCurrentDocCount(SourceSearchClient);
WriteIndexDocuments(SourceDocCount); // Output content from index to json files
}
static void WriteIndexDocuments(int CurrentDocCount)
{
// Write document files in batches (per MaxBatchSize) in parallel
string IDFieldName = GetIDFieldName();
int FileCounter = 0;
for (int batch = 0; batch <= (CurrentDocCount / MaxBatchSize); batch += ParallelizedJobs)
{
List<Task> tasks = new List<Task>();
for (int job = 0; job < ParallelizedJobs; job++)
{
FileCounter++;
int fileCounter = FileCounter;
if ((fileCounter - 1) * MaxBatchSize < CurrentDocCount)
{
Console.WriteLine(" Backing up source documents to {0} - (batch size = {1})", BackupDirectory + "\\" + SourceIndexName + fileCounter + ".json", MaxBatchSize);
tasks.Add(Task.Factory.StartNew(() =>
ExportToJSON((fileCounter - 1) * MaxBatchSize, IDFieldName, BackupDirectory + "\\" + SourceIndexName + fileCounter + ".json")
));
}
}
Task.WaitAll(tasks.ToArray()); // Wait for all the stored procs in the group to complete
}
return;
}
static void ExportToJSON(int Skip, string IDFieldName, string FileName)
{
// Extract all the documents from the selected index to JSON files in batches of 500 docs / file
string json = string.Empty;
try
{
SearchOptions options = new SearchOptions()
{
SearchMode = SearchMode.All,
Size = MaxBatchSize,
Skip = Skip,
// ,IncludeTotalCount = true
// ,Filter = Azure.Search.Documents.SearchFilter.Create('%24top=2&%24skip=0&%24orderby=tributeId%20asc')
//,Filter = String.Format("&search=*&%24top=2&%24skip=0&%24orderby=tributeId%20asc")
//,Filter = "%24top=2&%24skip=0&%24orderby=tributeId%20asc"
//,Filter = "tributeKey eq '5'"
};
SearchResults<SearchDocument> response = SourceSearchClient.Search<SearchDocument>("*", options);
foreach (var doc in response.GetResults())
{
json += JsonSerializer.Serialize(doc.Document) + ",";
json = json.Replace("\"Latitude\":", "\"type\": \"Point\", \"coordinates\": [");
json = json.Replace("\"Longitude\":", "");
json = json.Replace(",\"IsEmpty\":false,\"Z\":null,\"M\":null,\"CoordinateSystem\":{\"EpsgId\":4326,\"Id\":\"4326\",\"Name\":\"WGS84\"}", "]");
json += "\r\n";
}
// Output the formatted content to a file
json = json.Substring(0, json.Length - 3); // remove trailing comma
File.WriteAllText(FileName, "{\"value\": [");
File.AppendAllText(FileName, json);
File.AppendAllText(FileName, "]}");
Console.WriteLine(" Total documents: {0}", response.GetResults().Count().ToString());
json = string.Empty;
}
catch (Exception ex)
{
Console.WriteLine("Error: {0}", ex.Message.ToString());
}
}
static string GetIDFieldName()
{
// Find the id field of this index
string IDFieldName = string.Empty;
try
{
var schema = SourceIndexClient.GetIndex(SourceIndexName);
foreach (var field in schema.Value.Fields)
{
if (field.IsKey == true)
{
IDFieldName = Convert.ToString(field.Name);
break;
}
}
}
catch (Exception ex)
{
Console.WriteLine("Error: {0}", ex.Message.ToString());
}
return IDFieldName;
}
static string GetIndexSchema()
{
// Extract the schema for this index
// We use REST here because we can take the response as-is
Uri ServiceUri = new Uri("https://" + SourceSearchServiceName + ".search.windows.net");
HttpClient HttpClient = new HttpClient();
HttpClient.DefaultRequestHeaders.Add("api-key", SourceAdminKey);
string Schema = string.Empty;
try
{
Uri uri = new Uri(ServiceUri, "/indexes/" + SourceIndexName);
HttpResponseMessage response = AzureSearchHelper.SendSearchRequest(HttpClient, HttpMethod.Get, uri);
AzureSearchHelper.EnsureSuccessfulSearchResponse(response);
Schema = response.Content.ReadAsStringAsync().Result.ToString();
}
catch (Exception ex)
{
Console.WriteLine("Error: {0}", ex.Message.ToString());
}
return Schema;
}
private static bool DeleteIndex()
{
Console.WriteLine("\n Delete target index {0} in {1} search service, if it exists", TargetIndexName, TargetSearchServiceName);
// Delete the index if it exists
try
{
TargetIndexClient.DeleteIndex(TargetIndexName);
}
catch (Exception ex)
{
Console.WriteLine(" Error deleting index: {0}\r\n", ex.Message);
Console.WriteLine(" Did you remember to set your SearchServiceName and SearchServiceApiKey?\r\n");
return false;
}
return true;
}
static void CreateTargetIndex()
{
Console.WriteLine("\n Create target index {0} in {1} search service", TargetIndexName, TargetSearchServiceName);
// Use the schema file to create a copy of this index
// I like using REST here since I can just take the response as-is
string json = File.ReadAllText(BackupDirectory + "\\" + SourceIndexName + ".schema");
// Do some cleaning of this file to change index name, etc
json = "{" + json.Substring(json.IndexOf("\"name\""));
int indexOfIndexName = json.IndexOf("\"", json.IndexOf("name\"") + 5) + 1;
int indexOfEndOfIndexName = json.IndexOf("\"", indexOfIndexName);
json = json.Substring(0, indexOfIndexName) + TargetIndexName + json.Substring(indexOfEndOfIndexName);
Uri ServiceUri = new Uri("https://" + TargetSearchServiceName + ".search.windows.net");
HttpClient HttpClient = new HttpClient();
HttpClient.DefaultRequestHeaders.Add("api-key", TargetAdminKey);
try
{
Uri uri = new Uri(ServiceUri, "/indexes");
HttpResponseMessage response = AzureSearchHelper.SendSearchRequest(HttpClient, HttpMethod.Post, uri, json);
response.EnsureSuccessStatusCode();
}
catch (Exception ex)
{
Console.WriteLine(" Error: {0}", ex.Message.ToString());
}
}
static int GetCurrentDocCount(SearchClient searchClient)
{
// Get the current doc count of the specified index
try
{
SearchOptions options = new SearchOptions()
{
SearchMode = SearchMode.All,
IncludeTotalCount = true
};
SearchResults<Dictionary<string, object>> response = searchClient.Search<Dictionary<string, object>>("*", options);
return Convert.ToInt32(response.TotalCount);
}
catch (Exception ex)
{
Console.WriteLine(" Error: {0}", ex.Message.ToString());
}
return -1;
}
static void ImportFromJSON()
{
Console.WriteLine("\n Upload index documents from saved JSON files");
// Take JSON file and import this as-is to target index
Uri ServiceUri = new Uri("https://" + TargetSearchServiceName + ".search.windows.net");
HttpClient HttpClient = new HttpClient();
HttpClient.DefaultRequestHeaders.Add("api-key", TargetAdminKey);
try
{
foreach (string fileName in Directory.GetFiles(BackupDirectory, SourceIndexName + "*.json"))
{
Console.WriteLine(" -Uploading documents from file {0}", fileName);
string json = File.ReadAllText(fileName);
Uri uri = new Uri(ServiceUri, "/indexes/" + TargetIndexName + "/docs/index");
HttpResponseMessage response = AzureSearchHelper.SendSearchRequest(HttpClient, HttpMethod.Post, uri, json);
response.EnsureSuccessStatusCode();
}
}
catch (Exception ex)
{
Console.WriteLine(" Error: {0}", ex.Message.ToString());
}
}
}
}
I tried adding a filter option in the ExportToJSON method but the request fails

azure search work around $skip limit

I'm doing a job to check if all records from my database exist in Azure Search (around 610k). However, there's a 100,000 limit with the $skip parameter. Is there a way to work around this limit?
You cannot page over more than 100K documents; however, you can use facets to work around this. For example, let's say you have a facet called Country and no single facet value has more than 100K documents. You can iterate over all documents where Country == 'Canada', then over all documents where Country == 'USA', and so on.
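A minimal sketch of that idea with the Azure.Search.Documents SDK, assuming 'searchClient' is an initialized SearchClient and 'Country' is a filterable, facetable field with fewer than 100K documents per value (substitute your own field, e.g. tributekey); this is an illustration, not the sample's exact code:
// Needs: using Azure.Search.Documents; using Azure.Search.Documents.Models; using System.Linq;
// 1) Get the list of facet values (up to 1000 distinct values here).
var facetOptions = new SearchOptions { Size = 0 };
facetOptions.Facets.Add("Country,count:1000");
SearchResults<SearchDocument> facetResponse = searchClient.Search<SearchDocument>("*", facetOptions);
// 2) Page through each facet partition separately, so no single query needs $skip > 100K.
foreach (FacetResult facet in facetResponse.Facets["Country"])
{
    string value = facet.AsValueFacetResult<string>().Value;   // beware of quotes in values
    var options = new SearchOptions { Filter = $"Country eq '{value}'", Size = 1000 };
    int skip = 0;
    while (true)
    {
        options.Skip = skip;
        SearchResults<SearchDocument> page = searchClient.Search<SearchDocument>("*", options);
        var results = page.GetResults().ToList();
        if (results.Count == 0) break;
        // ... process the documents here ...
        skip += results.Count;
    }
}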
Just to clarify the other answers: you can't bypass the limit directly but you can use a workaround.
Here's what you can do:
1) Add a unique field to the index. The contents can be a modification timestamp (if it's granular enough to make it unique) or for example a running number. Or alternatively you can use some existing unique field for this.
2) Take the first 100000 results from the index ordered by your unique field
3) Check what is the maximum value (if ordering ascending) in the results for your unique field - so the value of the last entry
4) Take the next 100000 results by ordering on the same unique field and adding a filter which takes only results where the unique field's value is bigger than the previous maximum. This way the same first 100000 values are not returned and we get the next 100000 values.
5) Continue until you have all results
The downside is that you can't use other custom ordering with the results unless you do the ordering after getting the results.
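A rough sketch of that loop with the Azure.Search.Documents SDK, assuming 'searchClient' is an initialized SearchClient and 'id' is the unique, filterable, sortable field (the field name is only an example):
// Needs: using Azure.Search.Documents; using Azure.Search.Documents.Models; using System.Linq;
string lastId = null;
while (true)
{
    var options = new SearchOptions
    {
        Size = 1000,
        // No $skip at all: the filter moves the window past the last value seen
        Filter = lastId == null ? null : $"id gt '{lastId}'"
    };
    options.OrderBy.Add("id asc");
    SearchResults<SearchDocument> page = searchClient.Search<SearchDocument>("*", options);
    var results = page.GetResults().ToList();
    if (results.Count == 0) break;
    // ... process the documents here ...
    lastId = results[results.Count - 1].Document.GetString("id");
}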
I use metadata_storage_last_modified as the filter, and the following is my example.
offset      skip       action
0           0
100,000     100,000    getLastTime
101,000     0          useLastTime
200,000     99,000     useLastTime
201,000     100,000    useLastTime & getLastTime
202,000     0          useLastTime
Because the Skip limit is 100k, we can calculate skip by:
AzureSearchSkipLimit = 100k
AzureSearchTopLimit = 1k
skip = offset % (AzureSearchSkipLimit + AzureSearchTopLimit)
If the total search count is larger than AzureSearchSkipLimit, then apply
orderby = "metadata_storage_last_modified desc"
When skip reaches AzureSearchSkipLimit, get the metadata_storage_last_modified time from the end of the data and use it as the filter for the next 100k of the search:
filter = metadata_storage_last_modified lt ${metadata_storage_last_modified}
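A self-contained sketch of that arithmetic (the constants and field name come from the answer above; the class and method names are illustrative only):
using System;
class SkipWindowDemo
{
    const int AzureSearchSkipLimit = 100_000;   // $skip cap per query window
    const int AzureSearchTopLimit = 1_000;      // $top page size
    // skip to use within the current window, as described above
    static int SkipFor(int offset) => offset % (AzureSearchSkipLimit + AzureSearchTopLimit);
    static void Main()
    {
        Console.WriteLine(SkipFor(0));         // 0
        Console.WriteLine(SkipFor(100_000));   // 100000 -> getLastTime: remember the last metadata_storage_last_modified returned
        Console.WriteLine(SkipFor(101_000));   // 0      -> useLastTime: filter = metadata_storage_last_modified lt <remembered value>
        Console.WriteLine(SkipFor(200_000));   // 99000
        Console.WriteLine(SkipFor(201_000));   // 100000 -> useLastTime & getLastTime again
        Console.WriteLine(SkipFor(202_000));   // 0
    }
}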

How to read data from a CSV file into a C# console application

using System;
namespace jagged_array
{
class Program
{
static void Main(string[] args)
{
string[][] Members = new string[10][]{
new string[]{"amit","amit#gmail.com", "9999999999"},
new string[]{"chandu","chandu#gmail.com","8888888888"},
new string[]{"naveen","naveen#gmail.com", "7777777777"},
new string[]{"ramu","ramu#gmail.com", "6666666666"},
new string[]{"durga","durga#gmail.com", "5555555555"},
new string[]{"sagar","sagar#gmail.com", "4444444444"},
new string[]{"yadav","yadav#gmail.com", "3333333333"},
new string[]{"suraj","suraj#gmail.com", "2222222222"},
new string[]{"niharika","niharika#gmail.com","11111111111"},
new string[]{"anusha","anusha#gmail.com", "0000000000"},
};
for (int i =0; i < Members.Length; i++)
{
System.Console.Write("Name List ({0}):", i + 1);
for (int j = 0; j < Members[i].Length; j++)
{
System.Console.Write(Members[i][j] + "\t");
}
System.Console.WriteLine();
}
Console.ReadKey();
}
}
}
The above is the code for my C# console program, in which I used a jagged array and assigned the values manually. Now my requirement is to import the same details into the program from a CSV file (located somewhere on my disk) instead of assigning them manually to the array. How do I do it, and what functions should I use? Please help me with an example. Thank you.
// Requires: using System; using System.Data; using Microsoft.VisualBasic.FileIO;
// (TextFieldParser is in the Microsoft.VisualBasic assembly)
static void Main()
{
string csv_file_path = @"C:\Users\Administrator\Desktop\test.csv";
DataTable csvData = GetDataTabletFromCSVFile(csv_file_path);
Console.WriteLine("Rows count:" + csvData.Rows.Count);
Console.ReadLine();
}
private static DataTable GetDataTabletFromCSVFile(string csv_file_path)
{
DataTable csvData = new DataTable();
try
{
using(TextFieldParser csvReader = new TextFieldParser(csv_file_path))
{
csvReader.SetDelimiters(new string[] { "," });
csvReader.HasFieldsEnclosedInQuotes = true;
string[] colFields = csvReader.ReadFields();
foreach (string column in colFields)
{
DataColumn datecolumn = new DataColumn(column);
datecolumn.AllowDBNull = true;
csvData.Columns.Add(datecolumn);
}
while (!csvReader.EndOfData)
{
string[] fieldData = csvReader.ReadFields();
//Making empty value as null
for (int i = 0; i < fieldData.Length; i++)
{
if (fieldData[i] == "")
{
fieldData[i] = null;
}
}
csvData.Rows.Add(fieldData);
}
}
}
catch (Exception ex)
{
// Swallowing the exception hides parse errors; at minimum log or rethrow here
}
return csvData;
}
Treat the CSV file like an Excel workbook and you will find a lot of examples on the web for what you need to do. (The snippet below uses a third-party spreadsheet library.)
ExcelFile ef = new ExcelFile();
// Loads file.
ef.LoadCsv("filename.csv");
// Selects first worksheet.
ExcelWorksheet ws = ef.Worksheets[0];
I won't go into details, but you can read the lines of text from a file with File.ReadAllLines.
Once you have those lines, you can split them into parts using String.Split (at least this will work if the CSV file contains very simple information as in your example).
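A minimal sketch of that approach for the members data above, assuming a simple comma-separated file with no quoted fields (the path is just an example):
using System;
using System.IO;
class ReadCsvIntoJaggedArray
{
    static void Main()
    {
        // Example path; point this at your own CSV file
        string csvPath = @"C:\temp\members.csv";
        // One member per line, e.g.: amit,amit@gmail.com,9999999999
        string[] lines = File.ReadAllLines(csvPath);
        string[][] members = new string[lines.Length][];
        for (int i = 0; i < lines.Length; i++)
        {
            // Naive split: does not handle commas inside quoted fields
            members[i] = lines[i].Split(',');
        }
        for (int i = 0; i < members.Length; i++)
        {
            Console.Write("Name List ({0}):", i + 1);
            foreach (string field in members[i])
            {
                Console.Write(field + "\t");
            }
            Console.WriteLine();
        }
    }
}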

C# create configuration file

How can I create a configuration file for this code so that I can change the connection string easily?
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using MySql.Data.MySqlClient;
using System.Web;
using mshtml;
namespace tabcontrolweb
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
string MyConString = "SERVER=192.168.0.78;" +
"DATABASE=webboard;" +
"UID=aimja;" +
"PASSWORD=aimjawork;" +
"charset=utf8;";
MySqlConnection connection = new MySqlConnection(MyConString);
MySqlCommand command = connection.CreateCommand();
MySqlDataReader Reader;
command.CommandText = "SELECT urlwebboard FROM `listweb` WHERE `urlwebboard` IS NOT NULL AND ( `webbordkind` = 'เว็บท้องถิ่น' ) and `nourl`= 'n' order by province, amphore limit 4 ";
connection.Open();
Reader = command.ExecuteReader();
string[] urls = new string[4];
string thisrow = "";
string sumthisrow = "";
while (Reader.Read())
{
thisrow = "";
for (int i = 0; i < Reader.FieldCount; i++)
{
thisrow += Reader.GetValue(i).ToString();
System.IO.File.AppendAllText(@"C:\file.txt", thisrow + " " + Environment.NewLine);
sumthisrow = Reader.GetValue(Reader.FieldCount - 1).ToString();
}
for (int m = 0; m < 4 ; m++)
{
urls[m] = sumthisrow;
MessageBox.Show(urls[m]);
}
webBrowser1.Navigate(new Uri(urls[0]));
webBrowser1.Dock = DockStyle.Fill;
webBrowser2.Navigate(new Uri(urls[1]));
webBrowser2.Dock = DockStyle.Fill;
webBrowser3.Navigate(new Uri(urls[2]));
webBrowser3.Dock = DockStyle.Fill;
webBrowser4.Navigate(new Uri(urls[3]));
webBrowser4.Dock = DockStyle.Fill;
}
connection.Close();
}
private void webBrowser1_DocumentCompleted(object sender, WebBrowserDocumentCompletedEventArgs e)
{
//if (webBrowser1.Document != null)
//{
// IHTMLDocument2 document = webBrowser1.Document.DomDocument as IHTMLDocument2;
// if (document != null)
// {
// IHTMLSelectionObject currentSelection = document.selection;
// IHTMLTxtRange range = currentSelection.createRange() as IHTMLTxtRange;
// if (range != null)
// {
// const String search = "We";
// if (range.findText(search, search.Length, 2))
// {
// range.select();
// }
// }
// }
//}
}
}
}
You can create an XML configuration file looking like this :
<db-config>
<server>192.168.0.78</server>
<database>webboard</database>
<...>...</...>
</db-config>
Then, use XmlTextReader to parse it.
Here is a basic example of how you can use it to parse XML files :
using System;
using System.Xml;
namespace ReadXMLfromFile
{
/// <summary>
/// Summary description for Class1.
/// </summary>
class Class1
{
static void Main(string[] args)
{
XmlTextReader reader = new XmlTextReader ("books.xml");
while (reader.Read())
{
switch (reader.NodeType)
{
case XmlNodeType.Element: // The node is an element.
Console.Write("<" + reader.Name);
Console.WriteLine(">");
break;
case XmlNodeType.Text: //Display the text in each element.
Console.WriteLine (reader.Value);
break;
case XmlNodeType.EndElement: //Display the end of the element.
Console.Write("</" + reader.Name);
Console.WriteLine(">");
break;
}
}
Console.ReadLine();
}
}
}
Hint: use the ReadToFollowing() method to get your values.
Once your XML DB Config Reader class is done, you use it each time you need a new connection, and you'll only need to change your DB Config XML file to change your DB Connections configuration.
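A minimal reader along those lines, assuming the db-config format above is saved next to the executable as db-config.xml (the file and element names are just the example's):
using System;
using System.Xml;
class DbConfigReader
{
    static void Main()
    {
        string server = null, database = null;
        using (XmlTextReader reader = new XmlTextReader("db-config.xml"))
        {
            // ReadToFollowing positions the reader on the next element with that name
            if (reader.ReadToFollowing("server"))
                server = reader.ReadElementContentAsString();
            if (reader.ReadToFollowing("database"))
                database = reader.ReadElementContentAsString();
        }
        // Build the connection string from the values read above
        string myConString = "SERVER=" + server + ";DATABASE=" + database + ";";
        Console.WriteLine(myConString);
    }
}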
Edit : there is an interesting article about Storing database connection settings in .NET here.
Use the default
System.Configuration.ConfigurationManager
that C# supports!
See: http://msdn.microsoft.com/en-us/library/bb397750.aspx
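A small sketch of that approach (the key name is illustrative): put the connection string in App.config and read it with ConfigurationManager. This needs a project reference to the System.Configuration assembly.
<!-- App.config -->
<configuration>
  <connectionStrings>
    <add name="webboard"
         connectionString="SERVER=192.168.0.78;DATABASE=webboard;UID=aimja;PASSWORD=aimjawork;charset=utf8;" />
  </connectionStrings>
</configuration>
// Form1.cs
using System.Configuration;
using MySql.Data.MySqlClient;
// ...
string MyConString = ConfigurationManager.ConnectionStrings["webboard"].ConnectionString;
MySqlConnection connection = new MySqlConnection(MyConString);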

Generating unique random string in SharePoint document library

I'm working on customizing a SharePoint document library called "Quality Documents" so that when new docs are added to the library, a random and unique number is generated and applied to a field named "Document Number". I coded the feature below, but it's not working. Can anyone see what might be the problem? Nothing happens, no errors, nothing; the page works fine, but no Document Number gets generated. Any suggestions?
using System;
using System.Collections.Generic;
using System.Text;
using Microsoft.SharePoint;
using Microsoft.Office.Server;
using Microsoft.Office.Server.UserProfiles;
namespace QualityDocHandler
{
class DocumentHandler : SPItemEventReceiver
{
/// <summary>
/// Generates a random string with the given length
/// </summary>
/// <param name="size">Size of the string</param>
/// <returns>Random string</returns>
private string RandomString(int size)
{
StringBuilder builder = new StringBuilder();
Random random = new Random();
char ch;
for (int i = 0; i < size; i++)
{
ch = Convert.ToChar(Convert.ToInt32(Math.Floor(26 * random.NextDouble() + 65)));
builder.Append(ch);
}
return builder.ToString();
}
private string createDocNum(SPItemEventProperties properties)
{
int newRnd = 0;
do
{
// set static department
string dept = "QUA";
// set date without separators
string dateString = DateTime.Today.ToString("ddMMyyyy");
// get 1st random string
string Rand1 = RandomString(4);
// get 2nd random string
string Rand2 = RandomString(4);
// create full document number
string docNum = dept + "-" + dateString + "-" + Rand1 + "-" + Rand2;
using (SPWeb oWeb = new SPSite(properties.SiteId).OpenWeb(properties.RelativeWebUrl))
{
SPList oList = oWeb.Lists["Quality Documents"];
//create query
SPQuery oQuery = new SPQuery();
//configure the query //
oQuery.Query = "<Where><Eq><FieldRef Name='Document_x0020_Number' /><Value Type='Text'>" + docNum + "</Value></Eq></Where>";
//get the collection of items in the list
SPListItemCollection oItems = oList.GetItems(oQuery);
if (oItems.Count > 0)
{
newRnd = 0;
}
else
{
newRnd = 1;
}
}
return docNum;
}
while (newRnd < 1);
}
public override void ItemAdded(SPItemEventProperties properties)
{
base.ItemAdded(properties);
}
public override void ItemAdding(SPItemEventProperties properties)
{
string documentNum = createDocNum(properties);
using (SPWeb oWeb = new SPSite(properties.SiteId).OpenWeb(properties.RelativeWebUrl))
{
SPListItem listItem = properties.ListItem;
properties.AfterProperties["Document_x0020_Number"] = documentNum;
listItem.Update();
oWeb.Update();
}
base.ItemAdding(properties);
}
public override void ItemUpdated(SPItemEventProperties properties)
{
base.ItemUpdated(properties);
}
public override void ItemUpdating(SPItemEventProperties properties)
{
base.ItemUpdating(properties);
}
}
}
A few things:
You don't need to get a reference to listItem and use listItem.Update(). Just setting the AfterProperties should be enough.
Prevent the same event from firing multiple times by wrapping your ItemAdding method code with:
this.DisableEventFiring();
try
{
// ...
}
finally
{
this.EnableEventFiring();
}
Run SPDisposeCheck over your code. You might have a memory leak on the SPSite object with new SPSite().OpenWeb().
Have a read of Workarounds for ItemAdding/ItemAdded Event Handlers. I've never had to do this but using the display name instead of internal name may fix the problem.
In case of desperation, use ItemAdded() instead. Get a full reference to the original item and update that.
listItem.Update(); probably throws a NullReferenceException. You can see the error message in the SharePoint log (or by attaching to w3wp), but errors from event receivers will not show up to the end user; they just cancel the event.
Besides, you don't have to call Update on the list item or the web in ItemAdding. And when you're creating an SPWeb for the current web in an event receiver, you could use SPItemEventProperties.OpenWeb() instead. It saves you the "new SPSite()" call, which you actually forgot to dispose in the above code. This could lead to problems if your site is under medium to high load. SPDisposeCheck is a good tool that can be used to find such issues.
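For example, the duplicate-number lookup in createDocNum could open the web from the event properties instead of building a new SPSite. A sketch only (whether the SPWeb returned by OpenWeb() must be disposed depends on the disposal guidance for your SharePoint version, so verify with SPDisposeCheck):
// Inside createDocNum, replacing the "new SPSite(properties.SiteId).OpenWeb(...)" block
SPWeb oWeb = properties.OpenWeb();
SPList oList = oWeb.Lists["Quality Documents"];
SPQuery oQuery = new SPQuery();
oQuery.Query = "<Where><Eq><FieldRef Name='Document_x0020_Number' /><Value Type='Text'>" + docNum + "</Value></Eq></Where>";
SPListItemCollection oItems = oList.GetItems(oQuery);
bool alreadyUsed = oItems.Count > 0;   // same duplicate check as before, without the extra SPSite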
I was able to get this working. Here's the finished code:
using System;
using System.Collections.Generic;
using System.Text;
using Microsoft.SharePoint;
using Microsoft.Office.Server;
using Microsoft.Office.Server.UserProfiles;
namespace QualityDocHandler
{
class DocumentHandler : SPItemEventReceiver
{
private readonly Random _rng = new Random();
private const string _chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
private string RandomString(int size)
{
char[] buffer = new char[size];
for (int i = 0; i < size; i++)
{
buffer[i] = _chars[_rng.Next(_chars.Length)];
}
return new string(buffer);
}
private string createDocNum(SPItemEventProperties properties)
{
int newRnd = 0;
do
{
// set static department
string dept = "QUA";
// set date without separators
string dateString = DateTime.Today.ToString("ddMMyyyy");
// get 1st random string
string Rand1 = RandomString(4);
// get 2nd random string
string Rand2 = RandomString(4);
// create full document number
string docNum = dept + "-" + dateString + "-" + Rand1 + "-" + Rand2;
using (SPWeb oWeb = new SPSite(properties.SiteId).OpenWeb(properties.RelativeWebUrl))
{
SPSiteDataQuery q = new SPSiteDataQuery();
q.Lists = "<Lists BaseType='1'/>";
q.Query = "<Where><Eq><FieldRef Name='Document_x0020_Number' /><Value Type='Text'>" + docNum + "</Value></Eq></Where>";
q.Webs = "<Webs Scope='SiteCollection' />";
q.RowLimit = 1;
System.Data.DataTable spSiteDataQueryResults = oWeb.GetSiteData(q);
if (spSiteDataQueryResults.Rows.Count > 0)
{
newRnd = 0;
}
else
{
newRnd = 1;
}
}
return docNum;
}
while (newRnd < 1);
}
public override void ItemAdded(SPItemEventProperties properties)
{
this.DisableEventFiring();
properties.ListItem["Document Number"] = properties.AfterProperties["Document Number"];
properties.ListItem.SystemUpdate();
this.EnableEventFiring();
}
public override void ItemAdding(SPItemEventProperties properties)
{
string documentNum = createDocNum(properties);
this.DisableEventFiring();
properties.AfterProperties["Document Number"] = documentNum;
this.EnableEventFiring();
}
public override void ItemUpdated(SPItemEventProperties properties)
{
base.ItemUpdated(properties);
}
public override void ItemUpdating(SPItemEventProperties properties)
{
base.ItemUpdating(properties);
}
}
}
