I have the module below, which is part of a larger program for searching text within DOCX files. This is the code for when the search button is clicked:
{
    this.resultListView.Items.Clear();
    try
    {
        foreach (var filePath in Search(this.txtDirectory.Text, this.txtSearch.Text, this.cBoxUseSubdirectories.Checked, this.cBoxCaseSensitive.Checked, this.rBtnRegex.Checked))
        {
            var file = new FileInfo(filePath);
            this.resultListView.Items.Add(new ListViewItem(new string[] { file.Name, string.Format("{0:0.0}", file.Length / 1024d), file.FullName }));
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show(this, string.Format("Exception details:\n{0}", ex), string.Format("Exception '{0}' occurred.", ex.GetType()), MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
}
This is the code for the Search method, which is called with the arguments above:
private static IEnumerable<string> Search(string directory, string searchString, bool searchSubdirectories, bool caseSensitive, bool useRegex)
{
    var isMatch = useRegex ? new Predicate<string>(x => Regex.IsMatch(x, searchString, caseSensitive ? RegexOptions.None : RegexOptions.IgnoreCase))
                           : new Predicate<string>(x => x.IndexOf(searchString, caseSensitive ? StringComparison.Ordinal : StringComparison.OrdinalIgnoreCase) >= 0);
    foreach (var filePath in Directory.GetFiles(directory, "*.docx", searchSubdirectories ? SearchOption.AllDirectories : SearchOption.TopDirectoryOnly))
    {
        string docxText;
        using (var stream = File.Open(filePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
            docxText = new DocxToStringConverter(stream).Convert();
        if (isMatch(docxText))
            yield return filePath;
    }
}
There are other classes which I do not think are relevant here.
The results are populated in a ListView via this:
this.resultListView.Items.Add(new ListViewItem(new string[] { file.Name, string.Format("{0:0.0}", file.Length / 1024d), file.FullName}));
I want to add an extra column to show the file creation date for the found files. I tried adding this:
var fileCreatedDate = File.GetCreationTime(filePath);
then adding file.fileCreatedDate to the this.resultListView.Items.Add call, but it didn't work.
Any suggestions on how to pull the file created date out of the existing code?
Return an IEnumerable of Tuple instances.
private static IEnumerable<Tuple<string, DateTime>> Search(string directory, string searchString, bool searchSubdirectories, bool caseSensitive, bool useRegex)
{
    var isMatch = useRegex ? new Predicate<string>(x => Regex.IsMatch(x, searchString, caseSensitive ? RegexOptions.None : RegexOptions.IgnoreCase))
                           : new Predicate<string>(x => x.IndexOf(searchString, caseSensitive ? StringComparison.Ordinal : StringComparison.OrdinalIgnoreCase) >= 0);
    foreach (var filePath in Directory.GetFiles(directory, "*.docx", searchSubdirectories ? SearchOption.AllDirectories : SearchOption.TopDirectoryOnly))
    {
        string docxText;
        using (var stream = File.Open(filePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
            docxText = new DocxToStringConverter(stream).Convert();
        if (isMatch(docxText))
            yield return new Tuple<string, DateTime>(filePath, System.IO.File.GetCreationTime(filePath));
    }
}
The Tuple will have two properties: Item1 and Item2. Item1 is the path, and Item2 is the creation date.
You could also use your own class in place of the Tuple, but the Tuple was simple to code.
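For completeness, a minimal sketch of how the button-click loop could consume the tuples (the column layout is assumed from the original code, with the date appended as a fourth column):

foreach (var result in Search(this.txtDirectory.Text, this.txtSearch.Text, this.cBoxUseSubdirectories.Checked, this.cBoxCaseSensitive.Checked, this.rBtnRegex.Checked))
{
    var file = new FileInfo(result.Item1); // Item1 is the path
    this.resultListView.Items.Add(new ListViewItem(new string[] {
        file.Name,
        string.Format("{0:0.0}", file.Length / 1024d),
        file.FullName,
        result.Item2.ToString() // Item2 is the creation date
    }));
}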
Through some trial and error I managed to sort the problem. I added this to the initial button-click code:
var fileCreatedDate = File.GetCreationTime(filePath);
and then this:
this.resultListView.Items.Add(new ListViewItem(new string[] { file.Name, string.Format("{0:0.0}", file.Length / 1024d), file.FullName, fileCreatedDate.ToString()}));
The fileCreatedDate.ToString() sorts it.
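Note that ToString() without a format uses the current culture, so the new column will not sort chronologically as text. If that matters, a fixed sortable format is one option (the format choice here is illustrative):

var created = fileCreatedDate.ToString("yyyy-MM-dd HH:mm:ss"); // sorts correctly as a string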
I have a question: I can attach an XML file through an action when I import it, but I also want it saved in the "Files" tab. Is that possible?
Here is an example image: [example image not included]
What is missing is that the file should be attached automatically in the "Files" tab when I upload it.
Here is my code where I attach the XML:
public PXAction<ARInvoice> CargarXML;
[PXUIField(DisplayName = "Carga de código hash")]
[PXButton()]
public virtual void cargarXML()
{
    var Result = NewFilePanel.AskExt(true);
    if (Result == WebDialogResult.OK)
    {
        PX.SM.FileInfo fileInfo = PX.Common.PXContext.SessionTyped<PXSessionStatePXData>().FileInfo["CargaSessionKey"];
        string result = System.Text.Encoding.UTF8.GetString(fileInfo.BinData);
        ARInvoice ari = Base.Document.Current;
        xtFECodHashEntry graph2 = PXGraph.CreateInstance<xtFECodHashEntry>();
        var pchExt = ari.GetExtension<ARRegisterExt>();
        string Valor = "";
        XmlDocument xm = new XmlDocument();
        xm.LoadXml(result);
        XmlNodeList elemList = xm.GetElementsByTagName("ds:DigestValue");
        for (int i = 0; i < elemList.Count;)
        {
            Valor = (elemList[i].InnerXml);
            break;
        }
        PXLongOperation.StartOperation(Base, delegate ()
        {
            xtFECodHash dac = new xtFECodHash();
            dac.RefNbr = ari.RefNbr;
            dac.DocType = ari.DocType;
            dac.Nrocomprobante = ari.InvoiceNbr;
            dac.Hash = Valor;
            dac.Tipo = pchExt.UsrTDocSunat;
            graph2.xtFECodHashs.Insert(dac);
            graph2.Actions.PressSave();
        });
    }
}
If you have the FileInfo object, which it seems like you do, I have used something like this:
protected virtual void SaveFile(FileInfo fileInfo, PXCache cache, object row)
{
    if (fileInfo == null)
    {
        return;
    }
    var fileGraph = PXGraph.CreateInstance<UploadFileMaintenance>();
    if (!fileGraph.SaveFile(fileInfo, FileExistsAction.CreateVersion))
    {
        return;
    }
    if (!fileInfo.UID.HasValue)
    {
        return;
    }
    PXNoteAttribute.SetFileNotes(cache, row, fileInfo.UID.Value);
}
So in your example you could call this method using the following:
SaveFile(fileInfo, Base.Document.Cache, Base.Document.Current);
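For example, in the cargarXML action above, the call could go right after the FileInfo is pulled from the session (a sketch; the exact placement is an assumption):

var Result = NewFilePanel.AskExt(true);
if (Result == WebDialogResult.OK)
{
    PX.SM.FileInfo fileInfo = PX.Common.PXContext.SessionTyped<PXSessionStatePXData>().FileInfo["CargaSessionKey"];
    // ... existing XML parsing and PXLongOperation code ...
    SaveFile(fileInfo, Base.Document.Cache, Base.Document.Current); // attach the uploaded XML to the current ARInvoice
}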
I have created a custom NOTEID field in the INItemLotSerial table to upload images for each serial number on a custom page. It works fine without any issue.
I have written a processing page to update the images for existing serial numbers.
protected void AttachImage(InfoINItemLotSerialImport row, INItemLotSerial ser, InventoryItem itm)
{
    string imageurl = row.ImageSourceUrl;
    string imageType = row.ImageType;
    string sRetVal = string.Empty;
    if (itm != null)
    {
        if (ser != null)
        {
            try
            {
                WebClient wc = new WebClient();
                byte[] buffer = wc.DownloadData(imageurl);
                //MemoryStream ms = new MemoryStream(bytes);
                string fileName = Path.GetFileName(imageurl);
                fileName = string.Format("Serial Number attribute ({0} {1})\\{2}", itm.InventoryID, ser.LotSerialNbr, fileName);
                PX.SM.FileInfo fileinfo = null;
                PX.SM.UploadFileMaintenance filegraph = PXGraph.CreateInstance<PX.SM.UploadFileMaintenance>();
                fileinfo = new PX.SM.FileInfo(fileName, null, buffer);
                fileinfo.IsPublic = true;
                if (filegraph.SaveFile(fileinfo))
                {
                    ItemLotSerialMnt oLotSerMnt = PXGraph.CreateInstance<ItemLotSerialMnt>();
                    oLotSerMnt.lotserailrecdata.Current = ser;
                    oLotSerMnt.lotserial.Current = ser;
                    PXNoteAttribute.SetFileNotes(oLotSerMnt.lotserailrecdata.Cache, oLotSerMnt.lotserailrecdata.Current, fileinfo.UID.Value);
                    PXNoteAttribute.SetNote(oLotSerMnt.lotserailrecdata.Cache, oLotSerMnt.lotserailrecdata.Current, "");
                    sRetVal = fileinfo.Name;
                }
            }
            catch
            {
            }
        }
    }
}
It uploads the data into the UploadFile table and the entry looks fine. I can access the image using the URL, but it is not reflected in the Files section of the serial number page.
How does Acumatica link the file with the screen?
Update 1
#region UsrNoteID
public abstract class usrNoteID : PX.Data.IBqlField
{
}
protected Guid? _UsrNoteID;
[PXNote()]
public virtual Guid? UsrNoteID
{
    get
    {
        return this._UsrNoteID;
    }
    set
    {
        this._UsrNoteID = value;
    }
}
#endregion
Acumatica support helped me fix the issue. I had missed firing the Persist event to save.
if (filegraph.SaveFile(fileinfo))
{
    ItemLotSerialMnt oLotSerMnt = PXGraph.CreateInstance<ItemLotSerialMnt>();
    oLotSerMnt.lotserailrecdata.Current = ser;
    oLotSerMnt.lotserial.Current = ser;
    PXNoteAttribute.SetFileNotes(oLotSerMnt.lotserailrecdata.Cache, oLotSerMnt.lotserailrecdata.Current, fileinfo.UID.Value);
    PXNoteAttribute.SetNote(oLotSerMnt.lotserailrecdata.Cache, oLotSerMnt.lotserailrecdata.Current, "");
    sRetVal = fileinfo.Name;
    oLotSerMnt.Persist();
}
I am using Rotativa to generate PDFs in my MVC application. How can I save the Rotativa PDF? I need to save the document on a server after all the processing is completed.
Code below:
public ActionResult PRVRequestPdf(string refnum, string emid)
{
    var prv = functions.getprvrequest(refnum, emid);
    return View(prv);
}

public ActionResult PDFPRVRequest()
{
    var prv = Session["PRV"] as PRVRequestModel;
    byte[] pdfByteArray = Rotativa.WkhtmltopdfDriver.ConvertHtml("Rotativa", "Approver", "PRVRequestPdf");
    return new Rotativa.ViewAsPdf("PRVRequestPdf", new { refnum = prv.rheader.request.Referenceno });
}
You can give this a try:
var actionResult = new ActionAsPdf("PRVRequestPdf", new { refnum = prv.rheader.request.Referenceno, emid = "Whatever this is" });
var byteArray = actionResult.BuildPdf(ControllerContext);
var fileStream = new FileStream(fullPath, FileMode.Create, FileAccess.Write);
fileStream.Write(byteArray, 0, byteArray.Length);
fileStream.Close();
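The same write wrapped in a using block, so the stream is disposed even if the write throws (fullPath being whatever server location you choose):

using (var fileStream = new FileStream(fullPath, FileMode.Create, FileAccess.Write))
{
    fileStream.Write(byteArray, 0, byteArray.Length);
}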
If that doesn't do the trick, you can follow the answers here.
Just make sure, if you do it this way, not to have PRVRequestPdf return a PDF view, but rather a normal view like you have above (I only mention this because I managed to fall foul of it myself, causing lots of fun).
Another useful answer:
I found the solution here
var actionPDF = new Rotativa.ActionAsPdf("YOUR_ACTION_Method", new { id = ID, lang = strLang }) // some route values
{
    //FileName = "TestView.pdf",
    PageSize = Size.A4,
    PageOrientation = Rotativa.Options.Orientation.Landscape,
    PageMargins = { Left = 1, Right = 1 }
};
byte[] applicationPDFData = actionPDF.BuildPdf(ControllerContext);
This is the original thread
You can achieve this with ViewAsPdf.
[HttpGet]
public ActionResult SaveAsPdf(string refnum, string emid)
{
    try
    {
        var prv = functions.getprvrequest(refnum, emid);
        ViewAsPdf pdf = new Rotativa.ViewAsPdf("PRVRequestPdf", prv)
        {
            FileName = "Test.pdf",
            CustomSwitches = "--page-offset 0 --footer-center [page] --footer-font-size 8"
        };
        byte[] pdfData = pdf.BuildFile(ControllerContext);
        string fullPath = @"\\server\network\path\pdfs\" + pdf.FileName;
        using (var fileStream = new FileStream(fullPath, FileMode.Create, FileAccess.Write))
        {
            fileStream.Write(pdfData, 0, pdfData.Length);
        }
        return Json(new { isSuccessful = true }, JsonRequestBehavior.AllowGet);
    }
    catch (Exception ex)
    {
        //TODO: ADD LOGGING
        return Json(new { isSuccessful = false, error = "Uh oh!" }, JsonRequestBehavior.AllowGet);
        //throw;
    }
}
You can simply try this:
var fileName = string.Format("my_file_{0}.pdf", id);
var path = Server.MapPath("~/App_Data/" + fileName);
System.IO.File.WriteAllBytes(path, pdfByteArray);
Can OrmLite recognize differences between my POCO and my schema and automatically add (or remove) columns as necessary to keep the schema in sync with my POCO?
If this ability doesn't exist, is there a way for me to query the db for the table schema so that I can perform the syncing manually? I found this, but I'm using the version of OrmLite that installs with ServiceStack, and for the life of me I cannot find a namespace that has the TableInfo classes.
I created an extension method to automatically add missing columns to my tables. Been working great so far. Caveat: the code for getting the column names is SQL Server specific.
namespace System.Data
{
    public static class IDbConnectionExtensions
    {
        private static List<string> GetColumnNames(IDbConnection db, string tableName)
        {
            var columns = new List<string>();
            using (var cmd = db.CreateCommand())
            {
                cmd.CommandText = "exec sp_columns " + tableName;
                var reader = cmd.ExecuteReader();
                while (reader.Read())
                {
                    var ordinal = reader.GetOrdinal("COLUMN_NAME");
                    columns.Add(reader.GetString(ordinal));
                }
                reader.Close();
            }
            return columns;
        }

        public static void AlterTable<T>(this IDbConnection db) where T : new()
        {
            var model = ModelDefinition<T>.Definition;
            // just create the table if it doesn't already exist
            if (db.TableExists(model.ModelName) == false)
            {
                db.CreateTable<T>(overwrite: false);
                return;
            }
            // find each of the missing fields
            var columns = GetColumnNames(db, model.ModelName);
            var missing = ModelDefinition<T>.Definition.FieldDefinitions
                .Where(field => columns.Contains(field.FieldName) == false)
                .ToList();
            // add a new column for each missing field
            foreach (var field in missing)
            {
                var alterSql = string.Format("ALTER TABLE {0} ADD {1} {2}",
                    model.ModelName,
                    field.FieldName,
                    db.GetDialectProvider().GetColumnTypeDefinition(field.FieldType)
                );
                Console.WriteLine(alterSql);
                db.ExecuteSql(alterSql);
            }
        }
    }
}
No, there is no current support for auto-migration of RDBMS schemas vs POCOs in ServiceStack's OrmLite.
There are currently a few threads in OrmLite's issues exploring different ways to add this.
Here is a slightly modified version of the code from cornelha, adapted to work with PostgreSQL. I removed this fragment:
//private static List<string> GetColumnNames(object poco)
//{
//    var list = new List<string>();
//    foreach (var prop in poco.GetType().GetProperties())
//    {
//        list.Add(prop.Name);
//    }
//    return list;
//}
and used the IOrmLiteDialectProvider.NamingStrategy.GetTableName and IOrmLiteDialectProvider.NamingStrategy.GetColumnName methods to convert table and column names from PascalNotation to the this_kind_of_notation that OrmLite uses when creating tables in PostgreSQL.
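To illustrate the conversion (the outputs shown are indicative; they depend on the configured naming strategy):

var namingStrategy = PostgreSqlDialect.Provider.NamingStrategy;
var tableName = namingStrategy.GetTableName("MyPocoTable");   // e.g. "my_poco_table"
var columnName = namingStrategy.GetColumnName("CreatedDate"); // e.g. "created_date"

The adapted extension class: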
public static class IDbConnectionExtensions
{
    private static List<string> GetColumnNames(IDbConnection db, string tableName, IOrmLiteDialectProvider provider)
    {
        var columns = new List<string>();
        using (var cmd = db.CreateCommand())
        {
            cmd.CommandText = getCommandText(tableName, provider);
            var tbl = new DataTable();
            tbl.Load(cmd.ExecuteReader());
            for (int i = 0; i < tbl.Columns.Count; i++)
            {
                columns.Add(tbl.Columns[i].ColumnName);
            }
        }
        return columns;
    }

    private static string getCommandText(string tableName, IOrmLiteDialectProvider provider)
    {
        if (provider == PostgreSqlDialect.Provider)
            return string.Format("select * from {0} limit 1", tableName);
        else
            return string.Format("select top 1 * from {0}", tableName);
    }

    public static void AlterTable<T>(this IDbConnection db, IOrmLiteDialectProvider provider) where T : new()
    {
        var model = ModelDefinition<T>.Definition;
        var table = new T();
        var namingStrategy = provider.NamingStrategy;
        // just create the table if it doesn't already exist
        var tableName = namingStrategy.GetTableName(model.ModelName);
        if (db.TableExists(tableName) == false)
        {
            db.CreateTable<T>(overwrite: false);
            return;
        }
        // find each of the missing fields (query by the converted table name)
        var columns = GetColumnNames(db, tableName, provider);
        var missing = ModelDefinition<T>.Definition.FieldDefinitions
            .Where(field => columns.Contains(namingStrategy.GetColumnName(field.FieldName)) == false)
            .ToList();
        // add a new column for each missing field
        foreach (var field in missing)
        {
            var columnName = namingStrategy.GetColumnName(field.FieldName);
            var alterSql = string.Format("ALTER TABLE {0} ADD COLUMN {1} {2}",
                tableName,
                columnName,
                db.GetDialectProvider().GetColumnTypeDefinition(field.FieldType)
            );
            Console.WriteLine(alterSql);
            db.ExecuteSql(alterSql);
        }
    }
}
I implemented an UpdateTable function. The basic idea is:
1. Rename the current table in the database.
2. Let OrmLite create the new schema.
3. Copy the relevant data from the old table to the new one.
4. Drop the old table.
Github Repo: https://github.com/peheje/Extending-NServiceKit.OrmLite
Condensed code:
public interface ISqlProvider
{
    string RenameTableSql(string currentName, string newName);
    string GetColumnNamesSql(string tableName);
    string InsertIntoSql(string intoTableName, string fromTableName, string commaSeparatedColumns);
    string DropTableSql(string tableName);
}

public static class DbUpdate
{
    public static void UpdateTable<T>(IDbConnection connection, ISqlProvider sqlProvider) where T : new()
    {
        connection.CreateTableIfNotExists<T>();
        var model = ModelDefinition<T>.Definition;
        string tableName = model.Name;
        string tableNameTmp = tableName + "Tmp";
        string renameTableSql = sqlProvider.RenameTableSql(tableName, tableNameTmp);
        connection.ExecuteNonQuery(renameTableSql);
        connection.CreateTable<T>();
        string getModelColumnsSql = sqlProvider.GetColumnNamesSql(tableName);
        var modelColumns = connection.SqlList<string>(getModelColumnsSql);
        string getDbColumnsSql = sqlProvider.GetColumnNamesSql(tableNameTmp);
        var dbColumns = connection.SqlList<string>(getDbColumnsSql);
        List<string> activeFields = dbColumns.Where(dbColumn => modelColumns.Contains(dbColumn)).ToList();
        string activeFieldsCommaSep = ListToCommaSeparatedString(activeFields);
        string insertIntoSql = sqlProvider.InsertIntoSql(tableName, tableNameTmp, activeFieldsCommaSep);
        connection.ExecuteSql(insertIntoSql);
        string dropTableSql = sqlProvider.DropTableSql(tableNameTmp);
        //connection.ExecuteSql(dropTableSql); //maybe you want to clean up yourself, else uncomment
    }

    private static String ListToCommaSeparatedString(List<String> source)
    {
        var sb = new StringBuilder();
        for (int i = 0; i < source.Count; i++)
        {
            sb.Append(source[i]);
            if (i < source.Count - 1)
            {
                sb.Append(", ");
            }
        }
        return sb.ToString();
    }
}
MySql implementation:
public class MySqlProvider : ISqlProvider
{
    public string RenameTableSql(string currentName, string newName)
    {
        return "RENAME TABLE `" + currentName + "` TO `" + newName + "`;";
    }

    public string GetColumnNamesSql(string tableName)
    {
        return "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '" + tableName + "';";
    }

    public string InsertIntoSql(string intoTableName, string fromTableName, string commaSeparatedColumns)
    {
        return "INSERT INTO `" + intoTableName + "` (" + commaSeparatedColumns + ") SELECT " + commaSeparatedColumns + " FROM `" + fromTableName + "`;";
    }

    public string DropTableSql(string tableName)
    {
        return "DROP TABLE `" + tableName + "`;";
    }
}
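For comparison, a SQL Server counterpart might look like this (an untested sketch; the MsSqlProvider name is mine, and the statements are plain T-SQL):

public class MsSqlProvider : ISqlProvider
{
    public string RenameTableSql(string currentName, string newName)
    {
        return "EXEC sp_rename '" + currentName + "', '" + newName + "';";
    }

    public string GetColumnNamesSql(string tableName)
    {
        return "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '" + tableName + "';";
    }

    public string InsertIntoSql(string intoTableName, string fromTableName, string commaSeparatedColumns)
    {
        return "INSERT INTO [" + intoTableName + "] (" + commaSeparatedColumns + ") SELECT " + commaSeparatedColumns + " FROM [" + fromTableName + "];";
    }

    public string DropTableSql(string tableName)
    {
        return "DROP TABLE [" + tableName + "];";
    }
}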
Usage:
using (var db = dbFactory.OpenDbConnection())
{
    DbUpdate.UpdateTable<SimpleData>(db, new MySqlProvider());
}
Haven't tested with FKs. Can't handle renaming properties.
I needed to implement something similar and found the post by Scott very helpful. I decided to make a small change to make it much more agnostic. Since I only use Sqlite and MSSQL, I made the getCommandText method very simple, but it can be extended. I used a simple DataTable to get the columns. This solution works perfectly for my requirements.
public static class IDbConnectionExtensions
{
    private static List<string> GetColumnNames(IDbConnection db, string tableName, IOrmLiteDialectProvider provider)
    {
        var columns = new List<string>();
        using (var cmd = db.CreateCommand())
        {
            cmd.CommandText = getCommandText(tableName, provider);
            var tbl = new DataTable();
            tbl.Load(cmd.ExecuteReader());
            for (int i = 0; i < tbl.Columns.Count; i++)
            {
                columns.Add(tbl.Columns[i].ColumnName);
            }
        }
        return columns;
    }

    private static string getCommandText(string tableName, IOrmLiteDialectProvider provider)
    {
        if (provider == SqliteDialect.Provider)
            return string.Format("select * from {0} limit 1", tableName);
        else
            return string.Format("select top 1 * from {0}", tableName);
    }

    private static List<string> GetColumnNames(object poco)
    {
        var list = new List<string>();
        foreach (var prop in poco.GetType().GetProperties())
        {
            list.Add(prop.Name);
        }
        return list;
    }

    public static void AlterTable<T>(this IDbConnection db, IOrmLiteDialectProvider provider) where T : new()
    {
        var model = ModelDefinition<T>.Definition;
        var table = new T();
        // just create the table if it doesn't already exist
        if (db.TableExists(model.ModelName) == false)
        {
            db.CreateTable<T>(overwrite: false);
            return;
        }
        // find each of the missing fields
        var columns = GetColumnNames(db, model.ModelName, provider);
        var missing = ModelDefinition<T>.Definition.FieldDefinitions
            .Where(field => columns.Contains(field.FieldName) == false)
            .ToList();
        // add a new column for each missing field
        foreach (var field in missing)
        {
            var alterSql = string.Format("ALTER TABLE {0} ADD {1} {2}",
                model.ModelName,
                field.FieldName,
                db.GetDialectProvider().GetColumnTypeDefinition(field.FieldType)
            );
            Console.WriteLine(alterSql);
            db.ExecuteSql(alterSql);
        }
    }
}
So I took user44's answer and modified the AlterTable method to make it a bit more efficient.
Instead of looping and running one SQL query per field/column, I merge them into one with some simple text parsing (MySQL commands!).
public static void AlterTable<T>(this IDbConnection db, IOrmLiteDialectProvider provider) where T : new()
{
    var model = ModelDefinition<T>.Definition;
    var table = new T();
    var namingStrategy = provider.NamingStrategy;
    // just create the table if it doesn't already exist
    var tableName = namingStrategy.GetTableName(model.ModelName);
    if (db.TableExists(tableName) == false)
    {
        db.CreateTable<T>(overwrite: false);
        return;
    }
    // find each of the missing fields (query by the converted table name)
    var columns = GetColumnNames(db, tableName, provider);
    var missing = ModelDefinition<T>.Definition.FieldDefinitions
        .Where(field => columns.Contains(namingStrategy.GetColumnName(field.FieldName)) == false)
        .ToList();
    if (missing.Count == 0)
        return; // nothing missing, nothing to do
    string alterSql = "";
    string addSql = "";
    // collect an ADD COLUMN clause for each missing field
    foreach (var field in missing)
    {
        var alt = db.GetDialectProvider().ToAddColumnStatement(typeof(T), field); // Should be made more efficient, one query for all changes instead of many
        int index = alt.IndexOf("ADD ");
        alterSql = alt.Substring(0, index);
        addSql += alt.Substring(alt.IndexOf("ADD COLUMN")).Replace(";", "") + ", ";
    }
    if (addSql.Length > 2)
        addSql = addSql.Substring(0, addSql.Length - 2);
    string fullSql = alterSql + addSql;
    Console.WriteLine(fullSql);
    db.ExecuteSql(fullSql);
}
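With this change the loop produces a single statement of roughly the form ALTER TABLE `my_table` ADD COLUMN `a` INT NULL, ADD COLUMN `b` TEXT NULL (the exact column definitions come from ToAddColumnStatement), so only one round trip is made instead of one per column.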
Being very new to SharePoint coding, I have been assigned the task of creating prototype code to upload a file and set the field values for that file, which will show up when opening the SharePoint page with the file.
This has to be done from a remote machine, not the SharePoint server itself, so using the .NET objects for SharePoint is out of the question.
I quickly found out how to upload a file through the SharePoint web service Copy.asmx:
void UploadTestFile() {
    var file = @"C:\Temp\TestFile.doc";
    string destinationUrl = "http://mysharepointserver/Documents/"
        + Path.GetFileName(file);
    string[] destinationUrls = { destinationUrl };
    var CopyWS = new Copy.Copy();
    CopyWS.UseDefaultCredentials = true;
    CopyWS.Url = "http://mysharepointserver/_vti_bin/copy.asmx";
    CopyResult[] result;
    byte[] data = File.ReadAllBytes(file);
    FieldInformation mf1 = new FieldInformation {
        DisplayName = "title",
        InternalName = "title",
        Type = FieldType.Text,
        Value = "Dummy text"
    };
    FieldInformation mf2 = new FieldInformation {
        DisplayName = "MyTermSet",
        InternalName = "MyTermSet",
        Type = FieldType.Note,
        Value = "Test; Unit;"
    };
    CopyWS.CopyIntoItems(
        "+",
        destinationUrls,
        new FieldInformation[] { mf1, mf2 },
        data,
        out result);
}
This code easily uploads any file to the target site but only fills the "title" field with info. The field MyTermSet, to which I have already added three terms (Test, Unit, and Page), will not update with the values "Test;" and "Unit;".
Being very new to SharePoint and not yet grasping all the basics, googling has told me that updating "File", "Computed", or "Lookup" fields does not work with the CopyIntoItems method, and MyTermSet, being a Taxonomy field, is, if I am correct, a Lookup field.
So how do I get MyTermSet updated with the values "Test;" and "Unit;"?
I would really prefer it if someone has sample code for this. I have followed several hint-links, but I am none the wiser; I have found no sample code on this at all.
Has anyone made one single method that wraps it all? Or another method that takes the destinationUrl from the file upload and updates the Term Set/Taxonomy field?
Puzzling together what I have found so far, I am now able to do what I wanted. But I would really like to be able to get the Taxonomy field GUIDs dynamically instead of having to set them explicitly myself:
void UploadTestFile(string FileName, string DocLib, Dictionary<string, string> Fields = null) {
    //Upload the file to the target Sharepoint doc lib
    string destinationUrl = DocLib + Path.GetFileName(FileName);
    string[] destinationUrls = { destinationUrl };
    var CopyWS = new Copy.Copy();
    CopyWS.UseDefaultCredentials = true;
    CopyWS.Url = new Uri(new Uri(DocLib), "/_vti_bin/copy.asmx").ToString();
    CopyResult[] result;
    var data = File.ReadAllBytes(FileName);
    CopyWS.CopyIntoItems(
        "+",
        destinationUrls,
        new FieldInformation[0],
        data,
        out result);
    if (Fields == null) return; //Done uploading

    //Get the ID and metadata information of the fields
    var list = new ListsWS.Lists();
    list.UseDefaultCredentials = true;
    var localpath = new Uri(DocLib).LocalPath.TrimEnd('/');
    var site = localpath.Substring(0, localpath.LastIndexOf("/")); //Get the site of the URL
    list.Url = new Uri(new Uri(DocLib), site + "/_vti_bin/lists.asmx").ToString(); //Lists on the right site
    FieldInformation[] fiOut;
    byte[] filedata;
    var get = CopyWS.GetItem(destinationUrl, out fiOut, out filedata);
    if (data.Length != filedata.Length) throw new Exception("Failed on uploading the document.");

    //Dictionary on name and display name
    var fieldInfos = fiOut.ToDictionary(x => x.InternalName, x => x);
    var fieldInfosByName = new Dictionary<string, FieldInformation>();
    foreach (var item in fiOut) {
        if (!fieldInfosByName.ContainsKey(item.DisplayName)) {
            fieldInfosByName.Add(item.DisplayName, item);
        }
    }

    //Update the document with fielddata - this one can be extended for more than Text and Note fields.
    if (!fieldInfos.ContainsKey("ID")) throw new Exception("Could not get the ID of the upload.");
    var ID = fieldInfos["ID"].Value; //The ID of the document we just uploaded
    XDocument doc = new XDocument(); //Creating XML with updates we need
    doc.Add(XElement.Parse("<Batch OnError='Continue' ListVersion='1' ViewName=''/>"));
    doc.Element("Batch").Add(XElement.Parse("<Method ID='1' Cmd='Update'/>"));
    var methNode = doc.Element("Batch").Element("Method");

    //Add ID
    var fNode = new XElement("Field");
    fNode.SetAttributeValue("Name", "ID");
    fNode.Value = ID;
    methNode.Add(fNode);

    //Loop each field and add each Field
    foreach (var field in Fields) {
        //Get the field object from name or display name
        FieldInformation fi = null;
        if (fieldInfos.ContainsKey(field.Key)) {
            fi = fieldInfos[field.Key];
        }
        else if (fieldInfosByName.ContainsKey(field.Key)) {
            fi = fieldInfosByName[field.Key];
        }
        if (fi != null) {
            //Fix for taxonomy fields - find the correct field to update
            if (fi.Type == FieldType.Invalid && fieldInfos.ContainsKey(field.Key + "TaxHTField0")) {
                fi = fieldInfos[field.Key + "TaxHTField0"];
            }
            else if (fi.Type == FieldType.Invalid && fieldInfosByName.ContainsKey(field.Key + "_0")) {
                fi = fieldInfosByName[field.Key + "_0"];
            }
            fNode = new XElement("Field");
            fNode.SetAttributeValue("Name", fi.InternalName);
            switch (fi.Type) {
                case FieldType.Lookup:
                    fNode.Value = "-1;#" + field.Value;
                    break;
                case FieldType.Choice:
                case FieldType.Text:
                    fNode.Value = field.Value;
                    break;
                case FieldType.Note: //TermSet's
                    var termsetval = "";
                    var terms = field.Value.Split(';');
                    foreach (var term in terms) {
                        termsetval += "-1;#" + term + ";";
                    }
                    fNode.Value = termsetval.TrimEnd(';');
                    break;
                default:
                    //..Unhandled type. Implement if needed.
                    break;
            }
            methNode.Add(fNode); //Adds the field to the XML
        }
        else {
            //Field does not exist. No use in uploading.
        }
    }

    //Gets the listname (not sure if it is the full path or just the folder name)
    var listname = new Uri(DocLib).LocalPath;
    var listcol = list.GetListCollection(); //Get the lists of the site
    listname = (from XmlNode x
                in listcol.ChildNodes
                where x.Attributes["DefaultViewUrl"].InnerText.StartsWith(listname, StringComparison.InvariantCultureIgnoreCase)
                select x.Attributes["ID"].InnerText).DefaultIfEmpty(listname).First();

    //Convert the XML to XmlNode and upload the data
    var xmldoc = new XmlDocument();
    xmldoc.LoadXml(doc.ToString());
    list.UpdateListItems(listname, xmldoc.DocumentElement);
}
Then I call it like this:
var fields = new Dictionary<string, string>();
fields.Add("Test", "Dummy Text");
fields.Add("MyTermSet", "Page|a4ba29c1-3ed5-47e9-b43f-36bc59c0ea5c;Unit|4237dfbe-22a2-4d90-bd08-09f4a8dd0ada");
UploadTestFile(@"C:\Temp\TestFile2.doc", @"http://mysharepointserver/Documents/", fields);
I would, however, prefer to call it like this:
var fields = new Dictionary<string, string>();
fields.Add("Test", "Dummy Text");
fields.Add("MyTermSet", "Page;Unit");
UploadTestFile(@"C:\Temp\TestFile2.doc", @"http://mysharepointserver/Documents/", fields);