I'm trying to migrate code to Log4j 2, but I'm running into issues finding an alternative to getCurrentLoggers in log4j 2.1.1.
org.apache.logging.log4j.LogManager.getCurrentLoggers() does not exist.
private ArrayList<LoggerName> getLoggerList() {
// Generate a list of all the loggers and levels
ArrayList<String> al = new ArrayList<String>();
HashMap<Object, Object> hm = new HashMap<Object, Object>();
ArrayList<LoggerName> list = new ArrayList<LoggerName>();
// Get RootLogger
Logger rootLogger = LogManager.getRootLogger();
String rootLoggerName = rootLogger.getName();
al.add(rootLoggerName);
hm.put(rootLoggerName, rootLogger);
// All Other Loggers ISSUE HERE
Enumeration e = LogManager.getCurrentLoggers();
while (e.hasMoreElements()) {
Logger t1Logger = (Logger) e.nextElement();
String loggerName = t1Logger.getName();
al.add(loggerName);
hm.put(loggerName, t1Logger);
}
String[] alLoggerStr = al.toArray(new String[0]);
Arrays.sort(alLoggerStr);
for (int i=0; i < alLoggerStr.length; i++) {
Logger logger = (Logger) hm.get(alLoggerStr[i]);
String name = logger.getName();
String level = logger.getLevel().toString();
String parent = GenFuncs.EMPTY_STRING;
if (logger.getParent() != null) {
parent = logger.getParent().getName();
}
LoggerName logData = new LoggerNameImpl(name, parent, level);
list.add(logData);
}
return list;
}
I found the answer on Stack Overflow:
File configFile = new File("c:\\my_path\\log4j2.xml");
LoggerContext loggerContext = Configurator.initialize("my_config", null, configFile.toURI());
Configuration configuration = loggerContext.getConfiguration();
Collection<LoggerConfig> loggerConfigs = configuration.getLoggers().values();
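Building on that, a rough sketch of getLoggerList() against the Log4j 2 API might look like the following. It assumes the LoggerName/LoggerNameImpl classes from the code above; note that Configuration.getLoggers() returns the loggers declared in the configuration, while LoggerContext.getLoggers() would return the Logger objects created at runtime.
import java.util.ArrayList;
import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;

private ArrayList<LoggerName> getLoggerList() {
    // The cast to the core LoggerContext requires log4j-core on the classpath.
    LoggerContext context = (LoggerContext) LogManager.getContext(false);
    Configuration configuration = context.getConfiguration();
    ArrayList<LoggerName> list = new ArrayList<>();
    for (Map.Entry<String, LoggerConfig> entry : configuration.getLoggers().entrySet()) {
        LoggerConfig loggerConfig = entry.getValue();
        String name = loggerConfig.getName();
        String level = loggerConfig.getLevel() == null ? "" : loggerConfig.getLevel().toString();
        String parent = loggerConfig.getParent() == null ? "" : loggerConfig.getParent().getName();
        list.add(new LoggerNameImpl(name, parent, level));
    }
    return list;
}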
I am new to Apache Spark and am trying to run a custom nearest-neighbor algorithm on an RDD that has been partitioned into two parts using a custom partitioner. The JavaPairRDD contains the graph details and the random objects created on the graph.
According to my logic, I am building a subgraph for each partition and running a custom algorithm on each subgraph. It seems to be working, although not properly. I am not sure if this is the correct way to apply an action to each partition. I am adding my code and the results as well. Comments and suggestions are highly appreciated.
// <Partition_Index_Key, Map<Source_vertex, Map<Destination Vertex, Tuple2<Edge_Length, ArrayList of Random Objects>>>>
JavaPairRDD<Object, Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>>> adjVertForSubgraphsRDD = jscontext
.parallelizePairs(adjacentVerticesForSubgraphs)
.partitionBy(new CustomPartitioner(CustomPartitionSize));
//applying foreachPartition action on JavaPairRDD
adjVertForSubgraphsRDD.foreachPartition(
new VoidFunction<Iterator<Tuple2<Object, Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>>>>>() {
/**
*
*/
private static final long serialVersionUID = 1L;
@Override
public void call(
Iterator<Tuple2<Object, Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>>>> tupleRow)
throws Exception {
int sourceVertex;
int destVertex;
double edgeLength;
int roadObjectId;
boolean roadObjectType;
double distanceFromStart;
CoreGraph subgraph0 = new CoreGraph();
CoreGraph subgraph1 = new CoreGraph();
while (tupleRow.hasNext()) {
// consume each tuple exactly once; calling tupleRow.next() again here would skip entries
Tuple2<Object, Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>>> row = tupleRow.next();
Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>> newMap = row._2();
int partitionKey = Integer.parseInt(String.valueOf(row._1()));
if (partitionKey == 0) {
for (Object srcVertex : newMap.keySet()) {
for (Object dstVertex : newMap.get(srcVertex).keySet()) {
if (newMap.get(srcVertex).get(dstVertex)._2() != null) {
sourceVertex = Integer.parseInt(String.valueOf(srcVertex));
destVertex = Integer.parseInt(String.valueOf(dstVertex));
edgeLength = newMap.get(srcVertex).get(dstVertex)._1();
subgraph0.addEdge(sourceVertex, destVertex, edgeLength);
for (int i = 0; i < newMap.get(srcVertex).get(dstVertex)._2()
.size(); i++) {
int currentEdgeId = subgraph0.getEdgeId(sourceVertex, destVertex);
roadObjectId = newMap.get(srcVertex).get(dstVertex)._2().get(i)
.getObjectId();
roadObjectType = newMap.get(srcVertex).get(dstVertex)._2().get(i)
.getType();
distanceFromStart = newMap.get(srcVertex).get(dstVertex)._2().get(i)
.getDistanceFromStartNode();
RoadObject rn0 = new RoadObject();
rn0.setObjId(roadObjectId);
rn0.setType(roadObjectType);
rn0.setDistanceFromStartNode(distanceFromStart);
subgraph0.addObjectOnEdge(currentEdgeId, rn0);
}
} else {
sourceVertex = Integer.parseInt(String.valueOf(srcVertex));
destVertex = Integer.parseInt(String.valueOf(dstVertex));
edgeLength = newMap.get(srcVertex).get(dstVertex)._1();
subgraph0.addEdge(sourceVertex, destVertex, edgeLength);
}
}
}
} else if (partitionKey == 1) {
for (Object srcVertex : newMap.keySet()) {
for (Object dstVertex : newMap.get(srcVertex).keySet()) {
if (newMap.get(srcVertex).get(dstVertex)._2() != null) {
sourceVertex = Integer.parseInt(String.valueOf(srcVertex));
destVertex = Integer.parseInt(String.valueOf(dstVertex));
edgeLength = newMap.get(srcVertex).get(dstVertex)._1();
subgraph1.addEdge(sourceVertex, destVertex, edgeLength);
for (int i = 0; i < newMap.get(srcVertex).get(dstVertex)._2()
.size(); i++) {
int currentEdgeId = subgraph1.getEdgeId(sourceVertex, destVertex);
roadObjectId = newMap.get(srcVertex).get(dstVertex)._2().get(i)
.getObjectId();
roadObjectType = newMap.get(srcVertex).get(dstVertex)._2().get(i)
.getType();
distanceFromStart = newMap.get(srcVertex).get(dstVertex)._2().get(i)
.getDistanceFromStartNode();
RoadObject rn1 = new RoadObject();
rn1.setObjId(roadObjectId);
rn1.setType(roadObjectType);
rn1.setDistanceFromStartNode(distanceFromStart);
subgraph1.addObjectOnEdge(currentEdgeId, rn1);
}
} else {
sourceVertex = Integer.parseInt(String.valueOf(srcVertex));
destVertex = Integer.parseInt(String.valueOf(dstVertex));
edgeLength = newMap.get(srcVertex).get(dstVertex)._1();
subgraph1.addEdge(sourceVertex, destVertex, edgeLength);
}
}
}
}
}
// Straight forward nearest neighbor algorithm from each true to false.
ANNNaive ann = new ANNNaive();
System.err.println("-------------------------------");
Map<Integer, Integer> nearestNeighorPairsSubg0 = ann.compute(subgraph0, true);
System.out.println("for subgraph0");
System.out.println(nearestNeighorPairsSubg0);
System.err.println("-------------------------------");
System.err.println("-------------------------------");
Map<Integer, Integer> nearestNeighorPairsSubg1 = ann.compute(subgraph1, true);
System.out.println("for subgraph1");
System.out.println(nearestNeighorPairsSubg1);
System.err.println("-------------------------------");
}
});
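For comparison, here is a rough sketch (not the original code, and untested against the data above) of the same idea using mapPartitionsWithIndex, which hands the partition index to the function directly, so there is no need to encode the partition number in the pair key and branch on it. CoreGraph, RoadObject and ANNNaive are the classes from the question; populateSubgraph(...) is a hypothetical helper standing in for the nested edge/object loops above.
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function2;
import scala.Tuple2;

// One function per partition: build the subgraph, run the algorithm, emit the result.
Function2<Integer,
        Iterator<Tuple2<Object, Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>>>>,
        Iterator<Map<Integer, Integer>>> nnPerPartition = (partitionIndex, rows) -> {
    CoreGraph subgraph = new CoreGraph();
    while (rows.hasNext()) {
        // consume each tuple exactly once
        Tuple2<Object, Map<Object, Map<Object, Tuple2<Double, ArrayList<RoadObject>>>>> row = rows.next();
        populateSubgraph(subgraph, row._2()); // hypothetical helper wrapping the nested loops above
    }
    Map<Integer, Integer> nearestNeighborPairs = new ANNNaive().compute(subgraph, true);
    System.out.println("partition " + partitionIndex + ": " + nearestNeighborPairs);
    return Collections.singletonList(nearestNeighborPairs).iterator();
};

JavaRDD<Map<Integer, Integer>> resultsPerPartition =
        adjVertForSubgraphsRDD.mapPartitionsWithIndex(nnPerPartition, true);
Calling resultsPerPartition.collect() would then bring the per-partition result maps back to the driver instead of only printing them on the executors.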
Our program displays a tree control showing the metadata structure of the XML file the user is using as a datasource. So it displays all elements & attributes in use in the XML file, like this:
Employees
Employee
FirstName
LastName
Orders
Order
OrderId
For the case where the user does not pass us an XSD file, we need to walk the XML file and build up the metadata structure.
The full code for this is at SaxonQuestions.zip, TestBuildTreeWithSchema.java and is also listed below.
The below code works but it has a problem. Suppose under Employee there's an element for SpouseName. This is only populated if the employee is married. What if the sample data file I have is all unmarried employees? Then the below code does not know there's a SpouseName element.
So my question is - how can I read the schema directly, instead of using the below code. If I read the schema then I get every node & attribute including the optional ones. I also get the type. And the schema optionally has a description for each node and I get that too.
Therefore, I need to read the schema itself. How can I do that?
And a secondary question - why is the type for an int BigInteger instead of Integer or Long? I see this with Employee/@EmployeeID in Southwind.xml & Southwind.xsd.
TestBuildTreeWithSchema.java
import net.sf.saxon.s9api.*;
import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.List;
public class TestBuildTreeWithSchema {
public static void main(String[] args) throws Exception {
XmlDatasource datasource = new XmlDatasource(
new FileInputStream(new File("files", "SouthWind.xml").getCanonicalPath()),
new FileInputStream(new File("files", "SouthWind.xsd").getCanonicalPath()));
// get the root element
XdmNode rootNode = null;
for (XdmNode node : datasource.getXmlRootNode().children()) {
if (node.getNodeKind() == XdmNodeKind.ELEMENT) {
rootNode = node;
break;
}
}
TestBuildTreeWithSchema buildTree = new TestBuildTreeWithSchema(rootNode);
Element root = buildTree.addNode();
System.out.println("Schema:");
printElement("", root);
}
private static void printElement(String indent, Element element) {
System.out.println(indent + "<" + element.name + "> (" + (element.type == null ? "null" : element.type.getSimpleName()) + ")");
indent += " ";
for (Attribute attr : element.attributes)
System.out.println(indent + "=" + attr.name + " (" + (attr.type == null ? "null" : attr.type.getSimpleName()) + ")");
for (Element child : element.children)
printElement(indent, child);
}
protected XdmItem currentNode;
public TestBuildTreeWithSchema(XdmItem currentNode) {
this.currentNode = currentNode;
}
private Element addNode() throws SaxonApiException {
String name = ((XdmNode)currentNode).getNodeName().getLocalName();
// Question:
// Is this the best way to determine that this element has data (as opposed to child elements)?
Boolean elementHasData;
try {
((XdmNode) currentNode).getTypedValue();
elementHasData = true;
} catch (Exception ex) {
elementHasData = false;
}
// Questions:
// Is this the best way to get the type of the element value?
// Why BigInteger instead of Long for int?
Class valueClass = ! elementHasData ? null : ((XdmAtomicValue)((XdmNode)currentNode).getTypedValue()).getValue().getClass();
Element element = new Element(name, valueClass, null);
// add in attributes
XdmSequenceIterator currentSequence;
if ((currentSequence = moveTo(Axis.ATTRIBUTE)) != null) {
do {
name = ((XdmNode) currentNode).getNodeName().getLocalName();
// Questions:
// Is this the best way to get the type of the attribute value?
// Why BigInteger instead of Long for int?
valueClass = ((XdmAtomicValue)((XdmNode)currentNode).getTypedValue()).getValue().getClass();
Attribute attr = new Attribute(name, valueClass, null);
element.attributes.add(attr);
} while (moveToNextInCurrentSequence(currentSequence));
moveTo(Axis.PARENT);
}
// add in children elements
if ((currentSequence = moveTo(Axis.CHILD)) != null) {
do {
Element child = addNode();
// if we don't have this, add it
Element existing = element.getChildByName(child.name);
if (existing == null)
element.children.add(child);
else
// add in any children this does not have
existing.addNewItems (child);
} while (moveToNextInCurrentSequence(currentSequence));
moveTo(Axis.PARENT);
}
return element;
}
// moves to element or attribute
private XdmSequenceIterator moveTo(Axis axis) {
XdmSequenceIterator en = ((XdmNode) currentNode).axisIterator(axis);
boolean gotIt = false;
while (en.hasNext()) {
currentNode = en.next();
if (((XdmNode) currentNode).getNodeKind() == XdmNodeKind.ELEMENT || ((XdmNode) currentNode).getNodeKind() == XdmNodeKind.ATTRIBUTE) {
gotIt = true;
break;
}
}
if (gotIt) {
if (axis == Axis.ATTRIBUTE || axis == Axis.CHILD || axis == Axis.NAMESPACE)
return en;
return null;
}
return null;
}
// moves to next element or attribute
private Boolean moveToNextInCurrentSequence(XdmSequenceIterator currentSequence)
{
if (currentSequence == null)
return false;
while (currentSequence.hasNext())
{
currentNode = currentSequence.next();
if (((XdmNode)currentNode).getNodeKind() == XdmNodeKind.ELEMENT || ((XdmNode)currentNode).getNodeKind() == XdmNodeKind.ATTRIBUTE)
return true;
}
return false;
}
static class Node {
String name;
Class type;
String description;
public Node(String name, Class type, String description) {
this.name = name;
this.type = type;
this.description = description;
}
}
static class Element extends Node {
List<Element> children;
List<Attribute> attributes;
public Element(String name, Class type, String description) {
super(name, type, description);
children = new ArrayList<>();
attributes = new ArrayList<>();
}
public Element getChildByName(String name) {
for (Element child : children) {
if (child.name.equals(name))
return child;
}
return null;
}
public void addNewItems(Element child) {
for (Attribute attrAdd : child.attributes) {
boolean haveIt = false;
for (Attribute attrExist : attributes)
if (attrExist.name.equals(attrAdd.name)) {
haveIt = true;
break;
}
if (!haveIt)
attributes.add(attrAdd);
}
for (Element elemAdd : child.children) {
Element exist = null;
for (Element elemExist : children)
if (elemExist.name.equals(elemAdd.name)) {
exist = elemExist;
break;
}
if (exist == null)
children.add(elemAdd);
else
exist.addNewItems(elemAdd);
}
}
}
static class Attribute extends Node {
public Attribute(String name, Class type, String description) {
super(name, type, description);
}
}
}
XmlDatasource.java
import com.saxonica.config.EnterpriseConfiguration;
import com.saxonica.ee.s9api.SchemaValidatorImpl;
import net.sf.saxon.Configuration;
import net.sf.saxon.lib.FeatureKeys;
import net.sf.saxon.s9api.*;
import net.sf.saxon.type.SchemaException;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.XMLReaderFactory;
import javax.xml.transform.Source;
import javax.xml.transform.sax.SAXSource;
import javax.xml.transform.stream.StreamSource;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
public class XmlDatasource {
/** the DOM all searches are against */
private XdmNode xmlRootNode;
private XPathCompiler xPathCompiler;
/** key == the prefix; value == the uri mapped to that prefix */
private HashMap<String, String> prefixToUriMap = new HashMap<>();
/** key == the uri mapped to that prefix; value == the prefix */
private HashMap<String, String> uriToPrefixMap = new HashMap<>();
public XmlDatasource (InputStream xmlData, InputStream schemaFile) throws SAXException, SchemaException, SaxonApiException, IOException {
boolean haveSchema = schemaFile != null;
// call this before any instantiation of Saxon classes.
Configuration config = createEnterpriseConfiguration();
if (haveSchema) {
Source schemaSource = new StreamSource(schemaFile);
config.addSchemaSource(schemaSource);
}
Processor processor = new Processor(config);
DocumentBuilder doc_builder = processor.newDocumentBuilder();
XMLReader reader = createXMLReader();
InputSource xmlSource = new InputSource(xmlData);
SAXSource saxSource = new SAXSource(reader, xmlSource);
if (haveSchema) {
SchemaValidator validator = new SchemaValidatorImpl(processor);
doc_builder.setSchemaValidator(validator);
}
xmlRootNode = doc_builder.build(saxSource);
xPathCompiler = processor.newXPathCompiler();
if (haveSchema)
xPathCompiler.setSchemaAware(true);
declareNameSpaces();
}
public XdmNode getXmlRootNode() {
return xmlRootNode;
}
public XPathCompiler getxPathCompiler() {
return xPathCompiler;
}
/**
* Create an XMLReader set to disallow XXE attacks.
* @return a safe XMLReader.
*/
public static XMLReader createXMLReader() throws SAXException {
XMLReader reader = XMLReaderFactory.createXMLReader();
// stop XXE https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Prevention_Cheat_Sheet#JAXP_DocumentBuilderFactory.2C_SAXParserFactory_and_DOM4J
reader.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
reader.setFeature("http://xml.org/sax/features/external-general-entities", false);
reader.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
return reader;
}
private void declareNameSpaces() throws SaxonApiException {
// saxon has some of their functions set up with this.
prefixToUriMap.put("saxon", "http://saxon.sf.net");
uriToPrefixMap.put("http://saxon.sf.net", "saxon");
XdmValue list = xPathCompiler.evaluate("//namespace::*", xmlRootNode);
if (list == null || list.size() == 0)
return;
for (int index=0; index<list.size(); index++) {
XdmNode node = (XdmNode) list.itemAt(index);
String prefix = node.getNodeName() == null ? "" : node.getNodeName().getLocalName();
// xml, xsd, & xsi are XML structure ones, not ones used in the XML
if (prefix.equals("xml") || prefix.equals("xsd") || prefix.equals("xsi"))
continue;
// use default prefix if prefix is empty.
if (prefix == null || prefix.isEmpty())
prefix = "def";
// this returns repeats, so if a repeat, go on to next.
if (prefixToUriMap.containsKey(prefix))
continue;
String uri = node.getStringValue();
if (uri != null && !uri.isEmpty()) {
xPathCompiler.declareNamespace(prefix, uri);
prefixToUriMap.put(prefix, uri);
uriToPrefixMap.put(uri, prefix); }
}
}
public static EnterpriseConfiguration createEnterpriseConfiguration()
{
EnterpriseConfiguration configuration = new EnterpriseConfiguration();
configuration.supplyLicenseKey(new BufferedReader(new java.io.StringReader(deobfuscate(key))));
configuration.setConfigurationProperty(FeatureKeys.SUPPRESS_XPATH_WARNINGS, Boolean.TRUE);
return configuration;
}
}
Thanks for the clarifications. I think your real goal is to find a way to parse and process an XML Schema in Java without having to treat the XSD as an ordinary XML document (it is an ordinary XML document, but processing it using the standard facilities is not easy).
On that basis, I think this thread should help: In Java, how do I parse an xml schema (xsd) to learn what's valid at a given element?
Personally, I've never found any library that does a better job than the EMF XSD model. It's complex, but comprehensive.
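Separately from EMF, and only as an illustration of reading schema components directly, a minimal sketch using the Xerces XML Schema API (org.apache.xerces.xs) could look like this. It lists the top-level element declarations with their declared types; getAnnotation() on a declaration gives access to any xs:documentation text. The file name is taken from the question.
import org.apache.xerces.xs.XSConstants;
import org.apache.xerces.xs.XSElementDeclaration;
import org.apache.xerces.xs.XSImplementation;
import org.apache.xerces.xs.XSLoader;
import org.apache.xerces.xs.XSModel;
import org.apache.xerces.xs.XSNamedMap;
import org.w3c.dom.bootstrap.DOMImplementationRegistry;

public class ReadXsdWithXerces {
    public static void main(String[] args) throws Exception {
        // Point the DOM registry at Xerces' XML Schema API implementation.
        System.setProperty(DOMImplementationRegistry.PROPERTY,
                "org.apache.xerces.dom.DOMXSImplementationSourceImpl");
        DOMImplementationRegistry registry = DOMImplementationRegistry.newInstance();
        XSImplementation impl = (XSImplementation) registry.getDOMImplementation("XS-Loader");
        XSLoader loader = impl.createXSLoader(null);
        XSModel model = loader.loadURI("files/Southwind.xsd");
        // Walk the global element declarations; the type name may be null for anonymous types.
        XSNamedMap elements = model.getComponents(XSConstants.ELEMENT_DECLARATION);
        for (int i = 0; i < elements.getLength(); i++) {
            XSElementDeclaration decl = (XSElementDeclaration) elements.item(i);
            System.out.println(decl.getName() + " : " + decl.getTypeDefinition().getName());
        }
    }
}
Because this walks the declarations in the XSD itself, optional elements such as SpouseName show up even when no sample document happens to contain them.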
I am unable to pass a string as the first parameter in the dictionary (Acumatica 2019R2).
public void PrintReportInDeviceHub(string reportID, Dictionary<string, string> parametersDictionary, string printerName, int? branchID)
{
Dictionary<string, PXReportRequiredException> reportsToPrint = new Dictionary<string, PXReportRequiredException>();
PrintParameters filter = new PrintParameters();
filter.PrintWithDeviceHub = true;
filter.DefinePrinterManually = true;
filter.PrinterName = printerName;
reportsToPrint = PX.SM.SMPrintJobMaint.AssignPrintJobToPrinter(reportsToPrint, parametersDictionary, filter, new NotificationUtility(this.Base).SearchPrinter, CRNotificationSource.BAccount, reportID, reportID, branchID);
if (reportsToPrint != null)
{
PX.SM.SMPrintJobMaint.CreatePrintJobGroups(reportsToPrint);
}
}
To print using DeviceHub, I followed the code below, and it should work:
Dictionary<string, string> reportParams = new Dictionary<string, string>();
// Add Report Parameters
PrintSettings printerSettings = new PrintSettings()
{
PrintWithDeviceHub = true,
DefinePrinterManually = true,
PrinterID = , //Set the printer id of the printer you want to use
NumberOfCopies = 1
};
PXReportRequiredException reportToPrint = null;
reportToPrint = PXReportRequiredException.CombineReport(reportToPrint,
/* ReportID */, reportParams);
SMPrintJobMaint.CreatePrintJobGroup(printerSettings, reportToPrint, "Printing Report");
I need to configure a log4net Logger programmatically. Specifically, I need to add a Logger with one appender. However, when I retrieve it, I find that it doesn't have the correct level or appender, and was hoping I was missing some simple configuration ceremony:
var patternLayout = new PatternLayout { ConversionPattern = "%message%newline" };
patternLayout.ActivateOptions();
var hierarchy = (Hierarchy)LogManager.GetRepository();
var testAppender = new RollingFileAppender
{
Name = "test-appender",
AppendToFile = true,
File = "c:\\temp\\test.log",
Layout = patternLayout,
DatePattern = "yyyyMMdd-HHmm",
MaxSizeRollBackups = 50,
MaximumFileSize = "20MB",
RollingStyle = RollingFileAppender.RollingMode.Size,
StaticLogFileName = false,
Encoding = Encoding.UTF8,
PreserveLogFileNameExtension = true,
Threshold = Level.Debug
};
testAppender.ActivateOptions();
var testLogger = hierarchy.LoggerFactory.CreateLogger(hierarchy, "test");
testLogger.Hierarchy = hierarchy;
testLogger.AddAppender(testAppender);
testLogger.Repository.Configured = true;
testLogger.Level = Level.Debug;
testLogger.Additivity = false;
hierarchy.Configured = true;
var retrievedLogger = LogManager.GetLogger("test");
The retrieved logger has a null Level and no appenders. What am I missing?
Instead of calling
var testLogger = hierarchy.LoggerFactory.CreateLogger(hierarchy, "test");
I called
var testLogger = hierarchy.GetLogger("test", hierarchy.LoggerFactory);
And that worked. GetLogger will create the logger if it does not already exist.
I want to create a better web service that displays a collection from a Notes view with pagination.
I have found some performance issues with View.getAllEntries on bigger views.
In MongoDB, I can use findAll() with skip() and limit().
How can I do something similar in Domino?
Use the ViewNavigator class. If you are paging through a large view, it is much faster than view.getAllEntries().
You can acquire an instance of ViewNavigator with view.createViewNav() or a similar method. For best performance, call view.setAutoUpdate(false) before you acquire the navigator.
You can find lots more information by searching the web. This article looks like a good place to start.
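As a minimal illustration of that pattern (method and variable names here are illustrative, not from the question), a paged read might look like this:
import lotus.domino.View;
import lotus.domino.ViewEntry;
import lotus.domino.ViewNavigator;

private void printPage(View view, int skip, int pageSize) throws Exception {
    view.setAutoUpdate(false);          // avoid index updates while navigating
    ViewNavigator nav = view.createViewNav();
    try {
        if (nav.skip(skip) != skip)     // jump to the requested offset
            return;                     // past the end of the view
        ViewEntry entry = nav.getCurrent();
        int emitted = 0;
        while (entry != null && emitted < pageSize) {
            if (!entry.isCategory()) {
                System.out.println(entry.getPosition('.') + " " + entry.getColumnValues());
                emitted++;
            }
            ViewEntry next = nav.getNext(entry);
            entry.recycle();            // free the backend handle
            entry = next;
        }
    } finally {
        nav.recycle();
    }
}
The skip(position) plus page-size combination plays the same role as MongoDB's skip() and limit().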
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.ibm.commons.util.io.json.JsonJavaObject;
import lotus.domino.NotesException;
import lotus.domino.View;
import lotus.domino.ViewColumn;
import lotus.domino.ViewEntryCollection;
import lotus.domino.ViewNavigator;
import lotus.domino.ViewEntry;
private String consultView(View view, int counter,int position) throws Exception{
String strValue = "";
ViewNavigator nav;
int count = 0;
view.setAutoUpdate(false);
nav = view.createViewNav();
nav.setEntryOptions(ViewNavigator.VN_ENTRYOPT_NOCOUNTDATA);
nav.setBufferMaxEntries(400);
int limit = counter;
int skippedEntries = nav.skip(position);
String number = "";
if (skippedEntries == position) {
Map<Integer, String> columnNameMap = new HashMap<Integer, String>();
for (ViewColumn col : (List<ViewColumn>) view.getColumns()) {
if (col.getColumnValuesIndex() < 65535) {
columnNameMap.put(col.getColumnValuesIndex(), col.getItemName());
}
}
List nodeData = new ArrayList();
ViewEntry entry = nav.getCurrent();
while (entry != null && count <= (limit - 1)) {
if (!entry.isCategory()) {
try {
HashMap<String, Object> entryMap = new HashMap<String, Object>();
count++;
List<Object> columnValues = entry.getColumnValues();
entryMap.put("unid", entry.getUniversalID());
entryMap.put("position", entry.getPosition('.'));
entryMap.put("pos", entry.getPosition('.'));
entryMap.put("userpos", count);
for (Integer index : columnNameMap.keySet())
entryMap.put(columnNameMap.get(index).toString(),columnValues.get(index));
nodeData.add(entryMap);
} catch (Exception e) {
e.printStackTrace();
}
}
ViewEntry tmpentry = nav.getNext(entry);
entry.recycle();
entry = tmpentry;
}
JsonJavaObject returnJSON = new JsonJavaObject();
returnJSON.put("errorcode", 0);
returnJSON.put("errormessage", "");
returnJSON.put("total",getViewCount(view));
returnJSON.put("data", nodeData);
strValue = returnJSON.toString();
}
nav.recycle();
view.recycle();
return strValue;
}
private int getViewCount(View view) throws NotesException {
int count = 0;
ViewEntryCollection entryCollection = view.getAllEntries();
count = entryCollection.getCount();
entryCollection.recycle();
return count;
}
}
The function below reads entries from the view and outputs the result as a JSON object. Please try the following and let me know if it works.
private String consultView(View view, int counter,int position) throws Exception{
String strValue = "";
ViewNavigator nav;
int count = 0;
view.setAutoUpdate(false);
nav = view.createViewNav();
nav.setEntryOptions(ViewNavigator.VN_ENTRYOPT_NOCOUNTDATA);
nav.setBufferMaxEntries(400);
int limit = counter;
int skippedEntries = nav.skip(position);
String number = "";
int inde = 111;
if (skippedEntries == position) {
Map<Integer, String> columnNameMap = new HashMap<Integer, String>();
for (ViewColumn col : (List<ViewColumn>) view.getColumns()) {
if (col.getColumnValuesIndex() < 65535 && Utilisties.containsVar(viewObject.getRetCols(), col.getItemName())) {
columnNameMap.put(col.getColumnValuesIndex(), col.getItemName());
}
}
List nodeData = new ArrayList();
ViewEntry entry = nav.getCurrent();
while (entry != null && count <= (limit - 1)) {
if (!entry.isCategory()) {
try {
HashMap<String, Object> entryMap = new HashMap<String, Object>();
count++;
List<Object> columnValues = entry.getColumnValues();
entryMap.put("unid", entry.getUniversalID());
entryMap.put("position", entry.getPosition('.'));
entryMap.put("pos", entry.getPosition('.'));
entryMap.put("userpos", count);
for (Integer index : columnNameMap.keySet())
entryMap.put(columnNameMap.get(index).toString(),columnValues.get(index));
nodeData.add(entryMap);
} catch (Exception e) {
e.printStackTrace();
}
}
ViewEntry tmpentry = nav.getNext(entry);
entry.recycle();
entry = tmpentry;
}
JsonJavaObject returnJSON = new JsonJavaObject();
returnJSON.put("errorcode", 0);
returnJSON.put("errormessage", "");
if(viewObject.getGetCount())
returnJSON.put("total",getViewCount(view));
returnJSON.put("data", nodeData);
strValue = returnJSON.toString();
}
nav.recycle();
view.recycle();
return strValue;