How do I train Bixby to recognize a wild card search term?

I have an action FindPage.js that finds pages and retrieves them for display as results. I understand how to train it to find pages with utterances like "Read the Twitter Search page" or "Read the Searchable Text page". The training treats "Twitter Search" as SearchTerm, and the code below matches SearchTerm against the tag field in the data. But how would I train it to understand a command like "Read all pages"? I want the code to carry out a search on the wildcard and bring back all available pages.
// search for informational pages
var console = require('console');
const PAGES = require('./content/pages');
var pages = PAGES;
console.log('pages are', pages);

exports.function = function findPage (searchTerm) {
    console.log('searchTerm is', searchTerm);
    var matches = [];
    pages = PAGES;
    for (var i = 0; i < pages.length; i++) {
        if (searchTerm == pages[i].tag) {
            matches.push(pages[i]);
        } else {
            console.log('no tag matches');
        }
    }
    console.log('matches are', matches);
    return matches;
};
Training:
[g:Page] Read the (Twitter Search)[v:SearchTerm] page.

This works, although I feel it is somewhat clunky to hardcode a conversion from "all" to the empty string, which includes() treats as a wildcard.
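On the training side, this presumably pairs with an utterance that captures "all" as the search term, along the lines of this (hypothetical) training entry:
[g:Page] Read (all)[v:SearchTerm] pages.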
exports.function = function findPage (searchTerm) {
    //console.log('searchTerm is', searchTerm)
    if (searchTerm == 'all') {
        searchTerm = '';
        console.log('searchTerm is all', searchTerm);
    } else {
        console.log('searchTerm is not all', searchTerm);
    }
    var matches = [];
    var pages = PAGES;
    matches = pages.filter(function (page) {
        return page.tag.includes(searchTerm);
    });
    return matches;
};
See the examples at https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes#Examples:
const str = 'To be, or not to be, that is the question.';
console.log(str.includes('To be')); // true
console.log(str.includes('')) // true
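An alternative that avoids hardcoding the conversion is to treat an empty, missing, or "all" search term as "return everything". The sketch below reuses the PAGES data and the findPage shape from above; how the optional input reaches the action is assumed rather than shown:
// Sketch: return every page when no concrete search term was captured
exports.function = function findPage (searchTerm) {
    var pages = PAGES;
    // Treat a missing, empty, or "all" search term as a wildcard
    if (!searchTerm || searchTerm === 'all') {
        return pages;
    }
    // Otherwise match against the tag field as before
    return pages.filter(function (page) {
        return page.tag.includes(searchTerm);
    });
};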

Related

Changing search method in Prestashop

The current search function, shown below, starts matching after 3 characters of input and compares the input against the product name or description. I'm looking to change the search function of the MegaShop theme in PrestaShop 1.7 as follows:
The search should be able to find word parts. For example, if the user writes "hi he", the search should be able to find "high heels". This should also work in other orders; let's say "he hi" (instead of "hi he") would also return "high heels", and every other article that matches these word parts in different words.
Inside /root/modules/tptnsearch, the file tptnsearch-ajax.php contains:
<?php
require_once('../../config/config.inc.php');
require_once('../../init.php');
require_once(dirname(__FILE__).'/tptnsearch.php');

$tptnsearch = new TptnSearch();
$result_products = array();
$products = array();
$tptnsearch_key = Tools::getValue('search_key');
$context = Context::getContext();
$count = 0;
$product_link = $context->link;

if (Tools::strlen($tptnsearch_key) >= 3) {
    $products = Product::searchByName($context->language->id, $tptnsearch_key);
    $total_products = count($products);
    if ($total_products) {
        for ($i = 0; $i < $total_products; $i++) {
            if (($products[$i]['name']) && ($products[$i]['active'])) {
                $images = Image::getImages($context->language->id, $products[$i]['id_product']);
                $product = new Product($products[$i]['id_product']);
                $products[$i]['link'] = $product_link->getProductLink($products[$i]['id_product'], $product->link_rewrite[1], $product->id_category_default, $product->ean13);
                $products[$i]['link_rewrite'] = $product->link_rewrite[1];
                $products[$i]['id_image'] = $images[0]['id_image'];
                $products[$i]['price'] = Tools::displayPrice(Tools::convertPrice($products[$i]['price_tax_incl'], $context->currency), $context->currency);
                if ($count < Configuration::get('TPTN_SEARCH_COUNT')) {
                    $result_products[] = $products[$i];
                    $count++;
                } else {
                    break;
                }
            }
        }
    }
    $context->smarty->assign(array(
        'enable_image' => Configuration::get('TPTN_SEARCH_IMAGE'),
        'enable_price' => Configuration::get('TPTN_SEARCH_PRICE'),
        'enable_name' => Configuration::get('TPTN_SEARCH_NAME'),
        'search_alert' => $tptnsearch->no_product,
        'link' => $context->link,
        'products' => $result_products,
    ));
    $context->smarty->display(dirname(__FILE__).'/views/templates/hook/popupsearch.tpl');
} else {
    echo '<div class="wrap_item">'.$tptnsearch->three_character.'</div>';
}
I believe changes must be done within this file.
I don't think your approach would give you the desired behavior. Basically, you need to create your own search query, or override the existing one and modify its SQL, because right now there are only LIKE '%text%' conditions, which means your text has to appear exactly as typed. So you would be able to find "gh he" but not "hi he".
Alternatively, you can split the search request on spaces and then search word by word, checking that all of the words match. I also think it would be better to change the LIKE pattern from '%text%' to 'text%' to avoid duplicates and search only by word beginnings.
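The real change belongs in the module's SQL, but the matching idea itself is easy to illustrate. The sketch below (written in JavaScript purely to show the logic, not as PrestaShop code) splits the query into fragments and accepts a product when every fragment is the prefix of some word in the product name:
// Sketch of the word-prefix matching logic (illustrative only, not PrestaShop code)
function matchesQuery(productName, query) {
    var words = productName.toLowerCase().split(/\s+/);
    var fragments = query.toLowerCase().split(/\s+/).filter(Boolean);
    // every fragment must be the beginning of at least one word, in any order
    return fragments.every(function (fragment) {
        return words.some(function (word) {
            return word.indexOf(fragment) === 0;
        });
    });
}

console.log(matchesQuery('high heels', 'hi he')); // true
console.log(matchesQuery('high heels', 'he hi')); // true
console.log(matchesQuery('high heels', 'gh he')); // false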

Detecting a category page in the isCategoryPage method

I currently have an SCA website with subcategories that need to display as a category page, not a product listing page (i.e. display the categories, not the products).
Currently, I have modified isCategoryPage to override Facets.Views.isCategoryPage so that it does this correctly. However, when doing a search on the site, the results page breaks and renders blank.
I am currently stuck at figuring out how to detect if I am on a search page rather than a category page.
The code is thus:
...
// @Overrides Facets.Views.isCategoryPage
isCategoryPage: function isCategoryPage(translator) {
    var currentFacets = translator.getAllFacets();
    var categories = translator.getCategoryPath();
    if (<--IsSearchPage() === true --->) {
        return (_.keys(categories[categories.length - 1].categories).length !== 0);
    } else {
        return (currentFacets.length === 1 &&
            currentFacets[0].id === 'category' &&
            categories &&
            CategoryHelper.showCategoryPage(categories)
        );
    }
},
...
As you can see the if statement is where I need a bit of help.
if (<--IsSearchPage() === true --->) {
What method, function, or code would detect whether the page is a search page, or whether the page URL contains /search? (Either would work.)
Thank you.
The proper update, after much trial and error:
isCategoryPage: function isCategoryPage(translator) {
    var currentFacets = translator.getAllFacets();
    var categories = translator.getCategoryPath();
    if (categories) {
        return (_.keys(categories[categories.length - 1].categories).length !== 0);
    } else {
        return (currentFacets.length === 1 &&
            currentFacets[0].id === 'category' &&
            categories &&
            CategoryHelper.showCategoryPage(categories)
        );
    }
},
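If you would rather use the URL check mentioned in the question, a minimal sketch is below. It only assumes the standard browser location API; whether the fragment or the path holds "/search" depends on how your SCA site builds its search URLs, so treat this as a starting point rather than the SCA-sanctioned way:
// Sketch: detect a search page by looking for '/search' in the current URL
function isSearchPage() {
    var url = (window.location.hash || '') + (window.location.pathname || '');
    return url.indexOf('/search') !== -1;
}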

Retrieve entire Word document in task pane app / office.js

Working in Word 2013 (desktop) and office.js, we see some functionality around the user's selection (GetSelectedDataAsync, SetSelectedDataAsync), but nothing that might let you view the entire (OpenXML) document. Am I missing something?
Office.context.document.getFileAsync will let you get the entire document in a choice of 3 formats:
compressed: returns the entire document (.pptx or .docx) in Office Open XML (OOXML) format as a byte array
pdf: returns the entire document in PDF format as a byte array
text: returns only the text of the document as a string. (Word only)
Here's the example taken from MSDN:
var i = 0;
var slices = 0;
function getDocumentAsPDF() {
    Office.context.document.getFileAsync("pdf", { sliceSize: 2097152 }, function (result) {
        if (result.status == "succeeded") {
            // If the getFileAsync call succeeded, then
            // result.value will return a valid File Object.
            myFile = result.value;
            slices = myFile.sliceCount;
            document.getElementById("result").innerText = " File size:" + myFile.size + " #Slices: " + slices;
            // Iterate over the file slices.
            for (i = 0; i < slices; i++) {
                var slice = myFile.getSliceAsync(i, function (result) {
                    if (result.status == "succeeded") {
                        doSomethingWithChunk(result.value.data);
                        if (slices == i) { // Means it's done traversing...
                            SendFileComplete();
                        }
                    }
                    else {
                        document.getElementById("result").innerText = result.error.message;
                    }
                });
            }
            myFile.closeAsync();
        }
        else {
            document.getElementById("result2").innerText = result.error.message;
        }
    });
}
This is not exactly what you asked for (it is only the body of the document), but it helped me, so I am posting it here since this is where I landed when I googled my problem.
The documentation at https://dev.office.com/reference/add-ins/word/body suggests that getOoxml() will get you the body of the document. There is also the text property, which will return the plain text content.
The way this API works is not overly straightforward; however, the examples in the online docs really help in getting started.
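For instance, a minimal sketch along those lines, using the Word JavaScript API's Word.run batching (it assumes Office.js is already loaded in the task pane):
// Sketch: read the document body as OOXML and as plain text
Word.run(function (context) {
    var body = context.document.body;
    var ooxml = body.getOoxml();   // queued; value is available after sync()
    body.load('text');             // queue loading of the plain text property
    return context.sync().then(function () {
        console.log(ooxml.value);  // OOXML markup of the body
        console.log(body.text);    // plain text of the body
    });
}).catch(function (error) {
    console.log('Error: ' + error);
});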
All the best,

Results pagination in Cassandra (CQL)

I am wondering how I can achieve pagination using Cassandra.
Let us say that I have a blog. The blog lists at most 10 posts per page. To access further posts, a user must click on the pagination menu to access page 2 (posts 11-20), page 3 (posts 21-30), etc.
Using SQL under MySQL, I could do the following:
SELECT * FROM posts LIMIT 20,10;
The first parameter of LIMIT is offset from the beginning of result set and second argument is amount of rows to fetch. The example above returns 10 rows starting from row 20.
How can I achieve the same effect in CQL?
I have found some solutions on Google, but all of them require having "the last result from the previous query". That works for a "next" button that paginates to the next 10-result set, but what if I want to jump from page 1 to page 5?
You don't need to use tokens if you are using Cassandra 2.0+.
Cassandra 2.0 has auto paging.
Instead of using the token function to create paging, it is now a built-in feature.
Now developers can iterate over the entire result set without having to care that its size is larger than the memory. As the client code iterates over the results, extra rows can be fetched while old ones are dropped.
Looking at this in Java, note that the SELECT statement returns all rows, and the number of rows retrieved at a time is controlled by the fetch size.
I've shown a simple statement here, but the same code can be written with a prepared statement coupled with a bound statement. It is possible to disable automatic paging if it is not desired. It is also important to test various fetch size settings, since you will want to keep the memory footprint small enough, but not so small that too many round-trips to the database are made. Check out this blog post to see how paging works server side.
Statement stmt = new SimpleStatement(
    "SELECT * FROM raw_weather_data"
    + " WHERE wsid = '725474:99999'"
    + " AND year = 2005 AND month = 6");
stmt.setFetchSize(24);
ResultSet rs = session.execute(stmt);
Iterator<Row> iter = rs.iterator();
while (!rs.isFullyFetched()) {
    rs.fetchMoreResults();
    Row row = iter.next();
    System.out.println(row);
}
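The DataStax Node.js driver (used later in this thread) offers the same automatic paging; a minimal sketch, assuming a connected cassandra-driver Client named client:
// Sketch: automatic paging with the Node.js cassandra-driver
var query = 'SELECT * FROM raw_weather_data WHERE wsid = ? AND year = 2005 AND month = 6';
client.eachRow(query, ['725474:99999'],
    { prepare: true, fetchSize: 100, autoPage: true }, // autoPage fetches the following pages transparently
    function (n, row) {
        console.log(row);            // invoked once per row, across all pages
    },
    function (err) {
        if (err) console.error(err); // called after the last page has been consumed
    }
);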
Try using the token function in CQL:
https://docs.datastax.com/en/cql-oss/3.3/cql/cql_using/useToken.html
Another suggestion: if you are using DSE, Solr supports deep paging:
https://cwiki.apache.org/confluence/display/solr/Pagination+of+Results
Manual Paging
The driver exposes a PagingState object that represents where we were in the result set when the last page was fetched:
ResultSet resultSet = session.execute("your query");
// iterate the result set...
PagingState pagingState = resultSet.getExecutionInfo().getPagingState();
This object can be serialized to a String or a byte array:
String string = pagingState.toString();
byte[] bytes = pagingState.toBytes();
This serialized form can be saved in some form of persistent storage to be reused later. When that value is retrieved later, we can deserialize it and reinject it in a statement:
PagingState pagingState = PagingState.fromString(string);
Statement st = new SimpleStatement("your query");
st.setPagingState(pagingState);
ResultSet rs = session.execute(st);
Note that the paging state can only be reused with the exact same statement (same query string, same parameters). Also, it is an opaque value that is only meant to be collected, stored and re-used. If you try to modify its contents or reuse it with a different statement, the driver will raise an error.
Src: https://docs.datastax.com/en/cql-oss/3.3/cql/cql_reference/cqlshPaging.html
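The Node.js driver exposes the same idea through result.pageState and the pageState query option (the answers further down this thread rely on exactly this); a minimal sketch, assuming a connected cassandra-driver Client named client and the posts table from the question:
// Sketch: manual paging with the Node.js driver's pageState
client.execute('SELECT * FROM posts', [], { prepare: true, fetchSize: 10 })
    .then(function (result) {
        console.log(result.rows);          // first page of rows
        var pageState = result.pageState;  // opaque string; persist it between requests
        // Later, resume where the previous page ended:
        return client.execute('SELECT * FROM posts', [], { prepare: true, fetchSize: 10, pageState: pageState });
    })
    .then(function (nextPage) {
        console.log(nextPage.rows);        // second page of rows
    });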
If you read the doc "Use paging state token to get next result" at
https://datastax.github.io/php-driver/features/result_paging/
you will see that we can use the "paging state token" to paginate at the application level.
So the PHP logic should look like this:
<?php
$limit = 10;
$offset = 20;

$cluster = Cassandra::cluster()->withContactPoints('127.0.0.1')->build();
$session = $cluster->connect("simplex");

$statement = new Cassandra\SimpleStatement("SELECT * FROM paging_entries LIMIT ".($limit + $offset));
$result = $session->execute($statement, new Cassandra\ExecutionOptions(array('page_size' => $offset)));

// Now $result has all rows up to "$offset", which we can skip, jumping to the next page to fetch "$limit" rows.
while ($result->pagingStateToken()) {
    $result = $session->execute($statement, new Cassandra\ExecutionOptions($options = array('page_size' => $limit, 'paging_state_token' => $result->pagingStateToken())));
    foreach ($result as $row) {
        printf("key: '%s' value: %d\n", $row['key'], $row['value']);
    }
}
?>
Although the count is available in CQL, so far I have not seen a good solution for the offset part...
So... one solution I have been contemplating was to create sets of pages using a background process.
In some table, I would create the blog page A as a set of references to page 1, 2, ... 10. Then another entry for blog page B pointing to pages 11 to 20, etc.
In other words, I would build my own index with a row key set to the page number. You could still make it somewhat flexible, since you can offer the user a choice of 10, 20 or 30 references per page. For example, when set to 30, you display sets 1, 2 and 3 as page A, sets 4, 5 and 6 as page B, etc.
And if you have a backend process to handle all of that, you can update your lists as new pages are added and old pages are deleted from the blog. The process should be really fast (like 1 min. for 1,000,000 rows if even that slow...) and then you can find the pages to display in your list pretty much instantaneously. (Obviously, if you are to have thousands of users each posting hundreds of pages... that number can grow quickly.)
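As a rough sketch of what such a page index could look like (the table and column names here are hypothetical, not taken from the answer above), each index row maps a page number to the posts that page should display:
// Hypothetical index table:
//   CREATE TABLE blog_page_index (page_number int PRIMARY KEY, post_ids list<timeuuid>);
// Sketch: jump straight to any page by reading its precomputed index row
function fetchPage(client, pageNumber) {
    return client
        .execute('SELECT post_ids FROM blog_page_index WHERE page_number = ?', [pageNumber], { prepare: true })
        .then(function (result) {
            var row = result.first();
            var postIds = row ? row.post_ids : [];
            // Fetch the actual posts by id (IN on the partition key is fine for a small, fixed-size list)
            return client.execute('SELECT * FROM posts WHERE id IN ?', [postIds], { prepare: true });
        });
}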
Where it becomes more complicated is if you wanted to offer a complex WHERE clause. By default a blog shows you a list of all the posts from the newest to the oldest. You could also offer lists of posts with the tag "Cassandra", or maybe you want to reverse the order, etc. That makes it difficult unless you have some advanced way to create your index(es). On my end I have a C-like language which peeks and pokes at the values in a row to (a) select them and, if selected, (b) sort them. In other words, on my end I can already have WHERE clauses as complex as what you'd have in SQL. However, I do not yet break my lists into pages. Next step, I suppose...
Using the Cassandra Node.js driver (Koa.js, Marko.js): Pagination
Problem
Due to the absence of skip functionality, we need to work around it. Below is an implementation of manual paging for a Node.js app, in case anyone can get an idea from it.
code for simple users list
navigate between next and previous page states
easy to replicate
There are two solutions I am going to state here, but I have only given the code for Solution 1 below.
Solution 1: Maintain page states for next and previous records (maintain a stack or whatever data structure fits best).
Solution 2: Loop through all records with a limit, save all possible page states in a variable, and generate pages relative to their page states.
Using this commented code in the model, we can get all the page states:
//for the next flow
//if (result.nextPage) {
// Retrieve the following pages:
// the same row handler from above will be used
// result.nextPage();
//}
Router Functions
var userModel = require('/models/users');

public.get('/users', users);
public.post('/users', filterUsers);

var users = function* () { // get request
    var data = {};
    var pageState = { "next": "", "previous": "" };
    try {
        var userCount = yield userModel.Count(); // count all users with a basic count query
        var currentPage = 1;
        var pager = yield generatePaging(currentPage, userCount, pagingMaxLimit);
        var userList = yield userModel.List(pager);
        data.pageNumber = currentPage;
        data.TotalPages = pager.TotalPages;
        console.log('--------------what now--------------');
        data.pageState_next = userList.pageStates.next;
        data.pageState_previous = userList.pageStates.previous;
        console.log("next ", data.pageState_next);
        console.log("previous ", data.pageState_previous);
        data.previousStates = null;
        data.isPrevious = false;
        if ((userCount / pagingMaxLimit) > 1) {
            data.isNext = true;
        }
        data.userList = userList;
        data.totalRecords = userCount;
        console.log('--------------------userList--------------------', data.userList);
        //pass to html template
    }
    catch (e) {
        console.log("err ", e);
        log.info("userList error : ", e);
    }
    this.body = this.stream('./views/userList.marko', data);
    this.type = 'text/html';
};
//post filter and get list
var filterUsers = function* () {
    console.log("<------------------Form Post Started----------------->");
    var data = {};
    var totalCount;
    data.isPrevious = true;
    data.isNext = true;
    var form = this.request.body;
    console.log("----------------formdata--------------------", form);
    var currentPage = parseInt(form.hdpagenumber); // page number hidden in html
    console.log("-------before current page------", currentPage);
    var pageState = null;
    try {
        var statesArray = [];
        if (form.hdallpageStates && form.hdallpageStates !== '') {
            statesArray = form.hdallpageStates.split(',');
        }
        console.log(statesArray);
        //develop stack to track paging states
        if (form.hdpagestateRequest === 'next') {
            console.log('--------------------------next---------------------');
            currentPage = currentPage + 1;
            statesArray.push(form.hdpageState_next);
            pageState = form.hdpageState_next;
        }
        else if (form.hdpagestateRequest === 'previous') {
            console.log('--------------------------pre---------------------');
            currentPage = currentPage - 1;
            var p_st = statesArray.length - 2; // second last index
            console.log('this index of array to be removed ', p_st);
            pageState = statesArray[p_st];
            statesArray.splice(p_st, 1);
            //pageState = statesArray.pop();
        }
        else if (form.hdispaging === 'false') {
            currentPage = 1;
            pageState = null;
            statesArray = [];
        }
        data.previousStates = statesArray;
        console.log("paging true");
        totalCount = yield userModel.Count();
        var pager = yield generatePaging(form.hdpagenumber, totalCount, pagingMaxLimit);
        data.pageNumber = currentPage;
        data.TotalPages = pager.TotalPages;
        //filter function - not yet constructed
        var searchUsers = yield userModel.searchList(pager, pageState);
        data.usersList = searchUsers;
        if (searchUsers.pageStates) {
            data.pageStates = searchUsers.pageStates;
            data.next = searchUsers.nextPage;
            data.pageState_next = searchUsers.pageStates.next;
            data.pageState_previous = searchUsers.pageStates.previous;
            //show previous and next buttons accordingly
            if (currentPage == 1 && pager.TotalPages > 1) {
                data.isPrevious = false;
                data.isNext = true;
            }
            else if (currentPage == 1 && pager.TotalPages <= 1) {
                data.isPrevious = false;
                data.isNext = false;
            }
            else if (currentPage >= pager.TotalPages) {
                data.isPrevious = true;
                data.isNext = false;
            }
            else {
                data.isPrevious = true;
                data.isNext = true;
            }
        }
        else {
            data.isPrevious = false;
            data.isNext = false;
        }
        console.log("response ", searchUsers);
        data.totalRecords = totalCount;
        //pass to html template
    }
    catch (e) {
        console.log("err ", e);
        log.info("user list error : ", e);
    }
    console.log("<------------------Form Post Ended----------------->");
    this.body = this.stream('./views/userList.marko', data);
    this.type = 'text/html';
};
//Paging function
var generatePaging = function* (currentpage, count, pageSizeTemp) {
    var paging = new Object();
    var pagesize = pageSizeTemp;
    var totalPages = 0;
    var pageNo = currentpage == null ? null : currentpage;
    var skip = pageNo == null ? 0 : parseInt(pageNo - 1) * pagesize;
    var pageNumber = pageNo != null ? pageNo : 1;
    totalPages = pagesize == null ? 0 : Math.ceil(count / pagesize);
    paging.skip = skip;
    paging.limit = pagesize;
    paging.pageNumber = pageNumber;
    paging.TotalPages = totalPages;
    return paging;
};
Model Functions
var clientdb = require('../utils/cassandradb')();

var Users = function (options) {
    //this.init();
    _.assign(this, options);
};

Users.List = function* (limit) { // first time
    var myresult; var res = [];
    res.pageStates = { "next": "", "previous": "" };
    const options = { prepare: true, fetchSize: limit };
    console.log('----------did i appeared first?-----------');
    yield new Promise(function (resolve, reject) {
        clientdb.eachRow('SELECT * FROM users_lookup_history', [], options, function (n, row) {
            console.log('----paging----rows');
            res.push(row);
        }, function (err, result) {
            if (err) {
                console.log("error ", err);
            }
            else {
                res.pageStates.next = result.pageState;
                res.nextPage = result.nextPage; // next page function
            }
            resolve(result);
        });
    }).catch(function (e) {
        console.log("error ", e);
    }); // promise ends
    console.log('page state ', res.pageStates);
    return res;
};
Users.searchList = function* (pager, pageState) { // paging filtering
    console.log("|------------Query Started-------------|");
    console.log("pageState if any ", pageState);
    var res = [], myresult;
    res.pageStates = { "next": "" };
    var query = "SELECT * FROM users_lookup_history ";
    var params = [];
    console.log('current pageState ', pageState);
    const options = { pageState: pageState, prepare: true, fetchSize: pager.limit };
    console.log('----------------did i appeared first?------------------');
    yield new Promise(function (resolve, reject) {
        clientdb.eachRow(query, [], options, function (n, row) {
            console.log('----Users paging----rows');
            res.push(row);
        }, function (err, result) {
            if (err) {
                console.log("error ", err);
            }
            else {
                res.pageStates.next = result.pageState;
                res.nextPage = result.nextPage;
            }
            //for the next flow
            //if (result.nextPage) {
            //    // Retrieve the following pages:
            //    // the same row handler from above will be used
            //    result.nextPage();
            //}
            resolve(result);
        });
    }).catch(function (e) {
        console.log("error ", e);
        info.log('something');
    }); // promise ends
    console.log('page state ', pageState);
    console.log("|------------Query Ended-------------|");
    return res;
};
HTML side
<div class="box-footer clearfix">
<ul class="pagination pagination-sm no-margin pull-left">
<if test="data.isPrevious == true">
<li><a class='submitform_previous' href="">Previous</a></li>
</if>
<if test="data.isNext == true">
<li><a class="submitform_next" href="">Next</a></li>
</if>
</ul>
<ul class="pagination pagination-sm no-margin pull-right">
<li>Total Records : $data.totalRecords</li>
<li> | Total Pages : $data.TotalPages</li>
<li> | Current Page : $data.pageNumber</li>
</ul>
</div>
I am not very experienced with Node.js and Cassandra, so this solution can surely be improved, but Solution 1 is working example code to start with for the paging idea. Cheers.
a detailed blog.
Our use case was similar: pull everything from a Cassandra table (Cassandra does this smartly by fetching ~5000 rows in one go and returning a cursor), do heavy personalized processing on each row, and keep going. Once the iteration gets close to 5000 rows, it fetches the next chunk of 5000 internally and adds it to the result cursor. It does this so seamlessly that you don't even feel the magic happening behind the scenes.
But it became a bottleneck for us: iterating over a chunk took some time, and before we reached the end of the chunk, Cassandra decided the connection was not being used and closed it automatically with a timeout. So we implemented paging with the page state ourselves.
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.query import SimpleStatement

# connection with cassandra
cluster = Cluster(["127.0.0.1"], auth_provider=PlainTextAuthProvider(username="pankaj", password="pankaj"))
session = cluster.connect()

# setting keyspace
session.set_keyspace("my_keyspace")

# set fetch size
fetch_size = 100

# fetches a fresh chunk with the given paging state
def fetch_a_fresh_chunk(paging_state=None):
    query = "SELECT * FROM my_cute_cassandra_table;"
    statement = SimpleStatement(query, fetch_size=fetch_size)
    return session.execute(statement, paging_state=paging_state)

# It will process fetch_size records per chunk
next_page_available = True
paging_state = None
data_count = 0

while next_page_available is True:
    # fetch a new chunk with the given paging state
    results = fetch_a_fresh_chunk(paging_state)
    paging_state = results.paging_state
    # no further paging state means this was the last chunk
    if paging_state is None:
        next_page_available = False
    for result in results:
        # process payload here.....
        # payload processed
        data_count += 1
        # once we reach the fetch size, stop Cassandra from internally fetching more chunks
        if data_count == fetch_size:
            data_count = 0
            break

Can I allow the extension user to choose matching domains?

Can I allow the domain matching for my extension to be user configurable?
I'd like to let my users choose when the extension runs.
To implement customizable "match patterns" for content scripts, the content script needs to be executed by the background page using the chrome.tabs.executeScript method (after detecting a page load using the chrome.tabs.onUpdated event listener).
Because the match pattern check is not exposed in any API, you have to implement the check yourself. Chrome implements it in url_pattern.cc, and the specification is available at match patterns.
Here's an example of a parser:
/**
 * @param String input A match pattern
 * @returns null if input is invalid
 * @returns String to be passed to the RegExp constructor */
function parse_match_pattern(input) {
    if (typeof input !== 'string') return null;
    var match_pattern = '(?:^'
      , regEscape = function(s) {return s.replace(/[[^$.|?*+(){}\\]/g, '\\$&');}
      , result = /^(\*|https?|file|ftp|chrome-extension):\/\//.exec(input);
    // Parse scheme
    if (!result) return null;
    input = input.substr(result[0].length);
    match_pattern += result[1] === '*' ? 'https?://' : result[1] + '://';
    // Parse host if scheme is not `file`
    if (result[1] !== 'file') {
        if (!(result = /^(?:\*|(\*\.)?([^\/*]+))(?=\/)/.exec(input))) return null;
        input = input.substr(result[0].length);
        if (result[0] === '*') { // host is '*'
            match_pattern += '[^/]+';
        } else {
            if (result[1]) { // Subdomain wildcard exists
                match_pattern += '(?:[^/]+\\.)?';
            }
            // Append host (escape special regex characters)
            match_pattern += regEscape(result[2]);
        }
    }
    // Add remainder (path)
    match_pattern += input.split('*').map(regEscape).join('.*');
    match_pattern += '$)';
    return match_pattern;
}
Example: Run content script on pages which match the pattern
In the example below, the array is hard-coded. In practice, you would store the match patterns in an array using localStorage or chrome.storage.
// Example: Parse a list of match patterns:
var patterns = ['*://*/*', '*exampleofinvalid*', 'file://*'];

// Parse the list and filter out (exclude) invalid match patterns
var parsed = patterns.map(parse_match_pattern)
                     .filter(function(pattern) { return pattern !== null; });

// Create the pattern for validation:
var pattern = new RegExp(parsed.join('|'));

// Example of filtering:
chrome.tabs.onUpdated.addListener(function(tabId, changeInfo, tab) {
    if (changeInfo.status === 'complete') {
        var url = tab.url.split('#')[0]; // Exclude URL fragments
        if (pattern.test(url)) {
            chrome.tabs.executeScript(tabId, {
                file: 'contentscript.js'
                // or: code: '<JavaScript code here>'
                // Other valid options: allFrames, runAt
            });
        }
    }
});
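As a rough sketch of the user-configurable part ("userPatterns" is a hypothetical storage key, not from the original answer), the patterns could be loaded from chrome.storage and the RegExp rebuilt whenever the user edits them:
// Sketch: keep the patterns in chrome.storage instead of hard-coding them
function rebuildPattern(patterns) {
    var parsed = (patterns || []).map(parse_match_pattern)
                                 .filter(function(p) { return p !== null; });
    // Reassign the `pattern` variable used by the onUpdated listener above
    pattern = parsed.length ? new RegExp(parsed.join('|')) : /(?!)/; // /(?!)/ never matches
}

// Load the user's patterns on startup...
chrome.storage.sync.get({ userPatterns: [] }, function(items) {
    rebuildPattern(items.userPatterns);
});

// ...and rebuild whenever they change (e.g. from an options page)
chrome.storage.onChanged.addListener(function(changes, area) {
    if (area === 'sync' && changes.userPatterns) {
        rebuildPattern(changes.userPatterns.newValue);
    }
});
Using chrome.storage this way also requires the "storage" permission, in addition to the permissions listed below.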
To get this to work, you need to request the following permissions in the manifest file:
"tabs" - To enable the necessary tabs API.
"<all_urls>" - To be able to use chrome.tabs.executeScript to execute a content script in a specific page.
A fixed list of permissions
If the set of match patterns is fixed (ie. the user cannot define new ones, only toggle patterns), "<all_urls>" can be replaced with this set of permissions. You may even use optional permissions to reduce the initial number of requested permissions (clearly explained in the documentation of chrome.permissions).
