AWS CDK / NodejsFunction: beforeInstall, beforeBundling, afterBundling - node.js

I'm working on a project with AWS infrastructure and use aws-cdk-lib for IaC to simplify the whole process.
The flow is the following:
A user uploads a photo/video to an S3 bucket.
A Lambda is triggered to compress the file with sharp and store it in another S3 bucket with a short TTL.
An S3 notification event triggers another Lambda, which uploads the S3 object to a Storj bucket.
So I need the uplink-nodejs library to store images and videos on the Storj DCS service.
The issue is that, in order to install uplink-nodejs, I need to install the make command and copy ./node_modules/uplink-nodejs/* to / so that the uplink command is available on the system.
But I can't find a way to install all the required dependencies and run the required commands.
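Concretely, the commands I need to run inside the bundling container boil down to something like this (which is what the commandHooks below attempt):
yum -y install make
cp -r ./node_modules/uplink-nodejs /usr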
I've already tried multiple solutions:
Use the NodejsFunction bundling options (beforeInstall, beforeBundling, afterBundling).
Create a Dockerfile in my lambda folder.
The first solution throws an error for the su and sudo commands, which prevents me from installing make.
The second solution doesn't seem directly applicable to my situation (sketched below for reference).
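As far as I understand it, the Dockerfile approach would mean pointing bundling.dockerImage at an image built from that Dockerfile (one that extends the default bundling image and preinstalls make). A rough sketch, with illustrative paths:
// Build a custom bundling image from the Dockerfile in the lambda folder
const bundlingImage = DockerImage.fromBuild(
  path.join(__dirname, '../../lambda-fns/src/functions/storj_upload')
);
new NodejsFunction(this, "storj_image_lambda", {
  entry: path.join(__dirname, '../../lambda-fns/src/functions/storj_upload/index.ts'),
  bundling: {
    dockerImage: bundlingImage, // used instead of the default bundling image
    nodeModules: ["uplink-nodejs"],
    forceDockerBundling: true,
  },
});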
Does anyone have an idea of what's happening?
Here's the AWS CDK NestedStack for the ContentService with the lambdas:
import * as cdk from 'aws-cdk-lib';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as dynamodb from 'aws-cdk-lib/aws-dynamodb';
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as path from 'path';
import { Construct } from 'constructs';
import { NodejsFunction } from 'aws-cdk-lib/aws-lambda-nodejs';
import { DockerImage } from 'aws-cdk-lib';
import { LambdaDestination } from 'aws-cdk-lib/aws-s3-notifications';

interface ContentServiceProps extends cdk.NestedStackProps {
  main_table: dynamodb.Table;
  user_content_bucket: s3.Bucket;
}

export class ContentService extends cdk.NestedStack {
  public readonly content_service_handler: NodejsFunction;
  public readonly storj_image_lambda: NodejsFunction;

  constructor(scope: Construct, id: string, props: ContentServiceProps) {
    super(scope, id, props);

    this.storj_image_lambda = new NodejsFunction(this, "storj_image_lambda", {
      entry: path.join(__dirname, '../../lambda-fns/src/functions/storj_upload/index.ts'),
      bundling: {
        nodeModules: ["uplink-nodejs", "node-gyp"],
        forceDockerBundling: true,
        commandHooks: {
          beforeInstall(_inputDir, outputDir) {
            return [
              'export PATH=$PATH:$GOPATH/bin',
              'sudo yum -y install make'
            ];
          },
          beforeBundling(inputDir, outputDir) {
            return [
              ''
            ];
          },
          afterBundling(inputDir, outputDir) {
            return [`cp -r ./node_modules/uplink-nodejs /usr`];
          }
        }
      }
    });

    this.content_service_handler = new NodejsFunction(this, "content_service_handler", {
      runtime: lambda.Runtime.NODEJS_16_X,
      handler: 'handler',
      entry: path.join(__dirname, "../../lambda-fns/src/functions/content/index.ts"),
      environment: {
        TABLE_NAME: props.main_table.tableName,
        USER_CONTENT: props.user_content_bucket.bucketName,
        STORJ_LAMBDA: this.storj_image_lambda.functionName
      }
    });

    props.main_table.grantFullAccess(this.content_service_handler);
    props.user_content_bucket.grantRead(this.content_service_handler);
    props.user_content_bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new LambdaDestination(this.storj_image_lambda));
  }
}
And here are the logs I get when I try to deploy my stack:
Bundling asset MainStack/content_service/storj_image_lambda/Code/Stage...
asset-output/index.js 1.5kb
⚡ Done in 21ms
bash: sudo: command not found
/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/core/lib/asset-staging.js:2
Error: Failed to bundle asset MainStack/content_service/storj_image_lambda/Code/Stage, bundle output is located at /Users/thomgeenen/Git/nude_safer_cdk/cdk.out/bundling-temp-4e95a752d2aca74dd84aa402102b31591eb7ee9ec0a94b306360353a61e924f0-error: Error: docker exited with status 127
at AssetStaging.bundle (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/core/lib/asset-staging.js:2:614)
at AssetStaging.stageByBundling (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/core/lib/asset-staging.js:1:4506)
at stageThisAsset (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/core/lib/asset-staging.js:1:1867)
at Cache.obtain (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/core/lib/private/cache.js:1:242)
at new AssetStaging (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/core/lib/asset-staging.js:1:2262)
at new Asset (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/aws-s3-assets/lib/asset.js:1:736)
at AssetCode.bind (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/aws-lambda/lib/code.js:1:4628)
at new Function (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/aws-lambda/lib/function.js:1:2803)
at new NodejsFunction (/Users/thomgeenen/Git/nude_safer_cdk/node_modules/aws-cdk-lib/aws-lambda-nodejs/lib/function.js:1:1171)
at new ContentService (/Users/thomgeenen/Git/nude_safer_cdk/lib/services/content/index.ts:24:35)
Detected file changes during deployment. Invoking 'cdk deploy' again
[+] Building 0.5s (13/13) FINISHED
=> [internal] load build definition from Dockerfile 0.0s
=> => transferring dockerfile: 37B 0.0s
=> [internal] load .dockerignore 0.0s
=> => transferring context: 2B 0.0s
=> [internal] load metadata for public.ecr.aws/sam/build-nodejs14.x:latest 0.4s
=> [1/9] FROM public.ecr.aws/sam/build-nodejs14.x@sha256:cfe32c14b97a6d5d7128f4e110c6d11b80bf89c0cca8a3493879f8c587627655 0.0s
=> CACHED [2/9] RUN npm install --global yarn@1.22.5 0.0s
=> CACHED [3/9] RUN npm install --global pnpm 0.0s
=> CACHED [4/9] RUN npm install --global typescript 0.0s
=> CACHED [5/9] RUN npm install --global --unsafe-perm=true esbuild@0 0.0s
=> CACHED [6/9] RUN mkdir /tmp/npm-cache && chmod -R 777 /tmp/npm-cache && npm config --global set cache /tmp/npm-cache 0.0s
=> CACHED [7/9] RUN mkdir /tmp/yarn-cache && chmod -R 777 /tmp/yarn-cache && yarn config set cache-folder /tmp/yarn-cache 0.0s
=> CACHED [8/9] RUN npm config --global set update-notifier false 0.0s
=> CACHED [9/9] RUN /sbin/useradd -u 1000 user && chmod 711 / 0.0s
=> exporting to image 0.0s
=> => exporting layers 0.0s
=> => writing image sha256:c83389551577052395325ed8a0fb51c59639344b8c92baa1656956f23c765d18 0.0s
=> => naming to docker.io/library/cdk-7ef608663d730a301f1ab98604e57fd96273751c63db25a1a75f1390f462c655 0.0s
Use 'docker scan' to run Snyk tests against images to find vulnerabilities and learn how to fix them

Related

Rails doesn't send email in Docker

I need some help.
I'm trying to send email using Rails and the default mail service. In development everything works, but after dockerizing the project I get the error: "wrong authentication type 'plain'".
------------------------ My Dockerfile ------------------------
FROM ruby:3.1.2
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
RUN mkdir /app
WORKDIR /app
COPY Gemfile .
COPY Gemfile.lock .
RUN gem update bundler
RUN bundle install
COPY . .
ENV RAILS_ENV production
EXPOSE 3000
CMD rails server -b 0.0.0.0 -p 3000
------------------------ My .env file ------------------------
SMTP_ADDRESS='smtp.gmail.com'
SMTP_PORT=587
SMTP_AUTHENTICATION='plain'
SMTP_USER_NAME='login'
SMTP_PASSWORD='password'
DATABASE_NAME='dbname'
DATABASE_USERNAME='dbuser'
DATABASE_PASSWORD='dbpassword'
DATABASE_PORT=5432
DATABASE_HOST='host.docker.internal'
------------------------ My production.rb file ------------------------
config.action_mailer.delivery_method = :smtp
host = 'example.com' #replace with your own url
config.action_mailer.default_url_options = { host: host }
config.action_mailer.perform_caching = false
config.action_mailer.raise_delivery_errors = true
config.action_mailer.delivery_method = :smtp
config.action_mailer.smtp_settings = {
:address => ENV['SMTP_ADDRESS'],
:port => ENV['SMTP_PORT'],
:authentication => ENV['SMTP_AUTHENTICATION'],
:user_name => ENV['SMTP_USER_NAME'],
:password => ENV['SMTP_PASSWORD'],
:enable_starttls_auto => true,
:openssl_verify_mode => 'none' #Use this because ssl is activated but we have no certificate installed. So clients need to confirm to use the untrusted url.
}
I think you may need to pass the ENV variables into the Dockerfile, or if you have a docker-compose file, pass them there.
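For example, if the container is run directly, something like this would forward them at runtime (the image name is illustrative):
# pass every variable from the .env file into the container
docker run --env-file .env -p 3000:3000 my-rails-app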

How can I connect to Memgraph database and executes queries using Rust?

I'm starting to learn Rust. I want to try out connecting to the Memgraph database and executing a query. I'm running a local instance of Memgraph Platform in Docker. I'm running it with default settings.
Since you are using Docker: right after you create a new Rust project with cargo new memgraph_rust --bin, add the following line to the Cargo.toml file under the [dependencies] line:
rsmgclient = "1.0.0"
Then, add the following code to the src/main.rs file:
use rsmgclient::{ConnectParams, Connection, SSLMode};

fn main() {
    // Parameters for connecting to database.
    let connect_params = ConnectParams {
        host: Some(String::from("172.17.0.2")),
        sslmode: SSLMode::Disable,
        ..Default::default()
    };

    // Make a connection to the database.
    let mut connection = match Connection::connect(&connect_params) {
        Ok(c) => c,
        Err(err) => panic!("{}", err)
    };

    // Execute a query.
    let query = "CREATE (u:User {name: 'Alice'})-[:Likes]->(m:Software {name: 'Memgraph'}) RETURN u, m";
    match connection.execute(query, None) {
        Ok(columns) => println!("Columns: {}", columns.join(", ")),
        Err(err) => panic!("{}", err)
    };

    // Fetch all query results.
    match connection.fetchall() {
        Ok(records) => {
            for value in &records[0].values {
                println!("{}", value);
            }
        },
        Err(err) => panic!("{}", err)
    };

    // Commit any pending transaction to the database.
    match connection.commit() {
        Ok(()) => {},
        Err(err) => panic!("{}", err)
    };
}
Now, create a new file in the project root directory /memgraph_rust and name it Dockerfile:
# Set base image (host OS)
FROM rust:1.56
# Install CMake
RUN apt-get update && \
apt-get --yes install cmake
# Install mgclient
RUN apt-get install -y git cmake make gcc g++ libssl-dev clang && \
git clone https://github.com/memgraph/mgclient.git /mgclient && \
cd mgclient && \
git checkout 5ae69ea4774e9b525a2be0c9fc25fb83490f13bb && \
mkdir build && \
cd build && \
cmake .. && \
make && \
make install
# Set the working directory in the container
WORKDIR /code
# Copy the dependencies file to the working directory
COPY Cargo.toml .
# Copy the content of the local src directory to the working directory
RUN mkdir src
COPY src/ ./src
# Generate binary using the Rust compiler
RUN cargo build
# Command to run on container start
CMD [ "cargo", "run" ]
All that is left now is to get the address with docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' CONTAINER_ID, create an image with docker build -t memgraph_rust ., and start the application with docker run memgraph_rust.
If you ever decide to take your Rust program to an environment that doesn't have Docker, you may need to install the rsmgclient driver.
The complete documentation for connecting using Rust can be found at Rust quick start guide on the Memgraph site.

Vite: Could not resolve entry module (index.html)

I am new to OpenShift 3.11 deployment. I created a multistage Dockerfile for a React application; the build went correctly on my local machine, but when I run it on the OpenShift cluster I get the error below:
> kncare-ui@0.1.0 build
> tsc && vite build
vite v2.9.9 building for production...
✓ 0 modules transformed.
Could not resolve entry module (index.html).
error during build:
Error: Could not resolve entry module (index.html).
at error (/app/node_modules/rollup/dist/shared/rollup.js:198:30)
at ModuleLoader.loadEntryModule (/app/node_modules/rollup/dist/shared/rollup.js:22680:20)
at async Promise.all (index 0)
error: build error: running 'npm run build' failed with exit code 1
and this is my Dockerfile:
FROM node:16.14.2-alpine as build-stage
RUN mkdir -p /app/
WORKDIR /app/
RUN chmod -R 777 /app/
COPY package*.json /app/
COPY tsconfig.json /app/
COPY tsconfig.node.json /app/
RUN npm ci
COPY ./ /app/
RUN npm run build
FROM nginxinc/nginx-unprivileged
#FROM bitnami/nginx:latest
COPY --from=build-stage /app/dist/ /usr/share/nginx/html
#CMD ["nginx", "-g", "daemon off;"]
ENTRYPOINT ["nginx", "-g", "daemon off;"]
EXPOSE 80
Vite by default uses an HTML page as its entry point, so you either need to create one or, if you don't have an HTML page, use Vite in "Library Mode".
https://vitejs.dev/guide/build.html#library-mode
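If you go the first route, a minimal index.html in the project root is enough; something like this (the script path depends on where your entry file actually lives):
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8" />
    <title>kncare-ui</title>
  </head>
  <body>
    <div id="root"></div>
    <!-- adjust to your real entry file -->
    <script type="module" src="/src/main.tsx"></script>
  </body>
</html>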
For Library Mode, from the docs:
// vite.config.js
const path = require('path')
const { defineConfig } = require('vite')

module.exports = defineConfig({
  build: {
    lib: {
      entry: path.resolve(__dirname, 'lib/main.js'),
      name: 'MyLib',
      fileName: (format) => `my-lib.${format}.js`
    },
    rollupOptions: {
      // make sure to externalize deps that shouldn't be bundled
      // into your library
      external: ['vue'],
      output: {
        // Provide global variables to use in the UMD build
        // for externalized deps
        globals: {
          vue: 'Vue'
        }
      }
    }
  }
})
If you're using ES Modules (i.e., import syntax), check your package.json to confirm the type field is set to module.
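A minimal illustration (other fields omitted):
{
  "type": "module"
}
The ESM form of the config then looks like this: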
// vite.config.js
import * as path from 'path';
import { defineConfig } from "vite";

const config = defineConfig({
  build: {
    lib: {
      entry: path.resolve(__dirname, 'lib/main.js'),
      name: 'MyLib',
      fileName: (format) => `my-lib.${format}.js`
    },
    rollupOptions: {
      // make sure to externalize deps that shouldn't be bundled
      // into your library
      external: ['vue'],
      output: {
        // Provide global variables to use in the UMD build
        // for externalized deps
        globals: {
          vue: 'Vue'
        }
      }
    }
  }
})

export default config;
I had the same issue because of .dockerignore. Make sure your index.html is not ignored.
If you are ignoring everything (**), you can add !index.html on the next line and try again.
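So the relevant part of .dockerignore would look like:
**
!index.html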

Yarn install production dependencies of a single package in workspace

I'm trying to install the production dependencies only for a single package in my workspace. Is that possible?
I've already tried this:
yarn workspace my-package-in-workspace install -- --prod
But it is installing all production dependencies of all my packages.
Yarn 1 doesn't support it, as far as I know.
If you are trying to install a specific package in a Dockerfile, there is a workaround:
copy the yarn.lock file and the root package.json
copy only the package.json files that you need: your package's and those of the other packages your package depends on (locally in the monorepo)
in the Dockerfile, manually remove all the devDependencies from all the package.json files that you copied
run yarn install on the root package.json
Note:
Deterministic installation - it is recommended in monorepos to force a deterministic install - https://stackoverflow.com/a/64503207/806963
Full Dockerfile example:
FROM node:12
WORKDIR /usr/project
COPY yarn.lock package.json remove-all-dev-deps-from-all-package-jsons.js change-version.js ./
ARG package_path=packages/dancer-placing-manager
COPY ${package_path}/package.json ./${package_path}/package.json
RUN node remove-all-dev-deps-from-all-package-jsons.js && rm remove-all-dev-deps-from-all-package-jsons.js
RUN yarn install --frozen-lockfile --production
COPY ${package_path}/dist/src ./${package_path}/dist/src
COPY ${package_path}/src ./${package_path}/src
CMD node --unhandled-rejections=strict ./packages/dancer-placing-manager/dist/src/index.js
remove-all-dev-deps-from-all-package-jsons.js:
const fs = require('fs')
const path = require('path')
const { execSync } = require('child_process')

async function deleteDevDeps(packageJsonPath) {
  const packageJson = require(packageJsonPath)
  delete packageJson.devDependencies
  await new Promise((res, rej) =>
    fs.writeFile(packageJsonPath, JSON.stringify(packageJson, null, 2), 'utf-8', error => (error ? rej(error) : res())),
  )
}

function getSubPackagesPaths(repoPath) {
  const result = execSync(`yarn workspaces --json info`).toString()
  const workspacesInfo = JSON.parse(JSON.parse(result).data)
  return Object.values(workspacesInfo)
    .map(workspaceInfo => workspaceInfo.location)
    .map(packagePath => path.join(repoPath, packagePath, 'package.json'))
}

async function main() {
  const repoPath = __dirname
  const packageJsonPath = path.join(repoPath, 'package.json')
  await deleteDevDeps(packageJsonPath)
  await Promise.all(getSubPackagesPaths(repoPath).map(packageJsonPath => deleteDevDeps(packageJsonPath)))
}

if (require.main === module) {
  main()
}
It looks like this is easily possible now with Yarn 2: https://yarnpkg.com/cli/workspaces/focus
But I haven't tried it myself.
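If I read the docs correctly, it would be something along these lines (it requires the workspace-tools plugin; the package name is illustrative):
yarn plugin import workspace-tools
yarn workspaces focus --production my-package-in-workspace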
Here is my solution for Yarn 1:
# Install dependencies for the whole monorepo because
# 1. The --ignore-workspaces flag is not implemented https://github.com/yarnpkg/yarn/issues/4099
# 2. The --focus flag is broken https://github.com/yarnpkg/yarn/issues/6715
# Avoid the target workspace dependencies to land in the root node_modules.
sed -i 's|"dependencies":|"workspaces": { "nohoist": ["**"] }, "dependencies":|g' apps/target-app/package.json
# Run `yarn install` twice to workaround https://github.com/yarnpkg/yarn/issues/6988
yarn || yarn
# Find all linked node_modules and dereference them so that there are no broken
# symlinks if the target-app is copied somewhere. (Don't use
# `cp -rL apps/target-app some/destination` because then it also dereferences
# node_modules/.bin/* and thus breaks them.)
cd apps/target-app/node_modules
for f in $(find . -maxdepth 1 -type l)
do
l=$(readlink -f $f) && rm $f && cp -rf $l $f
done
Now apps/target-app can be copied and used as a standalone app.
I would not recommend it for production. It is slow (because it installs dependencies for the whole monorepo) and not really reliable (because there may be additional issues with symlinks).
You may try
yarn workspace @my-monorepo/my-package-in-workspace install -- --prod

How can I install a local RPM using Puppet

I am trying to install a particular RPM using Puppet. My init.pp is:
class nmap {
  package { 'nmap':
    provider => 'rpm',
    source   => "<Local PATH to the RPM>",
  }
}
and the RPM is in ...modules/nmap/files.
If I move the RPM to manifests and provide the RPM file name in source => '', like this:
class nmap {
  package { 'nmap':
    provider => 'rpm',
    source   => "rpm-name.rpm",
  }
}
it works. But how can I specify the source path with ../files/ and have puppet apply succeed?
When I use:
source => 'puppet:///files/nmap-6.45-1.x86_64.rpm',
I get an error:
Debug: Executing '/bin/rpm -i puppet:///files/nmap-6.45-1.x86_64.rpm'
Error: Execution of '/bin/rpm -i puppet:///files/nmap-6.45-1.x86_64.rpm' returned 1: error: open of puppet:///files/nmap-6.45-1.x86_64.rpm failed: No such file or directory
Error: /Stage[main]/Nmap/Package[nmap]/ensure: change from absent to present failed: Execution of '/bin/rpm -i puppet:///files/nmap-6.45-1.x86_64.rpm' returned 1: error: open of puppet:///files/nmap-6.45-1.x86_64.rpm failed: No such file or directory
when running the command:
sudo puppet apply --modulepath=/home/user1/qa/puppet_qa/modules/ -e "include nmap" --debug
Unlike the file resource type, the package type has no support for Puppet fileserver URLs. You will need to use a file resource to download the RPM prior to installing it. If this is a recurring problem for you, make a defined type that does both in one go (think macros), e.g.
define fileserver_package($source, $ensure='installed') {
  file { "/my/tmp/dir/$name.rpm": source => $source }
  package { $name:
    ensure   => $ensure,
    provider => 'rpm',
    source   => "/my/tmp/dir/$name.rpm",
    require  => File["/my/tmp/dir/$name.rpm"],
  }
}
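Usage would then look something like this (assuming the RPM is shipped in the nmap module's files directory; adjust the source to wherever yours lives):
fileserver_package { 'nmap':
  source => 'puppet:///modules/nmap/nmap-6.45-1.x86_64.rpm',
}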
Edit: it is generally advisable to use a local yum repo instead, see also the first comment by @rojs below.
The RPM package can be installed this way:
package { 'epel-release-6':
  provider => 'rpm',
  ensure   => 'present',
  source   => '/usr/local/rpms/epel-release-latest-6.noarch.rpm',
}
It seems the module name you are using is nmap. You can use the same source parameter like this:
source => 'puppet:///modules/nmap/nmap-6.45-1.x86_64.rpm',
The syntax to access a file under a module is:
puppet:///modules/<modulename>/<file you want to access>
See this link: http://docs.puppetlabs.com/puppet/latest/reference/modules_fundamentals.html#files
Let's start from the start.
On the server:
$ rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-pc1-el-7.noarch.rpm
$ yum -y install puppetserver
$ vi /etc/sysconfig/puppetserver    # change the JAVA args
$ systemctl start puppetserver
$ systemctl enable puppetserver
$ vi /etc/puppetlabs/puppet/puppet.conf    # add "dns_alt_names" in [master]
On the agent:
$ rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-pc1-el-7.noarch.rpm
$ yum -y install puppet-agent
$ systemctl start puppet
$ systemctl enable puppet
$ vi /etc/puppetlabs/puppet/puppet.conf    # add "server = pupmaster" in [main]
puppet cert list
puppet cert sign
/etc/puppetlabs/code/environments/production/manifests/site.pp:
node webserver {
  class { 'apache': }
}
node dbserver {
  class { 'mysql': }
}
mkdir -p /etc/puppetlabs/code/environments/production/modules/apache/{manifests,files}
apacheinstall.pp:
class apache::apacheinstall {
  if $osfamily == 'redhat' {
    package { 'httpd':
      ensure => 'latest',
    }
    service { 'httpd':
      ensure  => 'running',
      require => Package["httpd"],
    }
    file { '/var/www/html/index.html':
      mode   => "0644",
      owner  => 'root',
      group  => 'root',
      source => 'puppet:///modules/apache/index.html',
    }
  }
  elsif $osfamily == 'debian' {
    package { 'apache2':
      ensure => 'latest',
    }
    service { 'apache2':
      ensure  => 'running',
      require => Package["apache2"],
    }
  }
}
init.pp:
class apache {
  notify { "Installing and Configuring Webserver for ${osfamily}": }
  include apache::mysqlinstall
}
mysqlinstall.pp:
class apache::mysqlinstall {
  exec { 'wget':
    path    => [ "/bin/", "/sbin/", "/usr/bin/", "/usr/sbin/" ],
    command => "/usr/bin/wget https://dev.mysql.com/get/mysql57-community-release-el7-9.noarch.rpm && rpm -ivh /tmp/mysql57-community-release-el7-9.noarch.rpm",
    cwd     => '/tmp/',
    creates => '/etc/firstruns/p1.done',
  }
}
