Fixing "Target not found" in pacman (Maybe mirrorlist of pacman.conf - linux

I have been having a lot of problems with pacman on my new Arch system. I can't install anything.
[root@life ~]# pacman -S pacaur
error: target not found: pacaur
I have tried everything, including pacman -Syyu, reinstalling pacman, and messing with the mirrorlist, which leads me to believe it might be a problem with the mirrorlist or pacman.conf.
Can someone help?
#
# /etc/pacman.conf
#
# See the pacman.conf(5) manpage for option and repository directives
#
# GENERAL OPTIONS
#
[options]
# The following paths are commented out with their default values listed.
# If you wish to use different paths, uncomment and update the paths.
#RootDir = /
#DBPath = /var/lib/pacman/
#CacheDir = /var/cache/pacman/pkg/
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
HoldPkg = pacman glibc
#XferCommand = /usr/bin/curl -C - -f %u > %o
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
#CleanMethod = KeepInstalled
#UseDelta = 0.7
Architecture = auto
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
#IgnorePkg =
#IgnoreGroup =
#NoUpgrade =
#NoExtract =
# Misc options
#UseSyslog
#Color
#TotalDownload
CheckSpace
#VerbosePkgLists
# By default, pacman accepts packages signed by keys that its local keyring
# trusts (see pacman-key and its man page), as well as unsigned packages.
SigLevel = Required DatabaseOptional
LocalFileSigLevel = Optional
#RemoteFileSigLevel = Required
# NOTE: You must run `pacman-key --init` before first using pacman; the local
# keyring can then be populated with the keys of all official Arch Linux
# packagers with `pacman-key --populate archlinux`.
#
# REPOSITORIES
# - can be defined here or included from another file
# - pacman will search repositories in the order defined here
# - local/custom mirrors can be added here or in separate files
# - repositories listed first will take precedence when packages
# have identical names, regardless of version number
# - URLs will have $repo replaced by the name of the current repo
# - URLs will have $arch replaced by the name of the architecture
#
# Repository entries are of the format:
# [repo-name]
# Server = ServerName
# Include = IncludePath
#
# The header [repo-name] is crucial - it must be present and
# uncommented to enable the repo.
#
# The testing repositories are disabled by default. To enable, uncomment the
# repo name header and Include lines. You can add preferred servers immediately
# after the header, and they will be used before the default mirrors.
#[testing]
#Include = /etc/pacman.d/mirrorlist
[core]
Include = /etc/pacman.d/mirrorlist
[extra]
Include = /etc/pacman.d/mirrorlist
#[community-testing]
#Include = /etc/pacman.d/mirrorlist
[community]
Include = /etc/pacman.d/mirrorlist
# If you want to run 32 bit applications on your x86_64 system,
# enable the multilib repositories as required here.
#[multilib-testing]
#Include = /etc/pacman.d/mirrorlist
#[multilib]
#Include = /etc/pacman.d/mirrorlist
# An example of a custom package repository. See the pacman manpage for
# tips on creating your own repositories.
#[custom]
#SigLevel = Optional TrustAll
#Server = file:///home/custompkgs
And here is my /etc/pacman.d/mirrorlist:
##
## Arch Linux repository mirrorlist
## Sorted by mirror score from mirror status page
## Generated on 2014-12-28
##
## Score: 0.5, United States
Server = http://mirror.us.leaseweb.net/archlinux/$repo/os/$arch
## Score: 0.9, United States
Server = http://lug.mtu.edu/archlinux/$repo/os/$arch
## Score: 1.1, United States
Server = http://mirror.umd.edu/archlinux/$repo/os/$arch
## Score: 1.1, United States
Server = http://mirror.rit.edu/archlinux/$repo/os/$arch
## Score: 1.1, United States
Server = http://mirrors.acm.wpi.edu/archlinux/$repo/os/$arch
## Score: 1.1, United States
Server = http://archlinux.surlyjake.com/archlinux/$repo/os/$arch
## Score: 1.2, United States
Server = http://www.gtlib.gatech.edu/pub/archlinux/$repo/os/$arch
## Score: 1.2, United States
Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch
## Score: 1.3, United States
Server = http://archlinux.pallissard.net/archlinux/$repo/os/$arch
## Score: 1.4, United States
Server = http://mirrors.cecsresearch.org/archlinux/$repo/os/$arch
## Score: 1.4, United States
Server = http://mirror.cs.pitt.edu/archlinux/$repo/os/$arch
## Score: 1.5, United States
Server = http://mirrors.cat.pdx.edu/archlinux/$repo/os/$arch
## Score: 1.7, United States
Server = http://mirror.jmu.edu/pub/archlinux/$repo/os/$arch
## Score: 1.8, United States
Server = http://mirror.grig.io/archlinux/$repo/os/$arch
## Score: 1.9, United States
Server = http://mirrors.aggregate.org/archlinux/$repo/os/$arch
## Score: 1.9, United States
Server = http://mirrors.kernel.org/archlinux/$repo/os/$arch
## Score: 2.0, United States

pacman cannot find the target because it only searches the official repositories, and pacaur actually lives in the AUR.
AUR helpers like yaourt or packer make searching for and installing AUR packages really simple. If you don't have one of these, you can always do things manually with the desired PKGBUILD: build the package from source with makepkg and then install it with pacman.
For example:
mkdir ~/pkgs && cd ~/pkgs
wget https://aur.archlinux.org/packages/pa/pacaur/PKGBUILD
makepkg -s
sudo pacman -U pacaur-4.2.14-1-any.pkg.tar.xz
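If makepkg stops on missing dependencies, or if the single-file PKGBUILD download no longer works (the AUR later moved to serving git repositories and snapshot tarballs), a roughly equivalent manual flow is sketched below; the clone URL and the -si flags are assumptions based on the current AUR, not taken from the question:
# fetch the whole AUR package as a git checkout (hypothetical alternative)
git clone https://aur.archlinux.org/pacaur.git
cd pacaur
makepkg -si   # -s pulls repo dependencies with pacman, -i installs the built package
Either way, pacman -Qi pacaur afterwards should confirm the package is installed.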

Related

How can I add a RabbitMQ queue to ELK?

I want to display RabbitMQ queues in ELK. For this I ran Kibana, Elasticsearch and Logstash; below are all the details.
The result of running Kibana:
samira@elk:/var/www/apps/kibana-8.4.2-linux-x86_64(1)/kibana-8.4.2/bin$ ./kibana
[2022-10-20T12:37:57.842+03:30][INFO ][node] Kibana process configured with roles: [background_tasks, ui]
[2022-10-20T12:38:08.711+03:30][INFO ][http.server.Preboot] http server running at http://localhost:5601
[2022-10-20T12:38:08.740+03:30][INFO ][plugins-system.preboot] Setting up [1] plugins: [interactiveSetup]
[2022-10-20T12:38:08.771+03:30][WARN ][config.deprecation] The default mechanism for Reporting privileges will work differently in future versions, which will affect the behavior of this cluster. Set "xpack.reporting.roles.enabled" to "false" to adopt the future behavior before upgrading.
[2022-10-20T12:38:08.935+03:30][INFO ][plugins-system.standard] Setting up [121] plugins: [translations,monitoringCollection,licensing,globalSearch,globalSearchProviders,features,mapsEms,licenseApiGuard,usageCollection,taskManager,telemetryCollectionManager,telemetryCollectionXpack,kibanaUsageCollection,share,embeddable,uiActionsEnhanced,screenshotMode,banners,newsfeed,fieldFormats,expressions,dataViews,charts,esUiShared,customIntegrations,home,searchprofiler,painlessLab,grokdebugger,management,advancedSettings,spaces,security,lists,encryptedSavedObjects,cloud,snapshotRestore,screenshotting,telemetry,licenseManagement,eventLog,actions,console,bfetch,data,watcher,reporting,fileUpload,ingestPipelines,alerting,unifiedSearch,savedObjects,graph,savedObjectsTagging,savedObjectsManagement,presentationUtil,expressionShape,expressionRevealImage,expressionRepeatImage,expressionMetric,expressionImage,controls,eventAnnotation,dataViewFieldEditor,triggersActionsUi,transform,stackAlerts,ruleRegistry,discover,fleet,indexManagement,remoteClusters,crossClusterReplication,indexLifecycleManagement,cloudSecurityPosture,discoverEnhanced,aiops,visualizations,canvas,visTypeXy,visTypeVislib,visTypeVega,visTypeTimeseries,rollup,visTypeTimelion,visTypeTagcloud,visTypeTable,visTypeMetric,visTypeHeatmap,visTypeMarkdown,dashboard,dashboardEnhanced,expressionXY,expressionTagcloud,expressionPartitionVis,visTypePie,expressionMetricVis,expressionLegacyMetricVis,expressionHeatmap,expressionGauge,lens,osquery,maps,dataVisualizer,ml,cases,timelines,sessionView,kubernetesSecurity,securitySolution,visTypeGauge,sharedUX,observability,synthetics,infra,upgradeAssistant,monitoring,logstash,enterpriseSearch,apm,dataViewManagement]
[2022-10-20T12:38:08.948+03:30][INFO ][plugins.taskManager] TaskManager is identified by the Kibana UUID: 114adb80-0285-4b29-b403-64ea1e454f19
[2022-10-20T12:38:09.009+03:30][WARN ][plugins.security.config] Session cookies will be transmitted over insecure connections. This is not recommended.
[2022-10-20T12:38:09.028+03:30][WARN ][plugins.security.config] Session cookies will be transmitted over insecure connections. This is not recommended.
[2022-10-20T12:38:09.034+03:30][INFO ][plugins.encryptedSavedObjects] Hashed 'xpack.encryptedSavedObjects.encryptionKey' for this instance: B6ABZzCc0sMI2CQc1eJYyeLXhC0I61v8xdNjUusVvp0=
[2022-10-20T12:38:09.159+03:30][INFO ][plugins.ruleRegistry] Installing common resources shared between all indices
[2022-10-20T12:38:09.189+03:30][INFO ][plugins.cloudSecurityPosture] Registered task successfully [Task: cloud_security_posture-stats_task]
[2022-10-20T12:38:09.678+03:30][INFO ][plugins.screenshotting.config] Chromium sandbox provides an additional layer of protection, and is supported for Linux Ubuntu 20.04 OS. Automatically enabling Chromium sandbox.
[2022-10-20T12:38:09.707+03:30][ERROR][elasticsearch-service] Unable to retrieve version information from Elasticsearch nodes. connect ECONNREFUSED 127.0.0.1:9200
[2022-10-20T12:38:10.162+03:30][INFO ][plugins.screenshotting.chromium] Browser executable: /var/www/apps/kibana-8.4.2-linux-x86_64(1)/kibana-8.4.2/x-pack/plugins/screenshotting/chromium/headless_shell-linux_x64/headless_shell
[2022-10-20T12:39:34.886+03:30][INFO ][savedobjects-service] Waiting until all Elasticsearch nodes are compatible with Kibana before starting saved objects migrations...
[2022-10-20T12:39:34.887+03:30][INFO ][savedobjects-service] Starting saved objects migrations
[2022-10-20T12:39:34.935+03:30][INFO ][savedobjects-service] [.kibana] INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 28ms.
[2022-10-20T12:39:34.937+03:30][INFO ][savedobjects-service] [.kibana_task_manager] INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 27ms.
[2022-10-20T12:39:34.948+03:30][ERROR][savedobjects-service] [.kibana_task_manager] Action failed with 'search_phase_execution_exception: '. Retrying attempt 1 in 2 seconds.
[2022-10-20T12:39:34.949+03:30][INFO ][savedobjects-service] [.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 11ms.
[2022-10-20T12:39:34.950+03:30][ERROR][savedobjects-service] [.kibana] Action failed with 'search_phase_execution_exception: '. Retrying attempt 1 in 2 seconds.
[2022-10-20T12:39:34.950+03:30][INFO ][savedobjects-service] [.kibana] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 15ms.
[2022-10-20T12:39:36.961+03:30][INFO ][savedobjects-service] [.kibana] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ. took: 2011ms.
The result of running Elasticsearch:
samira@elk:/var/www/apps/elasticsearch-8.4.2/bin$ ./elasticsearch
warning: ignoring JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64; using bundled JDK
[2022-10-20T12:39:26,417][INFO ][o.e.n.Node ] [elk.kifarunix-demo.com] version[8.4.2], pid[3594], build[tar/89f8c6d8429db93b816403ee75e5c270b43a940a/2022-09-14T16:26:04.382547801Z], OS[Linux/5.15.0-52-generic/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/18.0.2.1/18.0.2.1+1-1]
[2022-10-20T12:39:26,422][INFO ][o.e.n.Node ] [elk.kifarunix-demo.com] JVM home [/var/www/apps/elasticsearch-8.4.2/jdk], using bundled JDK [true]
[2022-10-20T12:39:26,422][INFO ][o.e.n.Node ] [elk.kifarunix-demo.com] JVM arguments [-Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -Djava.security.manager=allow, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j2.formatMsgNoLookups=true, -Djava.locale.providers=SPI,COMPAT, --add-opens=java.base/java.io=ALL-UNNAMED, -XX:+UseG1GC, -Djava.io.tmpdir=/tmp/elasticsearch-11974649730192173872, -XX:+HeapDumpOnOutOfMemoryError, -XX:+ExitOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Xms2g, -Xmx2g, -XX:MaxDirectMemorySize=1073741824, -XX:G1HeapRegionSize=4m, -XX:InitiatingHeapOccupancyPercent=30, -XX:G1ReservePercent=15, -Des.distribution.type=tar, --module-path=/var/www/apps/elasticsearch-8.4.2/lib, --add-modules=jdk.net, -Djdk.module.main=org.elasticsearch.server]
[2022-10-20T12:39:27,685][INFO ][c.a.c.i.j.JacksonVersion ] [elk.kifarunix-demo.com] Package versions: jackson-annotations=2.13.2, jackson-core=2.13.2, jackson-databind=2.13.2.2, jackson-dataformat-xml=2.13.2, jackson-datatype-jsr310=2.13.2, azure-core=1.27.0, Troubleshooting version conflicts: https://aka.ms/azsdk/java/dependency/troubleshoot
[2022-10-20T12:39:28,669][INFO ][o.e.p.PluginsService ] [elk.kifarunix-demo.com] loaded module [aggs-matrix-stats]
[2022-10-20T12:39:28,670][INFO ][o.e.p.PluginsService ] [elk.kifarunix-demo.com] loaded module [analysis-common]
[2022-10-20T12:39:28,670][INFO ][o.e.p.PluginsService ] [elk.kifarunix-demo.com] loaded module [constant-keyword]
[2022-10-20T12:39:28,670][INFO ][o.e.p.PluginsService ] [elk.kifarunix-demo.com] loaded module [data-streams]
The result of running Logstash:
$ bin/logstash -f sdamiii.conf
This is my sdamiii.conf:
input {
  rabbitmq {
    host => "localhost"
    port => 5672
    heartbeat => 30
    durable => true
    queue => "system_logs"
    user => "guest"
    password => "guest"
    vhost => "/"
  }
}
output {
  elasticsearch {
    hosts => ["localhost:9200"]
  }
  rabbitmq {
    exchange => "system_logs"
    host => "localhost"
    exchange_type => "fanout"
    key => "logstash"
    persistent => false
  }
}
This is kibana.yml
elasticsearch.username: "kibana_system"
elasticsearch.password: "pass"
xpack.encryptedSavedObjects.encryptionKey: 5b6d5d7b20e971b2e562cc7c8ca181ae
xpack.reporting.encryptionKey: ba46a278d1dcb511339ea01f2a9d2651
xpack.security.encryptionKey: 11f8ec40b5bf442404f5c5a53b38ad13
This is elasticsearch.yml
network.host: localhost
xpack.security.enabled: false
xpack.security.enrollment.enabled: false
# Enable encryption for HTTP API client connections, such as Kibana, Logstash, and Agents
xpack.security.http.ssl:
  enabled: true
  keystore.path: certs/http.p12
# Enable encryption and mutual authentication between cluster nodes
xpack.security.transport.ssl:
  enabled: false
  verification_mode: certificate
  keystore.path: certs/transport.p12
  truststore.path: certs/transport.p12
# Create a new cluster with the current node only
# Additional nodes can still join the cluster later
cluster.initial_master_nodes: ["elk.kifarunix-demo.com"]
# Allow HTTP API connections from anywhere
# Connections are encrypted and require user authentication
http.host: localhost
This is logstash.yml
# Settings file in YAML
#
# Settings can be specified either in hierarchical form, e.g.:
#
#   pipeline:
#     batch:
#       size: 125
#       delay: 5
#
# Or as flat keys:
#
# pipeline.batch.size: 125
# pipeline.batch.delay: 5
#
# ------------ Node identity ------------
#
# Use a descriptive name for the node:
#
# node.name: test
#
# If omitted the node name will default to the machine's host name
#
# ------------ Data path ------------------
#
# Which directory should be used by logstash and its plugins
# for any persistent needs. Defaults to LOGSTASH_HOME/data
#
# path.data:
#
# ------------ Pipeline Settings --------------
#
# The ID of the pipeline.
#
# pipeline.id: main
#
# Set the number of workers that will, in parallel, execute the filters+outputs
# stage of the pipeline.
#
# This defaults to the number of the host's CPU cores.
#
# pipeline.workers: 2
#
# How many events to retrieve from inputs before sending to filters+workers
#
# pipeline.batch.size: 125
#
# How long to wait in milliseconds while polling for the next event
# before dispatching an undersized batch to filters+outputs
#
# pipeline.batch.delay: 50
#
# Force Logstash to exit during shutdown even if there are still inflight
# events in memory. By default, logstash will refuse to quit until all
# received events have been pushed to the outputs.
#
# WARNING: Enabling this can lead to data loss during shutdown
#
# pipeline.unsafe_shutdown: false
#
# Set the pipeline event ordering. Options are "auto" (the default), "true" or "false".
# "auto" automatically enables ordering if the 'pipeline.workers' setting
# is also set to '1', and disables otherwise.
# "true" enforces ordering on the pipeline and prevent logstash from starting
# if there are multiple workers.
# "false" disables any extra processing necessary for preserving ordering.
#
# pipeline.ordered: auto
#
# Sets the pipeline's default value for `ecs_compatibility`, a setting that is
# available to plugins that implement an ECS Compatibility mode for use with
# the Elastic Common Schema.
# Possible values are:
# - disabled
# - v1
# - v8 (default)
# Pipelines defined before Logstash 8 operated without ECS in mind. To ensure a
# migrated pipeline continues to operate as it did before your upgrade, opt-OUT
# of ECS for the individual pipeline in its `pipelines.yml` definition. Setting
# it here will set the default for _all_ pipelines, including new ones.
#
# pipeline.ecs_compatibility: v8
#
# ------------ Pipeline Configuration Settings --------------
#
# Where to fetch the pipeline configuration for the main pipeline
#
# path.config:
#
# Pipeline configuration string for the main pipeline
#
# config.string:
#
# At startup, test if the configuration is valid and exit (dry run)
#
# config.test_and_exit: false
#
# Periodically check if the configuration has changed and reload the pipeline
# This can also be triggered manually through the SIGHUP signal
#
# config.reload.automatic: false
#
# How often to check if the pipeline configuration has changed (in seconds)
# Note that the unit value (s) is required. Values without a qualifier (e.g. 60)
# are treated as nanoseconds.
# Setting the interval this way is not recommended and might change in later versions.
#
# config.reload.interval: 3s
#
# Show fully compiled configuration as debug log message
# NOTE: --log.level must be 'debug'
#
# config.debug: false
#
# When enabled, process escaped characters such as \n and \" in strings in the
# pipeline configuration files.
#
# config.support_escapes: false
#
# ------------ API Settings -------------
# Define settings related to the HTTP API here.
#
# The HTTP API is enabled by default. It can be disabled, but features that rely
# on it will not work as intended.
#
# api.enabled: true
#
# By default, the HTTP API is not secured and is therefore bound to only the
# host's loopback interface, ensuring that it is not accessible to the rest of
# the network.
# When secured with SSL and Basic Auth, the API is bound to _all_ interfaces
# unless configured otherwise.
#
# api.http.host: 127.0.0.1
#
# The HTTP API web server will listen on an available port from the given range.
# Values can be specified as a single port (e.g., `9600`), or an inclusive range
# of ports (e.g., `9600-9700`).
#
# api.http.port: 9600-9700
#
# The HTTP API includes a customizable "environment" value in its response,
# which can be configured here.
#
# api.environment: "production"
#
# The HTTP API can be secured with SSL (TLS). To do so, you will need to provide
# the path to a password-protected keystore in p12 or jks format, along with credentials.
#
# api.ssl.enabled: false
# api.ssl.keystore.path: /path/to/keystore.jks
# api.ssl.keystore.password: "y0uRp4$$w0rD"
#
# The HTTP API can be configured to require authentication. Acceptable values are
# - `none`: no auth is required (default)
# - `basic`: clients must authenticate with HTTP Basic auth, as configured
# with `api.auth.basic.*` options below
# api.auth.type: none
#
# When configured with `api.auth.type` `basic`, you must provide the credentials
# that requests will be validated against. Usage of Environment or Keystore
# variable replacements is encouraged (such as the value `"${HTTP_PASS}"`, which
# resolves to the value stored in the keystore's `HTTP_PASS` variable if present
# or the same variable from the environment)
#
# api.auth.basic.username: "logstash-user"
# api.auth.basic.password: "s3cUreP4$$w0rD"
#
# When setting `api.auth.basic.password`, the password should meet
# the default password policy requirements.
# The default password policy requires non-empty minimum 8 char string that
# includes a digit, upper case letter and lower case letter.
# Policy mode sets Logstash to WARN or ERROR when HTTP authentication password doesn't
# meet the password policy requirements.
# The default is WARN. Setting to ERROR enforces stronger passwords (recommended).
#
# api.auth.basic.password_policy.mode: WARN
#
# ------------ Module Settings ---------------
# Define modules here. Modules definitions must be defined as an array.
# The simple way to see this is to prepend each `name` with a `-`, and keep
# all associated variables under the `name` they are associated with, and
# above the next, like this:
#
# modules:
#   - name: MODULE_NAME
#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
#
# Module variable names must be in the format of
#
# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
#
# modules:
#
# ------------ Cloud Settings ---------------
# Define Elastic Cloud settings here.
# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
# and it may have an label prefix e.g. staging:dXMtZ...
# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
# cloud.id: <identifier>
#
# Format of cloud.auth is: <user>:<pass>
# This is optional
# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
# cloud.auth: elastic:<password>
#
# ------------ Queuing Settings --------------
#
# Internal queuing model, "memory" for legacy in-memory based queuing and
# "persisted" for disk-based acked queueing. Defaults is memory
#
# queue.type: memory
#
# If `queue.type: persisted`, the directory path where the pipeline data files will be stored.
# Each pipeline will group its PQ files in a subdirectory matching its `pipeline.id`.
# Default is path.data/queue.
#
# path.queue:
#
# If using queue.type: persisted, the page data files size. The queue data consists of
# append-only data files separated into pages. Default is 64mb
#
# queue.page_capacity: 64mb
#
# If using queue.type: persisted, the maximum number of unread events in the queue.
# Default is 0 (unlimited)
#
# queue.max_events: 0
#
# If using queue.type: persisted, the total capacity of the queue in number of bytes.
# If you would like more unacked events to be buffered in Logstash, you can increase the
# capacity using this setting. Please make sure your disk drive has capacity greater than
# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
# whichever criteria is reached first
# Default is 1024mb or 1gb
#
# queue.max_bytes: 1024mb
#
# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.acks: 1024
#
# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.writes: 1024
#
# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
# Default is 1000, 0 for no periodic checkpoint.
#
# queue.checkpoint.interval: 1000
#
# ------------ Dead-Letter Queue Settings --------------
# Flag to turn on dead-letter queue.
#
# dead_letter_queue.enable: false
# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
# will be dropped if they would increase the size of the dead letter queue beyond this setting.
# Default is 1024mb
# dead_letter_queue.max_bytes: 1024mb
# If using dead_letter_queue.enable: true, the interval in milliseconds where if no further events eligible for the DLQ
# have been created, a dead letter queue file will be written. A low value here will mean that more, smaller, queue files
# may be written, while a larger value will introduce more latency between items being "written" to the dead letter queue, and
# being available to be read by the dead_letter_queue input when items are written infrequently.
# Default is 5000.
#
# dead_letter_queue.flush_interval: 5000
# If using dead_letter_queue.enable: true, controls which entries should be dropped to avoid exceeding the size limit.
# Set the value to `drop_newer` (default) to stop accepting new events that would push the DLQ size over the limit.
# Set the value to `drop_older` to remove queue pages containing the oldest events to make space for new ones.
#
# dead_letter_queue.storage_policy: drop_newer
# If using dead_letter_queue.enable: true, the interval that events have to be considered valid. After the interval has
# expired the events could be automatically deleted from the DLQ.
# The interval could be expressed in days, hours, minutes or seconds, using as postfix notation like 5d,
# to represent a five days interval.
# The available units are respectively d, h, m, s for day, hours, minutes and seconds.
# If not specified then the DLQ doesn't use any age policy for cleaning events.
#
# dead_letter_queue.retain.age: 1d
# If using dead_letter_queue.enable: true, defines the action to take when the dead_letter_queue.max_bytes is reached,
# could be "drop_newer" or "drop_older".
# With drop_newer, messages that were inserted most recently are dropped, logging an error line.
# With drop_older setting, the oldest messages are dropped as new ones are inserted.
# Default value is "drop_newer".
# dead_letter_queue.storage_policy: drop_newer
# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
# Default is path.data/dead_letter_queue
#
# path.dead_letter_queue:
#
# ------------ Debugging Settings --------------
#
# Options for log.level:
# * fatal
# * error
# * warn
# * info (default)
# * debug
# * trace
#
# log.level: info
# path.logs:
#
# ------------ Other Settings --------------
#
# Allow or block running Logstash as superuser (default: true)
# allow_superuser: false
#
# Where to find custom plugins
# path.plugins: []
#
# Flag to output log lines of each pipeline in its separate log file. Each log filename contains the pipeline.name
# Default is false
# pipeline.separate_logs: false
#
# ------------ X-Pack Settings (not applicable for OSS build)--------------
#
# X-Pack Monitoring
# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
#xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch.username: logstash_system
#xpack.monitoring.elasticsearch.password: password
#xpack.monitoring.elasticsearch.proxy: ["http://proxy:port"]
#xpack.monitoring.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
# an alternative to hosts + username/password settings is to use cloud_id/cloud_auth
#xpack.monitoring.elasticsearch.cloud_id: monitoring_cluster_id:xxxxxxxxxx
#xpack.monitoring.elasticsearch.cloud_auth: logstash_system:password
# another authentication alternative is to use an Elasticsearch API key
#xpack.monitoring.elasticsearch.api_key: "id:api_key"
#xpack.monitoring.elasticsearch.ssl.certificate_authority: "/path/to/ca.crt"
#xpack.monitoring.elasticsearch.ssl.ca_trusted_fingerprint: xxxxxxxxxx
#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
#xpack.monitoring.elasticsearch.ssl.truststore.password: password
#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.monitoring.elasticsearch.ssl.keystore.password: password
#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
#xpack.monitoring.elasticsearch.sniffing: false
#xpack.monitoring.collection.interval: 10s
#xpack.monitoring.collection.pipeline.details.enabled: true
#
# X-Pack Management
# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
#xpack.management.enabled: false
#xpack.management.pipeline.id: ["main", "apache_logs"]
#xpack.management.elasticsearch.username: logstash_admin_user
#xpack.management.elasticsearch.password: password
#xpack.management.elasticsearch.proxy: ["http://proxy:port"]
#xpack.management.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
# an alternative to hosts + username/password settings is to use cloud_id/cloud_auth
#xpack.management.elasticsearch.cloud_id: management_cluster_id:xxxxxxxxxx
#xpack.management.elasticsearch.cloud_auth: logstash_admin_user:password
# another authentication alternative is to use an Elasticsearch API key
#xpack.management.elasticsearch.api_key: "id:api_key"
#xpack.management.elasticsearch.ssl.ca_trusted_fingerprint: xxxxxxxxxx
#xpack.management.elasticsearch.ssl.certificate_authority: "/path/to/ca.crt"
#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
#xpack.management.elasticsearch.ssl.truststore.password: password
#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.management.elasticsearch.ssl.keystore.password: password
#xpack.management.elasticsearch.ssl.verification_mode: certificate
#xpack.management.elasticsearch.sniffing: false
#xpack.management.logstash.poll_interval: 5s
# X-Pack GeoIP plugin
# https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html#plugins-filters-geoip-manage_update
#xpack.geoip.download.endpoint: "https://geoip.elastic.co/v1/database"
Now, how do I save the queues that Logstash reads from RabbitMQ into Elasticsearch and view them as a chart in Kibana?
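A hedged way to check whether events are flowing end to end before building any chart, using the queue name from the sdamiii.conf above (rabbitmqadmin ships with the RabbitMQ management plugin; the exact index or data-stream name created by the elasticsearch output depends on the Logstash version, so the search below queries all indices):
# publish a test message to the queue Logstash is consuming
rabbitmqadmin publish exchange=amq.default routing_key=system_logs payload='{"msg":"hello from rabbitmq"}'
# see which indices Elasticsearch has created and look for the test message
curl -s 'http://localhost:9200/_cat/indices?v'
curl -s 'http://localhost:9200/_search?q=hello&pretty'
If the message shows up in Elasticsearch, a data view over that index in Kibana should be enough to chart it; if it does not, the ECONNREFUSED 127.0.0.1:9200 errors in the Kibana log above suggest the Elasticsearch HTTP layer is not reachable where the other components expect it.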

How to use two databases, Postgres and Snowflake, with Alembic and SQLAlchemy?

I want to use two databases, Postgres and Snowflake, with the Alembic migration tool in a single FastAPI app.
I am able to perform Alembic migrations and alembic upgrade when using a single database (Postgres), but with multiple databases Alembic creates problems.
I tried using the Snowflake database independently in a different app with Snowflake as the only database; revisions and alembic upgrade work perfectly fine there, but not in a single app with two databases.
Here is my directory structure, as suggested in a few articles:
main-project -->
    postgres-migration -->
        versions/
        env.py
        README
        script.py.mako
    snowflake-migration -->
        versions/
        env.py
        README
        script.py.mako
Here is my alembic.ini file, generated for Postgres but manipulated to also support Snowflake:
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = db-migration
[A_SNOWFLAKE_SCHEMA]
# path to env.py and migration scripts for schema1
script_location = snowflake-db-migration
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to db-migration/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat db-migration/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
#sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
The above file is able to create revision scripts inside the versions/ folder of snowflake-db-migration, but running the generated revision throws errors when I use:
alembic upgrade head
How can I convert this .ini file into a multi-database .ini file?
I tried using the command:
alembic init --template multidb snowflake-db-migration
But I am not sure how to integrate the changes here.
Also, I don't want to hardcode the sqlalchemy.url variable in the .ini file, since I need to take the Snowflake connection values from a .toml file that contains all the environment variables.
There are a few existing answers, but none of them are accepted or exactly match my use case.
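A hedged sketch of one common way to drive two independent Alembic environments from one project: keep one section per database in a single alembic.ini, each with its own script_location, and select the section at run time with Alembic's --name option, while env.py injects the URL from the environment (for example via config.set_main_option("sqlalchemy.url", ...)) instead of hardcoding it. The section names and *_URL variables below are illustrative assumptions, not taken from the question:
# URLs stay out of alembic.ini; env.py in each environment reads its own variable
export POSTGRES_URL="postgresql://user:pass@localhost/dbname"        # placeholder
export SNOWFLAKE_URL="snowflake://user:pass@account/database/schema" # placeholder
# run each environment separately by pointing --name at its .ini section
alembic --name postgres revision --autogenerate -m "init postgres"
alembic --name postgres upgrade head
alembic --name snowflake revision --autogenerate -m "init snowflake"
alembic --name snowflake upgrade head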

How to set the path for the module command?

This is my .modulespath file; the last two lines are the paths where my modules are mounted from another hard drive. Even though I added these lines, module avail does not show any of the modules in those folders. If anyone could help, it would be of great help.
#
# #(#)$Id: 38aa24cc33a5f54a93781d63005a084f74418022 $
# Module version 3.2.10
# init/.modulespath. Generated from .modulespath.in by configure.
#
# Modulepath initial setup
# ========================
#
# This file defines the initial setup for the module files search path.
# Comments may be added anywhere, which begin on # and continue until the
# end of the line
# Each line containing a single path will be added to the MODULEPATH
# environment variable. You may add as many as you want - just
# limited by the maximum variable size of your shell.
#
/etc/environment-modules/modules
#/usr/share/modules/versions # location of version files
#/usr/Modules/$MODULE_VERSION/modulefiles # Module pkg modulefiles (if versioning)
#/usr/Modules/modulefiles # Module pkg modulefiles (if no versioning)
#/usr/share/modules/modulefiles # General module files
#/usr/Modules/3.2.10/your_contribs # Edit for your requirements
/opt/apps/modulefiles/Core
/opt/apps/modulefiles/Compiler
I have even tried using module use /opt/apps/modulefiles/Core:
user@user-N501VW:~$ module use /opt/apps/modulefiles/Core
user@user-N501VW:~$ $MODULEPATH
bash: /opt/apps/modulefiles/Core:/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles: No such file or directory
akhila@akhila-N501VW:~$ module avail
------------------------------------------------------------- /usr/share/modules/versions --------------------------------------------------------------
3.2.10
------------------------------------------------------------ /usr/share/modules/modulefiles ------------------------------------------------------------
dot module-git module-info modules null use.own
Even though your specific module paths are correctly set in the MODULEPATH environment variable, module avail does not return any modulefiles for these directories. That means module has not found any file in these directories that it recognizes as a modulefile.
So I suggest that you (a quick shell check is sketched after this list):
check that your mount point is correctly mounted
verify that the files in those directories are modulefiles compatible with your version of the module command (the content of a modulefile should start with the #%Module magic cookie)
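A minimal check along those lines, assuming the paths from the question (the exact layout under Core/ and Compiler/ is an assumption):
# is the other hard drive actually mounted where the paths expect it?
mount | grep /opt/apps
# every valid modulefile must begin with the '#%Module' magic cookie
find /opt/apps/modulefiles/Core /opt/apps/modulefiles/Compiler -type f -exec head -n1 {} \; 2>/dev/null | sort | uniq -c
# re-check what module now sees
module use /opt/apps/modulefiles/Core
module avail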

GNOME Software shows an error even though everything is fine

When refreshing the updates tab I get either
[core]: Unexpected system error
[core]: invalid or corrupted database (PGP signature)
I am using Arch Linux with GNOME 3.18. Here is my pacman config:
#
# /etc/pacman.conf
#
# See the pacman.conf(5) manpage for option and repository directives
#
# GENERAL OPTIONS
#
[options]
# The following paths are commented out with their default values listed.
# If you wish to use different paths, uncomment and update the paths.
#RootDir = /
#DBPath = /var/lib/pacman/
#CacheDir = /var/cache/pacman/pkg/
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
HoldPkg = pacman glibc
#XferCommand = /usr/bin/curl -C - -f %u > %o
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
#CleanMethod = KeepInstalled
#UseDelta = 0.7
Architecture = auto
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
#IgnorePkg =
#IgnoreGroup =
#NoUpgrade =
#NoExtract =
# Misc options
#UseSyslog
#Color
#TotalDownload
CheckSpace
#VerbosePkgLists
# By default, pacman accepts packages signed by keys that its local keyring
# trusts (see pacman-key and its man page), as well as unsigned packages.
SigLevel = Required DatabaseOptional
LocalFileSigLevel = Optional
#RemoteFileSigLevel = Required
# NOTE: You must run `pacman-key --init` before first using pacman; the local
# keyring can then be populated with the keys of all official Arch Linux
# packagers with `pacman-key --populate archlinux`.
#
# REPOSITORIES
# - can be defined here or included from another file
# - pacman will search repositories in the order defined here
# - local/custom mirrors can be added here or in separate files
# - repositories listed first will take precedence when packages
# have identical names, regardless of version number
# - URLs will have $repo replaced by the name of the current repo
# - URLs will have $arch replaced by the name of the architecture
#
# Repository entries are of the format:
# [repo-name]
# Server = ServerName
# Include = IncludePath
#
# The header [repo-name] is crucial - it must be present and
# uncommented to enable the repo.
#
# The testing repositories are disabled by default. To enable, uncomment the
# repo name header and Include lines. You can add preferred servers immediately
# after the header, and they will be used before the default mirrors.
#[testing]
#Include = /etc/pacman.d/mirrorlist
[core]
Include = /etc/pacman.d/mirrorlist
[extra]
Include = /etc/pacman.d/mirrorlist
#[community-testing]
#Include = /etc/pacman.d/mirrorlist
[community]
Include = /etc/pacman.d/mirrorlist
# If you want to run 32 bit applications on your x86_64 system,
# enable the multilib repositories as required here.
#[multilib-testing]
#Include = /etc/pacman.d/mirrorlist
[multilib]
Include = /etc/pacman.d/mirrorlist
# An example of a custom package repository. See the pacman manpage for
# tips on creating your own repositories.
#[custom]
#SigLevel = Optional TrustAll
#Server = file:///home/custompkgs
[archlinuxfr]
SigLevel = Never
Server = http://repo.archlinux.fr/$arch
[infinality-bundle]
SigLevel = Never
Server = http://bohoomil.com/repo/$arch
[infinality-bundle-multilib]
SigLevel = Never
Server = http://bohoomil.com/repo/multilib/$arch
[infinality-bundle-fonts]
SigLevel = Never
Server = http://bohoomil.com/repo/fonts
And when updating with pacman there are no issues.
I have no issues using the terminal; I just want to know what the problem with GNOME Software is, and whether it is a false positive or there really is some issue with my repos.
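GNOME Software refreshes package metadata through PackageKit rather than by running pacman directly, so a hedged way to reproduce the error outside the GUI is pkcon, the PackageKit command-line client:
# refresh the PackageKit metadata the same way GNOME Software does
pkcon refresh force
# list updates as PackageKit sees them
pkcon get-updates
If pkcon reproduces the PGP/database error while pacman -Syu stays clean, the inconsistency is on the PackageKit side rather than in pacman or the repositories themselves.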

CPM failed to get the hash for HEAD

I'm trying to use CMake for the following CMakeLists.txt:
cmake_minimum_required(VERSION 2.8.0 FATAL_ERROR)

#-----------------------------------------------------------------------
# CPM configuration
#-----------------------------------------------------------------------
set(CPM_MODULE_NAME "arc_ball")
set(CPM_LIB_TARGET_NAME ${CPM_MODULE_NAME})

if ((DEFINED CPM_DIR) AND (DEFINED CPM_UNIQUE_ID) AND (DEFINED CPM_TARGET_NAME))
  set(CPM_LIB_TARGET_NAME ${CPM_TARGET_NAME})
  set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CPM_DIR})
  include(CPM)
else()
  set (CPM_DIR "${CMAKE_CURRENT_BINARY_DIR}/cpm-packages" CACHE TYPE STRING)
  find_package(Git)
  if(NOT GIT_FOUND)
    message(FATAL_ERROR "CPM requires Git.")
  endif()
  if (NOT EXISTS ${CPM_DIR}/CPM.cmake)
    message(STATUS "Cloning repo (https://github.com/iauns/cpm)")
    execute_process(
      COMMAND "${GIT_EXECUTABLE}" clone git@github.com:iauns/cpm.git ${CPM_DIR}
      RESULT_VARIABLE error_code
      OUTPUT_QUIET ERROR_QUIET)
    if(error_code)
      message(FATAL_ERROR "CPM failed to get the hash for HEAD")
    endif()
  endif()
  include(${CPM_DIR}/CPM.cmake)
endif()

#-----------------------------------------------------------------------
# CPM Modules
#-----------------------------------------------------------------------
# ++ MODULE: GLM
CPM_AddModule("GLM"
  GIT_REPOSITORY "https://github.com/iauns/cpm-glm"
  GIT_TAG "1.0.2"
  USE_EXISTING_VER TRUE
  EXPORT_MODULE TRUE  # Use EXPORT_MODULE sparingly. We expose GLM's interface
  )                   # through our own interface hence why we export it.

# This call will ensure all include directories and definitions are present
# in the target. These correspond to the modules that we added above.
CPM_InitModule(${CPM_MODULE_NAME})

#-----------------------------------------------------------------------
# Source
#-----------------------------------------------------------------------
# Globbing has some downsides, but the advantages outweigh the
# disadvantages.
file (GLOB Sources
  "arc-ball/*.cpp"
  "arc-ball/*.hpp"
  )

#-----------------------------------------------------------------------
# Library setup
#-----------------------------------------------------------------------
# Build the library.
add_library(${CPM_LIB_TARGET_NAME} ${Sources})
if (NOT EMSCRIPTEN AND CPM_LIBRARIES)
  target_link_libraries(${CPM_LIB_TARGET_NAME} ${CPM_LIBRARIES})
endif()
My system is Windows 8.1 and I'm using Visual Studio 11 2012 as the generator. However, when I try to configure, I get the following error:
CMake Error at CMakeLists.txt:26 (message):
  CPM failed to get the hash for HEAD
Meaning that it's happening on this line: COMMAND "${GIT_EXECUTABLE}" clone git@github.com:iauns/cpm.git ${CPM_DIR}
Would anyone know why?
Thanks in advance for the help.
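A hedged diagnostic based only on the CMakeLists.txt above: the failing step clones over the SSH form of the GitHub URL (git@github.com:iauns/cpm.git), which fails on a machine without a registered GitHub SSH key, and the script then wraps any clone failure in the generic "CPM failed to get the hash for HEAD" message. Running the two clone variants by hand separates an SSH/credentials problem from anything CMake-specific (the target directory names are just illustrative):
# the SSH form needs a GitHub account with an SSH key set up
git clone git@github.com:iauns/cpm.git cpm-ssh-test
# the HTTPS form works anonymously; if this succeeds while the SSH clone fails,
# point the execute_process COMMAND at https://github.com/iauns/cpm instead
git clone https://github.com/iauns/cpm.git cpm-https-test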
