
Cannot get alerts when Kapacitor is configured for a remote InfluxDB node

When I set the url to localhost in the influxdb section of kapacitor.conf, I receive the alerts correctly.

But when I point the url at a remote host in the influxdb configuration section, I do not receive any alerts at all.

Can anyone help me?

The kapacitor.conf file is below:

# The hostname of this node. 
# Must be resolvable by any configured InfluxDB hosts. 
hostname = "localhost" 
# Directory for storing a small amount of metadata about the server. 
data_dir = "/var/lib/kapacitor" 

[http] 
    # HTTP API Server for Kapacitor 
    # This server is always on, 
    # it serves both as a write endpoint 
    # and as the API endpoint for all other 
    # Kapacitor calls. 
    bind-address = ":9092" 
    auth-enabled = false 
    log-enabled = true 
    write-tracing = false 
    pprof-enabled = false 
    https-enabled = false 
    https-certificate = "/etc/ssl/kapacitor.pem" 

[logging] 
    # Destination for logs 
    # Can be a path to a file or 'STDOUT', 'STDERR'. 
    file = "/var/log/kapacitor/kapacitor.log" 
    # Logging level can be one of: 
    # DEBUG, INFO, WARN, ERROR, or OFF 
    level = "INFO" 

[replay] 
    # Where to store replay files, aka recordings. 
    dir = "/var/lib/kapacitor/replay" 

[task] 
    # Where to store the tasks database 
    dir = "/var/lib/kapacitor/tasks" 
    # How often to snapshot running task state. 
    snapshot-interval = "60s" 

[deadman] 
    # Configure a deadman's switch 
    # Globally configure deadman's switches on all stream tasks. 
    # NOTE: for this to be of use you must also globally configure at least one alerting method. 
    global = false 
    # Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold. 
    threshold = 0.0 
    # Interval, if globally configured the frequency at which to check the throughput. 
    interval = "10s" 
    # Id -- the alert Id, NODE_NAME will be replaced with the name of the node being monitored. 
    id = "node 'NODE_NAME' in task '{{ .TaskName }}'" 
    # The message of the alert. INTERVAL will be replaced by the interval. 
    message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL." 


# Multiple InfluxDB configurations can be defined. 
# Exactly one must be marked as the default. 
# Each one will be given a name and can be referenced in batch queries and InfluxDBOut nodes. 
[[influxdb]] 
    # Connect to an InfluxDB cluster 
    # Kapacitor can subscribe, query and write to this cluster. 
    # Using InfluxDB is not required and can be disabled. 
    enabled = true 
    default = true 
    name = "localhost" 
    urls = ["http://remote_ip:8086"] 
    #urls = ["http://localhost:8086"] 
    username = "" 
    password = "" 
    timeout = 0 
    # Absolute path to pem encoded CA file. 
    # A CA can be provided without a key/cert pair 
    # ssl-ca = "/etc/kapacitor/ca.pem" 
    # Absolute paths to pem encoded key and cert files. 
    # ssl-cert = "/etc/kapacitor/cert.pem" 
    # ssl-key = "/etc/kapacitor/key.pem" 

    # Do not verify the TLS/SSL certificate. 
    # This is insecure. 
    insecure-skip-verify = false 

    # Subscriptions use the UDP network protocol. 
    # The following options are for the created UDP listeners for each subscription. 
    # Number of packets to buffer when reading packets off the socket. 
    udp-buffer = 1000 
    # The size in bytes of the OS read buffer for the UDP socket. 
    # A value of 0 indicates use the OS default. 
    udp-read-buffer = 0 

    [influxdb.subscriptions] 
    # Set of databases and retention policies to subscribe to. 
    # If empty will subscribe to all, minus the list in 
    # influxdb.excluded-subscriptions 
    # 
    # Format 
    # db_name = <list of retention policies> 
    # 
    # Example: 
    # my_database = [ "default", "longterm" ] 
    [influxdb.excluded-subscriptions] 
    # Set of databases and retention policies to exclude from the subscriptions. 
    # If influxdb.subscriptions is empty it will subscribe to all 
    # except databases listed here. 
    # 
    # Format 
    # db_name = <list of retention policies> 
    # 
    # Example: 
    # my_database = [ "default", "longterm" ] 

[smtp] 
    # Configure an SMTP email server 
    # Will use TLS and authentication if possible 
    # Only necessary for sending emails from alerts. 
    enabled = false 
    host = "localhost" 
    port = 25 
    username = "" 
    password = "" 
    # From address for outgoing mail 
    from = "" 
    # List of default To addresses. 
    # to = ["[email protected]"] 

    # Skip TLS certificate verify when connecting to SMTP server 
    no-verify = false 
    # Close idle connections after timeout 
    idle-timeout = "30s" 

    # If true then all alerts will be sent via Email 
    # without explicitly marking them in the TICKscript. 
    global = false 
    # Only applies if global is true. 
    # Sets all alerts in state-changes-only mode, 
    # meaning alerts will only be sent if the alert state changes. 
    state-changes-only = false 

[opsgenie] 
    # Configure OpsGenie with your API key and default routing key. 
    enabled = false 
    # Your OpsGenie API Key. 
    api-key = "" 
    # Default OpsGenie teams, can be overridden per alert. 
    # teams = ["team1", "team2"] 
    # Default OpsGenie recipients, can be overridden per alert. 
    # recipients = ["recipient1", "recipient2"] 
    # The OpsGenie API URL should not need to be changed. 
    url = "https://api.opsgenie.com/v1/json/alert" 
    # The OpsGenie Recovery URL, you can change this 
    # based on which behavior you want a recovery to 
    # trigger (Add Notes, Close Alert, etc.) 
    recovery_url = "https://api.opsgenie.com/v1/json/alert/note" 
    # If true then all alerts will be sent to OpsGenie 
    # without explicitly marking them in the TICKscript. 
    # The team and recipients can still be overridden. 
    global = false 

[victorops] 
    # Configure VictorOps with your API key and default routing key. 
    enabled = false 
    # Your VictorOps API Key. 
    api-key = "" 
    # Default VictorOps routing key, can be overridden per alert. 
    routing-key = "" 
    # The VictorOps API URL should not need to be changed. 
    url = "https://alert.victorops.com/integrations/generic/20131114/alert" 
    # If true then all alerts will be sent to VictorOps 
    # without explicitly marking them in the TICKscript. 
    # The routing key can still be overridden. 
    global = false 

[pagerduty] 
    # Configure PagerDuty. 
    enabled = false 
    # Your PagerDuty Service Key. 
    service-key = "" 
    # The PagerDuty API URL should not need to be changed. 
    url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" 
    # If true then all alerts will be sent to PagerDuty 
    # without explicitly marking them in the TICKscript. 
    global = false 

[slack] 
    # Configure Slack. 
    enabled = false 
    # The Slack webhook URL, can be obtained by adding 
    # an Incoming Webhook integration. 
    # Visit https://slack.com/services/new/incoming-webhook 
    # to add new webhook for Kapacitor. 
    #url = "test_hook" 
    # Default channel for messages 
    channel = "test-alerts" 
    # If true then all alerts will be sent to Slack 
    # without explicitly marking them in the TICKscript. 
    global = false 
    # Only applies if global is true. 
    # Sets all alerts in state-changes-only mode, 
    # meaning alerts will only be sent if the alert state changes. 
    state-changes-only = false 

[hipchat] 
    # Configure HipChat. 
    enabled = false 
    # The HipChat API URL. Replace subdomain with your 
    # HipChat subdomain. 
    url = "https://subdomain.hipchat.com/v2/room" 
    # Visit https://www.hipchat.com/docs/apiv2 
    # for information on obtaining your room id and 
    # authentication token. 
    # Default room for messages 
    room = "" 
    # Default authentication token 
    token = "" 
    # If true then all alerts will be sent to HipChat 
    # without explicitly marking them in the TICKscript. 
    global = false 
    # Only applies if global is true. 
    # Sets all alerts in state-changes-only mode, 
    # meaning alerts will only be sent if the alert state changes. 
    state-changes-only = false 

[alerta] 
    # Configure Alerta. 
    enabled = false 
    # The Alerta URL. 
    url = "" 
    # Default authentication token. 
    token = "" 
    # Default environment. 
    environment = "" 
    # Default origin. 
    origin = "kapacitor" 

[sensu] 
    # Configure Sensu. 
    enabled = false 
    # The Sensu Client host:port address. 
    addr = "sensu-client:3030" 
    # Default JIT source. 
    source = "Kapacitor" 

[reporting] 
    # Send anonymous usage statistics 
    # every 12 hours to Enterprise. 
    enabled = true 
    url = "https://usage.influxdata.com" 

[stats] 
    # Emit internal statistics about Kapacitor. 
    # To consume these stats create a stream task 
    # that selects data from the configured database 
    # and retention policy. 
    # 
    # Example: 
    # stream.from().database('_kapacitor').retentionPolicy('default')... 
    # 
    enabled = true 
    stats-interval = "10s" 
    database = "_kapacitor" 
    retention-policy = "default" 

[udf] 
# Configuration for UDFs (User Defined Functions) 
[udf.functions] 
    # Example go UDF. 
    # First compile example: 
    # go build -o avg_udf ./udf/agent/examples/moving_avg.go 
    # 
    # Use in TICKscript like: 
    # stream.goavg() 
    #   .field('value') 
    #   .size(10) 
    #   .as('m_average') 
    # 
    # uncomment to enable 
    #[udf.functions.goavg] 
    # prog = "./avg_udf" 
    # args = [] 
    # timeout = "10s" 

    # Example python UDF. 
    # Use in TICKscript like: 
    # stream.pyavg() 
    #   .field('value') 
    #   .size(10) 
    #   .as('m_average') 
    # 
    # uncomment to enable 
    #[udf.functions.pyavg] 
    # prog = "/usr/bin/python2" 
    # args = ["-u", "./udf/agent/examples/moving_avg.py"] 
    # timeout = "10s" 
    # [udf.functions.pyavg.env] 
    #  PYTHONPATH = "./udf/agent/py" 

[talk] 
    # Configure Talk. 
    enabled = false 
    # The Talk webhook URL. 
    url = "https://jianliao.com/v2/services/webhook/uuid" 
    # The default authorName. 
    author_name = "Kapacitor" 

################################## 
# Input Methods, same as InfluxDB 
# 

[collectd] 
    enabled = false 
    bind-address = ":25826" 
    database = "collectd" 
    retention-policy = "" 
    batch-size = 1000 
    batch-pending = 5 
    batch-timeout = "10s" 
    typesdb = "/usr/share/collectd/types.db" 

[opentsdb] 
    enabled = false 
    bind-address = ":4242" 
    database = "opentsdb" 
    retention-policy = "" 
    consistency-level = "one" 
    tls-enabled = false 
    certificate = "/etc/ssl/influxdb.pem" 
    batch-size = 1000 
    batch-pending = 5 
    batch-timeout = "1s" 

The second line of your sample configuration file says "Must be resolvable by any configured InfluxDB hosts." That is the key.
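
A quick way to check this from the remote InfluxDB server (a sketch; kapacitor-host stands for whatever you put in the hostname setting):

    # Run on the InfluxDB machine.
    getent hosts kapacitor-host            # does the Kapacitor hostname resolve here?
    curl -sv http://kapacitor-host:9092/   # can InfluxDB reach Kapacitor's HTTP port?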

Answer


My guess: hostname must be resolvable by the InfluxDB instance. Change it from localhost to the hostname/IP address of your Kapacitor machine.
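
Kapacitor registers subscriptions on the InfluxDB server using the hostname value from kapacitor.conf, and InfluxDB forwards data back to that host (via the UDP listeners configured in the [[influxdb]] section above). If hostname stays "localhost", the remote InfluxDB ends up sending the subscription data to itself, so Kapacitor never receives any points and no alerts fire. A minimal sketch of the change (192.168.1.50 is a placeholder for the Kapacitor machine's address):

    # kapacitor.conf on the Kapacitor machine
    # Must be resolvable by any configured InfluxDB hosts.
    hostname = "192.168.1.50"

    [[influxdb]]
        enabled = true
        default = true
        name = "remote"
        urls = ["http://remote_ip:8086"]

Restart Kapacitor after the change so it re-creates its subscriptions on the remote InfluxDB.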


Running SHOW SUBSCRIPTIONS on InfluxDB will list the Kapacitor subscriptions and their URLs. Those URLs have to be reachable from the InfluxDB server.
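
For example (the exact columns vary by InfluxDB version, and the subscription name and port below are illustrative):

    $ influx -execute 'SHOW SUBSCRIPTIONS'
    name: mydb
    retention_policy  name              mode  destinations
    default           kapacitor-1a2b3c  ANY   [udp://localhost:34567]

A destination that still points at localhost is the remote InfluxDB server itself, which explains why no data, and therefore no alerts, ever reach Kapacitor.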
