From 6e6ed075dc63593b0b48a4c4913f9d983d2724d5 Mon Sep 17 00:00:00 2001
From: Patrick O'Brien <poblahblahblah@users.noreply.github.com>
Date: Tue, 19 Sep 2017 11:46:01 -0700
Subject: [PATCH] Add new nginx_plus input plugin (#3214)

---
 plugins/inputs/all/all.go                    |   1 +
 plugins/inputs/nginx_plus/README.md          | 124 ++++
 plugins/inputs/nginx_plus/nginx_plus.go      | 568 +++++++++++++++++++
 plugins/inputs/nginx_plus/nginx_plus_test.go | 413 ++++++++++++++
 4 files changed, 1106 insertions(+)
 create mode 100644 plugins/inputs/nginx_plus/README.md
 create mode 100644 plugins/inputs/nginx_plus/nginx_plus.go
 create mode 100644 plugins/inputs/nginx_plus/nginx_plus_test.go

diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 955c3a2c..7281f5b7 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -53,6 +53,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
+	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
diff --git a/plugins/inputs/nginx_plus/README.md b/plugins/inputs/nginx_plus/README.md
new file mode 100644
index 00000000..bfa9593c
--- /dev/null
+++ b/plugins/inputs/nginx_plus/README.md
@@ -0,0 +1,124 @@
+# Telegraf Plugin: nginx_plus
+
+Nginx Plus is a commercial version of the open source web server Nginx. To use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/).
+
+The data structures used by this plugin are based on the history of the
+[status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html).
+
+### Configuration:
+
+```
+# Read Nginx Plus' advanced status information
+[[inputs.nginx_plus]]
+  ## An array of Nginx Plus status URIs to gather stats from.
+  urls = ["http://localhost/status"]
+
+  # HTTP response timeout (default: 5s)
+  response_timeout = "5s"
+```
+
+### Measurements & Fields:
+
+- nginx_plus_processes
+  - respawned
+- nginx_plus_connections
+  - accepted
+  - dropped
+  - active
+  - idle
+- nginx_plus_ssl
+  - handshakes
+  - handshakes_failed
+  - session_reuses
+- nginx_plus_requests
+  - total
+  - current
+- nginx_plus_upstream, nginx_plus_stream_upstream
+  - keepalive
+  - zombies
+- nginx_plus_upstream_peer, nginx_plus_stream_upstream_peer
+  - requests
+  - unavail
+  - healthchecks_checks
+  - header_time
+  - response_time
+  - state
+  - active
+  - downstart
+  - healthchecks_last_passed
+  - weight
+  - responses_1xx
+  - responses_2xx
+  - responses_3xx
+  - responses_4xx
+  - responses_5xx
+  - received
+  - selected
+  - healthchecks_fails
+  - healthchecks_unhealthy
+  - backup
+  - responses_total
+  - sent
+  - fails
+  - downtime
+
+
+### Tags:
+
+- nginx_plus_processes, nginx_plus_connections, nginx_plus_ssl, nginx_plus_requests
+  - server
+  - port
+
+- nginx_plus_upstream, nginx_plus_stream_upstream
+  - upstream
+  - server
+  - port
+
+- nginx_plus_upstream_peer, nginx_plus_stream_upstream_peer
+  - id
+  - upstream
+  - server
+  - port
+  - upstream_address
+
+### Example Output:
+
+Using this configuration:
+```
+[[inputs.nginx_plus]]
+  ## An array of Nginx Plus status URIs to gather stats.
+  urls = ["http://localhost/status"]
+```
+
+When run with:
+```
+./telegraf -config telegraf.conf -input-filter nginx_plus -test
+```
+
+It produces:
+```
+* Plugin: inputs.nginx_plus, Collection 1
+> nginx_plus_processes,server=localhost,port=12021,host=word.local respawned=0i 1505782513000000000
+> nginx_plus_connections,server=localhost,port=12021,host=word.local accepted=5535735212i,dropped=10140186i,active=9541i,idle=67540i 1505782513000000000
+> nginx_plus_ssl,server=localhost,port=12021,host=word.local handshakes=0i,handshakes_failed=0i,session_reuses=0i 1505782513000000000
+> nginx_plus_requests,server=localhost,port=12021,host=word.local total=186780541173i,current=9037i 1505782513000000000
+> nginx_plus_upstream,port=12021,host=word.local,upstream=dataserver80,server=localhost keepalive=0i,zombies=0i 1505782513000000000
+> nginx_plus_upstream_peer,upstream=dataserver80,upstream_address=10.10.102.181:80,id=0,server=localhost,port=12021,host=word.local sent=53806910399i,received=7516943964i,fails=207i,downtime=2325979i,selected=1505782512000i,backup=false,active=6i,responses_4xx=6935i,header_time=80i,response_time=80i,healthchecks_last_passed=true,responses_1xx=0i,responses_2xx=36299890i,responses_5xx=360450i,responses_total=36667275i,unavail=154i,downstart=0i,state="up",requests=36673741i,responses_3xx=0i,healthchecks_unhealthy=5i,weight=1i,healthchecks_checks=177209i,healthchecks_fails=29i 1505782513000000000
+> nginx_plus_stream_upstream,server=localhost,port=12021,host=word.local,upstream=dataserver443 zombies=0i 1505782513000000000
+> nginx_plus_stream_upstream_peer,server=localhost,upstream_address=10.10.102.181:443,id=0,port=12021,host=word.local,upstream=dataserver443 active=1i,healthchecks_unhealthy=1i,weight=1i,unavail=0i,connect_time=24i,first_byte_time=78i,healthchecks_last_passed=true,state="up",sent=4457713140i,received=698065272i,fails=0i,healthchecks_checks=178421i,downstart=0i,selected=1505782512000i,response_time=5156i,backup=false,connections=56251i,healthchecks_fails=20i,downtime=391017i 1505782513000000000
+```
+
+### Reference material
+
+Archived copies of successive versions of the status response structure are available here:
+
+- [version 1](http://web.archive.org/web/20130805111222/http://nginx.org/en/docs/http/ngx_http_status_module.html)
+
+- [version 2](http://web.archive.org/web/20131218101504/http://nginx.org/en/docs/http/ngx_http_status_module.html)
+
+- version 3 - not available
+
+- [version 4](http://web.archive.org/web/20141218170938/http://nginx.org/en/docs/http/ngx_http_status_module.html)
+
+- [version 5](http://web.archive.org/web/20150414043916/http://nginx.org/en/docs/http/ngx_http_status_module.html)
+
+- [version 6](http://web.archive.org/web/20150918163811/http://nginx.org/en/docs/http/ngx_http_status_module.html)
+
+- [version 7](http://web.archive.org/web/20161107221028/http://nginx.org/en/docs/http/ngx_http_status_module.html)
diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go
new file mode 100644
index 00000000..60fe55a5
--- /dev/null
+++ b/plugins/inputs/nginx_plus/nginx_plus.go
@@ -0,0 +1,568 @@
+package nginx_plus
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type NginxPlus struct {
+	Urls []string
+
+	client *http.Client
+
+	ResponseTimeout internal.Duration
+}
+
+var sampleConfig = `
+  ## An array of ngx_http_status_module status URIs to gather stats from.
+  urls = ["http://localhost/status"]
+
+  # HTTP response timeout (default: 5s)
+  response_timeout = "5s"
+`
+
+func (n *NginxPlus) SampleConfig() string {
+	return sampleConfig
+}
+
+func (n *NginxPlus) Description() string {
+	return "Read Nginx Plus' full status information (ngx_http_status_module)"
+}
+
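+// Gather queries each configured status URL concurrently and writes the
+// decoded metrics to the accumulator.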
+func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
+	var wg sync.WaitGroup
+
+	// Create an HTTP client that is re-used for each
+	// collection interval
+
+	if n.client == nil {
+		client, err := n.createHttpClient()
+		if err != nil {
+			return err
+		}
+		n.client = client
+	}
+
+	for _, u := range n.Urls {
+		addr, err := url.Parse(u)
+		if err != nil {
+			acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
+			continue
+		}
+
+		wg.Add(1)
+		go func(addr *url.URL) {
+			defer wg.Done()
+			acc.AddError(n.gatherUrl(addr, acc))
+		}(addr)
+	}
+
+	wg.Wait()
+	return nil
+}
+
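+// createHttpClient builds the shared HTTP client, applying the default
+// five-second response timeout when none is configured.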
+func (n *NginxPlus) createHttpClient() (*http.Client, error) {
+
+	if n.ResponseTimeout.Duration < time.Second {
+		n.ResponseTimeout.Duration = time.Second * 5
+	}
+
+	client := &http.Client{
+		Transport: &http.Transport{},
+		Timeout:   n.ResponseTimeout.Duration,
+	}
+
+	return client, nil
+}
+
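+// gatherUrl fetches a single status URL and hands the JSON body off to the
+// metric gatherers.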
+func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
+	resp, err := n.client.Get(addr.String())
+
+	if err != nil {
+		return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
+	}
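+	// The status module responds with JSON; any other content type most
+	// likely means a different endpoint (e.g. stub_status) was configured.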
+	contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0]
+	switch contentType {
+	case "application/json":
+		return gatherStatusUrl(bufio.NewReader(resp.Body), getTags(addr), acc)
+	default:
+		return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType)
+	}
+}
+
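+// getTags derives the "server" and "port" tags from the status URL, falling
+// back to the scheme's default port when the address does not include one.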
+func getTags(addr *url.URL) map[string]string {
+	h := addr.Host
+	host, port, err := net.SplitHostPort(h)
+	if err != nil {
+		host = addr.Host
+		if addr.Scheme == "http" {
+			port = "80"
+		} else if addr.Scheme == "https" {
+			port = "443"
+		} else {
+			port = ""
+		}
+	}
+	return map[string]string{"server": host, "port": port}
+}
+
+type ResponseStats struct {
+	Responses1xx int64 `json:"1xx"`
+	Responses2xx int64 `json:"2xx"`
+	Responses3xx int64 `json:"3xx"`
+	Responses4xx int64 `json:"4xx"`
+	Responses5xx int64 `json:"5xx"`
+	Total        int64 `json:"total"`
+}
+
+type BasicHitStats struct {
+	Responses int64 `json:"responses"`
+	Bytes     int64 `json:"bytes"`
+}
+
+type ExtendedHitStats struct {
+	BasicHitStats
+	ResponsesWritten int64 `json:"responses_written"`
+	BytesWritten     int64 `json:"bytes_written"`
+}
+
+type HealthCheckStats struct {
+	Checks     int64 `json:"checks"`
+	Fails      int64 `json:"fails"`
+	Unhealthy  int64 `json:"unhealthy"`
+	LastPassed *bool `json:"last_passed"`
+}
+
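+// Status mirrors the JSON document returned by ngx_http_status_module.
+// Fields that only exist in some versions of the status API are declared as
+// pointers so their absence can be detected after decoding.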
+type Status struct {
+	Version       int    `json:"version"`
+	NginxVersion  string `json:"nginx_version"`
+	Address       string `json:"address"`
+	Generation    *int   `json:"generation"`     // added in version 5
+	LoadTimestamp *int64 `json:"load_timestamp"` // added in version 2
+	Timestamp     int64  `json:"timestamp"`
+	Pid           *int   `json:"pid"` // added in version 6
+
+	Processes *struct { // added in version 5
+		Respawned *int `json:"respawned"`
+	} `json:"processes"`
+
+	Connections struct {
+		Accepted int `json:"accepted"`
+		Dropped  int `json:"dropped"`
+		Active   int `json:"active"`
+		Idle     int `json:"idle"`
+	} `json:"connections"`
+
+	Ssl *struct { // added in version 6
+		Handshakes       int64 `json:"handshakes"`
+		HandshakesFailed int64 `json:"handshakes_failed"`
+		SessionReuses    int64 `json:"session_reuses"`
+	} `json:"ssl"`
+
+	Requests struct {
+		Total   int64 `json:"total"`
+		Current int   `json:"current"`
+	} `json:"requests"`
+
+	ServerZones map[string]struct { // added in version 2
+		Processing int           `json:"processing"`
+		Requests   int64         `json:"requests"`
+		Responses  ResponseStats `json:"responses"`
+		Discarded  *int64        `json:"discarded"` // added in version 6
+		Received   int64         `json:"received"`
+		Sent       int64         `json:"sent"`
+	} `json:"server_zones"`
+
+	Upstreams map[string]struct {
+		Peers []struct {
+			ID           *int             `json:"id"` // added in version 3
+			Server       string           `json:"server"`
+			Backup       bool             `json:"backup"`
+			Weight       int              `json:"weight"`
+			State        string           `json:"state"`
+			Active       int              `json:"active"`
+			Keepalive    *int             `json:"keepalive"` // removed in version 5
+			MaxConns     *int             `json:"max_conns"` // added in version 3
+			Requests     int64            `json:"requests"`
+			Responses    ResponseStats    `json:"responses"`
+			Sent         int64            `json:"sent"`
+			Received     int64            `json:"received"`
+			Fails        int64            `json:"fails"`
+			Unavail      int64            `json:"unavail"`
+			HealthChecks HealthCheckStats `json:"health_checks"`
+			Downtime     int64            `json:"downtime"`
+			Downstart    int64            `json:"downstart"`
+			Selected     *int64           `json:"selected"`      // added in version 4
+			HeaderTime   *int64           `json:"header_time"`   // added in version 5
+			ResponseTime *int64           `json:"response_time"` // added in version 5
+		} `json:"peers"`
+		Keepalive int       `json:"keepalive"`
+		Zombies   int       `json:"zombies"` // added in version 6
+		Queue     *struct { // added in version 6
+			Size      int   `json:"size"`
+			MaxSize   int   `json:"max_size"`
+			Overflows int64 `json:"overflows"`
+		} `json:"queue"`
+	} `json:"upstreams"`
+
+	Caches map[string]struct { // added in version 2
+		Size        int64            `json:"size"`
+		MaxSize     int64            `json:"max_size"`
+		Cold        bool             `json:"cold"`
+		Hit         BasicHitStats    `json:"hit"`
+		Stale       BasicHitStats    `json:"stale"`
+		Updating    BasicHitStats    `json:"updating"`
+		Revalidated *BasicHitStats   `json:"revalidated"` // added in version 3
+		Miss        ExtendedHitStats `json:"miss"`
+		Expired     ExtendedHitStats `json:"expired"`
+		Bypass      ExtendedHitStats `json:"bypass"`
+	} `json:"caches"`
+
+	Stream struct {
+		ServerZones map[string]struct {
+			Processing  int            `json:"processing"`
+			Connections int            `json:"connections"`
+			Sessions    *ResponseStats `json:"sessions"`
+			Discarded   *int64         `json:"discarded"` // added in version 7
+			Received    int64          `json:"received"`
+			Sent        int64          `json:"sent"`
+		} `json:"server_zones"`
+		Upstreams map[string]struct {
+			Peers []struct {
+				ID            int              `json:"id"`
+				Server        string           `json:"server"`
+				Backup        bool             `json:"backup"`
+				Weight        int              `json:"weight"`
+				State         string           `json:"state"`
+				Active        int              `json:"active"`
+				Connections   int64            `json:"connections"`
+				ConnectTime   *int             `json:"connect_time"`
+				FirstByteTime *int             `json:"first_byte_time"`
+				ResponseTime  *int             `json:"response_time"`
+				Sent          int64            `json:"sent"`
+				Received      int64            `json:"received"`
+				Fails         int64            `json:"fails"`
+				Unavail       int64            `json:"unavail"`
+				HealthChecks  HealthCheckStats `json:"health_checks"`
+				Downtime      int64            `json:"downtime"`
+				Downstart     int64            `json:"downstart"`
+				Selected      int64            `json:"selected"`
+			} `json:"peers"`
+			Zombies int `json:"zombies"`
+		} `json:"upstreams"`
+	} `json:"stream"`
+}
+
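+// gatherStatusUrl decodes the status JSON and emits every metric family
+// derived from it.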
+func gatherStatusUrl(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error {
+	dec := json.NewDecoder(r)
+	status := &Status{}
+	if err := dec.Decode(status); err != nil {
+		return fmt.Errorf("Error while decoding JSON response: %s", err)
+	}
+	status.Gather(tags, acc)
+	return nil
+}
+
+func (s *Status) Gather(tags map[string]string, acc telegraf.Accumulator) {
+	s.gatherProcessesMetrics(tags, acc)
+	s.gatherConnectionsMetrics(tags, acc)
+	s.gatherSslMetrics(tags, acc)
+	s.gatherRequestMetrics(tags, acc)
+	s.gatherZoneMetrics(tags, acc)
+	s.gatherUpstreamMetrics(tags, acc)
+	s.gatherCacheMetrics(tags, acc)
+	s.gatherStreamMetrics(tags, acc)
+}
+
+func (s *Status) gatherProcessesMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	var respawned int
+
+	// The processes section was added in status API version 5 and may be absent.
+	if s.Processes != nil && s.Processes.Respawned != nil {
+		respawned = *s.Processes.Respawned
+	}
+
+	acc.AddFields(
+		"nginx_plus_processes",
+		map[string]interface{}{
+			"respawned": respawned,
+		},
+		tags,
+	)
+
+}
+
+func (s *Status) gatherConnectionsMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	acc.AddFields(
+		"nginx_plus_connections",
+		map[string]interface{}{
+			"accepted": s.Connections.Accepted,
+			"dropped":  s.Connections.Dropped,
+			"active":   s.Connections.Active,
+			"idle":     s.Connections.Idle,
+		},
+		tags,
+	)
+}
+
+func (s *Status) gatherSslMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	// The ssl section was added in status API version 6 and may be absent.
+	if s.Ssl == nil {
+		return
+	}
+	acc.AddFields(
+		"nginx_plus_ssl",
+		map[string]interface{}{
+			"handshakes":        s.Ssl.Handshakes,
+			"handshakes_failed": s.Ssl.HandshakesFailed,
+			"session_reuses":    s.Ssl.SessionReuses,
+		},
+		tags,
+	)
+}
+
+func (s *Status) gatherRequestMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	acc.AddFields(
+		"nginx_plus_requests",
+		map[string]interface{}{
+			"total":   s.Requests.Total,
+			"current": s.Requests.Current,
+		},
+		tags,
+	)
+}
+
+func (s *Status) gatherZoneMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	for zoneName, zone := range s.ServerZones {
+		zoneTags := map[string]string{}
+		for k, v := range tags {
+			zoneTags[k] = v
+		}
+		zoneTags["zone"] = zoneName
+		acc.AddFields(
+			"nginx_plus_zone",
+			func() map[string]interface{} {
+				result := map[string]interface{}{
+					"processing":      zone.Processing,
+					"requests":        zone.Requests,
+					"responses_1xx":   zone.Responses.Responses1xx,
+					"responses_2xx":   zone.Responses.Responses2xx,
+					"responses_3xx":   zone.Responses.Responses3xx,
+					"responses_4xx":   zone.Responses.Responses4xx,
+					"responses_5xx":   zone.Responses.Responses5xx,
+					"responses_total": zone.Responses.Total,
+					"received":        zone.Received,
+					"sent":            zone.Sent,
+				}
+				if zone.Discarded != nil {
+					result["discarded"] = *zone.Discarded
+				}
+				return result
+			}(),
+			zoneTags,
+		)
+	}
+}
+
+func (s *Status) gatherUpstreamMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	for upstreamName, upstream := range s.Upstreams {
+		upstreamTags := map[string]string{}
+		for k, v := range tags {
+			upstreamTags[k] = v
+		}
+		upstreamTags["upstream"] = upstreamName
+		upstreamFields := map[string]interface{}{
+			"keepalive": upstream.Keepalive,
+			"zombies":   upstream.Zombies,
+		}
+		if upstream.Queue != nil {
+			upstreamFields["queue_size"] = upstream.Queue.Size
+			upstreamFields["queue_max_size"] = upstream.Queue.MaxSize
+			upstreamFields["queue_overflows"] = upstream.Queue.Overflows
+		}
+		acc.AddFields(
+			"nginx_plus_upstream",
+			upstreamFields,
+			upstreamTags,
+		)
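+		// Each peer becomes its own series, tagged with the upstream name,
+		// the peer address and, when reported, the peer id.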
+		for _, peer := range upstream.Peers {
+			var selected int64
+
+			if peer.Selected != nil {
+				selected = *peer.Selected
+			}
+
+			peerFields := map[string]interface{}{
+				"backup":                 peer.Backup,
+				"weight":                 peer.Weight,
+				"state":                  peer.State,
+				"active":                 peer.Active,
+				"requests":               peer.Requests,
+				"responses_1xx":          peer.Responses.Responses1xx,
+				"responses_2xx":          peer.Responses.Responses2xx,
+				"responses_3xx":          peer.Responses.Responses3xx,
+				"responses_4xx":          peer.Responses.Responses4xx,
+				"responses_5xx":          peer.Responses.Responses5xx,
+				"responses_total":        peer.Responses.Total,
+				"sent":                   peer.Sent,
+				"received":               peer.Received,
+				"fails":                  peer.Fails,
+				"unavail":                peer.Unavail,
+				"healthchecks_checks":    peer.HealthChecks.Checks,
+				"healthchecks_fails":     peer.HealthChecks.Fails,
+				"healthchecks_unhealthy": peer.HealthChecks.Unhealthy,
+				"downtime":               peer.Downtime,
+				"downstart":              peer.Downstart,
+				"selected":               selected,
+			}
+			if peer.HealthChecks.LastPassed != nil {
+				peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed
+			}
+			if peer.HeaderTime != nil {
+				peerFields["header_time"] = *peer.HeaderTime
+			}
+			if peer.ResponseTime != nil {
+				peerFields["response_time"] = *peer.ResponseTime
+			}
+			if peer.MaxConns != nil {
+				peerFields["max_conns"] = *peer.MaxConns
+			}
+			peerTags := map[string]string{}
+			for k, v := range upstreamTags {
+				peerTags[k] = v
+			}
+			peerTags["upstream_address"] = peer.Server
+			if peer.ID != nil {
+				peerTags["id"] = strconv.Itoa(*peer.ID)
+			}
+			acc.AddFields("nginx_plus_upstream_peer", peerFields, peerTags)
+		}
+	}
+}
+
+func (s *Status) gatherCacheMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	for cacheName, cache := range s.Caches {
+		cacheTags := map[string]string{}
+		for k, v := range tags {
+			cacheTags[k] = v
+		}
+		cacheTags["cache"] = cacheName
+		cacheFields := map[string]interface{}{
+			"size":                      cache.Size,
+			"max_size":                  cache.MaxSize,
+			"cold":                      cache.Cold,
+			"hit_responses":             cache.Hit.Responses,
+			"hit_bytes":                 cache.Hit.Bytes,
+			"stale_responses":           cache.Stale.Responses,
+			"stale_bytes":               cache.Stale.Bytes,
+			"updating_responses":        cache.Updating.Responses,
+			"updating_bytes":            cache.Updating.Bytes,
+			"miss_responses":            cache.Miss.Responses,
+			"miss_bytes":                cache.Miss.Bytes,
+			"miss_responses_written":    cache.Miss.ResponsesWritten,
+			"miss_bytes_written":        cache.Miss.BytesWritten,
+			"expired_responses":         cache.Expired.Responses,
+			"expired_bytes":             cache.Expired.Bytes,
+			"expired_responses_written": cache.Expired.ResponsesWritten,
+			"expired_bytes_written":     cache.Expired.BytesWritten,
+			"bypass_responses":          cache.Bypass.Responses,
+			"bypass_bytes":              cache.Bypass.Bytes,
+			"bypass_responses_written":  cache.Bypass.ResponsesWritten,
+			"bypass_bytes_written":      cache.Bypass.BytesWritten,
+		}
+		// The revalidated section was added in status API version 3 and may be absent.
+		if cache.Revalidated != nil {
+			cacheFields["revalidated_responses"] = cache.Revalidated.Responses
+			cacheFields["revalidated_bytes"] = cache.Revalidated.Bytes
+		}
+		acc.AddFields("nginx_plus_cache", cacheFields, cacheTags)
+	}
+}
+
+func (s *Status) gatherStreamMetrics(tags map[string]string, acc telegraf.Accumulator) {
+	for zoneName, zone := range s.Stream.ServerZones {
+		zoneTags := map[string]string{}
+		for k, v := range tags {
+			zoneTags[k] = v
+		}
+		zoneTags["zone"] = zoneName
+		acc.AddFields(
+			"nginx.stream.zone",
+			map[string]interface{}{
+				"processing":  zone.Processing,
+				"connections": zone.Connections,
+				"received":    zone.Received,
+				"sent":        zone.Sent,
+			},
+			zoneTags,
+		)
+	}
+	for upstreamName, upstream := range s.Stream.Upstreams {
+		upstreamTags := map[string]string{}
+		for k, v := range tags {
+			upstreamTags[k] = v
+		}
+		upstreamTags["upstream"] = upstreamName
+		acc.AddFields(
+			"nginx_plus_stream_upstream",
+			map[string]interface{}{
+				"zombies": upstream.Zombies,
+			},
+			upstreamTags,
+		)
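+		// Stream peers report connection counters instead of HTTP response
+		// counts, but are tagged the same way as HTTP upstream peers.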
+		for _, peer := range upstream.Peers {
+			peerFields := map[string]interface{}{
+				"backup":                 peer.Backup,
+				"weight":                 peer.Weight,
+				"state":                  peer.State,
+				"active":                 peer.Active,
+				"connections":            peer.Connections,
+				"sent":                   peer.Sent,
+				"received":               peer.Received,
+				"fails":                  peer.Fails,
+				"unavail":                peer.Unavail,
+				"healthchecks_checks":    peer.HealthChecks.Checks,
+				"healthchecks_fails":     peer.HealthChecks.Fails,
+				"healthchecks_unhealthy": peer.HealthChecks.Unhealthy,
+				"downtime":               peer.Downtime,
+				"downstart":              peer.Downstart,
+				"selected":               peer.Selected,
+			}
+			if peer.HealthChecks.LastPassed != nil {
+				peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed
+			}
+			if peer.ConnectTime != nil {
+				peerFields["connect_time"] = *peer.ConnectTime
+			}
+			if peer.FirstByteTime != nil {
+				peerFields["first_byte_time"] = *peer.FirstByteTime
+			}
+			if peer.ResponseTime != nil {
+				peerFields["response_time"] = *peer.ResponseTime
+			}
+			peerTags := map[string]string{}
+			for k, v := range upstreamTags {
+				peerTags[k] = v
+			}
+			peerTags["upstream_address"] = peer.Server
+			peerTags["id"] = strconv.Itoa(peer.ID)
+			acc.AddFields("nginx_plus_stream_upstream_peer", peerFields, peerTags)
+		}
+	}
+}
+
+func init() {
+	inputs.Add("nginx_plus", func() telegraf.Input {
+		return &NginxPlus{}
+	})
+}
diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go
new file mode 100644
index 00000000..3357e1d0
--- /dev/null
+++ b/plugins/inputs/nginx_plus/nginx_plus_test.go
@@ -0,0 +1,413 @@
+package nginx_plus
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
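+// sampleStatusResponse is a canned version 6 status document that exercises
+// every section the plugin parses.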
+const sampleStatusResponse = `
+{
+    "version": 6,
+    "nginx_version":  "1.22.333",
+    "address":        "1.2.3.4",
+    "generation":     88,
+    "load_timestamp": 1451606400000,
+    "timestamp":      1451606400000,
+    "pid":            9999,
+    "processes": {
+        "respawned": 9999
+     },
+    "connections": { 
+        "accepted": 1234567890000,
+        "dropped":  2345678900000,
+        "active":   345,
+        "idle":     567
+    },
+    "ssl": {
+        "handshakes":        1234567800000,
+        "handshakes_failed": 5432100000000,
+        "session_reuses":    6543210000000
+    },
+    "requests": {
+        "total":   9876543210000,
+        "current": 98
+    },
+    "server_zones": {
+        "zone.a_80": {
+            "processing": 12,
+            "requests": 34,
+            "responses": {
+                "1xx": 111,
+                "2xx": 222,
+                "3xx": 333,
+                "4xx": 444,
+                "5xx": 555,
+                "total": 999
+            },
+            "discarded": 11,
+            "received": 22,
+            "sent": 33
+        },
+        "zone.a_443": {
+            "processing": 45,
+            "requests": 67,
+            "responses": {
+                "1xx": 1111,
+                "2xx": 2222,
+                "3xx": 3333,
+                "4xx": 4444,
+                "5xx": 5555,
+                "total": 999
+            },
+            "discarded": 44,
+            "received": 55,
+            "sent": 66
+        }
+    },
+    "upstreams": {
+        "first_upstream": {
+            "peers": [
+                {
+                    "id": 0,
+                    "server": "1.2.3.123:80",
+                    "backup": false,
+                    "weight": 1,
+                    "state": "up",
+                    "active": 0,
+                    "requests": 9876,
+                    "responses": {
+                        "1xx": 1111,
+                        "2xx": 2222,
+                        "3xx": 3333,
+                        "4xx": 4444,
+                        "5xx": 5555,
+                        "total": 987654
+                    },
+                    "sent": 987654321,
+                    "received": 87654321,
+                    "fails": 98,
+                    "unavail": 65,
+                    "health_checks": {
+                        "checks": 54,
+                        "fails": 32,
+                        "unhealthy": 21
+                    },
+                    "downtime": 5432,
+                    "downstart": 4321,
+                    "selected": 1451606400000
+                },
+                {
+                    "id": 1,
+                    "server": "1.2.3.123:80",
+                    "backup": true,
+                    "weight": 1,
+                    "state": "up",
+                    "active": 0,
+                    "requests": 8765,
+                    "responses": {
+                        "1xx": 1112,
+                        "2xx": 2223,
+                        "3xx": 3334,
+                        "4xx": 4445,
+                        "5xx": 5556,
+                        "total": 987655
+                    },
+                    "sent": 987654322,
+                    "received": 87654322,
+                    "fails": 99,
+                    "unavail": 88,
+                    "health_checks": {
+                        "checks": 77,
+                        "fails": 66,
+                        "unhealthy": 55
+                    },
+                    "downtime": 5433,
+                    "downstart": 4322,
+                    "selected": 1451606400000
+                }
+            ],
+            "keepalive": 1,
+            "zombies": 2
+        }
+    },
+    "caches": {
+        "cache_01": {
+            "size": 12,
+            "max_size": 23,
+            "cold": false,
+            "hit": {
+                "responses": 34,
+                "bytes": 45
+            },
+            "stale": {
+                "responses": 56,
+                "bytes": 67
+            },
+            "updating": {
+                "responses": 78,
+                "bytes": 89
+            },
+            "revalidated": {
+                "responses": 90,
+                "bytes": 98
+            },
+            "miss": {
+                "responses": 87,
+                "bytes": 76,
+                "responses_written": 65,
+                "bytes_written": 54
+            },
+            "expired": {
+                "responses": 43,
+                "bytes": 32,
+                "responses_written": 21,
+                "bytes_written": 10
+            },
+            "bypass": {
+                "responses": 13,
+                "bytes": 35,
+                "responses_written": 57,
+                "bytes_written": 79
+            }
+        }
+    },
+    "stream": {
+        "server_zones": {
+            "stream.zone.01": {
+                "processing": 24,
+                "connections": 46,
+                "received": 68,
+                "sent": 80
+            },
+            "stream.zone.02": {
+                "processing": 96,
+                "connections": 63,
+                "received": 31,
+                "sent": 25
+            }
+        },
+        "upstreams": {
+            "upstream.01": {
+                "peers": [
+                    {
+                        "id": 0,
+                        "server": "4.3.2.1:2345",
+                        "backup": false,
+                        "weight": 1,
+                        "state": "up",
+                        "active": 0,
+                        "connections": 0,
+                        "sent": 0,
+                        "received": 0,
+                        "fails": 0,
+                        "unavail": 0,
+                        "health_checks": {
+                            "checks": 40848,
+                            "fails": 0,
+                            "unhealthy": 0,
+                            "last_passed": true
+                        },
+                        "downtime": 0,
+                        "downstart": 0,
+                        "selected": 0
+                    },
+                    {
+                        "id": 1,
+                        "server": "5.4.3.2:2345",
+                        "backup": false,
+                        "weight": 1,
+                        "state": "up",
+                        "active": 0,
+                        "connections": 0,
+                        "sent": 0,
+                        "received": 0,
+                        "fails": 0,
+                        "unavail": 0,
+                        "health_checks": {
+                            "checks": 40851,
+                            "fails": 0,
+                            "unhealthy": 0,
+                            "last_passed": true
+                        },
+                        "downtime": 0,
+                        "downstart": 0,
+                        "selected": 0
+                    }
+                ],
+                "zombies": 0
+            }
+        }
+    }
+}
+`
+
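+// TestNginxPlusGeneratesMetrics serves the canned status document from an
+// httptest server and asserts the expected measurements, fields and tags.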
+func TestNginxPlusGeneratesMetrics(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var rsp string
+
+		if r.URL.Path == "/status" {
+			rsp = sampleStatusResponse
+			w.Header()["Content-Type"] = []string{"application/json"}
+		} else {
+			panic("Cannot handle request")
+		}
+
+		fmt.Fprintln(w, rsp)
+	}))
+	defer ts.Close()
+
+	n := &NginxPlus{
+		Urls: []string{fmt.Sprintf("%s/status", ts.URL)},
+	}
+
+	var acc testutil.Accumulator
+
+	require.NoError(t, n.Gather(&acc))
+
+	addr, err := url.Parse(ts.URL)
+	require.NoError(t, err)
+
+	host, port, err := net.SplitHostPort(addr.Host)
+	if err != nil {
+		host = addr.Host
+		if addr.Scheme == "http" {
+			port = "80"
+		} else if addr.Scheme == "https" {
+			port = "443"
+		} else {
+			port = ""
+		}
+	}
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_processes",
+		map[string]interface{}{
+			"respawned": int(9999),
+		},
+		map[string]string{
+			"server": host,
+			"port":   port,
+		})
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_connections",
+		map[string]interface{}{
+			"accepted": int(1234567890000),
+			"dropped":  int(2345678900000),
+			"active":   int(345),
+			"idle":     int(567),
+		},
+		map[string]string{
+			"server": host,
+			"port":   port,
+		})
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_ssl",
+		map[string]interface{}{
+			"handshakes":        int64(1234567800000),
+			"handshakes_failed": int64(5432100000000),
+			"session_reuses":    int64(6543210000000),
+		},
+		map[string]string{
+			"server": host,
+			"port":   port,
+		})
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_requests",
+		map[string]interface{}{
+			"total":   int64(9876543210000),
+			"current": int(98),
+		},
+		map[string]string{
+			"server": host,
+			"port":   port,
+		})
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_zone",
+		map[string]interface{}{
+			"processing":      int(12),
+			"requests":        int64(34),
+			"responses_1xx":   int64(111),
+			"responses_2xx":   int64(222),
+			"responses_3xx":   int64(333),
+			"responses_4xx":   int64(444),
+			"responses_5xx":   int64(555),
+			"responses_total": int64(999),
+			"discarded":       int64(11),
+			"received":        int64(22),
+			"sent":            int64(33),
+		},
+		map[string]string{
+			"server": host,
+			"port":   port,
+			"zone":   "zone.a_80",
+		})
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_upstream",
+		map[string]interface{}{
+			"keepalive": int(1),
+			"zombies":   int(2),
+		},
+		map[string]string{
+			"server":   host,
+			"port":     port,
+			"upstream": "first_upstream",
+		})
+
+	acc.AssertContainsTaggedFields(
+		t,
+		"nginx_plus_upstream_peer",
+		map[string]interface{}{
+			"backup":                 false,
+			"weight":                 int(1),
+			"state":                  "up",
+			"active":                 int(0),
+			"requests":               int64(9876),
+			"responses_1xx":          int64(1111),
+			"responses_2xx":          int64(2222),
+			"responses_3xx":          int64(3333),
+			"responses_4xx":          int64(4444),
+			"responses_5xx":          int64(5555),
+			"responses_total":        int64(987654),
+			"sent":                   int64(987654321),
+			"received":               int64(87654321),
+			"fails":                  int64(98),
+			"unavail":                int64(65),
+			"healthchecks_checks":    int64(54),
+			"healthchecks_fails":     int64(32),
+			"healthchecks_unhealthy": int64(21),
+			"downtime":               int64(5432),
+			"downstart":              int64(4321),
+			"selected":               int64(1451606400000),
+		},
+		map[string]string{
+			"server":           host,
+			"port":             port,
+			"upstream":         "first_upstream",
+			"upstream_address": "1.2.3.123:80",
+			"id":               "0",
+		})
+
+}
-- 
GitLab