
BM-14204 Fix: remove useless product bm-grafana

kladier authored on 08/02/2019 07:54:46
Showing 6 changed files
deleted file mode 100644
... ...
@@ -1,13 +0,0 @@
-<?xml version="1.0"?>
-<projectDescription>
-	<name>bm-grafana</name>
-	<comment/>
-	<projects>
-	</projects>
-	<buildSpec>
-	
-	</buildSpec>
-	<natures>
-	
-	</natures>
-</projectDescription>
deleted file mode 100644
... ...
@@ -1,173 +0,0 @@
-# Welcome to the InfluxDB configuration file.
-
-# If hostname (on the OS) doesn't return a name that can be resolved by the other
-# systems in the cluster, you'll have to set the hostname to an IP or something
-# that can be resolved here.
-# hostname = ""
-
-bind-address = "0.0.0.0"
-
-[logging]
-# logging level can be one of "debug", "info", "warn" or "error"
-level  = "info"
-file   = "/opt/influxdb/shared/log.txt"         # set to "stdout" to log to standard out
-
-# Configure the admin server
-[admin]
-port   = 9083              # binding is disabled if the port isn't set
-assets = "/opt/influxdb/current/admin"
-
-# Configure the http api
-[api]
-port     = 9086    # binding is disabled if the port isn't set
-# ssl-port = 8084    # SSL support is enabled if you set a port and cert
-# ssl-cert = /path/to/cert.pem
-
-# connections will timeout after this amount of time. Ensures that clients that misbehave
-# and keep alive connections they don't use won't end up connecting a million times.
-# However, if a request is taking longer than this to complete, it could be a problem.
-read-timeout = "5s"
-
-[input_plugins]
-
-  # Configure the graphite api
-  [input_plugins.graphite]
-  enabled = false
-  # port = 2003
-  # database = ""  # store graphite data in this database
-
-  [input_plugins.udp]
-  enabled = true
-  port = 4444
-  database = "bm"
-
-# Raft configuration
-[raft]
-# The raft port should be open between all servers in a cluster.
-# However, this port shouldn't be accessible from the internet.
-
-port = 9090
-
-# Where the raft logs are stored. The user running InfluxDB will need read/write access.
-dir  = "/opt/influxdb/shared/data/raft"
-
-# election-timeout = "1s"
-
-[storage]
-dir = "/opt/influxdb/shared/data/db"
-# How many requests to potentially buffer in memory. If the buffer gets filled then writes
-# will still be logged and once the local storage has caught up (or compacted) the writes
-# will be replayed from the WAL
-write-buffer-size = 10000
-
-[cluster]
-# A comma separated list of servers to seed
-# this server. This is only relevant when the
-# server is joining a new cluster. Otherwise
-# the server will use the list of known servers
-# prior to shutting down. Any server can be pointed to
-# as a seed. It will find the Raft leader automatically.
-
-# Here's an example. Note that the port on the host is the same as the raft port.
-# seed-servers = ["hosta:8090","hostb:8090"]
-
-# Replication happens over a TCP connection with a Protobuf protocol.
-# This port should be reachable between all servers in a cluster.
-# However, this port shouldn't be accessible from the internet.
-
-protobuf_port = 9099
-protobuf_timeout = "2s" # the write timeout on the protobuf conn; any duration parseable by time.ParseDuration
-protobuf_heartbeat = "200ms" # the heartbeat interval between the servers; must be parseable by time.ParseDuration
-protobuf_min_backoff = "1s" # the minimum backoff after a failed heartbeat attempt
-protobuf_max_backoff = "10s" # the maximum backoff after a failed heartbeat attempt
-
-# How many write requests to potentially buffer in memory per server. If the buffer gets filled then writes
-# will still be logged and once the server has caught up (or come back online) the writes
-# will be replayed from the WAL
-write-buffer-size = 10000
-
-# the maximum number of responses to buffer from remote nodes; if the
-# expected number of responses exceeds this number then querying will
-# happen sequentially and the buffer size will be limited to this
-# number
-max-response-buffer-size = 100
-
-# When queries get distributed out to shards, they go in parallel. This means that results can get buffered
-# in memory since results will come in any order, but have to be processed in the correct time order.
-# Setting this higher will give better performance, but you'll need more memory. Setting this to 1 will ensure
-# that you don't need to buffer in memory, but you won't get the best performance.
-concurrent-shard-query-limit = 10
-
-[leveldb]
-
-# Maximum mmap open files; this will affect the virtual memory used by
-# the process
-max-open-files = 40
-
-# LRU cache size. LRU is used by leveldb to store contents of the
-# uncompressed sstables. You can use `m` or `g` prefix for megabytes
-# and gigabytes, respectively.
-lru-cache-size = "200m"
-
-# The default setting on this is 0, which means unlimited. Set this to something if you want to
-# limit the max number of open files. max-open-files is per shard, so this * that will be max.
-max-open-shards = 0
-
-# The default setting is 100. This option tells how many points will be fetched from LevelDB before
-# they get flushed into the backend.
-point-batch-size = 100
-
-# These options specify how data is sharded across the cluster. There are two
-# shard configurations that have the same knobs: short term and long term.
-# Any series that begins with a capital letter like Exceptions will be written
-# into the long term storage. Any series beginning with a lower case letter
-# like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple
-# of days. Meanwhile, continuous queries can run downsampling on the short term
-# data and write into the long term area.
-[sharding]
-  # how many servers in the cluster should have a copy of each shard.
-  # this will give you high availability and scalability on queries
-  replication-factor = 1
-
-  [sharding.short-term]
-  # each shard will have this period of time. Note that it's best to have
-  # group by time() intervals on all queries be less than this setting. If they are,
-  # then the aggregate is calculated locally. Otherwise, all that data gets sent
-  # over the network when doing a query.
-  duration = "7d"
-
-  # split will determine how many shards to split each duration into. For example,
-  # if we created a shard for 2014-02-10 and split was set to 2, then two shards
-  # would be created that have the data for 2014-02-10. By default, data will
-  # be split into those two shards deterministically by hashing the (database, series)
-  # tuple. That means that data for a given series will be written to a single shard,
-  # making querying efficient. That can be overridden with the next option.
-  split = 1
-
-  # You can override the split behavior to have the data for series that match a
-  # given regex be randomly distributed across the shards for a given interval.
-  # You can use this if you have a hot spot for a given time series writing more
-  # data than a single server can handle. Most people won't have to resort to this
-  # option. Also note that using this option means that queries will have to send
-  # all data over the network, so they won't be as efficient.
-  # split-random = "/^hf.*/"
-
-  [sharding.long-term]
-  duration = "30d"
-  split = 1
-  # split-random = "/^Hf.*/"
-
-[wal]
-
-dir   = "/opt/influxdb/shared/data/wal"
-flush-after = 1000 # the number of writes after which the wal will be flushed, 0 for flushing on every write
-bookmark-after = 1000 # the number of writes after which a bookmark will be created
-
-# the number of writes after which an index entry is created pointing
-# to the offset of the first request, defaults to 1k
-index-after = 1000
-
-# the number of requests per log file; once this many have been written,
-# a new log file will be created
-requests-per-logfile = 10000
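
For context, the `[input_plugins.udp]` block above is the endpoint BlueMind pointed its metrics at: a UDP listener on port 4444 writing into the `bm` database. A minimal sketch of such a write, assuming the 0.8-era JSON series payload this generation of InfluxDB accepted over UDP (the series name, columns, and values are illustrative, not taken from BlueMind code):

```python
import json
import socket

# One metric point in the InfluxDB 0.8 JSON series format. Per the
# [sharding] notes above, a lower-case series name would land in the
# short-term shard space.
point = [{
    "name": "heartbeat.count",       # illustrative series name
    "columns": ["value", "host"],
    "points": [[1, "bm-master"]],    # illustrative values
}]

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(json.dumps(point).encode("utf-8"), ("127.0.0.1", 4444))
sock.close()
```

With the package gone, this configuration, and the listener it enabled, goes away as well.
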
deleted file mode 100644
... ...
@@ -1,11 +0,0 @@
-Source: bluemind
-Section: net
-Priority: optional
-Maintainer: BlueMind Team <team@blue-mind.net>
-Standards-Version: 3.9.1
-
-Package: bm-grafana
-Architecture: amd64
-Depends: bm-nginx (= 1.14.0-bluemind76), influxdb, curl
-Description: BlueMind monitoring dashboards
-  BlueMind monitoring dashboards
deleted file mode 100644
... ...
@@ -1,13 +0,0 @@
-#!/usr/bin/make -f
-
-include /usr/share/cdbs/1/rules/debhelper.mk
-
-JAVA_HOME := /usr/lib/jvm/bm-jdk
-# Do not generate crappy dependencies for unused libs
-DEB_DH_SHLIBDEPS_ARGS_bm-core := "-Xlib"
-
-build:
-	
-
-install/bm-grafana::
-	cp -rf ${CURDIR}/ROOT/* ${CURDIR}/debian/bm-grafana
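
The cdbs hook above is the entire install step: stage everything under ROOT/ into the package tree. A rough Python equivalent of that `cp -rf`, purely for illustration (paths as in the rules file):

```python
import shutil

# Copy the packaging ROOT/ tree into the staged debian package directory,
# merging into it if it already exists (mirrors `cp -rf ROOT/* ...`).
shutil.copytree("ROOT", "debian/bm-grafana", dirs_exist_ok=True)
```
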
deleted file mode 100644
... ...
@@ -1,50 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	 xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <artifactId>bm-grafana</artifactId>
-  <packaging>bm-package</packaging>
-  <parent>
-    <groupId>net.bluemind</groupId>
-    <artifactId>net.bluemind.packaging</artifactId>
-    <version>4.1.0-SNAPSHOT</version>
-  </parent>
-
-  <dependencies>
-    <dependency>
-      <groupId>bluemind-dependencies</groupId>
-      <artifactId>bm-grafana</artifactId>
-      <version>1.6.0</version>
-      <type>tgz</type>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-	<artifactId>maven-antrun-plugin</artifactId>
-	<executions>
-	  <execution>
-	    <phase>prepare-package</phase>
-	    <configuration>
-	      <tasks>
-		<untar compression="gzip" dest="${basedir}/target/packaging/ROOT/usr/share">
-		  <patternset>
-		    <exclude name="BM_NOTES"/>
-		  </patternset>
-		  <fileset dir="${basedir}/target/dependency">
-		    <include name="**/*.tgz" />
-		  </fileset>
-		</untar>
-	      </tasks>
-	    </configuration>
-	    <goals>
-	      <goal>run</goal>
-	    </goals>
-	  </execution>
-	</executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
-
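
The only build logic in this pom is the antrun `<untar>` task, which unpacked the bm-grafana tgz dependency into the packaging ROOT while skipping the BM_NOTES entry. A rough Python equivalent, assuming Maven had already copied the tgz into target/dependency as the pom expects (paths mirror the pom; the script itself is illustrative):

```python
import os
import tarfile

SRC = "target/dependency"                  # where the tgz dependency lands
DEST = "target/packaging/ROOT/usr/share"   # untar destination from the pom

os.makedirs(DEST, exist_ok=True)
for name in os.listdir(SRC):
    if name.endswith(".tgz"):
        with tarfile.open(os.path.join(SRC, name), "r:gz") as tar:
            # Mirror the <exclude name="BM_NOTES"/> patternset.
            keep = [m for m in tar.getmembers()
                    if os.path.basename(m.name) != "BM_NOTES"]
            tar.extractall(DEST, members=keep)
```
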
deleted file mode 100644
... ...
@@ -1,20 +0,0 @@
-Name:               bm-grafana
-Version:            %{_bmrelease}
-Release:            0
-License:            GNU Affero General Public License v3
-Group:              Applications/messaging
-URL:                http://www.bluemind.net/
-ExcludeArch:        s390 s390x
-Summary:            BlueMind monitoring dashboards
-Requires:           bm-nginx = 1.14.0-bluemind76, influxdb, curl
-
-%description
-BlueMind monitoring dashboards
-
-%global _curdir %_topdir/..
-%global _initrddir /etc/rc.d/init.d
-
-%prep
-rm -rf %{buildroot}/*
-
-%files