Commit 27a6dd4

Merge remote-tracking branch 'origin/feat_1.10_4.1.x_35406' into feat_1.10_4.1.x_35399_test

Conflicts:
- localTest/src/main/java/com/dtstack/flink/sql/localTest/LocalTest.java
- pom.xml

2 parents: a7cb12c + 738a4c7

9 files changed: +592, -0 lines

docs/plugin/httpSink.md

Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
## 1. Format

```
CREATE TABLE tableName(
    colName colType,
    ...
    colNameX colType
) WITH (
    type = 'http',
    url = 'http://xxx:8080/test/returnAll',
    flag = 'aa',
    delay = '10'
);
```

## 2. Table schema

| Parameter | Meaning |
|----|---|
| tableName | http table name |
| colName | column name |
| colType | column type ([supported types](../colType.md)) |
## 3. Parameters

| Parameter | Meaning | Required | Default |
|----|----|----|----|
| type | sink plugin type, must be http | yes | |
| url | target HTTP address | yes | |
| flag | result flag; when set, flag and tableName fields are added to each output record | no | |
| delay | delay between two consecutive output records, in milliseconds | no | 20 ms |
## 4. Example

```
-- {"name":"maqi","id":1001}
CREATE TABLE sourceIn (
    id int,
    name VARCHAR
) WITH (
    type = 'kafka',
    bootstrapServers = 'localhost:9092',
    topic = 'test1'
);

CREATE TABLE sinkOut (
    id int,
    name varchar
) WITH (
    type = 'http',
    url = 'http://xxx:8080/test/returnAll',
    flag = 'aa',
    delay = '10'
);

insert into sinkOut select id, name from sourceIn;
```

Input data: {"name":"maqi","id":1001}

Result data:
1. flag unset or empty string: {"name":"maqi","id":1001}
2. flag set (here 'aa'): {"flag":"aa","name":"maqi","id":1001,"tableName":"sinkOut"}
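
For a quick local test of the sink, the sketch below runs a bare-bones endpoint on the port and path used in the example URL above. The class is hypothetical and not part of this commit; it only prints whatever the sink POSTs.

```
import com.sun.net.httpserver.HttpServer;

import java.io.InputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

/** Hypothetical receiver for exercising the http sink locally. */
public class EchoEndpoint {
    public static void main(String[] args) throws Exception {
        HttpServer server = HttpServer.create(new InetSocketAddress(8080), 0);
        server.createContext("/test/returnAll", exchange -> {
            try (InputStream in = exchange.getRequestBody()) {
                // each sink record arrives as one JSON object in the POST body
                System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
            }
            exchange.sendResponseHeaders(200, -1); // 200 with an empty body
            exchange.close();
        });
        server.setExecutor(null); // default executor is fine for a smoke test
        server.start();
    }
}
```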

docs/pluginsInfo.md

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@
 * [impala sink plugin](plugin/impalaSink.md)
 * [db2 sink plugin](plugin/db2Sink.md)
 * [sqlserver sink plugin](plugin/sqlserverSink.md)
+* [http sink plugin](plugin/httpSink.md)

 #### 1.3 Side-table plugins
 * [hbase side-table plugin](plugin/hbaseSide.md)

http/http-sink/pom.xml

Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
```
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>sql.http</artifactId>
        <groupId>com.dtstack.flink</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>sql.sink.http</artifactId>
    <name>http-sink</name>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>1.4</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <createDependencyReducedPom>false</createDependencyReducedPom>
                            <artifactSet>
                                <excludes>
                                    <exclude>org.slf4j:*</exclude>
                                    <exclude>org.apache.hadoop:hadoop-common</exclude>
                                    <exclude>org.apache.hadoop:hadoop-auth</exclude>
                                    <exclude>org.apache.hadoop:hadoop-mapreduce-client-core</exclude>
                                </excludes>
                            </artifactSet>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                        </configuration>
                    </execution>
                </executions>
            </plugin>

            <plugin>
                <artifactId>maven-antrun-plugin</artifactId>
                <version>1.2</version>
                <executions>
                    <execution>
                        <id>copy-resources</id>
                        <!-- run in the package phase -->
                        <phase>package</phase>
                        <goals>
                            <goal>run</goal>
                        </goals>
                        <configuration>
                            <tasks>
                                <copy todir="${basedir}/../../sqlplugins/httpsink">
                                    <fileset dir="target/">
                                        <include name="${project.artifactId}-${project.version}.jar" />
                                    </fileset>
                                </copy>

                                <move file="${basedir}/../../sqlplugins/httpsink/${project.artifactId}-${project.version}.jar"
                                      tofile="${basedir}/../../sqlplugins/httpsink/${project.name}-${git.branch}.jar" />
                            </tasks>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>
```
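
In short: the shade plugin builds a self-contained plugin jar at package time, excluding slf4j and the Hadoop artifacts supplied by the runtime and stripping stale jar signatures, and the antrun plugin then copies the jar into sqlplugins/httpsink and renames it to http-sink-${git.branch}.jar, which appears to match the layout the other sink modules use.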
http/http-sink/src/main/java/com/dtstack/flink/sql/sink/http/HttpOutputFormat.java

Lines changed: 170 additions & 0 deletions
@@ -0,0 +1,170 @@
```
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.dtstack.flink.sql.sink.http;

import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat;
import com.dtstack.flink.sql.sink.http.table.HttpTableInfo;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.types.Row;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * @author: chuixue
 * @create: 2021-03-03 10:41
 * @description: output format that posts each record to an HTTP endpoint as a JSON object
 **/
public class HttpOutputFormat extends AbstractDtRichOutputFormat<Tuple2> {
    private static final Logger LOG = LoggerFactory.getLogger(HttpOutputFormat.class);

    private ObjectMapper mapper = new ObjectMapper();

    /**
     * Table configuration: url, flag, delay and field names.
     */
    private HttpTableInfo httpTableInfo;

    private transient Map<String, Object> sendMessage;

    private HttpOutputFormat() {
    }

    public static HttpOutputFormatBuilder buildHttpOutputFormat() {
        return new HttpOutputFormatBuilder();
    }

    @Override
    public void configure(Configuration parameters) {
    }

    @Override
    public void open(int taskNumber, int numTasks) {
        initMetric();
        sendMessage = new HashMap<>();
    }

    @Override
    public void writeRecord(Tuple2 record) {
        String value = null;
        CloseableHttpClient client = null;
        try {
            client = HttpClients.createDefault();
            RequestConfig requestConfig = RequestConfig.custom()
                    .setSocketTimeout(10000)
                    .setConnectTimeout(10000)
                    .setConnectionRequestTimeout(10000)
                    .build();
            HttpPost post = new HttpPost(this.httpTableInfo.getUrl());
            post.setConfig(requestConfig);
            // skip retract (delete) messages
            Tuple2<Boolean, Row> tupleTrans = record;
            if (!tupleTrans.f0) {
                return;
            }
            // when flag is set, also send flag and tableName alongside the field values
            if (StringUtils.isNotEmpty(httpTableInfo.getFlag())) {
                sendMessage.put("flag", httpTableInfo.getFlag());
                sendMessage.put("tableName", httpTableInfo.getName());
            }
            // add field values
            String[] fields = httpTableInfo.getFields();
            Row row = tupleTrans.getField(1);
            for (int i = 0; i < fields.length; i++) {
                sendMessage.put(fields[i], row.getField(i));
            }
            // send data
            value = mapper.writeValueAsString(sendMessage);
            sendMsg(value, client, post);
            // metrics
            outRecords.inc();
            if (outRecords.getCount() % ROW_PRINT_FREQUENCY == 0) {
                LOG.info(value);
            }
            TimeUnit.MILLISECONDS.sleep(httpTableInfo.getDelay());
        } catch (Exception e) {
            if (outDirtyRecords.getCount() % DIRTY_PRINT_FREQUENCY == 0 || LOG.isDebugEnabled()) {
                LOG.error("record insert failed: {}", value);
                LOG.error("", e);
            }
            outDirtyRecords.inc();
        } finally {
            sendMessage.clear();
            if (client != null) {
                try {
                    client.close();
                } catch (IOException e) {
                    LOG.error(e.getMessage());
                }
            }
        }
    }

    /**
     * Post the JSON payload to the configured url.
     *
     * @param value  JSON payload
     * @param client HTTP client
     * @param post   prepared POST request
     * @throws Exception on serialization or transport failure
     */
    private void sendMsg(String value, CloseableHttpClient client, HttpPost post) throws Exception {
        StringEntity stringEntity = new StringEntity(value);
        stringEntity.setContentEncoding("UTF-8");
        stringEntity.setContentType("application/json");
        post.setEntity(stringEntity);
        client.execute(post);
    }

    @Override
    public void close() {
    }

    public static class HttpOutputFormatBuilder {
        private final HttpOutputFormat httpOutputFormat;

        protected HttpOutputFormatBuilder() {
            this.httpOutputFormat = new HttpOutputFormat();
        }

        public HttpOutputFormatBuilder setHttpTableInfo(HttpTableInfo httpTableInfo) {
            httpOutputFormat.httpTableInfo = httpTableInfo;
            return this;
        }

        public HttpOutputFormat finish() {
            if (httpOutputFormat.httpTableInfo.getUrl() == null) {
                throw new IllegalArgumentException("No url supplied.");
            }
            return httpOutputFormat;
        }
    }
}
```
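
A minimal sketch of how the builder is wired up. The HttpTableInfo constructor and setter names here are assumptions inferred from the getters used above, not confirmed by this commit:

```
// Hypothetical wiring; setter names are inferred from the getters above.
HttpTableInfo tableInfo = new HttpTableInfo();
tableInfo.setUrl("http://xxx:8080/test/returnAll");
tableInfo.setFlag("aa");
tableInfo.setDelay(10);

HttpOutputFormat outputFormat = HttpOutputFormat.buildHttpOutputFormat()
        .setHttpTableInfo(tableInfo)
        .finish(); // throws IllegalArgumentException when no url was supplied
```

Note that finish() validates only the url; flag and delay are optional and fall back to their defaults.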
