Elasticsearch Study Notes

1、Installation: there are several options (downloads from the official site, package managers, etc.); Docker is used here

docker pull docker.elastic.co/elasticsearch/elasticsearch:7.14.0

2、Create a non-root user for working with Elasticsearch and Docker

# By default only root can use Docker; a normal user gets a permission error until it joins the docker group.
# Check the current user
whoami
# Create a new user
adduser username
# Set the user's password
passwd username
# Create the docker group (if it does not already exist)
groupadd docker
# Add the new user to the docker group
usermod -a -G docker username
# Re-read group membership
newgrp docker
# Make docker.sock owned by root with group docker
chown root:docker /var/run/docker.sock
# Switch to the new user
su username

3、Create the Elasticsearch container

docker run
--name elasticsearch                                   # container name
-p 9200:9200                                           # HTTP port
-p 9300:9300                                           # TCP transport port
-e "discovery.type=single-node"                        # single-node mode
-e ES_JAVA_OPTS="-Xms84m -Xmx512m"                     # JVM heap size
-v /home/docker/elasticsearch/data:/usr/share/elasticsearch/data         # mount the data directory
-v /home/docker/elasticsearch/logs:/usr/share/elasticsearch/logs         # mount the log directory
-v /home/docker/elasticsearch/plugins:/usr/share/elasticsearch/plugins   # mount the plugin directory
-d
docker.elastic.co/elasticsearch/elasticsearch:7.14.0   # the image pulled in step 1
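Two host-side prerequisites are easy to miss here: the bind-mounted data/logs/plugins directories usually need to be writable by the container's elasticsearch user (UID 1000), and the host normally needs vm.max_map_count raised to at least 262144 before Elasticsearch will start.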

4、Install Kibana; again there are several options, and Docker is used here as well

# Pull the image
docker pull kibana:7.14.0
# Create the container, mounting the config file
docker run -d 
--name kibana 
-p 5601:5601 
-v /home/es/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml 
58dffcbc8caa      # image ID of the kibana:7.14.0 image pulled above

5、Copy a file out of a container

docker cp <containerId>:/path/to/file /path/on/host
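For example, docker cp elasticsearch:/usr/share/elasticsearch/config/elasticsearch.yml /home/docker/elasticsearch/ copies the bundled configuration file out of the Elasticsearch container created above (container path as laid out in the official image, host path reusing the mount directory from step 3).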

6、Index operations

# List all indices
GET /_cat/indices?v
# Create an index
PUT /indexName
# Create an index with a JSON body: 1 primary shard, 0 replica shards
PUT /indexName
{
  "settings": {
    "number_of_shards": 1
    , "number_of_replicas": 0
  }
}
# Delete an index
DELETE /indexName
# Delete all indices
DELETE /*

7、Mapping operations

String types: keyword, text

Integer types: integer, long

Floating-point types: float, double

Boolean type: boolean

Date type: date

# Define the mapping when creating the index
PUT /indexName
{
  "settings": {
    "number_of_shards": 1
    , "number_of_replicas": 0
  }
  , "mappings": {
    "properties": {
      "id":{
        "type": "integer" 
      },
      "title":{
        "type": "keyword"
      },
      "price":{
        "type": "double"
      },
      "update_time":{
        "type": "date"
      },
      "describe":{
        "type": "text"
      }
    }
  }
}
# View an index's mapping
GET /indexName/_mapping

8、Document operations

# Index a document that conforms to the mapping; the id is generated automatically
POST /indexName/_doc
{
  "title":"电冰箱",
  "price":2.99,
  "update_time":"2023-03-25",
  "describe":"很好用"
}
# Index a document with an explicit id
POST /indexName/_doc/id
{
  "title":"电冰箱",
  "price":2.99,
  "update_time":"2023-03-25",
  "describe":"很好用"
}
# Get a document by id
GET /indexName/_doc/id
# Delete a document by id
DELETE /indexName/_doc/id
# Full update by id: the existing document is removed and replaced by the new body
PUT /indexName/_doc/id
{
  "title":"电视机"
}
# Partial update by id: only the supplied fields are overwritten
POST /indexName/_doc/id/_update
{
  "doc" : {
    "title":"电视机"
  }
}
# Bulk API: index two documents with explicit ids in a single request
POST /indexName/_doc/_bulk
  {"index":{"_id":7}}
  {"title":"瓜子","price":3.99,"update_time":"2022-04-23","describe":"好吃"}
  {"index":{"_id":8}}
  {"title":"饮料","price":4.99,"update_time":"2020-04-22","describe":"好喝"}
# Bulk API: mix index, update and delete actions in one request
POST /indexName/_doc/_bulk
  {"index":{"_id":9}}
  {"title":"矿泉水","price":8.99,"update_time":"1921-09-23","describe":"矿泉水也好喝"}
  {"update":{"_id":9}}
  {"doc":{"describe":"矿泉水好喝是好喝,就是太贵啦"}}
  {"delete":{"_id":1}}

9、Advanced search queries

# Return every document in the index
GET /indexName/_doc/_search    # or simply GET /indexName/_search
{
  "query":{
    "match_all":{}
  }
}
# Exact term query
# keyword, integer, double, date, boolean, float and long fields are not analyzed
# text fields are analyzed: with the default analyzer, Chinese is split into single characters and English into single words
GET /indexName/_doc/_search
{
  "query":{
    "term":{    # term = exact match against the stored terms
      "price":3.99    
    }
  }
}
# Range query
GET /indexName/_doc/_search
{
  "query":{
    "range":{    # range query
      "price":{
        "gte":2,    # gte = greater than or equal, gt = greater than
        "lte":3    # lte = less than or equal, lt = less than
      }
    }
  }
}
# Prefix query: match values that start with the given prefix
GET /indexName/_doc/_search
{
  "query":{
    "prefix":{    # prefix query
      "title":"瓜"
    }
  }
}
# Wildcard query: "?" matches exactly one character, "*" matches any number of characters
GET /indexName/_doc/_search
{
  "query":{
    "wildcard":{    # wildcard query
      "title":"?泉*"
    }
  }
}
# Query by a list of ids
GET /indexName/_doc/_search
{
  "query":{
    "ids":{
      "values":["7","8","9"]
    }
  }
}
# Fuzzy query: a term of length up to 2 must match exactly, length 3-5 allows one edit, and terms longer than 5 allow two edits
GET /indexName/_doc/_search
{
  "query":{
    "fuzzy":{
      "title":"矿石水"    # the indexed value is 矿泉水
    }
  }
}
# Bool query: must behaves like &&, should like ||, must_not like !
GET /indexName/_doc/_search
{
  "query":{
    "bool":{
      "must":[{
       "ids":{
         "values":[7,8,9]
       }
      }]
    }
  }
}
# Search one value across several fields; the fields must be of the same kind (all string or all numeric), otherwise the query fails
# If the fields are analyzed strings, the query text is analyzed and matched term by term
GET /indexName/_doc/_search
{
  "query": {
    "multi_match": {
      "query": "饮料",
      "fields": ["title","describe"]
    }
  }
}
# Search against a default field; the query text is analyzed only if the default field is analyzed
GET /indexName/_doc/_search
{
  "query":{
    "query_string":{
      "default_field":"describe",
      "query":"好喝"
    }
  }
}

10、Other search options

# Highlight the matched terms in the results
GET /indexName/_doc/_search
{
  "query":{
    "query_string":{
      "default_field":"describe",
      "query":"好喝"
    }
  },
  "highlight":{    # highlight section
    "fields":{
      "*":{}
    }
  }
}
# Limit how many documents are returned
GET /indexName/_doc/_search
{
  "query":{
    "match_all":{}
  },
  "size":3    # number of hits to return
}
# Set the start offset of the results (zero-based)
GET /indexName/_doc/_search
{
  "query":{
    "match_all":{}
  },
  "from":0
}
# Sort the results
GET /indexName/_doc/_search
{
  "query":{
    "match_all":{}
  },
  "sort":{    # sort clause
    "price":{
      "order":"desc"    # descending order
    }
  }
}
# Return only selected fields
GET /indexName/_doc/_search
{
  "query":{
    "match_all":{}
  },
  "_source":["title","describe"]    # fields to include in the response
}

11、How ES indexes data: the inverted index

ES storage is divided into an index area and a metadata (document) area.

Inverted index: the documents themselves are stored in the metadata area, while the terms produced from their fields (per field type and analyzer) are stored in the index area together with the ids of the documents that contain them; a query first looks up the terms in the index area and then fetches the matching documents from the metadata area by id.
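As a rough illustration of the idea only (not how ES implements it internally), a minimal in-memory inverted index can be sketched in Java: each term maps to the ids of the documents containing it, and a query term is answered by one map lookup followed by id-to-document resolution.

import java.util.*;

public class InvertedIndexDemo {

    // term -> ids of the documents containing that term (the "index area")
    private final Map<String, Set<Integer>> index = new HashMap<>();
    // id -> original document (the "metadata area")
    private final Map<Integer, String> docs = new HashMap<>();

    public void add(int id, String text) {
        docs.put(id, text);
        for (String term : text.toLowerCase().split("\\s+")) {   // naive whitespace "analyzer"
            index.computeIfAbsent(term, t -> new HashSet<>()).add(id);
        }
    }

    public List<String> search(String term) {
        List<String> hits = new ArrayList<>();
        for (Integer id : index.getOrDefault(term.toLowerCase(), Collections.emptySet())) {
            hits.add(docs.get(id));   // resolve ids back to the stored documents
        }
        return hits;
    }

    public static void main(String[] args) {
        InvertedIndexDemo demo = new InvertedIndexDemo();
        demo.add(1, "i am a good boy");
        demo.add(2, "a good drink");
        System.out.println(demo.search("good"));   // both documents match
    }
}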

12、The built-in analyzers

An analyzer consists of character filters (pre-processing, e.g. stripping HTML tags), a tokenizer, and token filters (post-processing of the produced tokens).

Built-in analyzers:

① Standard (the default): splits English into words and Chinese into single characters, drops punctuation, lowercases

② Simple: splits on any non-letter character (English into words, Chinese only at spaces and punctuation), drops symbols, lowercases

③ Stop: like Simple, but additionally removes English stop words such as this, a, an

④ Whitespace: splits on whitespace only, keeps symbols, does not lowercase

⑤ Keyword: no tokenization; the whole input becomes a single term

# standard
POST /_analyze
{
  "analyzer": "standard"
  , "text": ["i am a good boy"]
}
# simple
POST /_analyze
{
  "analyzer": "simple"
  , "text": ["i am a good boy,这里是中文"]
}
# whitespace
POST /_analyze
{
  "analyzer": "whitespace"
  , "text": ["I am a good boy,这里是中文"]
}
# Specify a field's analyzer when creating the mapping; only analyzed types (text) accept an analyzer
PUT /user
{
  "settings": {
    "number_of_shards": 1
    , "number_of_replicas": 0
  },
  "mappings": {
    "properties": {
      "title":{
        "type": "text"
        , "analyzer": "standard"
      }
    }
  }
}

13、The IK Chinese analyzer

# The plugin version must match the ES version
# Download: https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.14.0/elasticsearch-analysis-ik-7.14.0.zip
# Unzip it into the plugins directory of the ES container
# Coarse-grained analysis
POST /_analyze
{
  "analyzer": "ik_smart"
  ,"text": ["这里是中文"]
}
# Fine-grained analysis
POST /_analyze
{
  "analyzer": "ik_max_word"
  ,"text": ["这里是中文"]
}

14、Extension words: words the analyzer does not yet recognize as terms can be added to a custom dictionary

Stop words: words can be removed from the dictionary so they are no longer treated as terms

# Configure IK's extension and stop-word dictionaries
# in ik/config/IKAnalyzer.cfg.xml (the ext_dict and ext_stopwords entries point to the dictionary files)
# Both dictionary files live in the same directory as IKAnalyzer.cfg.xml, one word per line

15、Filter queries

# A filter must be wrapped in a bool query and is used together with its other clauses
# The filter is applied first (without scoring); the rest of the bool query then runs on the filtered documents
GET /product/_doc/_search
{
  "query":{
    "bool":{
      "must":[{
        "range":{
          "price":{
            "gte":0,
            "lte":10
          }
        }
      }],
      "filter":{
        "wildcard":{
          "describe":"*喝*"
        }
      }
    }
  }
}

16、Integrating ES with Spring Boot

# Maven dependency
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
        </dependency>
# Configuration class
@Configuration
public class ESConfig extends AbstractElasticsearchConfiguration {

    @Value("${url}")
    String url;
    @Override
    @Bean
    public RestHighLevelClient elasticsearchClient() {
        final ClientConfiguration clientConfiguration = ClientConfiguration.builder().connectedTo(url).build();
        return RestClients.create(clientConfiguration).rest();
    }
}
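Here url is expected to be a plain host:port string such as 127.0.0.1:9200 (ClientConfiguration.connectedTo() takes host and port, without a scheme); the url property name itself is this configuration's own choice and would be supplied from application.properties or application.yml.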

17、Two ways to work with ES from Spring:

ElasticsearchOperations: object-oriented, works with mapped entity classes

RestHighLevelClient: REST-style client that wraps the low-level details of the ES API

18、Using ElasticsearchOperations

//Entity class; the index is created from it
@Document(indexName = "product",createIndex = true) //index name; create the index automatically
public class Product {

    @Id         //maps this field to the document _id
    private Integer id;

    @Field(type = FieldType.Keyword)        //specifies the field type in the mapping
    private String title;

    @Field(type = FieldType.Double)
    private Double price;

    @Field(type = FieldType.Text)
    private String describe;

    //getters, setters and toString are assumed and omitted here
}
//Create the index and save a document object
@SpringBootTest     //required on the test class, otherwise nothing can be injected
public class Test {


    @Autowired
    ElasticsearchOperations elasticsearchOperations;

    @org.junit.jupiter.api.Test
    public void create(){
        Product product = new Product();
        product.setId(1);
        product.setTitle("上周青花瓷");
        product.setPrice(3.99);
        product.setDescribe("的确是上周的青花瓷,还带着烟火香");
        elasticsearchOperations.save(product);      //save the object to ES: inserts if the id does not exist, updates if it does


    }
}
//Get a document by id as an object
    @org.junit.jupiter.api.Test
    public void get(){

        Product product = elasticsearchOperations.get("1",Product.class);
        System.out.println(product.toString());
    }
//Delete all documents
    @org.junit.jupiter.api.Test
    public void deleteAll(){
        elasticsearchOperations.delete(Query.findAll(), Product.class);
    }
//Query all documents
    @org.junit.jupiter.api.Test
    public void find() throws JsonProcessingException {
        SearchHits<Product> searchHits =  elasticsearchOperations.search(Query.findAll(), Product.class);
        for (SearchHit<Product> s :searchHits){
            System.out.println(new ObjectMapper().writeValueAsString(s.getContent()));
        }
    }
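The examples above only use Query.findAll(); ElasticsearchOperations also accepts criteria-based queries. A minimal sketch, assuming the Product entity and data from above (the method name and the CriteriaQuery/Criteria imports from org.springframework.data.elasticsearch.core.query are this note's own choice):

//Query documents whose title equals a given value
    @org.junit.jupiter.api.Test
    public void findByTitle() {
        CriteriaQuery query = new CriteriaQuery(new Criteria("title").is("上周青花瓷"));
        SearchHits<Product> hits = elasticsearchOperations.search(query, Product.class);
        for (SearchHit<Product> hit : hits) {
            System.out.println(hit.getContent());
        }
    }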

19、Using RestHighLevelClient
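All snippets in this section assume the RestHighLevelClient bean from section 16 is injected into a @SpringBootTest class, roughly like this (the class name is arbitrary):

@SpringBootTest
public class RestClientTest {

    @Autowired
    RestHighLevelClient restHighLevelClient;    //the bean defined in ESConfig (section 16)

    //the test methods shown below live here
}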

Create an index

//Create an index, passing the settings and mappings as Query DSL JSON strings
    @Test
    public void createIndex() throws IOException {
        CreateIndexRequest createIndexRequest = new CreateIndexRequest("product");    //index name
        createIndexRequest.settings("{\n" +        //shard settings
                "    \"number_of_shards\": 1\n" +
                "    , \"number_of_replicas\": 0\n" +
                "  }",XContentType.JSON);
        createIndexRequest.mapping("{\n" +        //field mappings
                "    \"properties\": {\n" +
                "      \"id\":{\n" +
                "        \"type\": \"integer\" \n" +
                "      },\n" +
                "      \"title\":{\n" +
                "        \"type\": \"keyword\"\n" +
                "      },\n" +
                "      \"price\":{\n" +
                "        \"type\": \"double\"\n" +
                "      },\n" +
                "      \"update_time\":{\n" +
                "        \"type\": \"date\"\n" +
                "      },\n" +
                "      \"describe\":{\n" +
                "        \"type\": \"text\"\n" +
                "        , \"analyzer\": \"ik_max_word\"\n" +
                "      }\n" +
                "    }}", XContentType.JSON);
        CreateIndexResponse createIndexResponse = restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
        restHighLevelClient.close();
    }

Delete an index

    @Test
    public void deleteIndex() throws IOException {

        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest("product");
        restHighLevelClient.indices().delete(deleteIndexRequest,RequestOptions.DEFAULT);
        restHighLevelClient.close();

    }

Add a document to an index

    @Test
    public void add() throws IOException {

        IndexRequest indexRequest = new IndexRequest("product");    //target index
        indexRequest.id("1");       //explicit id; omit it to have ES generate one
        indexRequest.source("{\n" +         //document body
                "  \"title\":\"电冰箱\",\n" +
                "  \"price\":2.99,\n" +
                "  \"update_time\":\"2023-03-25\",\n" +
                "  \"describe\":\"很好用\"\n" +
                "}",XContentType.JSON);
        restHighLevelClient.index(indexRequest,RequestOptions.DEFAULT);
        restHighLevelClient.close();
    }

Update a document

    @Test
    public void update() throws IOException {

        UpdateRequest updateRequest = new UpdateRequest("product","1");
        updateRequest.doc("{\n" +
                "    \"title\":\"电视机\"\n" +
                "  }",XContentType.JSON);
        restHighLevelClient.update(updateRequest,RequestOptions.DEFAULT);
        restHighLevelClient.close();
    }

Delete a document

    @Test
    public void deleteDoc() throws IOException {

        DeleteRequest deleteRequest = new DeleteRequest("product");
        deleteRequest.id("1");
        restHighLevelClient.delete(deleteRequest,RequestOptions.DEFAULT);
        restHighLevelClient.close();
    }

Get a document by id

    @Test
    public void getById() throws IOException {

        GetRequest getRequest = new GetRequest("product");
        getRequest.id("1");
        GetResponse getResponse = restHighLevelClient.get(getRequest,RequestOptions.DEFAULT);
        String s = getResponse.toString();
        System.out.println(s);
        restHighLevelClient.close();
    }
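Section 8 covered bulk operations in the console; with the Java client the equivalent is a BulkRequest. A minimal sketch (the ids and document bodies are illustrative only):

    @Test
    public void bulk() throws IOException {
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("product").id("7")
                .source("{\"title\":\"瓜子\",\"price\":3.99,\"describe\":\"好吃\"}", XContentType.JSON));
        bulkRequest.add(new DeleteRequest("product").id("1"));
        BulkResponse bulkResponse = restHighLevelClient.bulk(bulkRequest, RequestOptions.DEFAULT);
        System.out.println(bulkResponse.hasFailures());     //true if any item in the batch failed
        restHighLevelClient.close();
    }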

Search all documents

    @Test
    public void getAll() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery());
        searchRequest.source(searchSourceBuilder);
//searchResponse contains the same JSON that Kibana returns for the equivalent query
        SearchResponse searchResponse = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        String s = searchResponse.toString();
        System.out.println(s);
    }

Search by condition (term query)

    @Test
    public void termSearch() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.termQuery("describe","好用"));
        searchRequest.source(searchSourceBuilder);
        SearchResponse searchResponse = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        String s = searchResponse.toString();
        System.out.println(s);

    }

Paged search: start at the second document (offset 1) and return a single document

    @Test
    public void pageSearch() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).from(1).size(1);
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s.toString());
    }

Sort the results in descending order

    @Test
    public void sortSearch() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).from(0).size(10).sort("price",SortOrder.DESC);
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s.toString());
    }

Return only selected fields: include title and price, exclude describe

    @Test
    public void sourceFilterSearch() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).from(0).size(10).sort("price",SortOrder.DESC).fetchSource(new String[]{"price","title"},new String[]{"describe"});
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s.toString());
    }

Highlight the query results with HighlightBuilder

    @Test
    public void highlightSearch() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        HighlightBuilder highlightBuilder = new HighlightBuilder();
        highlightBuilder.requireFieldMatch(false).field("describe").field("title");
        searchSourceBuilder.query(QueryBuilders.termQuery("describe","好喝")).highlighter(highlightBuilder);
        searchRequest.source(searchSourceBuilder);
        SearchResponse searchResponse = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        String s = searchResponse.toString();
        System.out.println(s);

    }
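The test above only prints the raw response; the generated fragments can also be read per hit via getHighlightFields(). A small sketch reusing the same query (the method name and printing logic are this note's own):

    @Test
    public void highlightFragments() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        HighlightBuilder highlightBuilder = new HighlightBuilder().requireFieldMatch(false).field("describe");
        searchRequest.source(new SearchSourceBuilder()
                .query(QueryBuilders.termQuery("describe","好喝"))
                .highlighter(highlightBuilder));
        SearchResponse response = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
        for (org.elasticsearch.search.SearchHit hit : response.getHits().getHits()) {
            //each entry maps a field name to its highlighted fragments
            hit.getHighlightFields().forEach((field, hl) ->
                    System.out.println(field + " -> " + java.util.Arrays.toString(hl.fragments())));
        }
    }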

Filtered search

    @Test
    public void fget() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();

        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).postFilter(QueryBuilders.termQuery("describe","好喝"));
        searchRequest.source(searchSourceBuilder);
        SearchResponse s =restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s);


    }

Storing and retrieving Java objects

//Index a Java object (serialized to JSON with Jackson)
//note: this assumes Product also has an update_time field plus getters and setters
    @Test
    public void las() throws IOException {
        Product product = new Product();
        product.setId(4);
        product.setTitle("黑鱼片");
        product.setPrice(28.0);
        product.setDescribe("很好吃");
        product.setUpdate_time(new Date());
        IndexRequest indexRequest = new IndexRequest("product");
        indexRequest.id(String.valueOf(product.getId())).source(new ObjectMapper().writeValueAsString(product),XContentType.JSON);
        IndexResponse s = restHighLevelClient.index(indexRequest,RequestOptions.DEFAULT);
        System.out.println(s.isFragment());
    }
//Read search hits back into Java objects
    @Test
    public void lget() throws IOException, JSONException {

        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery());
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        org.elasticsearch.search.SearchHit[] searchHits = s.getHits().getHits();
        List<Product> list = new ArrayList<>();
        for (org.elasticsearch.search.SearchHit a :searchHits){
            Product product = new ObjectMapper().readValue(a.getSourceAsString(),Product.class);
            list.add(product);
        }
        for (int i = 0; i < list.size(); i++) {
            System.out.println(list.get(i));
        }
    }

20、Aggregations

Query DSL

//Bucket all values of a field (terms aggregation)
GET /product/_search
{
  "query":{
    "match_all": {}
  },
  "aggs":{
    "dd_all":{        //aggregation name, chosen freely
      "terms": {
        "field": "price",        //bucket by price
        "size": 10
      }
    }
  }
}
//Maximum value of a field
GET /product/_search
{
  "query": {
    "match_all": {}
  },
  "aggs": {
    "dd_max":{
      "max": {
        "field": "price"        //max of price
      }
    }
  }
}
//Minimum value of a field
GET /product/_search
{
  "query": {
    "match_all": {}
  },
  "aggs": {
    "dd_min":{
      "min": {
        "field": "price"        //min of price
      }
    }
  }
}
//Average value of a field
GET /product/_search
{
  "query": {
    "match_all": {}
  },
  "aggs": {
    "dd_average":{
      "avg": {
        "field": "price"        //avg of price
      }
    }
  }
}

RestHighLevelClient

//Terms aggregation on a field via the Java client
    @Test
    public void agg() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).aggregation(AggregationBuilders.terms("price_all").field("price"));
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s);
    }
//Max aggregation on a field
    @Test
    public void aggm() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).aggregation(AggregationBuilders.max("max_all").field("price"));
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s);
    }
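The remaining metrics from the Query DSL part (min, avg) follow the same pattern through AggregationBuilders; for example, the average price (the aggregation name is chosen freely):

//Avg aggregation on a field
    @Test
    public void agga() throws IOException {
        SearchRequest searchRequest = new SearchRequest("product");
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery()).aggregation(AggregationBuilders.avg("avg_all").field("price"));
        searchRequest.source(searchSourceBuilder);
        SearchResponse s = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
        System.out.println(s);
    }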

21、Cluster setup

Configuration file (elasticsearch.yml)

# Cluster name
cluster.name: es_cluster
# Node name
node.name: node_1
# Allow access from other hosts
network.host: 0.0.0.0
# Publish address used for inter-node communication
network.publish_host: IP
# HTTP port
http.port: 9201
# TCP transport port
transport.tcp.port: 9301
# Transport addresses of the cluster nodes (for discovery)
discovery.seed_hosts: ["IP:9301","IP:9302","IP:9303"]
# Master-eligible nodes used when the cluster first forms
cluster.initial_master_nodes: ["node_1","node_2","node_3"]
# Minimum number of nodes before state recovery starts
gateway.recover_after_nodes: 2
# Enable CORS
http.cors.enabled: true
# Allowed CORS origins
http.cors.allow-origin: "*"
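For node_2 and node_3 the same file is reused, changing only node.name, http.port (9202/9203) and transport.tcp.port (9302/9303) so that they match the addresses listed in discovery.seed_hosts and in the Kibana configuration below.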

Kibana configuration for the ES cluster (kibana.yml)

server.host: "0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://IP:9201","http://IP:9202","http://IP:9203" ]
monitoring.ui.container.elasticsearch.enabled: true