Compiling, Installing, and Testing Doris on CentOS 7

Installation

Only the Docker-based build is covered here.

  1. Download the source

    https://github.com/DorisDB/incubator-doris

  2. Install Docker

    Building directly on a physical machine was tested but hit too many problems, so the build was moved into Docker. Note that this route requires CentOS 7 or later.

     yum update
     yum install -y yum-utils device-mapper-persistent-data lvm2
     yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
     yum list docker-ce --showduplicates | sort -r
     yum install docker-ce
     systemctl start docker
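
    A quick sanity check that the daemon is installed and running (version output will differ):

     docker --version
     systemctl status docker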
    
  3. Configure and start the Docker container

    Download the build image from:

     https://cloud.baidu.com/doc/PALO/s/Ikivhcwb5#docker-编译环境镜像下载
    
     docker load --input apachedoris-build-env-1.2
     # in each -v mapping, the host path is left of the colon and the in-container path is right of it
     docker run -it -v /app/docker/maven/Repository/:/root/.m2 -v /app/docker/incubator-doris-DORIS-0.13.0-release/:/root/incubator-doris-DORIS-0.13.0-release/ apachedoris/doris-dev:build-env-1.2
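     # verify the image loaded and the container is running (names/tags depend on the image you downloaded)
     docker images
     docker ps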
    
  4. Run the build (the source tree is shared back to the host through the mounted directory)

     sh build.sh
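     # on success the artifacts land in output/ in the source tree, visible on the
     # host through the volume mount; expect fe/ and be/ subdirectories (they are
     # copied into place in step 5 below)
     ls output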
    
  5. Cluster installation

     mkdir -p /app/doris
     cp -r fe /app/doris
     cp -r be /app/doris
     # FE node
     mkdir -p /app/doris/fe/doris-meta/
     # BE node
     mkdir -p /app/doris/be/storage
     vi .bash_profile
     # FE node
     export DORIS_HOME=/app/doris/fe
     # BE node
     export DORIS_HOME=/app/doris/be
     # reload the profile after editing
     source .bash_profile
    
     vi fe.conf
     # required: use a CIDR matching the subnet of the node's own IP so the FE binds the right interface
     priority_networks = 10.10.100.24/24
    
     vi be.conf
    
     priority_networks = 10.10.100.25/24
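    
     # The directories created above must match the server configs; these are the
     # standard Doris keys (optional if you keep the defaults under DORIS_HOME):
     # in fe.conf:
     meta_dir = /app/doris/fe/doris-meta
     # in be.conf:
     storage_root_path = /app/doris/be/storage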
    
  6. Port reference (set in the FE/BE/Broker config files)

    Instance | Port name              | Default | Direction                    | Description
    BE       | be_port                | 9060    | FE --> BE                    | thrift server port on the BE; receives requests from the FE
    BE       | webserver_port         | 8040    | BE <--> BE                   | http server port on the BE
    BE       | heartbeat_service_port | 9050    | FE --> BE                    | heartbeat service (thrift) port on the BE; receives FE heartbeats
    BE       | brpc_port              | 8060    | FE <--> BE, BE <--> BE       | brpc port on the BE; used for communication between BEs
    FE       | http_port              | 8030    | FE <--> FE, client           | http server port on the FE
    FE       | rpc_port               | 9020    | BE --> FE, FE <--> FE        | thrift server port on the FE
    FE       | query_port             | 9030    | client                       | MySQL protocol port on the FE
    FE       | edit_log_port          | 9010    | FE <--> FE                   | bdbje communication port between FEs
    Broker   | broker_ipc_port        | 8000    | FE --> Broker, BE --> Broker | thrift server port on the Broker; receives requests

  7. Start the FE

     ./start_fe.sh --daemon
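
    To verify the FE came up, query its bootstrap API on the http port (a minimal check, assuming the default http_port of 8030):

     curl http://10.10.100.24:8030/api/bootstrap
     # a healthy FE responds with "status": "OK"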

Deploying and configuring Doris

  1. Add the BE node to the FE

    Log in to the FE node:

     ./mysql --host 10.10.100.24 --port 19030 --user root
    
     ALTER SYSTEM ADD BACKEND "10.10.100.25:19050";
    
  2. Start the BE node

     sh bin/start_be.sh --daemon
    
  3. Check the BE status

     SHOW PROC '/backends';
    

    Connect to the FE with mysql-client and run SHOW PROC '/backends'; to inspect the BEs. If everything is healthy, the isAlive column shows true.

  4. Deploy the Broker

     cd /root/incubator-doris-DORIS-0.13.15-release/fs_brokers/apache_hdfs_broker
     ./build.sh
    

    The build output is placed in the output directory:

     mv /app/doris/apache_hdfs_broker /app/doris/broker
     # start the broker on every node
     sh bin/start_broker.sh --daemon
    
     ALTER SYSTEM ADD BROKER broker_name "sit1:18000","sit2:18000","sit3:18000","sit4:18000";
     SHOW PROC "/brokers";
    

Configuration

  1. Change the root password

     SET PASSWORD FOR 'root' = PASSWORD('root');
    
  2. Add a new user

     CREATE USER 'dams' IDENTIFIED BY 'dams';
     GRANT ALL ON iptv TO 'dams';
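
    To verify, log in as the new user and list its privileges (a minimal check; query_port is mapped to 19030 in this deployment):

     ./mysql --host 10.10.100.24 --port 19030 --user dams --password
     SHOW GRANTS;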
    

Creating tables

  1. Create the database

     CREATE DATABASE iptv;
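     # switch to the new database so the unqualified table names below resolve into it
     USE iptv;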
    
  2. Create composite partition tables

     # Dynamic partitioning
    
     CREATE TABLE personclusterdata
     (
         userid VARCHAR(1024),
         time DATE,
         type VARCHAR(20),
         pclusterid VARCHAR(200)
     )
     PARTITION BY RANGE(time)()
     DISTRIBUTED BY HASH(userid,type)
     PROPERTIES
     (
         "dynamic_partition.enable" = "true",
         "dynamic_partition.time_unit" = "DAY",
         "dynamic_partition.start" = "-360",
         "dynamic_partition.end" = "3",
         "dynamic_partition.prefix" = "p",
         "dynamic_partition.replication_num" = "1",
         "dynamic_partition.buckets" = "32"
     );
    
     # Non-dynamic (manual) partitioning
    
     CREATE TABLE personclusterdata
     (
         userid VARCHAR(1024),
         time DATE,
         type VARCHAR(20),
         pclusterid VARCHAR(200)
     )
     PARTITION BY RANGE(time)
     (
         PARTITION p20201128 VALUES LESS THAN ('20201128'),
         PARTITION p20201129 VALUES LESS THAN ('20201129')
     )
     DISTRIBUTED BY HASH(userid,type) BUCKETS 32
     PROPERTIES("replication_num" = "2");
    
    
     # Check what was created, and load status
    
     SHOW DYNAMIC PARTITION TABLES;
     SHOW PARTITIONS FROM personclusterdata;
     SHOW LOAD WHERE LABEL = "bbb";
     CANCEL LOAD WHERE LABEL = "bbb";
     HELP SHOW LOAD;
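
    For the non-dynamic variant, new partitions must be added by hand as time advances; an illustrative pair of statements (partition name and cutoff date are examples):

     ALTER TABLE personclusterdata ADD PARTITION p20201130 VALUES LESS THAN ('20201130');
     ALTER TABLE personclusterdata DROP PARTITION p20201128;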
    
  3. Load data from HDFS

     LOAD LABEL bbb
     (
     DATA INFILE("hdfs://10.0.9.53:8020/zhangniantest/*")
     INTO TABLE `personclusterdata`
     COLUMNS TERMINATED BY "|"
     (t_userid,t_type,t_pclusterid,t_time)
     SET (
       userid=t_userid,
       type=t_type,
       pclusterid=t_pclusterid,
       time=t_time
     )
     )
     WITH BROKER broker_name ("username"="hadoop", "password"="hadoop");
    
  4. Test with real business tables

     CREATE TABLE VOD_TIME_COL
     (
       vodname    VARCHAR(500),
       vodtime    INT,
       time       DATE,
       updatetime DATETIME,
       state      INT,
       version    INT,
       length     INT,
       times      DOUBLE,
       playtimes  INT,
       pnum       INT,
       colid      VARCHAR(400),
       stbtype    INT
     )
     PARTITION BY RANGE(time)
     (
         PARTITION p20210131 VALUES LESS THAN ('20210131'),
         PARTITION p20210201 VALUES LESS THAN ('20210201')
     )
     DISTRIBUTED BY HASH(colid) BUCKETS 32
     PROPERTIES(
     "replication_num" = "2",
     "colocate_with" = "colid"
     );
     
     
     CREATE TABLE VOD_SERIES_TIME_COL
     (
       vodseriesname VARCHAR(200),
       vodseriestime INT,
       time          DATE,
       updatetime    DATETIME,
       state         INT,
       version       INT,
       length        INT,
       times         DOUBLE,
       num           INT,
       playtimes     INT,
       stbtype       INT,
       pnum          INT,
       colid         VARCHAR(400)
     )
     PARTITION BY RANGE(time)
     (
         PARTITION p20210131 VALUES LESS THAN ('20210131'),
         PARTITION p20210201 VALUES LESS THAN ('20210201')
     )
     DISTRIBUTED BY HASH(colid) BUCKETS 32
     PROPERTIES(
     "replication_num" = "2",
     "colocate_with" = "colid"
     );
     
     CREATE TABLE VOD_COL_IMPORT
     (
       colid          VARCHAR(400),
       parentid       VARCHAR(400),
       name           VARCHAR(400),
       sourcecolid    VARCHAR(400),
       sourcename     VARCHAR(400),
       updatetime     DATETIME,
       state          INT,
       version        INT,
       sourceparentid VARCHAR(400)
     )
     DISTRIBUTED BY HASH(colid) BUCKETS 32
     PROPERTIES(
     "replication_num" = "2",
     "colocate_with" = "colid"
     );
     
     CREATE TABLE VOD_COL_MERGE
     (
       colid     VARCHAR(400),
       groupname VARCHAR(400),
       type      VARCHAR(10)
     )
     DISTRIBUTED BY HASH(colid) BUCKETS 32
     PROPERTIES(
     "replication_num" = "2",
     "colocate_with" = "colid"
     );
    
     CREATE TABLE VOD_COL_FULL
     (
       colid     VARCHAR(400),
       SOURCECOLID VARCHAR(400),
       GROUPNAME      VARCHAR(400)
     )
     DISTRIBUTED BY HASH(colid) BUCKETS 32
     PROPERTIES(
     "replication_num" = "2",
     "colocate_with" = "colid"
     );
    
     # The following command lists the colocation groups that exist in the cluster.
     
     SHOW PROC '/colocation_group';
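    
     # Because these tables share a colocation group and identical bucketing on
     # colid, joins on colid can run locally on each BE without a network shuffle.
     # An illustrative query over the tables above:
     SELECT m.groupname, SUM(t.playtimes) AS total_plays
     FROM VOD_TIME_COL t
     JOIN VOD_COL_MERGE m ON t.colid = m.colid
     GROUP BY m.groupname;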
    
    
     LOAD LABEL L_VOD_COL_MERGE
     (
     DATA INFILE("hdfs://10.10.100.24:8020/dorisdata/VOD_COL_MERGE.csv")
     INTO TABLE `VOD_COL_MERGE`
     COLUMNS TERMINATED BY ","
     (t_colid,t_groupname,t_type)
     SET (
       colid=t_colid,
       groupname=t_groupname,
       type=t_type
     )
     )
     WITH BROKER broker_name ("username"="hadoop", "password"="hadoop");
     
     
     LOAD LABEL L_VOD_COL_IMPORT
     (
     DATA INFILE("hdfs://10.10.100.24:8020/dorisdata/VOD_COL_IMPORT.csv")
     INTO TABLE `VOD_COL_IMPORT`
     COLUMNS TERMINATED BY "|"
     (t_colid,t_parentid,t_name,t_sourcecolid,t_sourcename,t_updatetime,t_state,t_version,t_sourceparentid)
     SET (
       colid=t_colid,
       parentid=t_parentid,
       name=t_name,
       sourcecolid=t_sourcecolid,
       sourcename=t_sourcename,
       updatetime=t_updatetime,
       state=t_state,
       version=t_version,
       sourceparentid=t_sourceparentid
     )
     )
     WITH BROKER broker_name ("username"="hadoop", "password"="hadoop");
    
     # Create rollups
     ALTER TABLE VOD_COL_IMPORT ADD ROLLUP rollup_VCI(colid, sourcecolid);
     ALTER TABLE VOD_COL_MERGE ADD ROLLUP rollup_city(colid, groupname);
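     # rollup creation runs as an asynchronous background job; check progress with:
     SHOW ALTER TABLE ROLLUP;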
    
