@zhangyy 2021-07-02

Greenplum basics

Greenplum series



1: Introduction to Greenplum

image_1eknhgr3f1ss44p91d4s13lr1a0a9.png-1729.8kB

2: Greenplum use cases

image_1ekni3n657va26siq1idvjvlm.png-1385.7kB

3: Greenplum architecture

image_1eknj24n31en9o5o4js1sv3a8d13.png-1394.9kB

image_1eknjb7ol21g1pct1o13pl31qdj1g.png-1339.5kB

4: Greenplum version history

image_1eknk65nd8ji180brq1gub1tlm1t.png-618.4kB

image_1eknkdgsc9s31v1bre6153j180b2a.png-822.6kB

5: Greenplum table distribution policies

image_1eknkifdempo1hn768f59hml2n.png-538.4kB

image_1eknkj4381oup13r5ugkmjo5sk34.png-1355.4kB

image_1eknkp8pk1ng31bba1f4v1v3k1b193h.png-1026.3kB
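The screenshots above cover the three distribution policies (hash, random, replicated). As a quick syntax reference, a minimal SQL sketch; the table and column names are made up for illustration:

  -- hash distribution: rows are mapped to segments by hashing the key column(s)
  create table t_hash (id int, name varchar(40)) distributed by (id);
  -- random distribution: rows are spread round-robin across segments
  create table t_random (id int, name varchar(40)) distributed randomly;
  -- replicated distribution (GP6+): every segment keeps a full copy
  create table t_repl (id int, name varchar(40)) distributed replicated;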

6: Greenplum table partitioning

image_1eknl5f52l911ms716ur5jv13rj3u.png-1621.9kB
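The partitioning details are in the screenshot above. As a hedged illustration only, a range-partitioned table in Greenplum's classic partition syntax might look like this (table name, columns, and dates are made up):

  create table t_sales (id int, sale_date date, amount numeric)
  distributed by (id)
  partition by range (sale_date)
  ( start (date '2021-01-01') inclusive
    end   (date '2022-01-01') exclusive
    every (interval '1 month') );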

7: Differences between Greenplum and Hadoop

image_1eknlgttfgcv1fipu5p1aerhdt4b.png-595.5kB

8: Deploying Greenplum

8.1 Environment preparation

  Environment: CentOS 7.8 x86_64
  1 master host
  1 standby host
  3 segment hosts
  2 hosts reserved for cluster expansion
  ----
  cat /etc/hosts
  ----
  192.168.3.121 test01.greenplum.uniondrug.com
  192.168.3.122 test02.greenplum.uniondrug.com
  192.168.3.123 test03.greenplum.uniondrug.com
  192.168.3.124 test04.greenplum.uniondrug.com
  192.168.3.125 test05.greenplum.uniondrug.com
  192.168.3.126 test06.greenplum.uniondrug.com
  192.168.3.127 test07.greenplum.uniondrug.com
  ----

8.2 System initialization

  cat >> /etc/sysctl.conf << EOF
  fs.aio-max-nr = 1048576
  fs.file-max = 6815744
  net.ipv4.ip_local_port_range = 9000 65500
  net.ipv4.conf.default.accept_source_route = 0
  net.ipv4.tcp_max_syn_backlog = 4096
  net.ipv4.conf.all.arp_filter = 1
  net.core.rmem_default = 262144
  net.core.rmem_max = 4194304
  net.core.wmem_default = 262144
  net.core.wmem_max = 1048586
  kernel.sem = 204800 512000 3000 20480
  kernel.shmmax = 1073741824
  kernel.shmall = 262144
  kernel.shmmni = 4096
  kernel.sysrq = 1
  kernel.core_uses_pid = 1
  kernel.msgmnb = 65536
  kernel.msgmax = 65536
  kernel.msgmni = 2048
  vm.swappiness = 10
  vm.overcommit_memory = 2
  vm.overcommit_ratio = 95
  vm.zone_reclaim_mode = 0
  vm.dirty_expire_centisecs = 500
  vm.dirty_writeback_centisecs = 100
  vm.dirty_background_ratio = 3
  vm.dirty_ratio = 10
  # hosts with 64GB RAM or less:
  #vm.dirty_background_ratio = 3
  #vm.dirty_ratio = 10
  # hosts with more than 64GB RAM:
  #vm.dirty_background_ratio = 0
  #vm.dirty_ratio = 0
  #vm.dirty_background_bytes = 1610612736
  #vm.dirty_bytes = 4294967296
  EOF
  ----
  sysctl -p
image_1ekvkcbq01f4o1qvfd8214u5qpqm.png-220.5kB
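The fixed kernel.shmmax / kernel.shmall values above are one option; the Greenplum 6 install guide recommends deriving them from physical memory instead. A minimal sketch (run as root, then put the printed values into /etc/sysctl.conf like the block above):

  # shmall = half of physical pages, shmmax = shmall * page size
  echo "kernel.shmall = $(expr $(getconf _PHYS_PAGES) / 2)"
  echo "kernel.shmmax = $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))"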

---

  -- 02. Resource limits
  cat >> /etc/security/limits.conf << EOF
  * soft nproc unlimited
  * hard nproc unlimited
  * soft nofile 524288
  * hard nofile 524288
  * soft stack unlimited
  * hard stack unlimited
  * hard memlock unlimited
  * soft memlock unlimited
  EOF
  rm -f /etc/security/limits.d/*

image_1ekvkf61c1ches8t1vv81ipshf13.png-70.4kB

image_1ekvkihfsejm126c17tstmoj621g.png-45.7kB
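The new limits only take effect for fresh sessions. A quick verification sketch, run from a new login:

  ulimit -n   # open files, expect 524288
  ulimit -u   # max user processes, expect unlimited
  ulimit -s   # stack size, expect unlimited
  ulimit -l   # max locked memory, expect unlimited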

  1. echo "session required pam_limits.so" >> /etc/pam.d/login
  2. cat /etc/pam.d/login
  3. echo "RemoveIPC=no" >> /etc/systemd/logind.conf
  4. service systemd-logind restart

image_1ekvkk5t31h9t1und747s8a174j1t.png-185.2kB

image_1ekvkl684ulv4aa1isg8ofojj2a.png-67kB

  -- 03. Security configuration
  echo "SELINUX=disabled" > /etc/selinux/config
  setenforce 0
  systemctl stop firewalld.service
  systemctl disable firewalld.service
  systemctl status firewalld.service
  systemctl set-default multi-user.target

image_1ekvkluqiedk6rp1sut117h13oi2n.png-84.7kB


  XFS mount options: rw,noatime,inode64,allocsize=16m
  /dev/sdb1 /greenplum xfs rw,noatime,inode64,allocsize=16m 0 0
  /dev/sdb2 /usr/local xfs rw,noatime,inode64,allocsize=16m 0 0
  echo "blockdev --setra 65536 /dev/sdb " >> /etc/rc.d/rc.local
  echo "blockdev --setra 65536 /dev/sdb1 " >> /etc/rc.d/rc.local
  echo "blockdev --setra 65536 /dev/sdb2 " >> /etc/rc.d/rc.local
  echo deadline > /sys/block/sdb/queue/scheduler
  yum install numactl
  vi /etc/default/grub
  GRUB_CMDLINE_LINUX="crashkernel=auto rhgb quiet numa=off transparent_hugepage=never elevator=deadline"
  grub2-mkconfig -o /etc/grub2.cfg
  numastat
  numactl --show
  numactl --hardware
  echo "RemoveIPC=no" >> /etc/systemd/logind.conf
  service systemd-logind restart

image_1ekvkrbuf27r1dmc174diqd1mkl34.png-60.4kB
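After the grub change and a reboot, the settings can be spot-checked. A small sketch (the device name sdb is the one assumed above):

  cat /sys/block/sdb/queue/scheduler                 # expect [deadline]
  cat /sys/kernel/mm/transparent_hugepage/enabled    # expect [never]
  blockdev --getra /dev/sdb                          # expect 65536
  numactl --hardware                                 # with numa=off, only one node is reported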


  yum -y install openssh-clients gcc gcc-c++ make automake autoconf libtool krb5-devel perl rsync coreutils glib2 ed lrzsz sysstat e4fsprogs xfsprogs ntp readline-devel zlib zlib-devel unzip bzip2 libyaml zip libevent
  shutdown -r now

image_1ekvl4anj1f3v6ad87hmnhi8c3h.png-198.1kB

8.3 Installation differences across Greenplum versions

  GPDB 4.x / 5.x:
    Install the master first; the .bin installer can go into a directory of your choice.
    gpseginstall pushes the installation out to the segment hosts.
    Validate the cluster parameters.
    gpinitsystem initializes the cluster.
  GPDB 6.x:
    zip/bin packages are no longer provided; only rpm.
    The master is installed under /usr/local/.
    GP6 no longer ships gpseginstall, so the software has to be packaged and installed on each host separately (see the sketch below).
    Validate the cluster parameters.
    gpinitsystem initializes the cluster.
8.4 Installing Greenplum

  Download the installer (from the official site).
  Install directory: /usr/local/greenplum-db
  mkdir -p /greenplum/gpdata
  mkdir -p /greenplum/soft
  Install the rpm:
  rpm -ivh greenplum-db-6.11.2-rhel7-x86_64.rpm

  Create the gpadmin user:
  groupadd -g 66000 gpadmin
  useradd -u 66000 -g gpadmin -m -d /home/gpadmin -s /bin/bash gpadmin
  echo "gpadmin" | passwd --stdin gpadmin
  su - gpadmin
  exit
  chown -R gpadmin:gpadmin /usr/local/greenplum*
  chmod -R 775 /usr/local/greenplum*
  chmod -R 775 /greenplum
  chown -R gpadmin:gpadmin /greenplum

image_1elha8iu1140h1649rne1q1q2eu9.png-179.7kB

  Host list configuration:
  su - gpadmin
  mkdir /usr/local/greenplum-db/config
  vim /usr/local/greenplum-db/config/all_hosts.txt
  ---
  test01.greenplum.uniondrug.com
  test02.greenplum.uniondrug.com
  test03.greenplum.uniondrug.com
  test04.greenplum.uniondrug.com
  test05.greenplum.uniondrug.com
  ---
  vim /usr/local/greenplum-db/config/all_seg.txt
  ---
  test03.greenplum.uniondrug.com
  test04.greenplum.uniondrug.com
  test05.greenplum.uniondrug.com
  ---
  Passwordless SSH for gpadmin:
  ./fgssh -user gpadmin -hosts "fggpmaster01 fggpmaster02 fggpdata01 fggpdata02 fggpdata03" -advanced -exverify -confirm
  chmod 600 /home/gpadmin/.ssh/config
  gpssh -f /usr/local/greenplum-db/config/all_hosts.txt -e 'ls -ls /greenplum'

image_1ekvn1a0111nf13981f2e1am71vqf3u.png-194kB
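The fgssh script above is a course-specific wrapper; the stock Greenplum utility for exchanging SSH keys is gpssh-exkeys. A sketch using the host file defined above, run as gpadmin with greenplum_path.sh already sourced:

  ssh-keygen -t rsa -b 4096        # accept the defaults, empty passphrase
  gpssh-exkeys -f /usr/local/greenplum-db/config/all_hosts.txt
  gpssh -f /usr/local/greenplum-db/config/all_hosts.txt -e 'hostname'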

  su - gpadmin
  mkdir -p /greenplum/gpdata/master   (on the master and standby nodes)
  gpssh -f /usr/local/greenplum-db/config/all_hosts.txt -e 'mkdir -p /greenplum/gpdata/primary'
  gpssh -f /usr/local/greenplum-db/config/all_hosts.txt -e 'mkdir -p /greenplum/gpdata/mirror'

image_1ekvn5qqu164rdmtani1boj1fj84b.png-92.5kB

image_1ekvn692q1hs1lbmug51ot8ahf4o.png-68.7kB


  Set the gpadmin environment variables:
  su - gpadmin
  /home/gpadmin/.bash_profile :
  echo "source /usr/local/greenplum-db/greenplum_path.sh" >>~/.bash_profile
  echo "export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1" >>~/.bash_profile
  echo "export PGHOME=/usr/local/greenplum-db" >>~/.bash_profile
  echo "export PGPORT=5432" >>~/.bash_profile
  echo "export PGDATABASE=postgres" >>~/.bash_profile
  echo "export PGUSER=gpadmin" >>~/.bash_profile
  cat ~/.bash_profile
  source ~/.bash_profile
  /home/gpadmin/.bashrc :
  echo "source /usr/local/greenplum-db/greenplum_path.sh" >>~/.bashrc
  echo "export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1" >>~/.bashrc
  echo "export PGHOME=/usr/local/greenplum-db" >>~/.bashrc
  echo "export PGPORT=5432" >>~/.bashrc
  echo "export PGDATABASE=postgres" >>~/.bashrc
  echo "export PGUSER=gpadmin" >>~/.bashrc
  cat ~/.bashrc
  source ~/.bashrc

  Sync to all nodes:
  gpscp -f /usr/local/greenplum-db/config/all_hosts.txt /home/gpadmin/.bash_profile gpadmin@=:/home/gpadmin/.bash_profile
  gpscp -f /usr/local/greenplum-db/config/all_hosts.txt /home/gpadmin/.bashrc gpadmin@=:/home/gpadmin/.bashrc

  Host checks:
  Network:
  gpcheckperf -f /usr/local/greenplum-db/config/all_seg.txt -r N -d /tmp
  Disk I/O and memory:
  gpcheckperf -f /usr/local/greenplum-db/config/all_seg.txt -r ds -D -d /greenplum/gpdata/primary -d /greenplum/gpdata/mirror
  Time synchronization check:
  gpssh -f /usr/local/greenplum-db/config/all_hosts.txt -e 'date'

  Time synchronization:
  If the clocks are not in sync, configure NTP.
  On the master host (as root):
  echo server 127.127.1.0 iburst >> /etc/ntp.conf
  systemctl restart ntpd
  systemctl enable ntpd
  ntpq -p
  On the other hosts (as root):
  echo server 192.168.1.11 >> /etc/ntp.conf
  echo restrict 192.168.1.11 nomodify notrap noquery >> /etc/ntp.conf
  ntpdate -u 192.168.1.11
  hwclock -w
  systemctl restart ntpd
  systemctl enable ntpd
  ntpq -p
  Check (as gpadmin):
  gpssh -f /usr/local/greenplum-db/config/all_hosts.txt -e 'date'

image_1ekvnbfd3t9p1vvj1komh2cmlq55.png-105.1kB


9: Cluster initialization

  vim /usr/local/greenplum-db/config/gpinitsystem_config
  ARRAY_NAME="greenplum"
  SEG_PREFIX=gpseg
  PORT_BASE=55000
  declare -a DATA_DIRECTORY=(/greenplum/gpdata/primary /greenplum/gpdata/primary /greenplum/gpdata/primary)
  MASTER_HOSTNAME=test01.greenplum.uniondrug.com
  MASTER_DIRECTORY=/greenplum/gpdata/master
  MASTER_PORT=5432
  MACHINE_LIST_FILE=/usr/local/greenplum-db/config/all_seg.txt
  TRUSTED_SHELL=ssh
  CHECK_POINT_SEGMENTS=8
  ENCODING=UNICODE
  MIRROR_PORT_BASE=56000
  REPLICATION_PORT_BASE=57000
  MIRROR_REPLICATION_PORT_BASE=58000
  declare -a MIRROR_DATA_DIRECTORY=(/greenplum/gpdata/mirror /greenplum/gpdata/mirror /greenplum/gpdata/mirror)

image_1ekvo0k5mtc19sa5s1pmvt3j5i.png-112.3kB


  Initialize the database:
  gpinitsystem -c /usr/local/greenplum-db/config/gpinitsystem_config -a
  or
  gpinitsystem -c /usr/local/greenplum-db/config/gpinitsystem_config -a -h /usr/local/greenplum-db/config/all_seg.txt
  Full redundancy (with a standby master):
  gpinitsystem -c /usr/local/greenplum-db/config/gpinitsystem_config -a -h /usr/local/greenplum-db/config/all_seg.txt -s fggpmaster02 -D
  or
  gpinitsystem -c /usr/local/greenplum-db/config/gpinitsystem_config -a -h /usr/local/greenplum-db/config/all_seg.txt -s test02.greenplum.uniondrug.com -D -B 2
  With many hosts:
  gpinitsystem -c /usr/local/greenplum-db/config/gpinitsystem_config -a -h /usr/local/greenplum-db/config/seg_hosts.txt -s fggpmaster02 -D -B 2 -S /greenplum/gpdata/mirror
  Mirroring layouts (see the verification sketch after this block):
  Group mirroring, the default (suits clusters with few hosts):
  all of a host's mirrors are placed on the next host, e.g.
  1 2 3 4 5
  5 1 2 3 4
  Mirror placement:
  Spread mirroring is the other mode (-S): it keeps the load from concentrating on a single node when one node fails (suits clusters with more hosts).
  With group mirroring you can normally only afford to lose one host, and that host's neighbor doubles its load; with more hosts, spread mirroring can even tolerate two failures.
  A host's first mirror goes to the next host, its second mirror to the host after that, its third to the one after that, e.g.
  1 2 3 4 5 6 7 8
  5 1 6 2 7 3 8 4
  If a host fails, the extra load is shared by the hosts that hold its mirrors, so losing one host is manageable when the cluster is large.
  If initialization fails, run gpstop and clean up the data directories:
  /bin/rm -rf /greenplum/gpdata/master/*
  /bin/rm -rf /greenplum/gpdata/primary/*
  /bin/rm -rf /greenplum/gpdata/mirror/*

  Tune parameters:
  gpconfig -c shared_buffers -v 129MB -m 126MB
  gpconfig -c max_connections -v 1000 -m 300
  gpconfig -s shared_buffers
  gpconfig -s max_connections
  gpstop -u
  Remote login:
  psql -h 192.168.3.121 -p5432 -U gpadmin

image_1ekvopapb12j41pg21jas196e18rg5v.png-81.8kB

image_1ekvovamb2qj1ovad0n1qkl1nm76c.png-41.4kB


  Useful commands:
  select version();

image_1ekvp6pt9191r1teo9jcjga15ab6p.png-83.4kB

  select * from gp_segment_configuration order by content asc,dbid;

image_1ekvpf6dk43g1kl11kh012h0ksf76.png-230kB


  Connecting directly to a segment:
  psql -h 192.168.3.123 -p55000 -U gpadmin
  ----
  psql: FATAL: connections to primary segments are not allowed
  DETAIL: This database instance is running as a primary segment in a Greenplum cluster and does not permit direct connections.
  HINT: To force a connection anyway (dangerous!), use utility mode.
  ----

image_1ekvqqluf13e8295cn8khj162o7j.png-66.2kB

  psql -h 192.168.100.13 -p55000 -U gpadmin

image_1el00qrklvlbjub1rgi150t1kvk80.png-68kB


  Connecting to a segment (data) node in utility mode:
  PGOPTIONS='-c gp_session_role=utility' psql -h127.0.0.1 -p55000
  PGOPTIONS='-c gp_session_role=utility' psql -h192.168.100.13 -p55000

image_1el0129vsbo10bq1vmil27b8b8d.png-60.6kB

image_1el014fbe19a81vb1lji16cn10v18q.png-54.9kB


  Remote connection:

image_1el01b08jeo3poc17r31sagl797.png-169.1kB

---

  psql -h192.168.100.11 -p5432 -U gpadmin
  alter role gpadmin with password 'gpadmin';

image_1el01rt551pr8mmp1pal9vi158ja1.png-30.9kB

  cd /greenplum/gpdata/master/gpseg-1
  vim pg_hba.conf
  ----
  Append at the end:
  host all all 0.0.0.0/0 md5
  ----
  gpstop -u

image_1el01hn591fcv2s099b1fd3nlh9k.png-132.3kB

image_1el01tsg8ou81e6r1cfrjll105tae.png-147.3kB

  Create a new query:
  create database fgedu;
  create user fgedu with password 'fgedu123';
  insert into itpux1 values(1,'itpux01',21);
  insert into itpux1 values(2,'itpux01',22);
  insert into itpux1 values(3,'itpux03',23);
  insert into itpux1 values(4,'itpux04',24);
  insert into itpux1 values(5,'itpux05',25);
  insert into itpux1 values(6,'itpux06',26);
  insert into itpux1 values(7,'itpux07',27);
  insert into itpux1 values(8,'itpux05',28);
  insert into itpux1 values(9,'itpux09',29);
  insert into itpux1 values(10,'itpux10',30);
  insert into itpux1 values(11,'itpux11',31);
  insert into itpux1 values(12,'itpux12',32);
image_1el02dvkt1vgsjqdpnu4b319n0ar.png-425.4kB

image_1el02g6t2qfm1s2p1vuh16iqurrb8.png-234.2kB

image_1el02h7bqpc71as9hmcomo1olnbl.png-259.1kB

image_1el02ocnu1qov1p9b1gcn1dvekc5c2.png-51.7kB

image_1el02pe9g8ro6ak1oup12l3olpcf.png-168kB

image_1el02prcakukbga16qs1thh181ocs.png-129.3kB


  Check how the data is spread across segments:
  select gp_segment_id,count(*) from itpux1 group by gp_segment_id;

image_1el02rvvo1k37g03134vfis6hnd9.png-49.5kB


  create table fgedu (name varchar(50));
  insert into fgedu values('风哥');
  insert into fgedu values('数据库');
  insert into fgedu values('培训教程');
  insert into fgedu values('fgedu.net.cn');
  insert into fgedu values('wx');
  insert into fgedu values('itpux-com');
  insert into fgedu values('wx-gzh');
  insert into fgedu values('itpux_com');
  insert into fgedu values('oracle');
  insert into fgedu values('mysql');
  insert into fgedu values('nosql');
  insert into fgedu values('国产数据库');
  insert into fgedu values('开源数据库');
  insert into fgedu values('高端就业课程');
  select * from fgedu;
  select gp_segment_id,count(*) from fgedu group by gp_segment_id;

image_1el0314bva131sg61t0bpts112pdm.png-229.3kB

image_1el031m5h2s31pfh1v95qcm1a0oe3.png-107kB
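The fgedu table above is created without a DISTRIBUTED clause, so Greenplum picks a default distribution key (by default the first eligible column, unless the random-default GUC is enabled). A sketch of how the chosen key can be checked:

  -- from psql, \d+ fgedu shows a "Distributed by:" line in the footer
  select * from pg_catalog.gp_distribution_policy where localoid = 'fgedu'::regclass;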


  Random distribution:
  create table itpux2(id int,name varchar(40)) distributed randomly;
  insert into itpux2 values(1,'fgedu1');
  insert into itpux2 values(2,'fgedu2');
  insert into itpux2 select * from itpux2;
  insert into itpux2 select * from itpux2;
  select count(*) from itpux2;
  select gp_segment_id,count(*) from itpux2 group by gp_segment_id;

image_1el0378mr1q0u50km3j1fqt1lpeg.png-112.2kB

  Replicated table:
  create table itpux3(id int,name varchar(40)) distributed replicated;
  insert into itpux3 values(1,'fgedu1');
  insert into itpux3 values(2,'fgedu2');
  insert into itpux3 select * from itpux3;
  insert into itpux3 select * from itpux3;
  select count(*) from itpux3;
  PGOPTIONS='-c gp_session_role=utility' psql -h192.168.100.13 -p55002 -d fgedu -U fgedu -c 'select count(*) from itpux3';
  PGOPTIONS='-c gp_session_role=utility' psql -h192.168.100.14 -p55002 -d fgedu -U fgedu -c 'select count(*) from itpux3';
  PGOPTIONS='-c gp_session_role=utility' psql -h192.168.100.14 -p55001 -d fgedu -U fgedu -c 'select count(*) from itpux3';
  With a replicated table, every segment holds the same full copy of the data.

image_1el03lt82nn21oho1p4n1sb1h8bet.png-160.2kB


10: Greenplum monitoring configuration

  Pivotal Greenplum Command Center 6.3 (the GPCC monitoring platform)
  Download:
  https://network.pivotal.io/products/pivotal-gpdb/

  gpperfmon_install --enable --password gpmon --port 5432

image_1el4iuosr1jn3jh7ttc1jul1smv9.png-277.3kB

  The main changes it makes are the following:

image_1el4j352hrfoe4e11djt3phm6m.png-112.9kB

image_1el4j44sj1an01m2hnrdg12ms1j.png-171.6kB

  Configuration file:
  /greenplum/gpdata/master/gpseg-1/postgresql.conf
  Access control:
  /greenplum/gpdata/master/gpseg-1/pg_hba.conf

image_1el4j592b18pct0o17pj1g27uls20.png-52.7kB

image_1el4ja0cj6v4t6ku9i1nsv1u0m2d.png-82.6kB

  Restart the Greenplum database:
  gpstop -r -a

image_1el4jdkceo571aciae01pckrs22q.png-193.3kB

image_1el4jf13aln11i21l221241ri037.png-165.7kB

  ps -ef |grep gp
  A configuration file is generated under /greenplum/gpdata/master/gpseg-1/gpperfmon/conf/ :
  gpperfmon.conf
  Log directory: /greenplum/gpdata/master/gpseg-1/gpperfmon/logs

image_1el4jkuc81km21vbg1k912kn7os3k.png-83.9kB

  psql -d gpperfmon -c 'select * from system_now';

image_1el4jvlrq9kf1ckdk9huhd1otc41.png-149.1kB


  When the master fails and the standby has to take over, these files need to be kept in sync:
  cd /greenplum/gpdata/master/gpseg-1
  scp pg_hba.conf gpadmin@192.168.100.12:/greenplum/gpdata/master/gpseg-1/
  scp ~/.pgpass gpadmin@192.168.100.12:~/

image_1el4k9l8r1mmkttu18un15u1a004e.png-97.6kB
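When the master actually fails, the standby is promoted with gpactivatestandby. A hedged sketch of the usual sequence, run as gpadmin on the standby host:

  gpactivatestandby -d $MASTER_DATA_DIRECTORY    # promote the standby to acting master
  gpstate -s                                     # confirm the cluster is up under the new master
  # later, the old master can be re-added as the new standby:
  # gpinitstandby -s test01.greenplum.uniondrug.com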


  Install greenplum-cc-web (as the gpadmin user):
  cd /greenplum/soft/
  unzip greenplum-cc-web-6.0.0-rhel7_x86_64.zip

image_1el4kf9at10tc1lvqjd51dig14ha4r.png-59.8kB

  cd greenplum-cc-web-6.0.0-rhel7_x86_64
  ./gpccinstall-6.0.0
  This installs GPCC on all hosts.
  The install directory is /greenplum/greenplum-cc-web-6.0.0

image_1el4khr6s1d011up735le6vrah5l.png-66kB

image_1el4kl1odjmv1s6blap12na2jk62.png-161.6kB

image_1el4klnbu18e418tli5k1q6f1vav6f.png-50.4kB

image_1el4kmovi1a9avjnlgqffngeh79.png-81.8kB

image_1el4km3a21gdmjh9uotfa1cc16s.png-151.9kB

  Load the gpcc_path.sh file:
  ---
  vim .bash_profile
  source /greenplum/greenplum-cc-web-6.0.0/gpcc_path.sh
  ---
  ---
  vim .bashrc
  source /greenplum/greenplum-cc-web-6.0.0/gpcc_path.sh
  ---
  Sync to all hosts:
  scp .bashrc .bash_profile gpadmin@192.168.100.12:/home/gpadmin/
  scp .bashrc .bash_profile gpadmin@192.168.100.13:/home/gpadmin/
  scp .bashrc .bash_profile gpadmin@192.168.100.14:/home/gpadmin/
  scp .bashrc .bash_profile gpadmin@192.168.100.15:/home/gpadmin/
  source /home/gpadmin/.bash_profile
  source /home/gpadmin/.bashrc

image_1el4l0erh1fce1em8r431r0q1srj7m.png-148.4kB

  cd /greenplum/gpdata/master/gpseg-1/
  vim pg_hba.conf
  Add an authorization entry:
  ---
  host gpperfmon gpmon 192.168.100.11/32 md5
  ---

image_1el4lebengl81jnu1ff7gl28er83.png-57.8kB

  gpcc start

image_1el4lh0pa3qmc3911qm1e24c718g.png-39.4kB

image_1el4oju2h11gb6ct52n13jdj6sah.png-50.4kB


  gpconfig -s gp_enable_gpperfmon
  To change it:
  gpconfig -c gp_enable_gpperfmon -v off

image_1el4otvt91r20uc01f3d1pi2tl0au.png-53.9kB

  Open in a browser:
  http://192.168.100.11:28080/
  Username/password: see cat /home/gpadmin/.pgpass
  gpmon / gpmon

image_1el4ln5lfvdd1iu91rqu37115nm9a.png-39.4kB

image_1el4lnsk41l3gkns1ilq3m03i69n.png-39kB

image_1el4lp3qttgkl7m1phf7vv1e76a4.png-258.9kB

image_1el4lkpkaavr1c4v1c6p10uk51d8t.png-305.5kB
