@zhaikun
2018-05-09T16:36:45.000000Z
Big Data
The following steps must be performed on every node.
vim /etc/sysconfig/network
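On CentOS/RHEL 6 this file carries the machine's hostname; a minimal sketch of what it should contain on the master (each node uses its own name from the /etc/hosts list below):
NETWORKING=yes
HOSTNAME=BigDataMaster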
If SELinux has not been disabled yet, disable it as follows:
[root@BigDataMaster ~]# vi /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these two values:
# targeted - Targeted processes are protected,
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
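The change in /etc/selinux/config only takes effect after a reboot; to drop SELinux to permissive mode for the current session as well, run:
[root@BigDataMaster ~]# setenforce 0
[root@BigDataMaster ~]# getenforce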
Install the OS packages that Cloudera Manager depends on, and stop the firewall (run on every node):
[root@BigDataNode04 ~]# yum install bind-utils chkconfig cyrus-sasl-gssapi cyrus-sasl-plain fuse fuse-libs gcc httpd init-functions libxslt mod_ssl MySQL-python openssl openssl-devel perl portmap psmisc python-psycopg2 python-setuptools sed service sqlite swig useradd zlib
service iptables stop
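Stopping iptables only lasts until the next boot; assuming the firewall can stay off inside the cluster network, also keep it from starting again:
chkconfig iptables off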
Add every host that will join the cluster to /etc/hosts on all nodes, in the following format:
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.132.160 BigDataMaster
172.16.132.161 BigDataMaster02
172.16.132.162 BigDataNode01
172.16.132.163 BigDataNode02
172.16.132.164 BigDataNode03
172.16.132.165 BigDataNode04
172.16.132.166 BigDataNode05
172.16.132.167 BigDataNode06
172.16.132.168 BigDataNode07
172.16.132.169 BigDataNode08
Software download address:
Download the CDH Parcel files; only the three files shown in the figure are needed (the .parcel file, its .sha1 checksum file, and manifest.json).
Note: change the .sha1 file extension to .sha, and edit the file so that only the hash value remains.
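A minimal sketch of that rename and trim, assuming the parcel files have already been copied into Cloudera Manager's /opt/cloudera/parcel-repo directory and that the .sha1 file uses the usual "hash  filename" layout:
cd /opt/cloudera/parcel-repo
for f in *.parcel.sha1; do
    awk '{print $1}' "$f" > "${f%.sha1}.sha" && rm -f "$f"
done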
1. Generate a key:
[root@BigDataMaster ~]# ssh-keygen -t rsa
2. Copy the key to every node:
[root@BigDataMaster ~]# ssh-copy-id -i .ssh/id_rsa.pub root@172.16.132.161
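Only one node is shown above; the same command is repeated for every other host. A small loop sketch, assuming the cluster IPs listed in /etc/hosts above (root's password is still prompted for each host):
for ip in 172.16.132.{161..169}; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@$ip
done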
[root@BigDataNode01 ~]# mkdir /usr/local/java && tar zxvf jdk-8u101-linux-x64.tar.gz -C /usr/local/java
[root@BigDataMaster parcel-repo]# vim /etc/profile
export JAVA_HOME=/usr/local/java/jdk1.8.0_101
export PATH=$PATH:$JAVA_HOME/bin
[root@BigDataMaster parcel-repo]# . /etc/profile
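A quick sanity check that the JDK is picked up:
[root@BigDataMaster parcel-repo]# java -version
[root@BigDataMaster parcel-repo]# echo $JAVA_HOME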
[root@BigDataMaster ~]# vim /opt/cm-5.7.6/etc/cloudera-scm-agent/config.ini
[General]
# Hostname of the CM server.
server_host=172.16.132.160
# Port that the CM server is listening on.
server_port=7182
echo "vm.swappiness = 0" >>/etc/sysctl.conf && sysctl -p && echo never >/sys/kernel/mm/redhat_transparent_hugepage/defrag && echo "echo never >/sys/kernel/mm/redhat_transparent_hugepage/defrag" >>/etc/rc.local
[root@BigDataMaster] tar zxvf mysql-5.6.27-linux-glibc2.5-x86_64.tar.gz -C /usr/local/
[root@BigDataMaster] mv /usr/local/mysql-5.6.27-linux-glibc2.5-x86_64/ /usr/local/mysql
[root@BigDataMaster] mkdir -p /data/mysql/mysql3306/{data,logs,tmp}
[root@BigDataMaster] vim /data/mysql/mysql3306/my3306.cnf
#my.cnf
[client]
port = 3306
socket = /tmp/mysql.sock
[mysql]
prompt='(test)\u@\h [\d]>'
#tee=/data/mysql/mysql3306/data/query.log
no-auto-rehash
[mysqld]
#misc
user = mysql
basedir = /usr/local/mysql/
datadir = /data/mysql/mysql3306/data
port = 3306
socket = /tmp/mysql.sock
event_scheduler = 0
#tmp
tmpdir=/data/mysql/mysql3306/tmp
lower_case_table_names = 1
#timeout
interactive_timeout = 300
wait_timeout = 300
#character set
character-set-server = utf8
open_files_limit = 65535
max_connections = 100
max_connect_errors = 100000
#skip-name-resolve = 1
#logs
log-output=file
slow_query_log = 1
slow_query_log_file = slow.log
log-error = error.log
pid-file = mysql.pid
long_query_time = 1
#log-slow-admin-statements = 1
log-queries-not-using-indexes = 1
log-slow-slave-statements = 1
explicit_defaults_for_timestamp=1
#binlog
binlog_format = row
server-id = 2203306
log-bin = /data/mysql/mysql3306/logs/mysql-bin
binlog_cache_size = 4M
max_binlog_size = 1G
max_binlog_cache_size = 2G
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
expire_logs_days = 10
binlog_checksum = NONE # for GR
#relay log
relay-log = /data/mysql/mysql3306/logs/relay-bin
skip_slave_start = 1
max_relay_log_size = 1G
relay_log_purge = 0
relay_log_recovery = 1
log_slave_updates = 1
#slave-skip-errors=1032,1053,1062
# replication
master_info_repository = TABLE
relay_log_info_repository = TABLE
#buffers & cache
table_open_cache = 2048
table_definition_cache = 2048
max_heap_table_size = 96M
sort_buffer_size = 2M
join_buffer_size = 2M
thread_cache_size = 256
query_cache_size = 0
query_cache_type = 0
query_cache_limit = 256K
query_cache_min_res_unit = 512
thread_stack = 192K
tmp_table_size = 96M
key_buffer_size = 8M
read_buffer_size = 2M
read_rnd_buffer_size = 16M
bulk_insert_buffer_size = 32M
#myisam
myisam_sort_buffer_size = 128M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
#innodb
innodb_buffer_pool_size = 100M
innodb_buffer_pool_instances = 4
innodb_data_file_path = ibdata1:1G:autoextend
innodb_flush_log_at_trx_commit = 1
innodb_log_buffer_size = 64M
innodb_log_file_size = 500M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 50
innodb_file_per_table = 1
innodb_rollback_on_timeout
innodb_status_file = 1
innodb_io_capacity = 2000
transaction_isolation = READ-COMMITTED
innodb_flush_method = O_DIRECT
[mysqld_safe]
#malloc-lib=/usr/local/mysql/lib/jmalloc.so
nice=-19
open-files-limit=65535
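Before initializing the instance, the mysql system user referenced by user = mysql in the configuration needs to exist and own the directories; a brief sketch:
[root@BigDataMaster] useradd -M -s /sbin/nologin mysql
[root@BigDataMaster] chown -R mysql:mysql /usr/local/mysql /data/mysql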
[root@BigDataMaster] /usr/local/mysql/scripts/mysql_install_db --defaults-file=/data/mysql/mysql3306/my3306.cnf
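Once the script finishes, start the instance and set the root password that scm_prepare_database.sh uses later (a sketch; a fresh 5.6 install has an empty root password):
[root@BigDataMaster] /usr/local/mysql/bin/mysqld_safe --defaults-file=/data/mysql/mysql3306/my3306.cnf &
[root@BigDataMaster] /usr/local/mysql/bin/mysqladmin -uroot password '123456'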
1. First download the JDBC driver from the MySQL site at http://dev.mysql.com/downloads/connector/j/, extract it, find mysql-connector-java-5.1.41-bin.jar, and put it into /opt/cm-5.7.6/share/cmf/lib/:
[root@BigDataMaster mysql-connector-java-5.1.41]# cp mysql-connector-java-5.1.41-bin.jar /opt/cm-5.7.6/share/cmf/lib/
2. Initialize the CM5 database on the master node:
mysql> CREATE USER 'scm'@'%' IDENTIFIED BY 'scm';
Query OK, 0 rows affected (0.01 sec)
mysql> grant all privileges on *.* to 'scm'@'%' with grant option;
Query OK, 0 rows affected (0.01 sec)
mysql> grant all privileges on *.* to 'root'@'%' with grant option;
Query OK, 0 rows affected (0.00 sec)
mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)
mysql> create database hive DEFAULT CHARSET utf8 COLLATE utf8_general_ci;
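Depending on which services are deployed later, the same pattern is usually repeated for the other service databases; the names amon, oozie and hue below are only the conventional choices and should be adjusted to what you actually install:
/usr/local/mysql/bin/mysql -uroot -p123456 -e "create database amon DEFAULT CHARSET utf8 COLLATE utf8_general_ci;"
/usr/local/mysql/bin/mysql -uroot -p123456 -e "create database oozie DEFAULT CHARSET utf8 COLLATE utf8_general_ci;"
/usr/local/mysql/bin/mysql -uroot -p123456 -e "create database hue DEFAULT CHARSET utf8 COLLATE utf8_general_ci;"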
Initialize the SCM database:
[root@BigDataMaster schema]# /opt/cm-5.7.6/share/cmf/schema/scm_prepare_database.sh mysql -hlocalhost -uroot -p123456 --scm-host localhost scm scm scm
JAVA_HOME=/usr/local/java/jdk1.8.0_101
Verifying that we can write to /opt/cm-5.7.6/etc/cloudera-scm-server
Creating SCM configuration file in /opt/cm-5.7.6/etc/cloudera-scm-server
groups: cloudera-scm: no such user
Executing: /usr/local/java/jdk1.8.0_101/bin/java -cp /usr/share/java/mysql-connector-java.jar:/usr/share/java/oracle-connector-java.jar:/opt/cm-5.7.6/share/cmf/schema/../lib/* com.cloudera.enterprise.dbutil.DbCommandExecutor /opt/cm-5.7.6/etc/cloudera-scm-server/db.properties com.cloudera.cmf.db.
[ main] DbCommandExecutor INFO Successfully connected to database.
All done, your SCM database is configured correctly!
[root@BigDataNode04 ~]# scp -r /opt/cm-5.7.6/ root@172.16.132.169:/opt/
[root@BigDataNode04 ~]# /opt/cm-5.7.6/etc/init.d/cloudera-scm-server start
[root@BigDataNode04 ~]# /opt/cm-5.7.6/etc/init.d/cloudera-scm-agent start
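The server normally runs only on the CM host, while the agent is started on every node. Before opening the console at http://172.16.132.160:7180, a quick check that the server is listening (the log path assumes the tarball layout used here):
netstat -lnpt | grep 7180
tail -f /opt/cm-5.7.6/log/cloudera-scm-server/cloudera-scm-server.log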
This problem is reportedly a bug; the fix is described below.
Error log:
-yarn/stacks', u'bytes_free_warning_threshhold_bytes': 0, u'group': u'hadoop', u'user': u'yarn', u'mode': 493}, u'cpu': None, u'contents': None
Traceback (most recent call last):
  File "/opt/cm-5.7.6/lib64/cmf/agent/build/env/lib/python2.6/site-packages/cmf-5.7.6-py2.6.egg/cmf/agent.py", line 1605, in handle_heartbeat_p
    new_process.activate()
  File "/opt/cm-5.7.6/lib64/cmf/agent/build/env/lib/python2.6/site-packages/cmf-5.7.6-py2.6.egg/cmf/agent.py", line 3144, in activate
    self.write_process_conf()
  File "/opt/cm-5.7.6/lib64/cmf/agent/build/env/lib/python2.6/site-packages/cmf-5.7.6-py2.6.egg/cmf/agent.py", line 3251, in write_process_conf
    "source_parcel_environment", env))
  File "/opt/cm-5.7.6/lib64/cmf/agent/build/env/lib/python2.6/site-packages/cmf-5.7.6-py2.6.egg/cmf/util.py", line 373, in source
    raise e
ValueError: dictionary update sequence element #93 has length 1; 2 is required
Fix:
[root@BigDataNode06 cloudera-scm-agent]# vim /opt/cm-5.7.6/lib64/cmf/agent/build/env/lib/python2.6/site-packages/cmf-5.7.6-py2.6.egg/cmf/util.py
# original code
LOG.warn("None environment value: %s=%s" % (k, v,))
pipe = subprocess.Popen(['/bin/bash', '-c', ". %s; %s; env" % (path, command)],
                        stdout=subprocess.PIPE, env=caller_env)
data = pipe.communicate()[0]
try:
# fix: filter out the lines of exported bash functions (those containing "{" or "}"),
# which have no KEY=VALUE form and break the dict() parsing below
pipe = subprocess.Popen(['/bin/bash', '-c', ". %s; %s; env| grep -v { | grep -v }" % (path, command)],
                        stdout=subprocess.PIPE, env=caller_env)
data = pipe.communicate()[0]
try:
    return dict((line.split("=", 1) for line in data.splitlines()))
except Exception, e:
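After patching util.py, restart the agent on the affected node so the change takes effect, e.g.:
[root@BigDataNode06 cloudera-scm-agent]# /opt/cm-5.7.6/etc/init.d/cloudera-scm-agent restart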
The Hive metastore also needs the MySQL JDBC driver on its classpath, so copy the jar into Hive's lib directory as well:
[root@BigDataNode04 tools]# cp mysql-connector-java-5.1.41-bin.jar /opt/cloudera/parcels/CDH/lib/hive/lib/
When adding a new node, it is easiest to scp the corresponding directory from a slave that is already running, so server_host does not need to be edited again; however, on the new node all files under /opt/cm-5.7.0/lib/cloudera-scm-agent must be deleted, and everything under /opt/cm-5.7.0/log/cloudera-scm-agent must be deleted as well.
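As a sketch, the cleanup and agent start on the new node would look like this (paths exactly as given above):
rm -rf /opt/cm-5.7.0/lib/cloudera-scm-agent/*
rm -rf /opt/cm-5.7.0/log/cloudera-scm-agent/*
/opt/cm-5.7.0/etc/init.d/cloudera-scm-agent start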