J2模板中的Ansible EHT1 IPv4地址

发布于 2025-02-06 20:07:41 字数 9861 浏览 2 评论 0原文

我目前正在做一项学校作业，需要配置 HAProxy 在两台 Web 服务器之间进行负载均衡。

我通过 VirtualBox 中的 Vagrant 部署虚拟机。之后运行我的 Ansible Playbook，先配置 Web 服务器；Web 服务器配置完成后，再配置负载均衡器（LoadBalancer）。

可悲的是,我无法将两个ETH1适配器的IPv4地址添加到haproxy.conf中。我反复收到一个信息,即Ansible无法在Hostvars内找到变量。

任务[配置Haproxy]


致命: [HDVLD-TEST-LB01]: 失败! => {"changed": false, "msg": "AnsibleUndefinedVariable: 'ansible.vars.hostvars.HostVarsVars object' has no attribute 'ansible_eth1'"}

此外，HAProxy 在 10.2.2.20:8080 上没有响应 —— Chrome 显示

err_connection_refused

我希望这里有人可以帮助我..

我会在这里粘贴代码。

vagrantfile

# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
#  config.ssh.insert_key = false

  # Webservers: HDVLD-TEST-WEB01 -> 10.2.2.11, HDVLD-TEST-WEB02 -> 10.2.2.12
  (1..2).each do |i|
    config.vm.define "HDVLD-TEST-WEB0#{i}" do |webserver|
      webserver.vm.box = "ubuntu/trusty64"
      webserver.vm.hostname = "HDVLD-TEST-WEB0#{i}"
      webserver.vm.network :private_network, ip: "10.2.2.1#{i}"
      webserver.vm.provider :virtualbox do |vb|
        vb.memory = "524"
        vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
      end
      webserver.vm.provision "shell" do |shell|
        # Use File.read (not File.readlines): readlines returns an Array,
        # whose string form ("[\"ssh-rsa ...\\n\"]") would be echoed
        # literally into authorized_keys and break SSH key auth.
        ssh_pub_key = File.read("#{Dir.home}/.ssh/id_rsa.pub").strip
        shell.inline = <<-SHELL
          echo "#{ssh_pub_key}" >> /home/vagrant/.ssh/authorized_keys
          echo "#{ssh_pub_key}" >> /root/.ssh/authorized_keys
        SHELL
      end
    end
  end
  # NOTE: the (1..2) loop is closed HERE. In the original the missing `end`
  # left the loadbalancer definition and the ansible provisioner inside the
  # loop, so both were declared twice (once per iteration).

#  config.vm.define "HDVLD-TEST-DB01" do|db_server|
#    db_server.vm.box = "ubuntu/trusty64"
#    db_server.vm.hostname = "HDVLD-TEST-DB01"
#    db_server.vm.network :private_network, ip: "10.2.2.30"
#  end

  # Loadbalancer: HDVLD-TEST-LB01 -> 10.2.2.20
  config.vm.define "HDVLD-TEST-LB01" do |lb_server|
    lb_server.vm.box = "ubuntu/trusty64"
    lb_server.vm.hostname = "HDVLD-TEST-LB01"
    lb_server.vm.network :private_network, ip: "10.2.2.20"
    lb_server.vm.provider :virtualbox do |vb|
      vb.memory = "524"
      vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
    end
    lb_server.vm.provision "shell" do |shell|
      ssh_pub_key = File.read("#{Dir.home}/.ssh/id_rsa.pub").strip
      shell.inline = <<-SHELL
        echo "#{ssh_pub_key}" >> /home/vagrant/.ssh/authorized_keys
        echo "#{ssh_pub_key}" >> /root/.ssh/authorized_keys
      SHELL
    end
  end

  # Single Ansible provisioner for all machines, with the group mapping
  # the playbook's `hosts:` patterns rely on.
  config.vm.provision :ansible do |ansible|
    ansible.playbook = "webserver_test.yml"
    ansible.groups = {
      "webservers" => ["HDVLD-TEST-WEB01", "HDVLD-TEST-WEB02"],
      "loadbalancer" => ["HDVLD-TEST-LB01"]
    }
  end
end

playbook.yml

---
# Play 1: install and configure Apache on both webservers, and make sure
# the eth1 facts exist in hostvars for the loadbalancer play's template.
- hosts: webservers
  become: true
  vars_files:
    - vars/default.yml
  gather_facts: true
  tasks:
    # `filter` matches TOP-LEVEL fact names only. The original dotted path
    # `ansible_eth1.ipv4.address` matches nothing, which is why
    # hostvars[host] ended up without `ansible_eth1` in the j2 template.
    # (delegate_facts was dropped: it only has an effect with delegate_to.)
    - name: Gather facts from new server
      setup:
        filter: ansible_eth1

    - name: Debug facts from Server
      debug:
        var: ansible_eth1.ipv4.address

    # The original "UPurge" task (`apt: purge=yes` with no package name)
    # was removed: purge is only meaningful together with name/state=absent.

    - name: Install latest version of Apache
      apt:
        name: apache2
        update_cache: true
        state: latest

    - name: Install latest version of Facter
      apt:
        name: facter
        state: latest

    - name: Create document root for your domain
      file:
        path: "/var/www/{{ http_host }}"
        state: directory
        mode: "0755"

    - name: Copy your index page
      template:
        src: "files/index.html.j2"
        dest: "/var/www/{{ http_host }}/index.html"

    - name: Set up virtualHost
      template:
        src: "files/apache.conf.j2"
        dest: "/etc/apache2/sites-available/{{ http_conf }}"
      notify: restart-apache

    - name: Enable new site {{ http_host }}
      command: a2ensite {{ http_host }}

    - name: Disable default site
      command: a2dissite 000-default
      when: disable_default
      notify: restart-apache

    - name: "UFW firewall allow HTTP on port {{ http_port }}"
      ufw:
        rule: allow
        port: "{{ http_port }}"
        proto: tcp

  handlers:
    - name: restart-apache
      service:
        name: apache2
        state: restarted

# Play 2: install and configure HAProxy on the loadbalancer.
# The template is rendered BEFORE the service is started so haproxy's
# first start already uses the generated configuration.
- hosts: loadbalancer
  become: true
  vars_files:
    - vars/default.yml
  gather_facts: true
  tasks:
    - name: "Installing haproxy"
      package:
        name: "haproxy"
        state: present

    - name: "Configuring haproxy"
      template:
        src: "files/haproxy.conf.j2"
        dest: "/etc/haproxy/haproxy.cfg"
      notify: restart-haproxy

    - name: "Starting haproxy"
      service:
        name: "haproxy"
        state: started
        enabled: true

    - name: "UFW firewall allow Proxy on port {{ proxy_port }}"
      ufw:
        rule: allow
        port: "{{ proxy_port }}"
        proto: tcp

    - name: "UFW firewall allow static port on port {{ staticlb_port }}"
      ufw:
        rule: allow
        port: "{{ staticlb_port }}"
        proto: tcp

    # The trailing "Gather facts from new Server" task was removed: its
    # dotted filter (`ansible_default_ipv4.address`) matched nothing, and
    # it ran after the template that would have needed the facts anyway.

  handlers:
    - name: restart-haproxy
      service:
        name: haproxy
        state: restarted

haproxy.conf.j2

#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   https://www.haproxy.org/download/1.8/doc/configuration.txt
#
#---------------------------------------------------------------------


#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2


    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     1000
    user        haproxy
    group       haproxy
    daemon


    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats


    # utilize system-wide crypto-policies
    ssl-default-bind-ciphers PROFILE=SYSTEM
    ssl-default-server-ciphers PROFILE=SYSTEM


#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000


#---------------------------------------------------------------------
# stats/monitoring listener. `listen` starts its own proxy section; in
# the original it was inlined inside `defaults`, which is invalid and
# kept haproxy from binding port {{ proxy_port }} at all.
#---------------------------------------------------------------------
listen haproxy-monitoring
    bind *:{{ proxy_port }}
    stats enable
    stats uri /


#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main
    bind *:{{ http_port }}
    acl url_static       path_beg       -i /static /images /javascript /stylesheets
    acl url_static       path_end       -i .jpg .gif .png .css .js


    use_backend static          if url_static
    default_backend             app


#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
    server      static 127.0.0.1:{{ staticlb_port }} check


#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
{% for host in groups['webservers'] %}
    {# haproxy requires the `server` keyword plus a unique name #}
    server {{ host }} {{ hostvars[host].ansible_eth1.ipv4.address }}:{{ http_port }} check
{% endfor %}

默认值(vars file)

# Apache vhost name and its config filename under sites-available.
http_host: "hdvld"
http_conf: "hdvld.conf"
# Ports are deliberately quoted strings: they are templated verbatim
# into config files and ufw rules.
http_port: "80"
proxy_port: "8080"
disable_default: true
staticlb_port: "4331"

我做错了,但是我找不到问题。昨天我一直在搜索和尝试整天,所以文件中有一些引用的零件,请忽略它。

**添加了库存文件,

这是库存文件

#    Generated by Vagrant
    
        # Ungrouped host entries: Vagrant tunnels SSH through 127.0.0.1
        # on a distinct forwarded port per VM, with per-machine keys.
        HDVLD-TEST-LB01 ansible_host=127.0.0.1 ansible_port=2200 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-LB01/virtualbox/private_key'
        HDVLD-TEST-WEB02 ansible_host=127.0.0.1 ansible_port=2201 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-WEB02/virtualbox/private_key'
        HDVLD-TEST-WEB01 ansible_host=127.0.0.1 ansible_port=2222 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-WEB01/virtualbox/private_key'
        
        # Group targeted by the Apache play and iterated by haproxy.conf.j2.
        [webservers]
        HDVLD-TEST-WEB01
        HDVLD-TEST-WEB02
        
        # Group targeted by the haproxy play.
        [loadbalancer]
        HDVLD-TEST-LB01


 

I'm currently working on a school assignment where i have to configure HAProxy to loadbalance between my two webservers.

I'm deploying the machines via Vagrant in Virtualbox. After this, my Ansible playbook will run and starts off with configuring the webservers. After the webservers are done, it will configure the loadbalancer.

Sadly, i can't manage to get the ipv4 address of both eth1 adapters added to the HAproxy.conf. I'm repeatedly getting the message that ansible can't find the variable inside the hostvars.

TASK [Configuring haproxy]


fatal: [HDVLD-TEST-LB01]: FAILED! => {"changed": false, "msg":
"AnsibleUndefinedVariable: 'ansible.vars.hostvars.HostVarsVars object'
has no attribute 'ansible_eth1'"}

Adding up to this, HAProxy is not responding on 10.2.2.20:8080 -> Chrome gives me an

ERR_CONNECTION_REFUSED

I hope someone over here can help me out..

I'll paste my code down here.

Vagrantfile

# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
#  config.ssh.insert_key = false

  # Webservers: HDVLD-TEST-WEB01 -> 10.2.2.11, HDVLD-TEST-WEB02 -> 10.2.2.12
  (1..2).each do |i|
    config.vm.define "HDVLD-TEST-WEB0#{i}" do |webserver|
      webserver.vm.box = "ubuntu/trusty64"
      webserver.vm.hostname = "HDVLD-TEST-WEB0#{i}"
      webserver.vm.network :private_network, ip: "10.2.2.1#{i}"
      webserver.vm.provider :virtualbox do |vb|
        vb.memory = "524"
        vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
      end
      webserver.vm.provision "shell" do |shell|
        # Use File.read (not File.readlines): readlines returns an Array,
        # whose string form ("[\"ssh-rsa ...\\n\"]") would be echoed
        # literally into authorized_keys and break SSH key auth.
        ssh_pub_key = File.read("#{Dir.home}/.ssh/id_rsa.pub").strip
        shell.inline = <<-SHELL
          echo "#{ssh_pub_key}" >> /home/vagrant/.ssh/authorized_keys
          echo "#{ssh_pub_key}" >> /root/.ssh/authorized_keys
        SHELL
      end
    end
  end
  # NOTE: the (1..2) loop is closed HERE. In the original the missing `end`
  # left the loadbalancer definition and the ansible provisioner inside the
  # loop, so both were declared twice (once per iteration).

#  config.vm.define "HDVLD-TEST-DB01" do|db_server|
#    db_server.vm.box = "ubuntu/trusty64"
#    db_server.vm.hostname = "HDVLD-TEST-DB01"
#    db_server.vm.network :private_network, ip: "10.2.2.30"
#  end

  # Loadbalancer: HDVLD-TEST-LB01 -> 10.2.2.20
  config.vm.define "HDVLD-TEST-LB01" do |lb_server|
    lb_server.vm.box = "ubuntu/trusty64"
    lb_server.vm.hostname = "HDVLD-TEST-LB01"
    lb_server.vm.network :private_network, ip: "10.2.2.20"
    lb_server.vm.provider :virtualbox do |vb|
      vb.memory = "524"
      vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
    end
    lb_server.vm.provision "shell" do |shell|
      ssh_pub_key = File.read("#{Dir.home}/.ssh/id_rsa.pub").strip
      shell.inline = <<-SHELL
        echo "#{ssh_pub_key}" >> /home/vagrant/.ssh/authorized_keys
        echo "#{ssh_pub_key}" >> /root/.ssh/authorized_keys
      SHELL
    end
  end

  # Single Ansible provisioner for all machines, with the group mapping
  # the playbook's `hosts:` patterns rely on.
  config.vm.provision :ansible do |ansible|
    ansible.playbook = "webserver_test.yml"
    ansible.groups = {
      "webservers" => ["HDVLD-TEST-WEB01", "HDVLD-TEST-WEB02"],
      "loadbalancer" => ["HDVLD-TEST-LB01"]
    }
  end
end

Playbook.yml

---
# Play 1: install and configure Apache on both webservers, and make sure
# the eth1 facts exist in hostvars for the loadbalancer play's template.
- hosts: webservers
  become: true
  vars_files:
    - vars/default.yml
  gather_facts: true
  tasks:
    # `filter` matches TOP-LEVEL fact names only. The original dotted path
    # `ansible_eth1.ipv4.address` matches nothing, which is why
    # hostvars[host] ended up without `ansible_eth1` in the j2 template.
    # (delegate_facts was dropped: it only has an effect with delegate_to.)
    - name: Gather facts from new server
      setup:
        filter: ansible_eth1

    - name: Debug facts from Server
      debug:
        var: ansible_eth1.ipv4.address

    # The original "UPurge" task (`apt: purge=yes` with no package name)
    # was removed: purge is only meaningful together with name/state=absent.

    - name: Install latest version of Apache
      apt:
        name: apache2
        update_cache: true
        state: latest

    - name: Install latest version of Facter
      apt:
        name: facter
        state: latest

    - name: Create document root for your domain
      file:
        path: "/var/www/{{ http_host }}"
        state: directory
        mode: "0755"

    - name: Copy your index page
      template:
        src: "files/index.html.j2"
        dest: "/var/www/{{ http_host }}/index.html"

    - name: Set up virtualHost
      template:
        src: "files/apache.conf.j2"
        dest: "/etc/apache2/sites-available/{{ http_conf }}"
      notify: restart-apache

    - name: Enable new site {{ http_host }}
      command: a2ensite {{ http_host }}

    - name: Disable default site
      command: a2dissite 000-default
      when: disable_default
      notify: restart-apache

    - name: "UFW firewall allow HTTP on port {{ http_port }}"
      ufw:
        rule: allow
        port: "{{ http_port }}"
        proto: tcp

  handlers:
    - name: restart-apache
      service:
        name: apache2
        state: restarted

# Play 2: install and configure HAProxy on the loadbalancer.
# The template is rendered BEFORE the service is started so haproxy's
# first start already uses the generated configuration.
- hosts: loadbalancer
  become: true
  vars_files:
    - vars/default.yml
  gather_facts: true
  tasks:
    - name: "Installing haproxy"
      package:
        name: "haproxy"
        state: present

    - name: "Configuring haproxy"
      template:
        src: "files/haproxy.conf.j2"
        dest: "/etc/haproxy/haproxy.cfg"
      notify: restart-haproxy

    - name: "Starting haproxy"
      service:
        name: "haproxy"
        state: started
        enabled: true

    - name: "UFW firewall allow Proxy on port {{ proxy_port }}"
      ufw:
        rule: allow
        port: "{{ proxy_port }}"
        proto: tcp

    - name: "UFW firewall allow static port on port {{ staticlb_port }}"
      ufw:
        rule: allow
        port: "{{ staticlb_port }}"
        proto: tcp

    # The trailing "Gather facts from new Server" task was removed: its
    # dotted filter (`ansible_default_ipv4.address`) matched nothing, and
    # it ran after the template that would have needed the facts anyway.

  handlers:
    - name: restart-haproxy
      service:
        name: haproxy
        state: restarted

Haproxy.conf.j2

#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   https://www.haproxy.org/download/1.8/doc/configuration.txt
#
#---------------------------------------------------------------------


#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2


    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     1000
    user        haproxy
    group       haproxy
    daemon


    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats


    # utilize system-wide crypto-policies
    ssl-default-bind-ciphers PROFILE=SYSTEM
    ssl-default-server-ciphers PROFILE=SYSTEM


#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000


#---------------------------------------------------------------------
# stats/monitoring listener. `listen` starts its own proxy section; in
# the original it was inlined inside `defaults`, which is invalid and
# kept haproxy from binding port {{ proxy_port }} at all.
#---------------------------------------------------------------------
listen haproxy-monitoring
    bind *:{{ proxy_port }}
    stats enable
    stats uri /


#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main
    bind *:{{ http_port }}
    acl url_static       path_beg       -i /static /images /javascript /stylesheets
    acl url_static       path_end       -i .jpg .gif .png .css .js


    use_backend static          if url_static
    default_backend             app


#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
    server      static 127.0.0.1:{{ staticlb_port }} check


#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
{% for host in groups['webservers'] %}
    {# haproxy requires the `server` keyword plus a unique name #}
    server {{ host }} {{ hostvars[host].ansible_eth1.ipv4.address }}:{{ http_port }} check
{% endfor %}

defaults (vars file)

# Apache vhost name and its config filename under sites-available.
http_host: "hdvld"
http_conf: "hdvld.conf"
# Ports are deliberately quoted strings: they are templated verbatim
# into config files and ufw rules.
http_port: "80"
proxy_port: "8080"
disable_default: true
staticlb_port: "4331"

I'm doing something wrong, but i can't find the issue.. Yesterday I have been searching and trying the whole day so there are some quoted pieces off code inside the files, please ignore it..

** Added the inventory file

This is the inventory file

#    Generated by Vagrant
    
        # Ungrouped host entries: Vagrant tunnels SSH through 127.0.0.1
        # on a distinct forwarded port per VM, with per-machine keys.
        HDVLD-TEST-LB01 ansible_host=127.0.0.1 ansible_port=2200 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-LB01/virtualbox/private_key'
        HDVLD-TEST-WEB02 ansible_host=127.0.0.1 ansible_port=2201 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-WEB02/virtualbox/private_key'
        HDVLD-TEST-WEB01 ansible_host=127.0.0.1 ansible_port=2222 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-WEB01/virtualbox/private_key'
        
        # Group targeted by the Apache play and iterated by haproxy.conf.j2.
        [webservers]
        HDVLD-TEST-WEB01
        HDVLD-TEST-WEB02
        
        # Group targeted by the haproxy play.
        [loadbalancer]
        HDVLD-TEST-LB01


 

如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。

扫码二维码加入Web技术交流群

发布评论

需要 登录 才能够评论, 你可以免费 注册 一个本站的账号。

评论(1

疯了 2025-02-13 20:07:41

在第一部剧本中:(您可以替换前两个任务)

# Play N1: collect every webserver's eth1 IPv4 address into one dict
# (inventory_hostname -> address) and park it on a dummy host so later
# plays can read it via hostvars.
- name: N1
  hosts: webservers
  tasks:
    - name: get eth1 address
      set_fact:
        ips: "{{ ips | d({}) | combine({_ho: _ip}) }}"
      loop: "{{ ansible_play_hosts }}"
      vars:
        _ho: "{{ item }}"
        # Must go through hostvars[item]: a bare `ansible_eth1` resolves to
        # the CURRENT host's fact and would map every key to the same IP.
        _ip: "{{ hostvars[item].ansible_eth1.ipv4.address }}"

    # add_host bypasses the host loop, so the dict is stored once.
    - name: add variables to dummy host
      add_host:
        name: "variable_holder"
        shared_variable: "{{ ips }}"
      run_once: true

在第二次播放中:

# Play N2: read the dict that play N1 parked on the dummy host
# `variable_holder`; hostvars makes it visible across plays.
- name: N2
  hosts: loadbalancer
  gather_facts: true
  vars:
    ips: "{{ hostvars['variable_holder']['shared_variable'] }}"        
  tasks:
    - name: check the value of ips
      debug:
        var: ips
# (the trailing ':' lines below are elision placeholders from the
#  original answer, not valid YAML)
       :
       :

在J2.File Change中:

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
{% for host in groups['webservers'] if ips[host] is defined %}
    {# haproxy requires the `server` keyword plus a unique name #}
    server {{ host }} {{ ips[host] }}:{{ http_port }} check
{% endfor %}



    

in the first play: (you could replace the first 2 tasks)

# Play N1: collect every webserver's eth1 IPv4 address into one dict
# (inventory_hostname -> address) and park it on a dummy host so later
# plays can read it via hostvars.
- name: N1
  hosts: webservers
  tasks:
    - name: get eth1 address
      set_fact:
        ips: "{{ ips | d({}) | combine({_ho: _ip}) }}"
      loop: "{{ ansible_play_hosts }}"
      vars:
        _ho: "{{ item }}"
        # Must go through hostvars[item]: a bare `ansible_eth1` resolves to
        # the CURRENT host's fact and would map every key to the same IP.
        _ip: "{{ hostvars[item].ansible_eth1.ipv4.address }}"

    # add_host bypasses the host loop, so the dict is stored once.
    - name: add variables to dummy host
      add_host:
        name: "variable_holder"
        shared_variable: "{{ ips }}"
      run_once: true

in the second play:

# Second play: read the dict that play N1 parked on the dummy host
# `variable_holder`; hostvars makes it visible across plays.
- name: N2
  hosts: loadbalancer
  gather_facts: true
  vars:
    ips: "{{ hostvars['variable_holder']['shared_variable'] }}"        
  tasks:
    - name: check the value of ips
      debug:
        var: ips
# (the trailing ':' lines below are elision placeholders from the
#  original answer, not valid YAML)
       :
       :

in the j2.file change:

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
{% for host in groups['webservers'] if ips[host] is defined %}
    {# haproxy requires the `server` keyword plus a unique name #}
    server {{ host }} {{ ips[host] }}:{{ http_port }} check
{% endfor %}



    
~没有更多了~
我们使用 Cookies 和其他技术来定制您的体验包括您的登录状态等。通过阅读我们的 隐私政策 了解更多相关信息。 单击 接受 或继续使用网站,即表示您同意使用 Cookies 和您的相关数据。
原文