A grandi linee la procedura sara':
#placement va fatto prima di nova su -s /bin/sh -c "placement-manage db online_data_migrations" placement su -s /bin/sh -c "nova-manage db online_data_migrations" nova su -s /bin/sh -c "cinder-manage db online_data_migrations" cinder |
Controllare se sono installati openstack-client e selinux
[root@controller-01 ~]# yum list installed | grep openstackclient python-openstackclient-lang.noarch 6.6.1-1.el9s @centos-openstack-caracal python3-openstackclient.noarch 6.6.1-1.el9s @centos-openstack-caracal [root@controller-01 ~]# yum list installed | grep openstack-selinux openstack-selinux.noarch 0.8.40-1.el9s @centos-openstack-zed |
Controllare versione kernel e ceph
[root@controller-01 ~]# yum list installed | grep kernel
kernel.x86_64 5.14.0-427.24.1.el9_4 @anaconda
kernel.x86_64 5.14.0-503.33.1.el9_5 @baseos
kernel-core.x86_64 5.14.0-427.24.1.el9_4 @anaconda
kernel-core.x86_64 5.14.0-503.33.1.el9_5 @baseos
kernel-headers.x86_64 5.14.0-503.33.1.el9_5 @appstream
kernel-modules.x86_64 5.14.0-427.24.1.el9_4 @anaconda
kernel-modules.x86_64 5.14.0-503.33.1.el9_5 @baseos
kernel-modules-core.x86_64 5.14.0-427.24.1.el9_4 @anaconda
kernel-modules-core.x86_64 5.14.0-503.33.1.el9_5 @baseos
kernel-srpm-macros.noarch 1.0-13.el9 @appstream
kernel-tools.x86_64 5.14.0-503.33.1.el9_5 @baseos
kernel-tools-libs.x86_64 5.14.0-503.33.1.el9_5 @baseos
[root@controller-01 ~]# yum list installed | grep ceph
blosc.x86_64 1.21.0-3.el9s @centos-ceph-pacific
centos-release-ceph-reef.noarch 1.0-1.el9 @extras
ceph-common.x86_64
2:18.2.4-2.el9s @centos-ceph-reef
[root@controller-01 ~]# uname -a
Linux controller-01.cloud.pd.infn.it 5.14.0-503.33.1.el9_5.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Mar 20 03:39:23 EDT 2025 x86_64 x86_64 x86_64 GNU/Linux |
Rimuovere release Caracal
yum remove centos-release-openstack-caracal.noarch |
Installare Epoxy
dnf install -y https://trunk.rdoproject.org/rdo_release/rdo-release.el9s.rpm (potrebbe servire) dnf install centos-release-openstack-epoxy |
Salvare configurazioni che di solito vengono sovrascritte
export REL=caracal cp /etc/httpd/conf.d/openstack-dashboard.conf /etc/httpd/conf.d/openstack-dashboard.conf.$REL |
Update pacchetti
dnf update -y dnf upgrade -y da verificare e scrivere output |
# DA VERIFICARE PER EPOXY # Nell’update vengono scaricati i nuovi rpm: attenzione a questi file di configurazione cp /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.$REL cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.$REL cp /etc/nova/nova.conf /etc/nova/nova.conf.$REL cp /etc/placement/placement.conf /etc/placement/placement.conf.$REL cp /etc/heat/heat.conf /etc/heat/heat.conf.$REL cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.$REL cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.$REL cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.$REL cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.$REL cp /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini.$REL cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.$REL cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.$REL cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.$REL cp /etc/httpd/conf.d/auth_openidc.conf /etc/httpd/conf.d/auth_openidc.conf.$REL mv -f /etc/openstack-dashboard/local_settings.rpmnew /etc/openstack-dashboard/local_settings mv -f /etc/neutron/neutron.conf.rpmnew /etc/neutron/neutron.conf mv -f /etc/nova/nova.conf.rpmnew /etc/nova/nova.conf mv -f /etc/placement/placement.conf.rpmnew /etc/placement/placement.conf mv -f /etc/heat/heat.conf.rpmnew /etc/heat/heat.conf mv -f /etc/neutron/dhcp_agent.ini.rpmnew /etc/neutron/dhcp_agent.ini mv -f /etc/neutron/l3_agent.ini.rpmnew /etc/neutron/l3_agent.ini mv -f /etc/neutron/metadata_agent.ini.rpmnew /etc/neutron/metadata_agent.ini mv -f /etc/neutron/plugins/ml2/ml2_conf.ini.rpmnew /etc/neutron/plugins/ml2/ml2_conf.ini mv -f /etc/neutron/plugins/ml2/openvswitch_agent.ini.rpmnew /etc/neutron/plugins/ml2/openvswitch_agent.ini mv -f /etc/keystone/keystone.conf.rpmnew /etc/keystone/keystone.conf mv -f /etc/glance/glance-api.conf.rpmnew /etc/glance/glance-api.conf mv -f /etc/cinder/cinder.conf.rpmnew 
/etc/cinder/cinder.conf mv -f /etc/httpd/conf.d/auth_openidc.conf.rpmnew /etc/httpd/conf.d/auth_openidc.conf |
Aggiorniamo le configurazioni con puppet (ATTENZIONE: modificare puppet perche' non faccia partire i servizi una volta aggiornati) puppet agent -t |
KEYSTONE
su -s /bin/sh -c "keystone-manage doctor" keystone su -s /bin/sh -c "keystone-manage db_sync --expand" keystone Dopo l'aggiornamento del controller2 e dopo aver fatto ripartire httpd, si deve eseguire il comando su -s /bin/sh -c "keystone-manage db_sync --contract" keystone |
su -s /bin/sh -c "placement-manage db expand" placement ---> ATTENZIONE: da verificare, altre guide dicono di fare solo il sync, che expand e contract non c'e' per placement... Al cnaf hanno fatto solo il sync accendere il servizio httpd sul controller1 (di fatto keystone, dashboard e placement), systemctl start httpd modificare HA proxy in modo che per questi tre servizi punti al controller 1 spegnere httpd sul controller2 systemctl stop httpd Controllare che funzioni tutto a livello di dashboard, in particolare il calendario prenotazioni GPU (se non funziona interviene Sergio) Quando il controller2 sara' aggiornato su -s /bin/sh -c "placement-manage db contract" placement ( da verificare) |
GLANCE
ATTENZIONE: controllare se c'e' un ordine per l'update di glance (si possono avere due release diverse contemporaneamente?) su -s /bin/sh -c "glance-manage db expand" glance su -s /bin/sh -c "glance-manage db migrate" glance systemctl start openstack-glance-api.service Modificare l'HA proxy in modo che per glance punti al controller1 Spegnere il servizio nel controller2 systemctl stop openstack-glance-api.service Quando anche il controller2 sara' aggiornato, eseguire su -s /bin/sh -c "glance-manage db contract" glance |
su -s /bin/sh -c "nova-status upgrade check" nova
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
Far partire il servizio nel controller1
systemctl start \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
Modificare l'HA in modo che nova punti al controller1
Spegnere il servizio nel controller2
systemctl stop \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
Quando anche il controller2 e tutti i compute saranno aggiornati, eseguire di nuovo
su -s /bin/sh -c "nova-manage db online_data_migrations" nova |
NEUTRON
su -s /bin/sh -c "neutron-db-manage upgrade --expand" neutron Far partire il servizio systemctl start neutron-server.service \ neutron-openvswitch-agent.service neutron-dhcp-agent.service \ neutron-metadata-agent.service systemctl start neutron-l3-agent.service Modificare l'HA per far puntare neutron al controller1 Stoppare il servizio sul controller2 systemctl stop neutron-server.service \ neutron-openvswitch-agent.service neutron-dhcp-agent.service \ neutron-metadata-agent.service systemctl stop neutron-l3-agent.service Quando anche il controller2 sara' aggiornato eseguire il comando su -s /bin/sh -c "neutron-db-manage upgrade --contract" neutron |
CINDER
su -s /bin/sh -c "cinder-manage db sync" cinder Far partire il servizio sul controller1 systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service Modificare l'HA per far puntare il servizio al controller1 Stopparlo sul controller2 systemctl stop openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service Quando il controller2 sara' aggiornato rieseguire le online_data_migrations su -s /bin/sh -c "cinder-manage db online_data_migrations" cinder |
HEAT
su -s /bin/sh -c "heat-manage db_sync --command expand" heat su -s /bin/sh -c "heat-manage db_sync --command migrate_data" heat Accendere il servizio sul controller1 systemctl start openstack-heat-api.service \ openstack-heat-api-cfn.service openstack-heat-engine.service Modificare l'HA per far puntare il servizio nel controller1 e spegnerlo sul controller2 systemctl stop openstack-heat-api.service \ openstack-heat-api-cfn.service openstack-heat-engine.service Quando anche il controller2 sara' aggiornato su -s /bin/sh -c "heat-manage db_sync --command contract" heat |
Possiamo procedere con l'update del controller2
rimuovere Caracal
installare Epoxy
girare puppet
attivare i servizi
(a questo punto possiamo modificare puppet per far partire i servizi)
fare i contract del db per i servizi che lo richiedono
Compute
Mettere in drain un nodo alla volta.
Per il singolo nodo in drain, migrare le VM con live migration quando possibile (altrimenti si spegne e si migra)
In foreman cambiamo la classe per Epoxy
Giro puppet
In caso di nodi con VM che non possono essere migrate come fare l'update (vedi in passato)
in foreman https://cld-config.cloud.pd.infn.it/hosts/controller-01.cloud.pd.infn.it editare l'host sostituendo l'hostgroup "hosts_all/ControllerNode-Test" con "hosts_all/ControllerNode-Test_Epoxy" ed eseguire puppet nel nodo
[root@controller-01 yum.repos.d]# yum install openstack-heat-ui Last metadata expiration check: 1:28:25 ago on Thu 23 Jan 2025 04:37:45 PM CET. Dependencies resolved. ============================================================================================================================================================================================== Package Architecture Version Repository Size ============================================================================================================================================================================================== Installing: openstack-heat-ui noarch 11.0.0-2.el9s centos-openstack-caracal 892 k Installing dependencies: python3-XStatic-Angular-UUID noarch 0.0.4.0-13.el9s centos-openstack-caracal 13 k python3-XStatic-Angular-Vis noarch 4.16.0.0-10.el9s centos-openstack-caracal 13 k python3-XStatic-FileSaver noarch 1.3.2.0-10.el9s centos-openstack-caracal 13 k python3-XStatic-JS-Yaml noarch 3.8.1.0-11.el9s centos-openstack-caracal 13 k python3-XStatic-Json2yaml noarch 0.1.1.0-10.el9s centos-openstack-caracal 13 k xstatic-angular-uuid-common noarch 0.0.4.0-13.el9s centos-openstack-caracal 11 k xstatic-angular-vis-common noarch 4.16.0.0-10.el9s centos-openstack-caracal 9.6 k xstatic-filesaver-common noarch 1.3.2.0-10.el9s centos-openstack-caracal 11 k xstatic-js-yaml-common noarch 3.8.1.0-11.el9s centos-openstack-caracal 30 k xstatic-json2yaml-common noarch 0.1.1.0-10.el9s centos-openstack-caracal 9.2 k Transaction Summary ===================================================== [root@controller-01 keystone]# yum install python3-osc-placement Last metadata expiration check: 2:05:32 ago on Thu 23 Jan 2025 04:37:45 PM CET. Dependencies resolved. 
============================================================================================================================================================================================== Package Architecture Version Repository Size ============================================================================================================================================================================================== Installing: python3-osc-placement noarch 4.3.0-1.el9s centos-openstack-caracal 51 k Transaction Summary ============================================================ |
in caracal abbiamo deciso di utilizzare un rabbit dedicato per il servizio nova, uno per il servizio neutron e uno per tutti gli altri servizi. Va quindi ridefinita la cell
Servizio nova usa rabbit-03
transport_url = rabbit://openstack:RABBIT_zzz@192.168.60.225:5672
Servizio neutron usa rabbit-02
transport_url = rabbit://openstack:RABBIT_zzz@192.168.60.224:5672
gli altri servizi (no keystone) usano rabbit-01
[root@controller-01 etc]# nova-manage cell_v2 list_cells --verbose +-------+--------------------------------------+----------------------------------------------------+----------------------------------------------------------------+----------+ | Name | UUID | Transport URL | Database Connection | Disabled | +-------+--------------------------------------+----------------------------------------------------+----------------------------------------------------------------+----------+ | cell0 | 00000000-0000-0000-0000-000000000000 | none:///// | mysql+pymysql://nova:NOVA_xx_yyy@192.168.60.88:6306/nova_cell0 | False | | cell1 | 8fc9fbbe-697a-4d92-9ff6-cba3feb50b8e | rabbit://openstack:RABBIT_zzz@192.168.60.223:5672 | mysql+pymysql://nova:NOVA_xx_yyy@192.168.60.88:6306/nova | False | +-------+--------------------------------------+----------------------------------------------------+----------------------------------------------------------------+----------+ [root@controller-01 etc]# nova-manage cell_v2 update_cell --cell 8fc9fbbe-697a-4d92-9ff6-cba3feb50b8e --transport-url rabbit://openstack:RABBIT_zzz@192.168.60.225:5672 --database_connection mysql+pymysql://nova:NOVA_xx_yyy@192.168.60.88:6306/nova [root@controller-01 etc]# nova-manage cell_v2 list_cells --verbose +-------+--------------------------------------+----------------------------------------------------+----------------------------------------------------------------+----------+ | Name | UUID | Transport URL | Database Connection | Disabled | +-------+--------------------------------------+----------------------------------------------------+----------------------------------------------------------------+----------+ | cell0 | 00000000-0000-0000-0000-000000000000 | none:///// | mysql+pymysql://nova:NOVA_xx_yyy@192.168.60.88:6306/nova_cell0 | False | | cell1 | 8fc9fbbe-697a-4d92-9ff6-cba3feb50b8e | rabbit://openstack:RABBIT_zzz@192.168.60.225:5672 | mysql+pymysql://nova:NOVA_xx_yyy@192.168.60.88:6306/nova | 
False | +-------+--------------------------------------+----------------------------------------------------+----------------------------------------------------------------+----------+ |
[root@controller-01 log]# yum update \*ceph\* --enablerepo=epel Last metadata expiration check: 1:23:23 ago on Mon 07 Apr 2025 12:52:12 PM CEST. Dependencies resolved. =================================================================================================================================================================================================================== Package Architecture Version Repository Size =================================================================================================================================================================================================================== Upgrading: abseil-cpp x86_64 20211102.0-4.el9 epel 551 k ceph-common x86_64 2:18.2.4-2.el9s centos-ceph-reef 18 M grpc-data noarch 1.46.7-10.el9 epel 19 k libarrow x86_64 9.0.0-13.el9 epel 4.4 M libarrow-doc noarch 9.0.0-13.el9 epel 25 k libcephfs2 x86_64 2:18.2.4-2.el9s centos-ceph-reef 691 k librados2 x86_64 2:18.2.4-2.el9s centos-ceph-reef 3.2 M libradosstriper1 x86_64 2:18.2.4-2.el9s centos-ceph-reef 457 k librbd1 x86_64 2:18.2.4-2.el9s centos-ceph-reef 2.9 M librgw2 x86_64 2:18.2.4-2.el9s centos-ceph-reef 4.4 M parquet-libs x86_64 9.0.0-13.el9 epel 838 k python3-ceph-argparse x86_64 2:18.2.4-2.el9s centos-ceph-reef 46 k python3-ceph-common x86_64 2:18.2.4-2.el9s centos-ceph-reef 130 k python3-cephfs x86_64 2:18.2.4-2.el9s centos-ceph-reef 163 k python3-grpcio x86_64 1.46.7-10.el9 epel 2.0 M python3-rados x86_64 2:18.2.4-2.el9s centos-ceph-reef 320 k python3-rbd x86_64 2:18.2.4-2.el9s centos-ceph-reef 299 k python3-rgw x86_64 2:18.2.4-2.el9s centos-ceph-reef 100 k re2 x86_64 1:20211101-20.el9 epel 191 k thrift x86_64 0.15.0-4.el9 epel 1.6 M Transaction Summary =================================================================================================================================================================================================================== Upgrade 20 Packages |
systemctl enable puppet |
shutdown -r now |