diff --git a/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md b/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md
index 7a8deaa6242..a557f69af07 100644
--- a/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md
+++ b/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md
@@ -5,19 +5,19 @@ The Azure Linux SPEC files originated from a variety of sources with varying lic
| CentOS | [MIT](https://www.centos.org/legal/#licensing-policy) | crash-ptdump-command
delve
fstrm
nodejs-nodemon
rhnlib
rt-setup
rt-tests
rtctl
tuned |
| Ceph source | [LGPL2.1](https://github.com/ceph/ceph/blob/master/COPYING-LGPL2.1) | ceph |
| Debian | [MIT](https://opensource.org/licenses/MIT) | prometheus-process-exporter |
-| Fedora | [Fedora MIT License Declaration](https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#License_of_Fedora_SPEC_Files) | 389-ds-base
a52dec
abseil-cpp
accountsservice
acpica-tools
acpid
adcli
adobe-mappings-cmap
adobe-mappings-pdf
advancecomp
adwaita-icon-theme
afflib
aide
alsa-firmware
alsa-plugins
amtk
amtterm
annobin
ansible-freeipa
archivemount
arptables
arpwatch
asio
aspell
aspell-en
at
at-spi2-atk
at-spi2-core
atf
atk
atop
attr
audiofile
augeas
authbind
authd
authselect
autoconf213
avahi
babeltrace
babeltrace2
babl
baekmuk-ttf-fonts
bats
bcache-tools
biosdevname
blosc
bluez
bmake
bolt
boom-boot
booth
botan2
breezy
brotli
buildah
busybox
bwidget
byacc
ca-certificates
cachefilesd
cairomm
calamares
capnproto
capstone
catatonit
catch
catch1
cdrdao
celt051
cereal
certmonger
cfitsio
cgdcbxd
chan
CharLS
checkpolicy
checksec
chrony
cim-schema
cjkuni-uming-fonts
cjose
ck
cldr-emoji-annotation
clucene
clutter
clutter-gst3
clutter-gtk
cmocka
cogl
collectd
colm
color-filesystem
colord
colorize
compat-lua
compiler-rt
conda
conmon
conntrack-tools
console-setup
container-exception-logger
convmv
corosync
corosync-qdevice
cpp-hocon
cppcheck
cpprest
cpptest
cpufrequtils
cpuid
criu
crypto-policies
cryptsetup
cscope
ctags
CUnit
cups
custodia
Cython
dbus-c++
dbus-python
dconf
dcraw
debootstrap
deltarpm
desktop-file-utils
device-mapper-persistent-data
dhcpcd
dietlibc
diffstat
ding-libs
discount
distribution-gpg-keys
dleyna-connector-dbus
dleyna-core
dmraid
dnf
dnf-plugins-core
docbook-dtds
docbook-simple
docbook-slides
docbook-style-dsssl
docbook-utils
docbook2X
docbook5-schemas
docbook5-style-xsl
dogtail
dos2unix
dotconf
dovecot
dpdk
driverctl
dropwatch
drpm
duktape
dumpet
dvd+rw-tools
dwarves
dwz
dyninst
ebtables
edac-utils
edk2
efax
efi-rpm-macros
egl-wayland
eglexternalplatform
elinks
enca
enchant
enchant2
enscript
environment-modules
erofs-utils
evemu
execstack
exempi
exiv2
expected
extra-cmake-modules
fabtests
facter
fakechroot
fakeroot
fdupes
fence-virt
fetchmail
fftw
filebench
fio
firewalld
flac
flashrom
flatbuffers
flite
fltk
fmt
fontawesome-fonts
fontawesome4-fonts
fontpackages
fonts-rpm-macros
foomatic-db
freeglut
freeipmi
freeradius
freetds
freexl
fribidi
fros
frr
fsverity-utils
fuse-overlayfs
fuse-sshfs
fuse-zip
fuse3
future
fwupd
fwupd-efi
fxload
gavl
gbenchmark
gconf-editor
GConf2
gcovr
gcr
gdal
gdisk
gdk-pixbuf2
generic-logos
genwqe-tools
geoclue2
GeoIP
GeoIP-GeoLite-data
geolite2
geos
gfs2-utils
gi-docgen
giflib
gl-manpages
glew
glm
glog
glslang
glusterfs
gnome-desktop-testing
gnome-doc-utils
gnome-icon-theme
gnome-keyring
gnu-efi
go-rpm-macros
gom
google-api-python-client
google-crosextra-caladea-fonts
google-crosextra-carlito-fonts
google-guice
google-noto-cjk-fonts
google-noto-emoji-fonts
google-roboto-slab-fonts
gphoto2
gpm
gpsbabel
graphene
graphite2
graphviz
grubby
gsettings-desktop-schemas
gsl
gsm
gspell
gssdp
gssntlmssp
gstreamer1
gstreamer1-plugins-base
gtk-vnc
gtk2
gtk3
gtkspell
gupnp
gupnp-av
gupnp-dlna
gupnp-igd
hardening-check
hdf
hdf5
heimdal
help2man
hexedit
hicolor-icon-theme
hiera
highlight
hivex
hostname
hping3
hsakmt
htop
hunspell
hunspell-af
hunspell-ar
hunspell-as
hunspell-ast
hunspell-az
hunspell-be
hunspell-bg
hunspell-bn
hunspell-br
hunspell-ca
hunspell-cop
hunspell-csb
hunspell-cv
hunspell-cy
hunspell-da
hunspell-de
hunspell-dsb
hunspell-el
hunspell-en
hunspell-eo
hunspell-es
hunspell-et
hunspell-eu
hunspell-fa
hunspell-fj
hunspell-fo
hunspell-fr
hunspell-fur
hunspell-fy
hunspell-ga
hunspell-gd
hunspell-gl
hunspell-grc
hunspell-gu
hunspell-gv
hunspell-haw
hunspell-hi
hunspell-hil
hunspell-hr
hunspell-hsb
hunspell-ht
hunspell-hu
hunspell-hy
hunspell-ia
hunspell-id
hunspell-is
hunspell-it
hunspell-kk
hunspell-km
hunspell-kn
hunspell-ko
hunspell-ku
hunspell-ky
hunspell-la
hunspell-lb
hunspell-ln
hunspell-mai
hunspell-mg
hunspell-mi
hunspell-mk
hunspell-ml
hunspell-mn
hunspell-mos
hunspell-mr
hunspell-ms
hunspell-mt
hunspell-nds
hunspell-ne
hunspell-nl
hunspell-no
hunspell-nr
hunspell-nso
hunspell-ny
hunspell-om
hunspell-or
hunspell-pa
hunspell-pl
hunspell-pt
hunspell-quh
hunspell-ro
hunspell-ru
hunspell-rw
hunspell-se
hunspell-shs
hunspell-si
hunspell-sk
hunspell-sl
hunspell-smj
hunspell-so
hunspell-sq
hunspell-sr
hunspell-sv
hunspell-sw
hunspell-ta
hunspell-te
hunspell-tet
hunspell-th
hunspell-tk
hunspell-tl
hunspell-tn
hunspell-tpi
hunspell-ts
hunspell-uk
hunspell-uz
hunspell-ve
hunspell-vi
hunspell-wa
hunspell-xh
hunspell-yi
hwdata
hwloc
hyperscan
hyperv-daemons
hyphen
hyphen-as
hyphen-bg
hyphen-bn
hyphen-ca
hyphen-da
hyphen-de
hyphen-el
hyphen-es
hyphen-fa
hyphen-fo
hyphen-fr
hyphen-ga
hyphen-gl
hyphen-grc
hyphen-gu
hyphen-hi
hyphen-hsb
hyphen-hu
hyphen-ia
hyphen-id
hyphen-is
hyphen-it
hyphen-kn
hyphen-ku
hyphen-lt
hyphen-mi
hyphen-ml
hyphen-mn
hyphen-mr
hyphen-nl
hyphen-or
hyphen-pa
hyphen-pl
hyphen-pt
hyphen-ro
hyphen-ru
hyphen-sa
hyphen-sk
hyphen-sl
hyphen-sv
hyphen-ta
hyphen-te
hyphen-tk
hyphen-uk
ibus
ibus-chewing
ibus-hangul
ibus-kkc
ibus-libzhuyin
ibus-m17n
ibus-rawcode
ibus-sayura
ibus-table
ibus-table-chinese
icc-profiles-openicc
icon-naming-utils
icoutils
iftop
iio-sensor-proxy
ilmbase
im-chooser
imaptest
imsettings
indent
infinipath-psm
inih
iniparser
intel-cmt-cat
intel-ipsec-mb
ioping
IP2Location
ipa-pgothic-fonts
ipcalc
ipmitool
iprutils
iptraf-ng
iptstate
irssi
iscsi-initiator-utils
isns-utils
iso-codes
isomd5sum
iw
iwd
jabberpy
jakarta-servlet
jasper
javapackages-bootstrap
javapackages-tools
jbigkit
jdom2
jemalloc
jfsutils
jimtcl
jose
js-jquery
jsoncpp
Judy
jurand
kata-containers
kde-filesystem
kde-settings
kernel-srpm-macros
kexec-tools
keybinder3
keycloak-httpd-client-install
kf
kf-kconfig
kf-kcoreaddons
kf-ki18n
kf-kwidgetsaddons
kpmcore
kronosnet
ksh
kyotocabinet
kyua
ladspa
lame
langtable
lapack
lasso
latencytop
lato-fonts
lcms2
lcov
ldns
leatherman
ledmon
lensfun
leveldb
lftp
libabw
libaec
libao
libappstream-glib
libarrow
libart_lgpl
libasyncns
libatasmart
libavc1394
libblockdev
libbpf
libbsd
libburn
libbytesize
libcacard
libcanberra
libcbor
libcdio
libcdio-paranoia
libcdr
libcgroup
libchewing
libcli
libcmis
libcmpiutil
libcomps
libcroco
libcxx
libdaemon
libdap
libdatrie
libdazzle
libdbi
libdbusmenu
libdc1394
libdecor
libdeflate
libdmx
libdnf
libdrm
libdvdnav
libdvdread
libdwarf
libeasyfc
libecap
libecb
libei
libell
libEMF
libeot
libepoxy
libepubgen
libesmtp
libetonyek
libev
libevdev
libexif
libexttextcat
libfabric
libfontenc
libfreehand
libftdi
libgadu
libgdither
libgee
libgee06
libgeotiff
libgexiv2
libgit2
libgit2-glib
libglade2
libglvnd
libgovirt
libgphoto2
libgsf
libgta
libguestfs
libgusb
libgxim
libgxps
libhangul
libhugetlbfs
libibcommon
libical
libICE
libicns
libid3tag
libIDL
libidn2
libiec61883
libieee1284
libimobiledevice
libindicator
libinput
libiodbc
libipt
libiptcdata
libiscsi
libisoburn
libisofs
libjaylink
libjcat
libkcapi
libkeepalive
libkkc
libkkc-data
libkml
liblangtag
libldb
libldm
liblerc
liblockfile
liblognorm
liblouis
liblqr-1
liblzf
libmad
libmamba
libmd
libmediaart
libmicrohttpd
libmikmod
libmodman
libmodplug
libmodulemd1
libmpcdec
libmspub
libmtp
libmusicbrainz5
libmwaw
libnbd
libnet
libnetfilter_log
libnfs
libnotify
libntlm
libnumbertext
libnvme
liboauth
libodfgen
libofa
libogg
liboggz
liboil
libomxil-bellagio
libopenraw
liboping
libosinfo
libotf
libotr
libpagemaker
libpaper
libpciaccess
libpeas
libpfm
libpinyin
libplist
libpmemobj-cpp
libpng12
libpng15
libproxy
libpsm2
libpwquality
libqb
libqxp
libraqm
LibRaw
libraw1394
libreport
libreswan
librevenge
librsvg2
librx
libsamplerate
libsass
libsecret
libsemanage
libsigc++20
libsigsegv
libslirp
libSM
libsmbios
libsmi
libsndfile
libsodium
libspiro
libsrtp
libssh
libstaroffice
libstemmer
libstoragemgmt
libtdb
libteam
libtevent
libthai
libtnc
libtomcrypt
libtommath
libtpms
libtracecmd
libtraceevent
libtracefs
libtranslit
libucil
libunicap
libuninameslist
liburing
libusb1
libusbmuxd
libuser
libutempter
libvarlink
libverto
libvirt-dbus
libvirt-glib
libvirt-java
libvirt-python
libvisio
libvisual
libvoikko
libvorbis
libvpx
libwacom
libwnck3
libwpd
libwpe
libwpg
libwps
libwvstreams
libX11
libXau
libXaw
libxcb
libXcomposite
libxcrypt
libXcursor
libxcvt
libXdamage
libXdmcp
libXext
libxfce4util
libXfixes
libXfont2
libXft
libXi
libXinerama
libxkbcommon
libxkbfile
libxklavier
libxmlb
libXmu
libXpm
libXrandr
libXrender
libXres
libXScrnSaver
libxshmfence
libXt
libXtst
libXv
libXxf86vm
libyang
libyubikey
libzip
libzmf
lilv
linuxconsoletools
linuxptp
lksctp-tools
lldpd
lockdev
logwatch
lpsolve
lrzsz
lua
lua-expat
lua-filesystem
lua-json
lua-lpeg
lua-lunitx
lua-rpm-macros
lua-term
luajit
lujavrite
luksmeta
lutok
lv2
lzip
lzop
m17n-db
m17n-lib
mac-robber
mailcap
mailx
malaga
malaga-suomi-voikko
mallard-rng
man-pages-cs
man-pages-es
man-pages-it
man-pages-ja
man-pages-ko
man-pages-pl
man-pages-ru
man-pages-zh-CN
mandoc
mariadb
mariadb-connector-c
mariadb-connector-odbc
marisa
maven-compiler-plugin
maven-jar-plugin
maven-parent
maven-resolver
maven-resources-plugin
maven-surefire
maven-wagon
mcelog
mcpp
mcstrans
mdadm
mdds
mdevctl
meanwhile
mecab
mecab-ipadic
media-player-info
memcached
memkind
mesa
mesa-libGLU
metis
microcode_ctl
microdnf
minicom
minizip-ng
mksh
mobile-broadband-provider-info
mock
mock-core-configs
mod_auth_gssapi
mod_auth_mellon
mod_auth_openidc
mod_authnz_pam
mod_fcgid
mod_http2
mod_intercept_form_submit
mod_lookup_identity
mod_md
mod_security
mod_security_crs
mod_wsgi
mokutil
mosh
mpage
mrtg
mstflint
mt-st
mtdev
mtools
mtr
mtx
munge
mutt
mythes
mythes-bg
mythes-ca
mythes-cs
mythes-da
mythes-de
mythes-el
mythes-en
mythes-eo
mythes-es
mythes-fr
mythes-ga
mythes-hu
mythes-mi
mythes-ne
mythes-nl
mythes-pl
mythes-pt
mythes-ro
mythes-ru
mythes-sk
mythes-sl
mythes-sv
mythes-uk
nbd
nbdkit
neon
netavark
netcdf
netcf
netlabel_tools
netpbm
netsniff-ng
nfs4-acl-tools
nftables
nilfs-utils
nkf
nload
nlopt
nodejs-packaging
nss-mdns
nss-pam-ldapd
nss_nis
nss_wrapper
ntfs-3g
ntfs-3g-system-compression
numad
numatop
numpy
nvml
oath-toolkit
ocaml
ocaml-alcotest
ocaml-astring
ocaml-augeas
ocaml-base
ocaml-bigarray-compat
ocaml-bisect-ppx
ocaml-calendar
ocaml-camlp-streams
ocaml-camlp5
ocaml-camomile
ocaml-cinaps
ocaml-cmdliner
ocaml-compiler-libs-janestreet
ocaml-cppo
ocaml-csexp
ocaml-csv
ocaml-ctypes
ocaml-curses
ocaml-dune
ocaml-extlib
ocaml-fileutils
ocaml-findlib
ocaml-fmt
ocaml-fpath
ocaml-gettext
ocaml-integers
ocaml-libvirt
ocaml-luv
ocaml-lwt
ocaml-markup
ocaml-mmap
ocaml-num
ocaml-ocamlbuild
ocaml-ocplib-endian
ocaml-ounit
ocaml-parsexp
ocaml-pp
ocaml-ppx-derivers
ocaml-ppx-here
ocaml-ppx-let
ocaml-ppxlib
ocaml-re
ocaml-react
ocaml-result
ocaml-seq
ocaml-sexplib
ocaml-sexplib0
ocaml-srpm-macros
ocaml-stdio
ocaml-stdlib-random
ocaml-topkg
ocaml-tyxml
ocaml-uutf
ocaml-xml-light
ocaml-zarith
ocl-icd
oddjob
ogdi
omping
opa
opal
open-vm-tools
openblas
opencc
opencl-filesystem
opencl-headers
opencryptoki
opencsd
opendnssec
OpenEXR
openjade
openjpeg2
openmpi
openobex
openoffice-lv
openrdate
opensc
openslp
opensm
opensp
openssl
openssl-ibmpkcs11
openssl-pkcs11
openwsman
optipng
orangefs
ORBit2
orc
os-prober
osinfo-db
osinfo-db-tools
overpass-fonts
p11-kit
p7zip
pacemaker
pacrunner
pakchois
pam_krb5
pam_wrapper
papi
paps
parallel
passim
patchelf
patchutils
pbzip2
pcp
pcsc-lite
pcsc-lite-ccid
PEGTL
perl
perl-Algorithm-C3
perl-Algorithm-Diff
perl-Alien-Build
perl-Alien-pkgconf
perl-AnyEvent
perl-AnyEvent-AIO
perl-AnyEvent-BDB
perl-App-cpanminus
perl-App-FatPacker
perl-AppConfig
perl-Archive-Extract
perl-Archive-Zip
perl-Authen-SASL
perl-B-COW
perl-B-Debug
perl-B-Hooks-EndOfScope
perl-B-Hooks-OP-Check
perl-B-Keywords
perl-B-Lint
perl-bareword-filehandles
perl-BDB
perl-Bit-Vector
perl-boolean
perl-Browser-Open
perl-BSD-Resource
perl-Business-ISBN
perl-Business-ISBN-Data
perl-Bytes-Random-Secure
perl-Capture-Tiny
perl-Carp-Clan
perl-CBOR-XS
perl-Class-Accessor
perl-Class-C3
perl-Class-C3-XS
perl-Class-Data-Inheritable
perl-Class-Factory-Util
perl-Class-Inspector
perl-Class-ISA
perl-Class-Load
perl-Class-Load-XS
perl-Class-Method-Modifiers
perl-Class-Singleton
perl-Class-Tiny
perl-Class-XSAccessor
perl-Clone
perl-Color-ANSI-Util
perl-Color-RGB-Util
perl-ColorThemeBase-Static
perl-ColorThemeRole-ANSI
perl-ColorThemes-Standard
perl-ColorThemeUtil-ANSI
perl-Compress-Bzip2
perl-Compress-LZF
perl-Compress-Raw-Lzma
perl-Config-AutoConf
perl-Config-INI
perl-Config-INI-Reader-Multiline
perl-Config-IniFiles
perl-Config-Simple
perl-Config-Tiny
perl-Const-Fast
perl-Convert-ASN1
perl-Convert-Bencode
perl-Coro
perl-Coro-Multicore
perl-CPAN-Changes
perl-CPAN-DistnameInfo
perl-CPAN-Meta-Check
perl-Cpanel-JSON-XS
perl-Crypt-CBC
perl-Crypt-DES
perl-Crypt-IDEA
perl-Crypt-OpenSSL-Bignum
perl-Crypt-OpenSSL-Guess
perl-Crypt-OpenSSL-Random
perl-Crypt-OpenSSL-RSA
perl-Crypt-PasswdMD5
perl-Crypt-Random-Seed
perl-CSS-Tiny
perl-Data-Dump
perl-Data-Munge
perl-Data-OptList
perl-Data-Peek
perl-Data-Section
perl-Data-UUID
perl-Date-Calc
perl-Date-ISO8601
perl-Date-Manip
perl-DateTime
perl-DateTime-Format-Builder
perl-DateTime-Format-DateParse
perl-DateTime-Format-HTTP
perl-DateTime-Format-IBeat
perl-DateTime-Format-ISO8601
perl-DateTime-Format-Mail
perl-DateTime-Format-Strptime
perl-DateTime-Locale
perl-DateTime-TimeZone
perl-DateTime-TimeZone-SystemV
perl-DateTime-TimeZone-Tzfile
perl-DBD-MySQL
perl-Devel-CallChecker
perl-Devel-Caller
perl-Devel-CheckBin
perl-Devel-CheckLib
perl-Devel-Cycle
perl-Devel-EnforceEncapsulation
perl-Devel-GlobalDestruction
perl-Devel-GlobalDestruction-XS
perl-Devel-Hide
perl-Devel-Leak
perl-Devel-LexAlias
perl-Devel-Refcount
perl-Devel-Size
perl-Devel-StackTrace
perl-Devel-Symdump
perl-Digest-BubbleBabble
perl-Digest-CRC
perl-Digest-HMAC
perl-Digest-SHA1
perl-Dist-CheckConflicts
perl-DynaLoader-Functions
perl-Email-Address
perl-Email-Date-Format
perl-Encode-Detect
perl-Encode-EUCJPASCII
perl-Encode-IMAPUTF7
perl-Encode-Locale
perl-Env-ShellWords
perl-Error
perl-EV
perl-Eval-Closure
perl-Event
perl-Exception-Class
perl-Expect
perl-ExtUtils-Config
perl-ExtUtils-Depends
perl-ExtUtils-Helpers
perl-ExtUtils-InstallPaths
perl-ExtUtils-PkgConfig
perl-FCGI
perl-Fedora-VSP
perl-FFI-CheckLib
perl-File-BaseDir
perl-File-BOM
perl-File-chdir
perl-File-CheckTree
perl-File-Copy-Recursive
perl-File-DesktopEntry
perl-File-Find-Object
perl-File-Find-Object-Rule
perl-File-Find-Rule
perl-File-Find-Rule-Perl
perl-File-Inplace
perl-File-Listing
perl-File-MimeInfo
perl-File-pushd
perl-File-ReadBackwards
perl-File-Remove
perl-File-ShareDir
perl-File-ShareDir-Install
perl-File-Slurp
perl-File-Slurp-Tiny
perl-File-Slurper
perl-File-TreeCreate
perl-File-Type
perl-Font-TTF
perl-FreezeThaw
perl-GD
perl-GD-Barcode
perl-generators
perl-Getopt-ArgvFile
perl-gettext
perl-Graphics-ColorNamesLite-WWW
perl-GSSAPI
perl-Guard
perl-Hook-LexWrap
perl-HTML-Parser
perl-HTML-Tagset
perl-HTML-Tree
perl-HTTP-Cookies
perl-HTTP-Daemon
perl-HTTP-Date
perl-HTTP-Message
perl-HTTP-Negotiate
perl-Image-Base
perl-Image-Info
perl-Image-Xbm
perl-Image-Xpm
perl-Import-Into
perl-Importer
perl-inc-latest
perl-indirect
perl-Inline-Files
perl-IO-AIO
perl-IO-All
perl-IO-CaptureOutput
perl-IO-Compress-Lzma
perl-IO-HTML
perl-IO-Multiplex
perl-IO-SessionData
perl-IO-Socket-INET6
perl-IO-String
perl-IO-stringy
perl-IO-Tty
perl-IPC-Run
perl-IPC-Run3
perl-IPC-System-Simple
perl-JSON
perl-JSON-Color
perl-JSON-MaybeXS
perl-LDAP
perl-libnet
perl-libwww-perl
perl-libxml-perl
perl-Lingua-EN-Inflect
perl-List-MoreUtils-XS
perl-local-lib
perl-Locale-Codes
perl-Locale-Maketext-Gettext
perl-Locale-Msgfmt
perl-Locale-PO
perl-Log-Message
perl-Log-Message-Simple
perl-LWP-MediaTypes
perl-LWP-Protocol-https
perl-Mail-AuthenticationResults
perl-Mail-DKIM
perl-Mail-IMAPTalk
perl-Mail-SPF
perl-MailTools
perl-Match-Simple
perl-Math-Int64
perl-Math-Random-ISAAC
perl-MIME-Charset
perl-MIME-Lite
perl-MIME-Types
perl-Mixin-Linewise
perl-MLDBM
perl-Mock-Config
perl-Module-Build-Tiny
perl-Module-CPANfile
perl-Module-Implementation
perl-Module-Install-AuthorRequires
perl-Module-Install-AuthorTests
perl-Module-Install-AutoLicense
perl-Module-Install-GithubMeta
perl-Module-Install-ManifestSkip
perl-Module-Install-ReadmeFromPod
perl-Module-Install-ReadmeMarkdownFromPod
perl-Module-Install-Repository
perl-Module-Install-TestBase
perl-Module-Load-Util
perl-Module-Manifest
perl-Module-Manifest-Skip
perl-Module-Package
perl-Module-Package-Au
perl-Module-Pluggable
perl-Module-Runtime
perl-Module-Signature
perl-Mojolicious
perl-Moo
perl-Mozilla-CA
perl-Mozilla-LDAP
perl-MRO-Compat
perl-multidimensional
perl-namespace-autoclean
perl-namespace-clean
perl-Net-CIDR-Lite
perl-Net-Daemon
perl-Net-DNS
perl-Net-DNS-Resolver-Mock
perl-Net-DNS-Resolver-Programmable
perl-Net-HTTP
perl-Net-IMAP-Simple
perl-Net-IMAP-Simple-SSL
perl-Net-IP
perl-Net-LibIDN2
perl-Net-Patricia
perl-Net-SMTP-SSL
perl-Net-SNMP
perl-Net-Telnet
perl-Newt
perl-NNTPClient
perl-NTLM
perl-Number-Compare
perl-Object-Deadly
perl-Object-HashBase
perl-Package-Anon
perl-Package-Constants
perl-Package-DeprecationManager
perl-Package-Generator
perl-Package-Stash
perl-Package-Stash-XS
perl-PadWalker
perl-Paper-Specs
perl-PAR-Dist
perl-Parallel-Iterator
perl-Params-Classify
perl-Params-Util
perl-Params-Validate
perl-Params-ValidationCompiler
perl-Parse-PMFile
perl-Parse-RecDescent
perl-Parse-Yapp
perl-Path-Tiny
perl-Perl-Critic
perl-Perl-Critic-More
perl-Perl-Destruct-Level
perl-Perl-MinimumVersion
perl-Perl4-CoreLibs
perl-PerlIO-gzip
perl-PerlIO-utf8_strict
perl-PkgConfig-LibPkgConf
perl-Pod-Coverage
perl-Pod-Coverage-TrustPod
perl-Pod-Escapes
perl-Pod-Eventual
perl-Pod-LaTeX
perl-Pod-Markdown
perl-Pod-Parser
perl-Pod-Plainer
perl-Pod-POM
perl-Pod-Spell
perl-PPI
perl-PPI-HTML
perl-PPIx-QuoteLike
perl-PPIx-Regexp
perl-PPIx-Utilities
perl-prefork
perl-Probe-Perl
perl-Razor-Agent
perl-Readonly
perl-Readonly-XS
perl-Ref-Util
perl-Ref-Util-XS
perl-Regexp-Pattern-Perl
perl-Return-MultiLevel
perl-Role-Tiny
perl-Scope-Guard
perl-Scope-Upper
perl-SGMLSpm
perl-SNMP_Session
perl-Socket6
perl-Software-License
perl-Sort-Versions
perl-Specio
perl-Spiffy
perl-strictures
perl-String-CRC32
perl-String-Format
perl-String-ShellQuote
perl-String-Similarity
perl-Sub-Exporter
perl-Sub-Exporter-Progressive
perl-Sub-Identify
perl-Sub-Infix
perl-Sub-Info
perl-Sub-Install
perl-Sub-Name
perl-Sub-Quote
perl-Sub-Uplevel
perl-SUPER
perl-Switch
perl-Syntax-Highlight-Engine-Kate
perl-Sys-CPU
perl-Sys-MemInfo
perl-Sys-Virt
perl-Taint-Runtime
perl-Task-Weaken
perl-Term-Size-Any
perl-Term-Size-Perl
perl-Term-Table
perl-Term-UI
perl-TermReadKey
perl-Test-Base
perl-Test-ClassAPI
perl-Test-CPAN-Meta
perl-Test-CPAN-Meta-JSON
perl-Test-Deep
perl-Test-Differences
perl-Test-DistManifest
perl-Test-Distribution
perl-Test-EOL
perl-Test-Exception
perl-Test-Exit
perl-Test-FailWarnings
perl-Test-Fatal
perl-Test-File
perl-Test-File-ShareDir
perl-Test-Harness
perl-Test-HasVersion
perl-Test-InDistDir
perl-Test-Inter
perl-Test-LeakTrace
perl-Test-LongString
perl-Test-Manifest
perl-Test-Memory-Cycle
perl-Test-MinimumVersion
perl-Test-MockObject
perl-Test-MockRandom
perl-Test-Needs
perl-Test-NoTabs
perl-Test-NoWarnings
perl-Test-Object
perl-Test-Output
perl-Test-Pod
perl-Test-Pod-Coverage
perl-Test-Portability-Files
perl-Test-Requires
perl-Test-RequiresInternet
perl-Test-Script
perl-Test-Simple
perl-Test-SubCalls
perl-Test-Synopsis
perl-Test-Taint
perl-Test-TrailingSpace
perl-Test-utf8
perl-Test-Vars
perl-Test-Warn
perl-Test-Without-Module
perl-Test2-Plugin-NoWarnings
perl-Test2-Suite
perl-Test2-Tools-Explain
perl-Text-CharWidth
perl-Text-CSV_XS
perl-Text-Diff
perl-Text-Glob
perl-Text-Iconv
perl-Text-Soundex
perl-Text-Unidecode
perl-Text-WrapI18N
perl-Tie-IxHash
perl-TimeDate
perl-Tree-DAG_Node
perl-Type-Tiny
perl-Unicode-EastAsianWidth
perl-Unicode-LineBreak
perl-Unicode-Map8
perl-Unicode-String
perl-Unicode-UTF8
perl-UNIVERSAL-can
perl-UNIVERSAL-isa
perl-Unix-Syslog
perl-URI
perl-Variable-Magic
perl-Version-Requirements
perl-WWW-RobotRules
perl-XML-Catalog
perl-XML-DOM
perl-XML-Dumper
perl-XML-Filter-BufferText
perl-XML-Generator
perl-XML-Grove
perl-XML-Handler-YAWriter
perl-XML-LibXML
perl-XML-LibXSLT
perl-XML-NamespaceSupport
perl-XML-Parser-Lite
perl-XML-RegExp
perl-XML-SAX
perl-XML-SAX-Base
perl-XML-SAX-Writer
perl-XML-Simple
perl-XML-TokeParser
perl-XML-TreeBuilder
perl-XML-Twig
perl-XML-Writer
perl-XML-XPath
perl-XML-XPathEngine
perl-XString
perl-YAML-LibYAML
perl-YAML-PP
perl-YAML-Syck
perltidy
pesign
phodav
php
php-pear
php-pecl-apcu
php-pecl-zip
physfs
picosat
pinfo
pipewire
pixman
pkcs11-helper
pkgconf
plexus-cipher
plexus-containers
plexus-pom
plexus-sec-dispatcher
plotutils
pmdk-convert
pmix
pngcrush
pngnq
po4a
podman
poetry
policycoreutils
polkit-pkla-compat
polkit-qt-1
portreserve
postfix
potrace
powertop
ppp
pps-tools
pptp
priv_wrapper
procmail
prometheus-node-exporter
ps_mem
psacct
pssh
psutils
ptlib
publicsuffix-list
pugixml
pulseaudio
puppet
pwgen
pyatspi
pybind11
pycairo
pyelftools
pyflakes
pygobject3
PyGreSQL
pykickstart
pylint
pyparted
pyproject-rpm-macros
pyserial
python-absl-py
python-aiodns
python-aiohttp
python-alsa
python-archspec
python-argcomplete
python-argparse-manpage
python-astroid
python-astunparse
python-async-generator
python-augeas
python-azure-sdk
python-backoff
python-beautifulsoup4
python-betamax
python-blinker
python-blivet
python-boltons
python-breathe
python-cached_property
python-cbor2
python-charset-normalizer
python-cheetah
python-click
python-cmd2
python-colorama
python-CommonMark
python-conda-libmamba-solver
python-conda-package-handling
python-conda-package-streaming
python-configshell
python-cpuinfo
python-cups
python-curio
python-cytoolz
python-d2to1
python-dbus-client-gen
python-dbus-python-client-gen
python-dbus-signature-pyparsing
python-dbusmock
python-ddt
python-debtcollector
python-decorator
python-distlib
python-dmidecode
python-dns
python-dtopt
python-dulwich
python-editables
python-enchant
python-entrypoints
python-ethtool
python-evdev
python-extras
python-faker
python-fasteners
python-fastjsonschema
python-fields
python-filelock
python-fixtures
python-flake8
python-flaky
python-flask
python-flit
python-flit-core
python-fluidity-sm
python-frozendict
python-funcsigs
python-gast
python-genshi
python-google-auth
python-google-auth-oauthlib
python-greenlet
python-gssapi
python-h5py
python-hatch-fancy-pypi-readme
python-hatch-vcs
python-hatchling
python-hs-dbus-signature
python-html5lib
python-httplib2
python-humanize
python-hwdata
python-importlib-metadata
python-iniconfig
python-inotify
python-into-dbus-python
python-IPy
python-iso8601
python-isodate
python-isort
python-itsdangerous
python-junit_xml
python-junitxml
python-justbases
python-justbytes
python-jwcrypto
python-jwt
python-kdcproxy
python-kerberos
python-kmod
python-kubernetes
python-lark
python-lazy-object-proxy
python-ldap
python-linux-procfs
python-lit
python-looseversion
python-markdown
python-markdown-it-py
python-mccabe
python-mdurl
python-memcached
python-menuinst
python-mimeparse
python-mock
python-monotonic
python-more-itertools
python-mpmath
python-msal
python-msrestazure
python-mutagen
python-networkx
python-nose2
python-ntlm-auth
python-oauth2client
python-openpyxl
python-openstackdocstheme
python-oslo-i18n
python-oslo-sphinx
python-paramiko
python-pathspec
python-pefile
python-pexpect
python-pkgconfig
python-platformdirs
python-pluggy
python-podman-api
python-poetry-core
python-process-tests
python-productmd
python-prometheus_client
python-ptyprocess
python-pycosat
python-pydbus
python-pymongo
python-PyMySQL
python-pyperclip
python-pyproject-api
python-pyproject-metadata
python-pyroute2
python-pyrsistent
python-pytest-benchmark
python-pytest-cov
python-pytest-expect
python-pytest-flake8
python-pytest-flakes
python-pytest-forked
python-pytest-mock
python-pytest-relaxed
python-pytest-runner
python-pytest-subtests
python-pytest-timeout
python-pytest-xdist
python-pytoml
python-pyudev
python-pywbem
python-qrcode
python-rdflib
python-recommonmark
python-requests-file
python-requests-ftp
python-requests-kerberos
python-requests-mock
python-requests-oauthlib
python-requests-toolbelt
python-requests_ntlm
python-responses
python-retrying
python-rfc3986
python-rich
python-rpm-generators
python-rpmautospec-core
python-rpmfluff
python-rtslib
python-ruamel-yaml
python-ruamel-yaml-clib
python-s3transfer
python-schedutils
python-semantic_version
python-should_dsl
python-simpleline
python-slip
python-smartypants
python-sniffio
python-sortedcontainers
python-soupsieve
python-sphinx
python-sphinx-epytext
python-sphinx-theme-py3doc-enhanced
python-sphinx_rtd_theme
python-sphinxcontrib-apidoc
python-sphinxcontrib-applehelp
python-sphinxcontrib-devhelp
python-sphinxcontrib-htmlhelp
python-sphinxcontrib-httpdomain
python-sphinxcontrib-jquery
python-sphinxcontrib-jsmath
python-sphinxcontrib-qthelp
python-sphinxcontrib-serializinghtml
python-sphinxygen
python-spnego
python-sqlalchemy
python-suds
python-systemd
python-tempita
python-templated-dictionary
python-termcolor
python-testpath
python-testresources
python-testscenarios
python-testtools
python-tidy
python-toml
python-tomli
python-toolz
python-tornado
python-tox
python-tox-current-env
python-tqdm
python-trio
python-trove-classifiers
python-typing-extensions
python-typogrify
python-uamqp
python-uritemplate
python-urwid
python-uswid
python-varlink
python-versioneer
python-virt-firmware
python-voluptuous
python-waitress
python-webencodings
python-webtest
python-wheel
python-whoosh
python-winrm
python-wrapt
python-xlrd
python-xlsxwriter
python-xmltodict
python-yubico
python-zipp
python-zmq
python-zstandard
python-zstd
python3-mallard-ducktype
python3-pycares
python3-pytest-asyncio
python3-typed_ast
pyusb
pywbem
pyxattr
qemu
qhull
qpdf
qperf
qr-code-generator
qt-rpm-macros
qt5-qtconnectivity
qt5-qtsensors
qt5-qtserialport
qtbase
qtdeclarative
qtsvg
qttools
quagga
quota
radvd
ragel
raptor2
rarian
rasdaemon
rasqal
rcs
rdist
rdma-core
re2
re2c
realmd
rear
recode
reproc
resource-agents
rest
rhash
rlwrap
rp-pppoe
rpm-mpi-hooks
rpmdevtools
rpmlint
rr
rtkit
rtl-sdr
ruby-augeas
rubygem-bson
rubygem-coderay
rubygem-diff-lcs
rubygem-flexmock
rubygem-hpricot
rubygem-introspection
rubygem-liquid
rubygem-maruku
rubygem-metaclass
rubygem-mongo
rubygem-mustache
rubygem-mysql2
rubygem-pkg-config
rubygem-rake
rubygem-rake-compiler
rubygem-ronn
rubygem-rouge
rubygem-rspec
rubygem-rspec-expectations
rubygem-rspec-mocks
rubygem-rspec-support
rubygem-scanf
rubygem-sys-filesystem
rubygem-thread_order
rusers
rust-cbindgen
s-nail
samba
sanlock
sassist
satyr
sbc
sblim-cim-client2
sblim-cmpi-base
sblim-cmpi-devel
sblim-cmpi-fsvol
sblim-cmpi-network
sblim-cmpi-nfsv3
sblim-cmpi-nfsv4
sblim-cmpi-params
sblim-cmpi-sysfs
sblim-cmpi-syslog
sblim-indication_helper
sblim-sfcb
sblim-sfcc
sblim-sfcCommon
sblim-testsuite
sblim-wbemcli
scl-utils
scotch
screen
scrub
sdl12-compat
SDL2
SDL_sound
sdparm
seabios
secilc
selinux-policy
serd
setools
setserial
setuptool
sgabios
sgml-common
sgpio
shared-mime-info
sharutils
shim-unsigned-aarch64
shim-unsigned-x64
simdjson
sip
sisu
skkdic
sleuthkit
slirp4netns
smartmontools
smc-tools
socket_wrapper
softhsm
sombok
sord
sos
sound-theme-freedesktop
soundtouch
sox
soxr
sparsehash
spausedd
spdlog
speex
speexdsp
spice-protocol
spice-vdagent
spirv-headers
spirv-tools
splix
squashfs-tools
squid
sratom
sscg
star
startup-notification
stress-ng
strongswan
stunnel
subscription-manager
subunit
suitesparse
SuperLU
supermin
switcheroo-control
swtpm
symlinks
sympy
sysfsutils
systemd
systemd-bootchart
t1lib
t1utils
taglib
tang
targetcli
tbb
tcl-pgtcl
tclx
teckit
telnet
thrift
tidy
time
tini
tinycdb
tix
tk
tlog
tmpwatch
tn5250
tofrodos
tokyocabinet
trace-cmd
tss2
ttembed
ttmkfdir
tuna
twolame
uchardet
uclibc-ng
ucpp
ucs-miscfixed-fonts
ucx
udftools
udica
udisks2
uglify-js
uid_wrapper
umockdev
unicode-emoji
unicode-ucd
unique3
units
upower
uriparser
urlview
usb_modeswitch
usb_modeswitch-data
usbguard
usbip
usbmuxd
usbredir
usermode
ustr
uthash
uuid
uw-imap
v4l-utils
vhostmd
vino
virglrenderer
virt-p2v
virt-top
virt-what
virt-who
vitess
vmem
volume_key
vorbis-tools
vte291
vulkan-headers
vulkan-loader
watchdog
wavpack
wayland
wayland-protocols
web-assets
webrtc-audio-processing
websocketpp
wget
whois
wireguard-tools
wireless-regdb
wireshark
woff2
wordnet
words
wpebackend-fdo
wsmancli
wvdial
x3270
xapian-core
Xaw3d
xcb-proto
xcb-util
xcb-util-image
xcb-util-keysyms
xcb-util-renderutil
xcb-util-wm
xdelta
xdg-dbus-proxy
xdg-utils
xdp-tools
xerces-c
xfconf
xfsdump
xhtml1-dtds
xkeyboard-config
xmlstarlet
xmltoman
xmvn
xorg-x11-apps
xorg-x11-drv-libinput
xorg-x11-font-utils
xorg-x11-fonts
xorg-x11-proto-devel
xorg-x11-server
xorg-x11-server-utils
xorg-x11-server-Xwayland
xorg-x11-util-macros
xorg-x11-utils
xorg-x11-xauth
xorg-x11-xbitmaps
xorg-x11-xinit
xorg-x11-xkb-utils
xorg-x11-xtrans-devel
xpp3
xrestop
xterm
xxhash
yajl
yaml-cpp
yasm
yelp-tools
yelp-xsl
ykclient
yp-tools
ypbind
ypserv
yq
z3
zenity
zerofree
zfs-fuse
zipper
zix
zopfli
zziplib |
+| Fedora | [Fedora MIT License Declaration](https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#License_of_Fedora_SPEC_Files) | 389-ds-base
a52dec
abseil-cpp
accountsservice
acpica-tools
acpid
adcli
adobe-mappings-cmap
adobe-mappings-pdf
advancecomp
adwaita-icon-theme
afflib
aide
alsa-firmware
alsa-plugins
amtk
amtterm
annobin
ansible-freeipa
archivemount
arptables
arpwatch
asio
aspell
aspell-en
at
at-spi2-atk
at-spi2-core
atf
atk
atop
attr
audiofile
augeas
authbind
authd
authselect
autoconf213
avahi
babeltrace
babeltrace2
babl
baekmuk-ttf-fonts
bats
bcache-tools
biosdevname
blosc
bluez
bmake
bolt
boom-boot
booth
botan2
breezy
brotli
buildah
busybox
bwidget
byacc
ca-certificates
cachefilesd
cairomm
calamares
capnproto
capstone
catatonit
catch
catch1
cdrdao
celt051
cereal
certmonger
cfitsio
cgdcbxd
chan
CharLS
checkpolicy
checksec
chrony
cim-schema
cjkuni-uming-fonts
cjose
ck
cldr-emoji-annotation
clucene
clutter
clutter-gst3
clutter-gtk
cmocka
cogl
collectd
colm
color-filesystem
colord
colorize
compat-lua
compiler-rt
conda
conmon
conntrack-tools
console-setup
container-exception-logger
convmv
corosync
corosync-qdevice
cpp-hocon
cppcheck
cpprest
cpptest
cpufrequtils
cpuid
criu
crun
crypto-policies
cryptsetup
cscope
ctags
CUnit
cups
custodia
Cython
dbus-c++
dbus-python
dconf
dcraw
debootstrap
deltarpm
desktop-file-utils
device-mapper-persistent-data
dhcpcd
dietlibc
diffstat
ding-libs
discount
distribution-gpg-keys
dleyna-connector-dbus
dleyna-core
dmraid
dnf
dnf-plugins-core
docbook-dtds
docbook-simple
docbook-slides
docbook-style-dsssl
docbook-utils
docbook2X
docbook5-schemas
docbook5-style-xsl
dogtail
dos2unix
dotconf
dovecot
dpdk
driverctl
dropwatch
drpm
duktape
dumpet
dvd+rw-tools
dwarves
dwz
dyninst
ebtables
edac-utils
edk2
efax
efi-rpm-macros
egl-wayland
eglexternalplatform
elinks
enca
enchant
enchant2
enscript
environment-modules
erofs-utils
evemu
execstack
exempi
exiv2
expected
extra-cmake-modules
fabtests
facter
fakechroot
fakeroot
fdupes
fence-virt
fetchmail
fftw
filebench
fio
firewalld
flac
flashrom
flatbuffers
flite
fltk
fmt
fontawesome-fonts
fontawesome4-fonts
fontpackages
fonts-rpm-macros
foomatic-db
freeglut
freeipmi
freeradius
freetds
freexl
fribidi
fros
frr
fsverity-utils
fuse-overlayfs
fuse-sshfs
fuse-zip
fuse3
future
fwupd
fwupd-efi
fxload
gavl
gbenchmark
gconf-editor
GConf2
gcovr
gcr
gdal
gdisk
gdk-pixbuf2
generic-logos
genwqe-tools
geoclue2
GeoIP
GeoIP-GeoLite-data
geolite2
geos
gfs2-utils
gi-docgen
giflib
gl-manpages
glew
glm
glog
glslang
glusterfs
gnome-desktop-testing
gnome-doc-utils
gnome-icon-theme
gnome-keyring
gnu-efi
go-rpm-macros
gom
google-api-python-client
google-crosextra-caladea-fonts
google-crosextra-carlito-fonts
google-guice
google-noto-cjk-fonts
google-noto-emoji-fonts
google-roboto-slab-fonts
gphoto2
gpm
gpsbabel
graphene
graphite2
graphviz
grubby
gsettings-desktop-schemas
gsl
gsm
gspell
gssdp
gssntlmssp
gstreamer1
gstreamer1-plugins-base
gtk-vnc
gtk2
gtk3
gtkspell
gupnp
gupnp-av
gupnp-dlna
gupnp-igd
hardening-check
hdf
hdf5
heimdal
help2man
hexedit
hicolor-icon-theme
hiera
highlight
hivex
hostname
hping3
hsakmt
htop
hunspell
hunspell-af
hunspell-ar
hunspell-as
hunspell-ast
hunspell-az
hunspell-be
hunspell-bg
hunspell-bn
hunspell-br
hunspell-ca
hunspell-cop
hunspell-csb
hunspell-cv
hunspell-cy
hunspell-da
hunspell-de
hunspell-dsb
hunspell-el
hunspell-en
hunspell-eo
hunspell-es
hunspell-et
hunspell-eu
hunspell-fa
hunspell-fj
hunspell-fo
hunspell-fr
hunspell-fur
hunspell-fy
hunspell-ga
hunspell-gd
hunspell-gl
hunspell-grc
hunspell-gu
hunspell-gv
hunspell-haw
hunspell-hi
hunspell-hil
hunspell-hr
hunspell-hsb
hunspell-ht
hunspell-hu
hunspell-hy
hunspell-ia
hunspell-id
hunspell-is
hunspell-it
hunspell-kk
hunspell-km
hunspell-kn
hunspell-ko
hunspell-ku
hunspell-ky
hunspell-la
hunspell-lb
hunspell-ln
hunspell-mai
hunspell-mg
hunspell-mi
hunspell-mk
hunspell-ml
hunspell-mn
hunspell-mos
hunspell-mr
hunspell-ms
hunspell-mt
hunspell-nds
hunspell-ne
hunspell-nl
hunspell-no
hunspell-nr
hunspell-nso
hunspell-ny
hunspell-om
hunspell-or
hunspell-pa
hunspell-pl
hunspell-pt
hunspell-quh
hunspell-ro
hunspell-ru
hunspell-rw
hunspell-se
hunspell-shs
hunspell-si
hunspell-sk
hunspell-sl
hunspell-smj
hunspell-so
hunspell-sq
hunspell-sr
hunspell-sv
hunspell-sw
hunspell-ta
hunspell-te
hunspell-tet
hunspell-th
hunspell-tk
hunspell-tl
hunspell-tn
hunspell-tpi
hunspell-ts
hunspell-uk
hunspell-uz
hunspell-ve
hunspell-vi
hunspell-wa
hunspell-xh
hunspell-yi
hwdata
hwloc
hyperscan
hyperv-daemons
hyphen
hyphen-as
hyphen-bg
hyphen-bn
hyphen-ca
hyphen-da
hyphen-de
hyphen-el
hyphen-es
hyphen-fa
hyphen-fo
hyphen-fr
hyphen-ga
hyphen-gl
hyphen-grc
hyphen-gu
hyphen-hi
hyphen-hsb
hyphen-hu
hyphen-ia
hyphen-id
hyphen-is
hyphen-it
hyphen-kn
hyphen-ku
hyphen-lt
hyphen-mi
hyphen-ml
hyphen-mn
hyphen-mr
hyphen-nl
hyphen-or
hyphen-pa
hyphen-pl
hyphen-pt
hyphen-ro
hyphen-ru
hyphen-sa
hyphen-sk
hyphen-sl
hyphen-sv
hyphen-ta
hyphen-te
hyphen-tk
hyphen-uk
ibus
ibus-chewing
ibus-hangul
ibus-kkc
ibus-libzhuyin
ibus-m17n
ibus-rawcode
ibus-sayura
ibus-table
ibus-table-chinese
icc-profiles-openicc
icon-naming-utils
icoutils
iftop
iio-sensor-proxy
ilmbase
im-chooser
imaptest
imsettings
indent
infinipath-psm
inih
iniparser
intel-cmt-cat
intel-ipsec-mb
ioping
IP2Location
ipa-pgothic-fonts
ipcalc
ipmitool
iprutils
iptraf-ng
iptstate
irssi
iscsi-initiator-utils
isns-utils
iso-codes
isomd5sum
iw
iwd
jabberpy
jakarta-servlet
jasper
javapackages-bootstrap
javapackages-tools
jbigkit
jdom2
jemalloc
jfsutils
jimtcl
jose
js-jquery
jsoncpp
Judy
jurand
kata-containers
kde-filesystem
kde-settings
kernel-srpm-macros
kexec-tools
keybinder3
keycloak-httpd-client-install
kf
kf-kconfig
kf-kcoreaddons
kf-ki18n
kf-kwidgetsaddons
kpmcore
kronosnet
ksh
kyotocabinet
kyua
ladspa
lame
langtable
lapack
lasso
latencytop
lato-fonts
lcms2
lcov
ldns
leatherman
ledmon
lensfun
leveldb
lftp
libabw
libaec
libao
libappstream-glib
libarrow
libart_lgpl
libasyncns
libatasmart
libavc1394
libblockdev
libbpf
libbsd
libburn
libbytesize
libcacard
libcanberra
libcbor
libcdio
libcdio-paranoia
libcdr
libcgroup
libchewing
libcli
libcmis
libcmpiutil
libcomps
libcroco
libcxx
libdaemon
libdap
libdatrie
libdazzle
libdbi
libdbusmenu
libdc1394
libdecor
libdeflate
libdmx
libdnf
libdrm
libdvdnav
libdvdread
libdwarf
libeasyfc
libecap
libecb
libei
libell
libEMF
libeot
libepoxy
libepubgen
libesmtp
libetonyek
libev
libevdev
libexif
libexttextcat
libfabric
libfontenc
libfreehand
libftdi
libgadu
libgdither
libgee
libgee06
libgeotiff
libgexiv2
libgit2
libgit2-glib
libglade2
libglvnd
libgovirt
libgphoto2
libgsf
libgta
libguestfs
libgusb
libgxim
libgxps
libhangul
libhugetlbfs
libibcommon
libical
libICE
libicns
libid3tag
libIDL
libidn2
libiec61883
libieee1284
libimobiledevice
libindicator
libinput
libiodbc
libipt
libiptcdata
libiscsi
libisoburn
libisofs
libjaylink
libjcat
libkcapi
libkeepalive
libkkc
libkkc-data
libkml
liblangtag
libldb
libldm
liblerc
liblockfile
liblognorm
liblouis
liblqr-1
liblzf
libmad
libmamba
libmd
libmediaart
libmicrohttpd
libmikmod
libmodman
libmodplug
libmodulemd1
libmpcdec
libmspub
libmtp
libmusicbrainz5
libmwaw
libnbd
libnet
libnetfilter_log
libnfs
libnotify
libntlm
libnumbertext
libnvme
liboauth
libodfgen
libofa
libogg
liboggz
liboil
libomxil-bellagio
libopenraw
liboping
libosinfo
libotf
libotr
libpagemaker
libpaper
libpciaccess
libpeas
libpfm
libpinyin
libplist
libpmemobj-cpp
libpng12
libpng15
libproxy
libpsm2
libpwquality
libqb
libqxp
libraqm
LibRaw
libraw1394
libreport
libreswan
librevenge
librsvg2
librx
libsamplerate
libsass
libsecret
libsemanage
libsigc++20
libsigsegv
libslirp
libSM
libsmbios
libsmi
libsndfile
libsodium
libspiro
libsrtp
libssh
libstaroffice
libstemmer
libstoragemgmt
libtdb
libteam
libtevent
libthai
libtnc
libtomcrypt
libtommath
libtpms
libtracecmd
libtraceevent
libtracefs
libtranslit
libucil
libunicap
libuninameslist
liburing
libusb1
libusbmuxd
libuser
libutempter
libvarlink
libverto
libvirt-dbus
libvirt-glib
libvirt-java
libvirt-python
libvisio
libvisual
libvoikko
libvorbis
libvpx
libwacom
libwnck3
libwpd
libwpe
libwpg
libwps
libwvstreams
libX11
libXau
libXaw
libxcb
libXcomposite
libxcrypt
libXcursor
libxcvt
libXdamage
libXdmcp
libXext
libxfce4util
libXfixes
libXfont2
libXft
libXi
libXinerama
libxkbcommon
libxkbfile
libxklavier
libxmlb
libXmu
libXpm
libXrandr
libXrender
libXres
libXScrnSaver
libxshmfence
libXt
libXtst
libXv
libXxf86vm
libyang
libyubikey
libzip
libzmf
lilv
linuxconsoletools
linuxptp
lksctp-tools
lldpd
llhttp
lockdev
logwatch
lpsolve
lrzsz
lua
lua-expat
lua-filesystem
lua-json
lua-lpeg
lua-lunitx
lua-rpm-macros
lua-term
luajit
lujavrite
luksmeta
lutok
lv2
lzip
lzop
m17n-db
m17n-lib
mac-robber
mailcap
mailx
malaga
malaga-suomi-voikko
mallard-rng
man-pages-cs
man-pages-es
man-pages-it
man-pages-ja
man-pages-ko
man-pages-pl
man-pages-ru
man-pages-zh-CN
mandoc
mariadb
mariadb-connector-c
mariadb-connector-odbc
marisa
maven-compiler-plugin
maven-jar-plugin
maven-parent
maven-resolver
maven-resources-plugin
maven-surefire
maven-wagon
mcelog
mcpp
mcstrans
mdadm
mdds
mdevctl
meanwhile
mecab
mecab-ipadic
media-player-info
memcached
memkind
mesa
mesa-libGLU
metis
microcode_ctl
microdnf
minicom
minizip-ng
mksh
mobile-broadband-provider-info
mock
mock-core-configs
mod_auth_gssapi
mod_auth_mellon
mod_auth_openidc
mod_authnz_pam
mod_fcgid
mod_http2
mod_intercept_form_submit
mod_lookup_identity
mod_md
mod_security
mod_security_crs
mod_wsgi
mokutil
mosh
mpage
mrtg
mstflint
mt-st
mtdev
mtools
mtr
mtx
munge
mutt
mythes
mythes-bg
mythes-ca
mythes-cs
mythes-da
mythes-de
mythes-el
mythes-en
mythes-eo
mythes-es
mythes-fr
mythes-ga
mythes-hu
mythes-mi
mythes-ne
mythes-nl
mythes-pl
mythes-pt
mythes-ro
mythes-ru
mythes-sk
mythes-sl
mythes-sv
mythes-uk
nbd
nbdkit
neon
netavark
netcdf
netcf
netlabel_tools
netpbm
netsniff-ng
nfs4-acl-tools
nftables
nilfs-utils
nkf
nload
nlopt
nodejs-packaging
nss-mdns
nss-pam-ldapd
nss_nis
nss_wrapper
ntfs-3g
ntfs-3g-system-compression
numad
numatop
numpy
nvml
oath-toolkit
ocaml
ocaml-alcotest
ocaml-astring
ocaml-augeas
ocaml-base
ocaml-bigarray-compat
ocaml-bisect-ppx
ocaml-calendar
ocaml-camlp-streams
ocaml-camlp5
ocaml-camomile
ocaml-cinaps
ocaml-cmdliner
ocaml-compiler-libs-janestreet
ocaml-cppo
ocaml-csexp
ocaml-csv
ocaml-ctypes
ocaml-curses
ocaml-dune
ocaml-extlib
ocaml-fileutils
ocaml-findlib
ocaml-fmt
ocaml-fpath
ocaml-gettext
ocaml-integers
ocaml-libvirt
ocaml-luv
ocaml-lwt
ocaml-markup
ocaml-mmap
ocaml-num
ocaml-ocamlbuild
ocaml-ocplib-endian
ocaml-ounit
ocaml-parsexp
ocaml-pp
ocaml-ppx-derivers
ocaml-ppx-here
ocaml-ppx-let
ocaml-ppxlib
ocaml-re
ocaml-react
ocaml-result
ocaml-seq
ocaml-sexplib
ocaml-sexplib0
ocaml-srpm-macros
ocaml-stdio
ocaml-stdlib-random
ocaml-topkg
ocaml-tyxml
ocaml-uutf
ocaml-xml-light
ocaml-zarith
ocl-icd
oddjob
ogdi
omping
opa
opal
open-vm-tools
openblas
opencc
opencl-filesystem
opencl-headers
opencryptoki
opencsd
opendnssec
OpenEXR
openjade
openjpeg2
openmpi
openobex
openoffice-lv
openrdate
opensc
openslp
opensm
opensp
openssl
openssl-ibmpkcs11
openssl-pkcs11
openwsman
optipng
orangefs
ORBit2
orc
os-prober
osinfo-db
osinfo-db-tools
overpass-fonts
p11-kit
p7zip
pacemaker
pacrunner
pakchois
pam_krb5
pam_wrapper
papi
paps
parallel
passim
patchelf
patchutils
pbzip2
pcp
pcsc-lite
pcsc-lite-ccid
PEGTL
perl
perl-Algorithm-C3
perl-Algorithm-Diff
perl-Alien-Build
perl-Alien-pkgconf
perl-AnyEvent
perl-AnyEvent-AIO
perl-AnyEvent-BDB
perl-App-cpanminus
perl-App-FatPacker
perl-AppConfig
perl-Archive-Extract
perl-Archive-Zip
perl-Authen-SASL
perl-B-COW
perl-B-Debug
perl-B-Hooks-EndOfScope
perl-B-Hooks-OP-Check
perl-B-Keywords
perl-B-Lint
perl-bareword-filehandles
perl-BDB
perl-Bit-Vector
perl-boolean
perl-Browser-Open
perl-BSD-Resource
perl-Business-ISBN
perl-Business-ISBN-Data
perl-Bytes-Random-Secure
perl-Capture-Tiny
perl-Carp-Clan
perl-CBOR-XS
perl-Class-Accessor
perl-Class-C3
perl-Class-C3-XS
perl-Class-Data-Inheritable
perl-Class-Factory-Util
perl-Class-Inspector
perl-Class-ISA
perl-Class-Load
perl-Class-Load-XS
perl-Class-Method-Modifiers
perl-Class-Singleton
perl-Class-Tiny
perl-Class-XSAccessor
perl-Clone
perl-Color-ANSI-Util
perl-Color-RGB-Util
perl-ColorThemeBase-Static
perl-ColorThemeRole-ANSI
perl-ColorThemes-Standard
perl-ColorThemeUtil-ANSI
perl-Compress-Bzip2
perl-Compress-LZF
perl-Compress-Raw-Lzma
perl-Config-AutoConf
perl-Config-INI
perl-Config-INI-Reader-Multiline
perl-Config-IniFiles
perl-Config-Simple
perl-Config-Tiny
perl-Const-Fast
perl-Convert-ASN1
perl-Convert-Bencode
perl-Coro
perl-Coro-Multicore
perl-CPAN-Changes
perl-CPAN-DistnameInfo
perl-CPAN-Meta-Check
perl-Cpanel-JSON-XS
perl-Crypt-CBC
perl-Crypt-DES
perl-Crypt-IDEA
perl-Crypt-OpenSSL-Bignum
perl-Crypt-OpenSSL-Guess
perl-Crypt-OpenSSL-Random
perl-Crypt-OpenSSL-RSA
perl-Crypt-PasswdMD5
perl-Crypt-Random-Seed
perl-CSS-Tiny
perl-Data-Dump
perl-Data-Munge
perl-Data-OptList
perl-Data-Peek
perl-Data-Section
perl-Data-UUID
perl-Date-Calc
perl-Date-ISO8601
perl-Date-Manip
perl-DateTime
perl-DateTime-Format-Builder
perl-DateTime-Format-DateParse
perl-DateTime-Format-HTTP
perl-DateTime-Format-IBeat
perl-DateTime-Format-ISO8601
perl-DateTime-Format-Mail
perl-DateTime-Format-Strptime
perl-DateTime-Locale
perl-DateTime-TimeZone
perl-DateTime-TimeZone-SystemV
perl-DateTime-TimeZone-Tzfile
perl-DBD-MySQL
perl-Devel-CallChecker
perl-Devel-Caller
perl-Devel-CheckBin
perl-Devel-CheckLib
perl-Devel-Cycle
perl-Devel-EnforceEncapsulation
perl-Devel-GlobalDestruction
perl-Devel-GlobalDestruction-XS
perl-Devel-Hide
perl-Devel-Leak
perl-Devel-LexAlias
perl-Devel-Refcount
perl-Devel-Size
perl-Devel-StackTrace
perl-Devel-Symdump
perl-Digest-BubbleBabble
perl-Digest-CRC
perl-Digest-HMAC
perl-Digest-SHA1
perl-Dist-CheckConflicts
perl-DynaLoader-Functions
perl-Email-Address
perl-Email-Date-Format
perl-Encode-Detect
perl-Encode-EUCJPASCII
perl-Encode-IMAPUTF7
perl-Encode-Locale
perl-Env-ShellWords
perl-Error
perl-EV
perl-Eval-Closure
perl-Event
perl-Exception-Class
perl-Expect
perl-ExtUtils-Config
perl-ExtUtils-Depends
perl-ExtUtils-Helpers
perl-ExtUtils-InstallPaths
perl-ExtUtils-PkgConfig
perl-FCGI
perl-Fedora-VSP
perl-FFI-CheckLib
perl-File-BaseDir
perl-File-BOM
perl-File-chdir
perl-File-CheckTree
perl-File-Copy-Recursive
perl-File-DesktopEntry
perl-File-Find-Object
perl-File-Find-Object-Rule
perl-File-Find-Rule
perl-File-Find-Rule-Perl
perl-File-Inplace
perl-File-Listing
perl-File-MimeInfo
perl-File-pushd
perl-File-ReadBackwards
perl-File-Remove
perl-File-ShareDir
perl-File-ShareDir-Install
perl-File-Slurp
perl-File-Slurp-Tiny
perl-File-Slurper
perl-File-TreeCreate
perl-File-Type
perl-Font-TTF
perl-FreezeThaw
perl-GD
perl-GD-Barcode
perl-generators
perl-Getopt-ArgvFile
perl-gettext
perl-Graphics-ColorNamesLite-WWW
perl-GSSAPI
perl-Guard
perl-Hook-LexWrap
perl-HTML-Parser
perl-HTML-Tagset
perl-HTML-Tree
perl-HTTP-Cookies
perl-HTTP-Daemon
perl-HTTP-Date
perl-HTTP-Message
perl-HTTP-Negotiate
perl-Image-Base
perl-Image-Info
perl-Image-Xbm
perl-Image-Xpm
perl-Import-Into
perl-Importer
perl-inc-latest
perl-indirect
perl-Inline-Files
perl-IO-AIO
perl-IO-All
perl-IO-CaptureOutput
perl-IO-Compress-Lzma
perl-IO-HTML
perl-IO-Multiplex
perl-IO-SessionData
perl-IO-Socket-INET6
perl-IO-String
perl-IO-stringy
perl-IO-Tty
perl-IPC-Run
perl-IPC-Run3
perl-IPC-System-Simple
perl-JSON
perl-JSON-Color
perl-JSON-MaybeXS
perl-LDAP
perl-libnet
perl-libwww-perl
perl-libxml-perl
perl-Lingua-EN-Inflect
perl-List-MoreUtils-XS
perl-local-lib
perl-Locale-Codes
perl-Locale-Maketext-Gettext
perl-Locale-Msgfmt
perl-Locale-PO
perl-Log-Message
perl-Log-Message-Simple
perl-LWP-MediaTypes
perl-LWP-Protocol-https
perl-Mail-AuthenticationResults
perl-Mail-DKIM
perl-Mail-IMAPTalk
perl-Mail-SPF
perl-MailTools
perl-Match-Simple
perl-Math-Int64
perl-Math-Random-ISAAC
perl-MIME-Charset
perl-MIME-Lite
perl-MIME-Types
perl-Mixin-Linewise
perl-MLDBM
perl-Mock-Config
perl-Module-Build-Tiny
perl-Module-CPANfile
perl-Module-Implementation
perl-Module-Install-AuthorRequires
perl-Module-Install-AuthorTests
perl-Module-Install-AutoLicense
perl-Module-Install-GithubMeta
perl-Module-Install-ManifestSkip
perl-Module-Install-ReadmeFromPod
perl-Module-Install-ReadmeMarkdownFromPod
perl-Module-Install-Repository
perl-Module-Install-TestBase
perl-Module-Load-Util
perl-Module-Manifest
perl-Module-Manifest-Skip
perl-Module-Package
perl-Module-Package-Au
perl-Module-Pluggable
perl-Module-Runtime
perl-Module-Signature
perl-Mojolicious
perl-Moo
perl-Mozilla-CA
perl-Mozilla-LDAP
perl-MRO-Compat
perl-multidimensional
perl-namespace-autoclean
perl-namespace-clean
perl-Net-CIDR-Lite
perl-Net-Daemon
perl-Net-DNS
perl-Net-DNS-Resolver-Mock
perl-Net-DNS-Resolver-Programmable
perl-Net-HTTP
perl-Net-IMAP-Simple
perl-Net-IMAP-Simple-SSL
perl-Net-IP
perl-Net-LibIDN2
perl-Net-Patricia
perl-Net-SMTP-SSL
perl-Net-SNMP
perl-Net-Telnet
perl-Newt
perl-NNTPClient
perl-NTLM
perl-Number-Compare
perl-Object-Deadly
perl-Object-HashBase
perl-Package-Anon
perl-Package-Constants
perl-Package-DeprecationManager
perl-Package-Generator
perl-Package-Stash
perl-Package-Stash-XS
perl-PadWalker
perl-Paper-Specs
perl-PAR-Dist
perl-Parallel-Iterator
perl-Params-Classify
perl-Params-Util
perl-Params-Validate
perl-Params-ValidationCompiler
perl-Parse-PMFile
perl-Parse-RecDescent
perl-Parse-Yapp
perl-Path-Tiny
perl-Perl-Critic
perl-Perl-Critic-More
perl-Perl-Destruct-Level
perl-Perl-MinimumVersion
perl-Perl4-CoreLibs
perl-PerlIO-gzip
perl-PerlIO-utf8_strict
perl-PkgConfig-LibPkgConf
perl-Pod-Coverage
perl-Pod-Coverage-TrustPod
perl-Pod-Escapes
perl-Pod-Eventual
perl-Pod-LaTeX
perl-Pod-Markdown
perl-Pod-Parser
perl-Pod-Plainer
perl-Pod-POM
perl-Pod-Spell
perl-PPI
perl-PPI-HTML
perl-PPIx-QuoteLike
perl-PPIx-Regexp
perl-PPIx-Utilities
perl-prefork
perl-Probe-Perl
perl-Razor-Agent
perl-Readonly
perl-Readonly-XS
perl-Ref-Util
perl-Ref-Util-XS
perl-Regexp-Pattern-Perl
perl-Return-MultiLevel
perl-Role-Tiny
perl-Scope-Guard
perl-Scope-Upper
perl-SGMLSpm
perl-SNMP_Session
perl-Socket6
perl-Software-License
perl-Sort-Versions
perl-Specio
perl-Spiffy
perl-strictures
perl-String-CRC32
perl-String-Format
perl-String-ShellQuote
perl-String-Similarity
perl-Sub-Exporter
perl-Sub-Exporter-Progressive
perl-Sub-Identify
perl-Sub-Infix
perl-Sub-Info
perl-Sub-Install
perl-Sub-Name
perl-Sub-Quote
perl-Sub-Uplevel
perl-SUPER
perl-Switch
perl-Syntax-Highlight-Engine-Kate
perl-Sys-CPU
perl-Sys-MemInfo
perl-Sys-Virt
perl-Taint-Runtime
perl-Task-Weaken
perl-Term-Size-Any
perl-Term-Size-Perl
perl-Term-Table
perl-Term-UI
perl-TermReadKey
perl-Test-Base
perl-Test-ClassAPI
perl-Test-CPAN-Meta
perl-Test-CPAN-Meta-JSON
perl-Test-Deep
perl-Test-Differences
perl-Test-DistManifest
perl-Test-Distribution
perl-Test-EOL
perl-Test-Exception
perl-Test-Exit
perl-Test-FailWarnings
perl-Test-Fatal
perl-Test-File
perl-Test-File-ShareDir
perl-Test-Harness
perl-Test-HasVersion
perl-Test-InDistDir
perl-Test-Inter
perl-Test-LeakTrace
perl-Test-LongString
perl-Test-Manifest
perl-Test-Memory-Cycle
perl-Test-MinimumVersion
perl-Test-MockObject
perl-Test-MockRandom
perl-Test-Needs
perl-Test-NoTabs
perl-Test-NoWarnings
perl-Test-Object
perl-Test-Output
perl-Test-Pod
perl-Test-Pod-Coverage
perl-Test-Portability-Files
perl-Test-Requires
perl-Test-RequiresInternet
perl-Test-Script
perl-Test-SubCalls
perl-Test-Synopsis
perl-Test-Taint
perl-Test-TrailingSpace
perl-Test-utf8
perl-Test-Vars
perl-Test-Warn
perl-Test-Without-Module
perl-Test2-Plugin-NoWarnings
perl-Test2-Suite
perl-Test2-Tools-Explain
perl-Text-CharWidth
perl-Text-CSV_XS
perl-Text-Diff
perl-Text-Glob
perl-Text-Iconv
perl-Text-Soundex
perl-Text-Unidecode
perl-Text-WrapI18N
perl-Tie-IxHash
perl-TimeDate
perl-Tree-DAG_Node
perl-Type-Tiny
perl-Unicode-EastAsianWidth
perl-Unicode-LineBreak
perl-Unicode-Map8
perl-Unicode-String
perl-Unicode-UTF8
perl-UNIVERSAL-can
perl-UNIVERSAL-isa
perl-Unix-Syslog
perl-URI
perl-Variable-Magic
perl-Version-Requirements
perl-WWW-RobotRules
perl-XML-Catalog
perl-XML-DOM
perl-XML-Dumper
perl-XML-Filter-BufferText
perl-XML-Generator
perl-XML-Grove
perl-XML-Handler-YAWriter
perl-XML-LibXML
perl-XML-LibXSLT
perl-XML-NamespaceSupport
perl-XML-Parser-Lite
perl-XML-RegExp
perl-XML-SAX
perl-XML-SAX-Base
perl-XML-SAX-Writer
perl-XML-Simple
perl-XML-TokeParser
perl-XML-TreeBuilder
perl-XML-Twig
perl-XML-Writer
perl-XML-XPath
perl-XML-XPathEngine
perl-XString
perl-YAML-LibYAML
perl-YAML-PP
perl-YAML-Syck
perltidy
pesign
phodav
php
php-pear
php-pecl-apcu
php-pecl-zip
physfs
picosat
pinfo
pipewire
pixman
pkcs11-helper
pkgconf
plexus-cipher
plexus-containers
plexus-pom
plexus-sec-dispatcher
plotutils
pmdk-convert
pmix
pngcrush
pngnq
po4a
podman
poetry
policycoreutils
polkit-pkla-compat
polkit-qt-1
portreserve
postfix
potrace
powertop
ppp
pps-tools
pptp
priv_wrapper
procmail
prometheus-node-exporter
ps_mem
psacct
pssh
psutils
ptlib
publicsuffix-list
pugixml
pulseaudio
puppet
pwgen
pyatspi
pybind11
pycairo
pyelftools
pyflakes
pygobject3
PyGreSQL
pykickstart
pylint
pyparted
pyproject-rpm-macros
pyserial
python-absl-py
python-aiodns
python-aiohttp
python-alsa
python-archspec
python-argcomplete
python-argparse-manpage
python-astroid
python-astunparse
python-async-generator
python-augeas
python-azure-sdk
python-backoff
python-beautifulsoup4
python-betamax
python-blinker
python-blivet
python-boltons
python-breathe
python-cached_property
python-cbor2
python-charset-normalizer
python-cheetah
python-click
python-cmd2
python-colorama
python-CommonMark
python-conda-libmamba-solver
python-conda-package-handling
python-conda-package-streaming
python-configshell
python-cpuinfo
python-cups
python-curio
python-cytoolz
python-d2to1
python-dbus-client-gen
python-dbus-python-client-gen
python-dbus-signature-pyparsing
python-dbusmock
python-ddt
python-debtcollector
python-decorator
python-distlib
python-dmidecode
python-dns
python-dtopt
python-dulwich
python-editables
python-enchant
python-entrypoints
python-ethtool
python-evdev
python-extras
python-faker
python-fasteners
python-fastjsonschema
python-fields
python-filelock
python-fixtures
python-flake8
python-flaky
python-flask
python-flit
python-flit-core
python-fluidity-sm
python-frozendict
python-funcsigs
python-gast
python-genshi
python-google-auth
python-google-auth-oauthlib
python-greenlet
python-gssapi
python-h5py
python-hatch-fancy-pypi-readme
python-hatch-vcs
python-hatchling
python-hs-dbus-signature
python-html5lib
python-httplib2
python-humanize
python-hwdata
python-importlib-metadata
python-iniconfig
python-inotify
python-into-dbus-python
python-IPy
python-iso8601
python-isodate
python-isort
python-itsdangerous
python-junit_xml
python-junitxml
python-justbases
python-justbytes
python-jwcrypto
python-jwt
python-kdcproxy
python-kerberos
python-kmod
python-kubernetes
python-lark
python-lazy-object-proxy
python-ldap
python-linux-procfs
python-lit
python-looseversion
python-markdown
python-markdown-it-py
python-mccabe
python-mdurl
python-memcached
python-menuinst
python-mimeparse
python-mock
python-monotonic
python-more-itertools
python-mpmath
python-msal
python-msrestazure
python-mutagen
python-networkx
python-nose2
python-ntlm-auth
python-oauth2client
python-openpyxl
python-openstackdocstheme
python-oslo-i18n
python-oslo-sphinx
python-paramiko
python-pathspec
python-pefile
python-pexpect
python-pkgconfig
python-platformdirs
python-pluggy
python-podman-api
python-poetry-core
python-process-tests
python-productmd
python-prometheus_client
python-ptyprocess
python-pycosat
python-pydbus
python-pymongo
python-PyMySQL
python-pyperclip
python-pyproject-api
python-pyproject-metadata
python-pyroute2
python-pyrsistent
python-pytest-benchmark
python-pytest-cov
python-pytest-expect
python-pytest-flake8
python-pytest-flakes
python-pytest-forked
python-pytest-mock
python-pytest-relaxed
python-pytest-runner
python-pytest-subtests
python-pytest-timeout
python-pytest-xdist
python-pytoml
python-pyudev
python-pywbem
python-qrcode
python-rdflib
python-recommonmark
python-requests-file
python-requests-ftp
python-requests-kerberos
python-requests-mock
python-requests-oauthlib
python-requests-toolbelt
python-requests_ntlm
python-responses
python-retrying
python-rfc3986
python-rich
python-rpm-generators
python-rpmautospec-core
python-rpmfluff
python-rtslib
python-ruamel-yaml
python-ruamel-yaml-clib
python-s3transfer
python-schedutils
python-scikit-build-core
python-semantic_version
python-should_dsl
python-simpleline
python-slip
python-smartypants
python-sniffio
python-sortedcontainers
python-soupsieve
python-sphinx
python-sphinx-epytext
python-sphinx-theme-py3doc-enhanced
python-sphinx_rtd_theme
python-sphinxcontrib-apidoc
python-sphinxcontrib-applehelp
python-sphinxcontrib-devhelp
python-sphinxcontrib-htmlhelp
python-sphinxcontrib-httpdomain
python-sphinxcontrib-jquery
python-sphinxcontrib-jsmath
python-sphinxcontrib-qthelp
python-sphinxcontrib-serializinghtml
python-sphinxygen
python-spnego
python-sqlalchemy
python-suds
python-systemd
python-tempita
python-templated-dictionary
python-termcolor
python-testpath
python-testresources
python-testscenarios
python-testtools
python-tidy
python-toml
python-tomli
python-toolz
python-tornado
python-tox
python-tox-current-env
python-tqdm
python-trio
python-trove-classifiers
python-typing-extensions
python-typogrify
python-uamqp
python-uritemplate
python-urwid
python-uswid
python-varlink
python-versioneer
python-virt-firmware
python-voluptuous
python-waitress
python-webencodings
python-webtest
python-wheel
python-whoosh
python-winrm
python-wrapt
python-xlrd
python-xlsxwriter
python-xmltodict
python-yubico
python-zipp
python-zmq
python-zstandard
python-zstd
python3-mallard-ducktype
python3-pycares
python3-pytest-asyncio
python3-typed_ast
pyusb
pywbem
pyxattr
qemu
qhull
qpdf
qperf
qr-code-generator
qt-rpm-macros
qt6-qtconnectivity
qt6-qtsensors
qt6-qtserialport
qtbase
qtdeclarative
qtsvg
qttools
quagga
quota
radvd
ragel
raptor2
rarian
rasdaemon
rasqal
rcs
rdist
rdma-core
re2
re2c
realmd
rear
recode
reproc
resource-agents
rest
rhash
rlwrap
rp-pppoe
rpm-mpi-hooks
rpmdevtools
rpmlint
rr
rtkit
rtl-sdr
ruby-augeas
rubygem-bson
rubygem-coderay
rubygem-diff-lcs
rubygem-flexmock
rubygem-hpricot
rubygem-introspection
rubygem-liquid
rubygem-maruku
rubygem-metaclass
rubygem-mongo
rubygem-mustache
rubygem-mysql2
rubygem-pkg-config
rubygem-rake
rubygem-rake-compiler
rubygem-ronn
rubygem-rouge
rubygem-rspec
rubygem-rspec-expectations
rubygem-rspec-mocks
rubygem-rspec-support
rubygem-scanf
rubygem-sys-filesystem
rubygem-thread_order
rusers
rust-cbindgen
s-nail
samba
sanlock
sassist
satyr
sbc
sblim-cim-client2
sblim-cmpi-base
sblim-cmpi-devel
sblim-cmpi-fsvol
sblim-cmpi-network
sblim-cmpi-nfsv3
sblim-cmpi-nfsv4
sblim-cmpi-params
sblim-cmpi-sysfs
sblim-cmpi-syslog
sblim-indication_helper
sblim-sfcb
sblim-sfcc
sblim-sfcCommon
sblim-testsuite
sblim-wbemcli
scl-utils
scotch
screen
scrub
sdl12-compat
SDL2
SDL_sound
sdparm
seabios
secilc
selinux-policy
serd
setools
setserial
setuptool
sgabios
sgml-common
sgpio
shared-mime-info
sharutils
shim-unsigned-aarch64
shim-unsigned-x64
simdjson
sip
sisu
skkdic
sleuthkit
slirp4netns
smartmontools
smc-tools
socket_wrapper
softhsm
sombok
sord
sos
sound-theme-freedesktop
soundtouch
sox
soxr
sparsehash
spausedd
spdlog
speex
speexdsp
spice-protocol
spice-vdagent
spirv-headers
spirv-tools
splix
squashfs-tools
squid
sratom
sscg
star
startup-notification
stress-ng
strongswan
stunnel
subscription-manager
subunit
suitesparse
SuperLU
supermin
switcheroo-control
swtpm
symlinks
sympy
sysfsutils
systemd
systemd-bootchart
t1lib
t1utils
taglib
tang
targetcli
tbb
tcl-pgtcl
tclx
teckit
telnet
thrift
tidy
time
tini
tinycdb
tix
tk
tlog
tmpwatch
tn5250
tofrodos
tokyocabinet
trace-cmd
tss2
ttembed
ttmkfdir
tuna
twolame
uchardet
uclibc-ng
ucpp
ucs-miscfixed-fonts
ucx
udftools
udica
udisks2
uglify-js
uid_wrapper
umockdev
unicode-emoji
unicode-ucd
unique3
units
upower
uriparser
urlview
usb_modeswitch
usb_modeswitch-data
usbguard
usbip
usbmuxd
usbredir
usermode
ustr
uthash
uuid
uw-imap
v4l-utils
vhostmd
vino
virglrenderer
virt-p2v
virt-top
virt-what
virt-who
vitess
vmem
volume_key
vorbis-tools
vte291
vulkan-headers
vulkan-loader
watchdog
wavpack
wayland
wayland-protocols
web-assets
webrtc-audio-processing
websocketpp
wget
whois
wireguard-tools
wireless-regdb
wireshark
woff2
wordnet
words
wpebackend-fdo
wsmancli
wvdial
x3270
xapian-core
Xaw3d
xcb-proto
xcb-util
xcb-util-image
xcb-util-keysyms
xcb-util-renderutil
xcb-util-wm
xdelta
xdg-dbus-proxy
xdg-utils
xdp-tools
xerces-c
xfconf
xfsdump
xhtml1-dtds
xkeyboard-config
xmlstarlet
xmltoman
xmvn
xorg-x11-apps
xorg-x11-drv-libinput
xorg-x11-font-utils
xorg-x11-fonts
xorg-x11-proto-devel
xorg-x11-server
xorg-x11-server-utils
xorg-x11-server-Xwayland
xorg-x11-util-macros
xorg-x11-utils
xorg-x11-xauth
xorg-x11-xbitmaps
xorg-x11-xinit
xorg-x11-xkb-utils
xorg-x11-xtrans-devel
xpp3
xrestop
xterm
xxhash
yajl
yaml-cpp
yasm
yelp-tools
yelp-xsl
ykclient
yp-tools
ypbind
ypserv
yq
z3
zenity
zerofree
zfs-fuse
zipper
zix
zopfli
zziplib | | Fedora (Copyright Remi Collet) | [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) | libmemcached-awesome
librabbitmq | | Fedora (ISC) | [ISC License](https://github.com/sarugaku/resolvelib/blob/main/LICENSE) | python-resolvelib | | Magnus Edenhill Open Source | [Magnus Edenhill Open Source BSD License](https://github.com/jemalloc/jemalloc/blob/dev/COPYING) | librdkafka | | Microsoft | [Microsoft MIT License](/LICENSES-AND-NOTICES/LICENSE.md) | application-gateway-kubernetes-ingress
asc
azcopy
azl-otel-collector
azure-iot-sdk-c
azure-nvme-utils
azure-storage-cpp
azurelinux-image-tools
azurelinux-release
azurelinux-repos
azurelinux-rpm-macros
azurelinux-sysinfo
bazel
bmon
bpftrace
ccache
cert-manager
cf-cli
check-restart
clamav
cloud-hypervisor
cloud-provider-kubevirt
cmake-fedora
containerd2
coredns
dasel
dcos-cli
debugedit
dejavu-fonts
distroless-packages
docker-buildx
docker-cli
docker-compose
doxygen
dtc
edk2-hvloader-signed
elixir
espeak-ng
espeakup
flannel
fluent-bit
freefont
gflags
gh
go-md2man
grpc
grub2-efi-binary-signed
GSL
gtk-update-icon-cache
intel-pf-bb-config
ivykis
jsonbuilder
jx
kata-containers-cc
kata-packages-uvm
keda
keras
kernel-64k-signed
kernel-hwe-signed
kernel-mshv-signed
kernel-signed
kernel-uki
kernel-uki-signed
kpatch
kube-vip-cloud-provider
kubernetes
libacvp
libconfini
libconfuse
libgdiplus
libimobiledevice-glue
libmaxminddb
libmetalink
libsafec
libuv
libxml++
lld
lsb-release
ltp
lttng-consume
mm-common
moby-containerd-cc
moby-engine
msgpack
ncompress
networkd-dispatcher
nlohmann-json
nmap
ntopng
opentelemetry-cpp
packer
pcaudiolib
pcre2
perl-Test-Warnings
perl-Text-Template
pigz
prebuilt-ca-certificates
prebuilt-ca-certificates-base
prometheus-adapter
python-cachetools
python-cherrypy
python-cstruct
python-execnet
python-google-pasta
python-libclang
python-libevdev
python-logutils
python-ml-dtypes
python-namex
python-nocasedict
python-omegaconf
python-opt-einsum
python-optree
python-pecan
python-pip
python-pyrpm
python-remoto
python-repoze-lru
python-routes
python-rsa
python-setuptools
python-sphinxcontrib-websupport
python-tensorboard
python-tensorboard-plugin-wit
python-yamlloader
R
rabbitmq-server
rocksdb
rubygem-addressable
rubygem-asciidoctor
rubygem-bindata
rubygem-concurrent-ruby
rubygem-connection_pool
rubygem-cool.io
rubygem-deep_merge
rubygem-digest-crc
rubygem-elastic-transport
rubygem-elasticsearch
rubygem-elasticsearch-api
rubygem-eventmachine
rubygem-excon
rubygem-faraday
rubygem-faraday-em_http
rubygem-faraday-em_synchrony
rubygem-faraday-excon
rubygem-faraday-httpclient
rubygem-faraday-multipart
rubygem-faraday-net_http
rubygem-faraday-net_http_persistent
rubygem-faraday-rack
rubygem-faraday-retry
rubygem-ffi
rubygem-fiber-local
rubygem-hirb
rubygem-hocon
rubygem-hoe
rubygem-http_parser
rubygem-httpclient
rubygem-io-event
rubygem-jmespath
rubygem-ltsv
rubygem-mini_portile2
rubygem-minitest
rubygem-mocha
rubygem-msgpack
rubygem-multi_json
rubygem-multipart-post
rubygem-net-http-persistent
rubygem-nio4r
rubygem-nokogiri
rubygem-oj
rubygem-parallel
rubygem-power_assert
rubygem-prometheus-client
rubygem-protocol-hpack
rubygem-protocol-http
rubygem-protocol-http1
rubygem-protocol-http2
rubygem-public_suffix
rubygem-puppet-resource_api
rubygem-rdiscount
rubygem-rdkafka
rubygem-rexml
rubygem-ruby-kafka
rubygem-ruby-progressbar
rubygem-rubyzip
rubygem-semantic_puppet
rubygem-serverengine
rubygem-sigdump
rubygem-strptime
rubygem-systemd-journal
rubygem-test-unit
rubygem-thor
rubygem-timers
rubygem-tzinfo
rubygem-tzinfo-data
rubygem-webhdfs
rubygem-webrick
rubygem-yajl-ruby
rubygem-zip-zip
runc
sdbus-cpp
sgx-backwards-compatibility
shim
skopeo
span-lite
sriov-network-device-plugin
SymCrypt
SymCrypt-OpenSSL
systemd-boot-signed
tardev-snapshotter
tensorflow
tinyxml2
toml11
tracelogging
umoci
usrsctp
vala
valkey
vnstat
zstd | | Netplan source | [GPLv3](https://github.com/canonical/netplan/blob/main/COPYING) | netplan | | Numad source | [LGPLv2 License](https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt) | numad | -| NVIDIA | [ASL 2.0 License and spec specific licenses](http://www.apache.org/licenses/LICENSE-2.0) | fwctl
fwctl-hwe
fwctl-hwe-signed
fwctl-signed
ibarr
ibsim
iser
iser-hwe
iser-hwe-signed
iser-signed
isert
isert-hwe
isert-hwe-signed
isert-signed
knem
knem-hwe
knem-hwe-modules-signed
knem-modules-signed
libnvidia-container
mft_kernel
mft_kernel-hwe
mft_kernel-hwe-signed
mft_kernel-signed
mlnx-ethtool
mlnx-iproute2
mlnx-nfsrdma
mlnx-nfsrdma-hwe
mlnx-nfsrdma-hwe-signed
mlnx-nfsrdma-signed
mlnx-ofa_kernel
mlnx-ofa_kernel-hwe
mlnx-ofa_kernel-hwe-modules-signed
mlnx-ofa_kernel-modules-signed
mlnx-tools
mlx-bootctl
mlx-steering-dump
multiperf
nvidia-container-toolkit
ofed-docs
ofed-scripts
perftest
rshim
sockperf
srp
srp-hwe
srp-hwe-signed
srp-signed
xpmem
xpmem-hwe
xpmem-hwe-modules-signed
xpmem-lib
xpmem-modules-signed | +| NVIDIA | [ASL 2.0 License and spec specific licenses](http://www.apache.org/licenses/LICENSE-2.0) | ibarr
ibsim
iser
iser-hwe
iser-hwe-signed
iser-signed
isert
isert-hwe
isert-hwe-signed
isert-signed
knem
knem-hwe
knem-hwe-modules-signed
knem-modules-signed
libnvidia-container
libvma
mft_kernel
mft_kernel-hwe
mft_kernel-hwe-signed
mft_kernel-signed
mlnx-ethtool
mlnx-iproute2
mlnx-nfsrdma
mlnx-nfsrdma-hwe
mlnx-nfsrdma-hwe-signed
mlnx-nfsrdma-signed
mlnx-ofa_kernel
mlnx-ofa_kernel-hwe
mlnx-ofa_kernel-hwe-modules-signed
mlnx-ofa_kernel-modules-signed
mlnx-tools
mlx-bootctl
mlx-steering-dump
multiperf
nvidia-container-toolkit
ofed-docs
ofed-scripts
perftest
rshim
sockperf
srp
srp-hwe
srp-hwe-signed
srp-signed
xpmem
xpmem-hwe
xpmem-hwe-modules-signed
xpmem-lib
xpmem-modules-signed | | NVIDIA (BSD) | [BSD](https://github.com/Mellanox/sockperf/blob/sockperf_v2/copying) | sockperf | | OpenEuler | [BSD-3 License](https://github.com/pytorch/pytorch/blob/master/LICENSE) | pytorch | | OpenMamba | [Openmamba GPLv2 License](https://www.gnu.org/licenses/old-licenses/gpl-2.0.txt) | bash-completion | -| OpenSUSE | Following [openSUSE guidelines](https://en.opensuse.org/openSUSE:Specfile_guidelines#Specfile_Licensing) | ant
ant-junit
antlr
aopalliance
apache-commons-beanutils
apache-commons-cli
apache-commons-codec
apache-commons-collections
apache-commons-collections4
apache-commons-compress
apache-commons-daemon
apache-commons-dbcp
apache-commons-digester
apache-commons-httpclient
apache-commons-io
apache-commons-jexl
apache-commons-lang3
apache-commons-logging
apache-commons-net
apache-commons-pool
apache-commons-pool2
apache-commons-validator
apache-commons-vfs2
apache-parent
args4j
atinject
base64coder
bcel
bea-stax
beust-jcommander
bsf
byaccj
cal10n
cdparanoia
cglib
cni
containerized-data-importer
cpulimit
cri-o
ecj
fillup
flux
gd
geronimo-specs
glassfish-annotation-api
gnu-getopt
gnu-regexp
golang-packaging
guava
hamcrest
hawtjni-runtime
httpcomponents-core
influx-cli
influxdb
jakarta-taglibs-standard
jansi
jarjar
java-cup
java-cup-bootstrap
javacc
javacc-bootstrap
javassist
jboss-interceptors-1.2-api
jdepend
jflex
jflex-bootstrap
jlex
jline
jna
jsch
jsoup
jsr-305
jtidy
junit
junitperf
jzlib
kubevirt
kured
libcontainers-common
libtheora
libva
libvdpau
lynx
multus
objectweb-anttask
objectweb-asm
objenesis
oro
osgi-annotation
osgi-compendium
osgi-core
patterns-ceph-containers
plexus-classworlds
plexus-interpolation
plexus-utils
proj
psl-make-dafsa
publicsuffix
qdox
regexp
relaxngDatatype
rhino
ripgrep
servletapi4
servletapi5
shapelib
slf4j
trilead-ssh2
virtiofsd
xalan-j2
xbean
xcursor-themes
xerces-j2
xml-commons-apis
xml-commons-resolver
xmldb-api
xmlrpc-c
xmlunit
xz-java | -| Photon | [Photon License](LICENSE-PHOTON.md) and [Photon Notice](NOTICE.APACHE2).
Also see [LICENSE-EXCEPTIONS.PHOTON](LICENSE-EXCEPTIONS.PHOTON). | acl
alsa-lib
alsa-utils
ansible
apparmor
apr
apr-util
asciidoc
atftp
audit
autoconf
autoconf-archive
autofs
autogen
automake
babel
bash
bc
bcc
bind
binutils
bison
blktrace
boost
btrfs-progs
bubblewrap
build-essential
bzip2
c-ares
cairo
cassandra
cassandra-driver
cdrkit
check
chkconfig
chrpath
cifs-utils
clang
cloud-init
cloud-utils-growpart
cmake
cni-plugins
core-packages
coreutils
cpio
cppunit
cqlsh
cracklib
crash
crash-gcore-command
createrepo_c
cri-tools
cronie
curl
cyrus-sasl
cyrus-sasl-bootstrap
dbus
dbus-glib
dejagnu
device-mapper-multipath
dialog
diffutils
dkms
dmidecode
dnsmasq
docbook-dtd-xml
docbook-style-xsl
dosfstools
dracut
dstat
e2fsprogs
ed
efibootmgr
efivar
elfutils
emacs
erlang
etcd
ethtool
expat
expect
fcgi
file
filesystem
findutils
flex
fontconfig
fping
freetype
fuse
gawk
gc
gcc
gdb
gdbm
gettext
git
git-lfs
glib
glib-networking
glibc
glibmm
gmp
gnome-common
gnupg2
gnuplot
gnutls
gobject-introspection
golang
golang-1.23
golang-1.24
gperf
gperftools
gpgme
gptfdisk
grep
groff
grub2
gtest
gtk-doc
guile
gzip
haproxy
harfbuzz
haveged
hdparm
http-parser
httpd
i2c-tools
iana-etc
icu
initramfs
initscripts
inotify-tools
intltool
iotop
iperf3
iproute
ipset
iptables
iputils
ipvsadm
ipxe
irqbalance
itstool
jansson
jq
json-c
json-glib
kbd
keepalived
kernel
kernel-64k
kernel-headers
kernel-hwe
kernel-hwe-headers
kernel-ipe
kernel-lpg-innovate
kernel-mshv
kernel-rt
kernel-uvm
keyutils
kmod
krb5
less
libaio
libarchive
libassuan
libatomic_ops
libcap
libcap-ng
libconfig
libdb
libdnet
libedit
libestr
libevent
libfastjson
libffi
libgcrypt
libgpg-error
libgssglue
libgudev
libjpeg-turbo
libksba
liblogging
libmbim
libmnl
libmodulemd
libmpc
libmspack
libndp
libnetfilter_conntrack
libnetfilter_cthelper
libnetfilter_cttimeout
libnetfilter_queue
libnfnetlink
libnftnl
libnl3
libnsl2
libpcap
libpipeline
libpng
libpsl
libqmi
librelp
librepo
librsync
libseccomp
libselinux
libsepol
libserf
libsigc++30
libsolv
libsoup
libssh2
libtalloc
libtar
libtasn1
libtiff
libtirpc
libtool
libunistring
libunwind
libusb
libvirt
libwebp
libxml2
libxslt
libyaml
linux-firmware
lldb
lldpad
llvm
lm-sensors
lmdb
log4cpp
logrotate
lshw
lsof
lsscsi
ltrace
lttng-tools
lttng-ust
lvm2
lz4
lzo
m2crypto
m4
make
man-db
man-pages
maven
mc
mercurial
meson
mlocate
ModemManager
mpfr
msr-tools
mysql
nano
nasm
ncurses
ndctl
net-snmp
net-tools
nettle
newt
nfs-utils
nghttp2
nginx
ninja-build
nodejs
npth
nspr
nss
nss-altfiles
ntp
numactl
nvme-cli
oniguruma
OpenIPMI
openldap
openscap
openssh
openvswitch
ostree
pam
pango
parted
patch
pciutils
perl-Canary-Stability
perl-CGI
perl-common-sense
perl-Crypt-SSLeay
perl-DBD-SQLite
perl-DBI
perl-DBIx-Simple
perl-Exporter-Tiny
perl-File-HomeDir
perl-File-Which
perl-IO-Socket-SSL
perl-JSON-Any
perl-JSON-XS
perl-libintl-perl
perl-List-MoreUtils
perl-Module-Build
perl-Module-Install
perl-Module-ScanDeps
perl-Net-SSLeay
perl-NetAddr-IP
perl-Object-Accessor
perl-Path-Class
perl-Try-Tiny
perl-Types-Serialiser
perl-WWW-Curl
perl-XML-Parser
perl-YAML
perl-YAML-Tiny
pgbouncer
pinentry
polkit
popt
postgresql
procps-ng
protobuf
protobuf-c
psmisc
pth
pyasn1-modules
pyOpenSSL
pyparsing
pytest
python-appdirs
python-asn1crypto
python-atomicwrites
python-attrs
python-bcrypt
python-certifi
python-cffi
python-chardet
python-configobj
python-constantly
python-coverage
python-cryptography
python-daemon
python-dateutil
python-defusedxml
python-distro
python-docopt
python-docutils
python-ecdsa
python-geomet
python-gevent
python-hyperlink
python-hypothesis
python-idna
python-imagesize
python-incremental
python-iniparse
python-ipaddr
python-jinja2
python-jmespath
python-jsonpatch
python-jsonpointer
python-jsonschema
python-lockfile
python-lxml
python-mako
python-markupsafe
python-mistune
python-msgpack
python-netaddr
python-netifaces
python-ntplib
python-oauthlib
python-packaging
python-pam
python-pbr
python-ply
python-prettytable
python-psutil
python-psycopg2
python-py
python-pyasn1
python-pycodestyle
python-pycparser
python-pycurl
python-pygments
python-pynacl
python-requests
python-setuptools_scm
python-simplejson
python-six
python-snowballstemmer
python-sphinx-theme-alabaster
python-twisted
python-urllib3
python-vcversioner
python-virtualenv
python-wcwidth
python-webob
python-websocket-client
python-werkzeug
python-zope-event
python-zope-interface
python3
pytz
PyYAML
rapidjson
readline
rng-tools
rpcbind
rpcsvc-proto
rpm
rpm-ostree
rrdtool
rsync
rsyslog
ruby
rust
rust-1.75
scons
sed
sg3_utils
shadow-utils
slang
snappy
socat
sqlite
sshpass
strace
subversion
sudo
swig
syslinux
syslog-ng
sysstat
systemd-bootstrap
systemtap
tar
tboot
tcl
tcpdump
tcsh
tdnf
telegraf
texinfo
tmux
tpm2-abrmd
tpm2-pkcs11
tpm2-pytss
tpm2-tools
tpm2-tss
traceroute
tree
tzdata
unbound
unixODBC
unzip
usbutils
userspace-rcu
utf8proc
util-linux
valgrind
vim
vsftpd
WALinuxAgent
which
wpa_supplicant
xfsprogs
xinetd
xmlsec1
xmlto
xz
zchunk
zeromq
zip
zlib
zsh | +| OpenSUSE | Following [openSUSE guidelines](https://en.opensuse.org/openSUSE:Specfile_guidelines#Specfile_Licensing) | ant
ant-junit
antlr
aopalliance
apache-commons-beanutils
apache-commons-cli
apache-commons-codec
apache-commons-collections
apache-commons-collections4
apache-commons-compress
apache-commons-daemon
apache-commons-dbcp
apache-commons-digester
apache-commons-httpclient
apache-commons-io
apache-commons-jexl
apache-commons-lang3
apache-commons-logging
apache-commons-net
apache-commons-pool
apache-commons-pool2
apache-commons-validator
apache-commons-vfs2
apache-parent
args4j
atinject
base64coder
bcel
bea-stax
beust-jcommander
bsf
byaccj
cal10n
cdparanoia
cglib
cni
containerized-data-importer
cpulimit
cri-o
ecj
ed25519-java
fillup
flux
gd
geronimo-specs
glassfish-annotation-api
gnu-getopt
gnu-regexp
golang-packaging
guava
hamcrest
hawtjni-runtime
httpcomponents-core
influx-cli
influxdb
jakarta-taglibs-standard
jansi
jarjar
java-cup
java-cup-bootstrap
javacc
javacc-bootstrap
javassist
jbcrypt
jboss-interceptors-1.2-api
jdepend
jflex
jflex-bootstrap
jlex
jline
jna
jsch
jsoup
jsr-305
jtidy
junit
junitperf
jzlib
kubevirt
kured
libcontainers-common
libtheora
libva
libvdpau
lynx
multus
objectweb-anttask
objectweb-asm
objenesis
oro
osgi-annotation
osgi-compendium
osgi-core
patterns-ceph-containers
plexus-classworlds
plexus-interpolation
plexus-utils
proj
psl-make-dafsa
publicsuffix
qdox
regexp
relaxngDatatype
rhino
ripgrep
servletapi4
servletapi5
shapelib
slf4j
trilead-ssh2
virtiofsd
xalan-j2
xbean
xcursor-themes
xerces-j2
xml-commons-apis
xml-commons-resolver
xmldb-api
xmlrpc-c
xmlunit
xz-java | +| Photon | [Photon License](LICENSE-PHOTON.md) and [Photon Notice](NOTICE.APACHE2).
Also see [LICENSE-EXCEPTIONS.PHOTON](LICENSE-EXCEPTIONS.PHOTON). | acl
alsa-lib
alsa-utils
ansible
apparmor
apr
apr-util
asciidoc
atftp
audit
autoconf
autoconf-archive
autofs
autogen
automake
babel
bash
bc
bcc
bind
binutils
bison
blktrace
boost
btrfs-progs
bubblewrap
build-essential
bzip2
c-ares
cairo
cassandra
cassandra-driver
cdrkit
check
chkconfig
chrpath
cifs-utils
clang
cloud-init
cloud-utils-growpart
cmake
cni-plugins
core-packages
coreutils
cpio
cppunit
cqlsh
cracklib
crash
crash-gcore-command
createrepo_c
cri-tools
cronie
curl
cyrus-sasl
cyrus-sasl-bootstrap
dbus
dbus-glib
dejagnu
device-mapper-multipath
dialog
diffutils
dkms
dmidecode
dnsmasq
docbook-dtd-xml
docbook-style-xsl
dosfstools
dracut
dstat
e2fsprogs
ed
efibootmgr
efivar
elfutils
emacs
erlang
etcd
ethtool
expat
expect
fcgi
file
filesystem
findutils
flex
fontconfig
fping
freetype
fuse
gawk
gc
gcc
gdb
gdbm
gettext
git
git-lfs
glib
glib-networking
glibc
glibmm
gmp
gnome-common
gnupg2
gnuplot
gnutls
gobject-introspection
golang
golang-1.23
golang-1.24
gperf
gperftools
gpgme
gptfdisk
grep
groff
grub2
gtest
gtk-doc
guile
gzip
haproxy
harfbuzz
haveged
hdparm
http-parser
httpd
i2c-tools
iana-etc
icu
initramfs
initscripts
inotify-tools
intltool
iotop
iperf3
iproute
ipset
iptables
iputils
ipvsadm
ipxe
irqbalance
itstool
jansson
jq
json-c
json-glib
kbd
keepalived
kernel
kernel-64k
kernel-headers
kernel-hwe
kernel-hwe-headers
kernel-ipe
kernel-lpg-innovate
kernel-mshv
kernel-rt
kernel-uvm
keyutils
kmod
krb5
less
libaio
libarchive
libassuan
libatomic_ops
libcap
libcap-ng
libconfig
libdb
libdnet
libedit
libestr
libevent
libfastjson
libffi
libgcrypt
libgpg-error
libgssglue
libgudev
libjpeg-turbo
libksba
liblogging
libmbim
libmnl
libmodulemd
libmpc
libmspack
libndp
libnetfilter_conntrack
libnetfilter_cthelper
libnetfilter_cttimeout
libnetfilter_queue
libnfnetlink
libnftnl
libnl3
libnsl2
libpcap
libpipeline
libpng
libpsl
libqmi
librelp
librepo
librsync
libseccomp
libselinux
libsepol
libserf
libsigc++30
libsolv
libsoup
libssh2
libtalloc
libtar
libtasn1
libtiff
libtirpc
libtool
libunistring
libunwind
libusb
libvirt
libwebp
libxml2
libxslt
libyaml
linux-firmware
lldb
lldpad
llvm
lm-sensors
lmdb
log4cpp
logrotate
lshw
lsof
lsscsi
ltrace
lttng-tools
lttng-ust
lvm2
lz4
lzo
m2crypto
m4
make
man-db
man-pages
maven
mc
mercurial
meson
mlocate
ModemManager
mpfr
msr-tools
mysql
nano
nasm
ncurses
ndctl
net-snmp
net-tools
nettle
newt
nfs-utils
nghttp2
nginx
ninja-build
nodejs
nodejs24
npth
nspr
nss
nss-altfiles
ntp
numactl
nvme-cli
oniguruma
OpenIPMI
openldap
openscap
openssh
openvswitch
ostree
pam
pango
parted
patch
pciutils
perl-Canary-Stability
perl-CGI
perl-common-sense
perl-Crypt-SSLeay
perl-DBD-SQLite
perl-DBI
perl-DBIx-Simple
perl-Exporter-Tiny
perl-File-HomeDir
perl-File-Which
perl-IO-Socket-SSL
perl-JSON-Any
perl-JSON-XS
perl-libintl-perl
perl-List-MoreUtils
perl-Module-Build
perl-Module-Install
perl-Module-ScanDeps
perl-Net-SSLeay
perl-NetAddr-IP
perl-Object-Accessor
perl-Path-Class
perl-Try-Tiny
perl-Types-Serialiser
perl-WWW-Curl
perl-XML-Parser
perl-YAML
perl-YAML-Tiny
pgbouncer
pinentry
polkit
popt
postgresql
procps-ng
protobuf
protobuf-c
psmisc
pth
pyasn1-modules
pyOpenSSL
pyparsing
pytest
python-appdirs
python-asn1crypto
python-atomicwrites
python-attrs
python-bcrypt
python-certifi
python-cffi
python-chardet
python-configobj
python-constantly
python-coverage
python-cryptography
python-daemon
python-dateutil
python-defusedxml
python-distro
python-docopt
python-docutils
python-ecdsa
python-geomet
python-gevent
python-hyperlink
python-hypothesis
python-idna
python-imagesize
python-incremental
python-iniparse
python-ipaddr
python-jinja2
python-jmespath
python-jsonpatch
python-jsonpointer
python-jsonschema
python-lockfile
python-lxml
python-mako
python-markupsafe
python-mistune
python-msgpack
python-netaddr
python-netifaces
python-ntplib
python-oauthlib
python-packaging
python-pam
python-pbr
python-ply
python-prettytable
python-psutil
python-psycopg2
python-py
python-pyasn1
python-pycodestyle
python-pycparser
python-pycurl
python-pygments
python-pynacl
python-requests
python-setuptools_scm
python-simplejson
python-six
python-snowballstemmer
python-sphinx-theme-alabaster
python-twisted
python-urllib3
python-vcversioner
python-virtualenv
python-wcwidth
python-webob
python-websocket-client
python-werkzeug
python-zope-event
python-zope-interface
python3
pytz
PyYAML
rapidjson
readline
rng-tools
rpcbind
rpcsvc-proto
rpm
rpm-ostree
rrdtool
rsync
rsyslog
ruby
rust
rust-1.75
scons
sed
sg3_utils
shadow-utils
slang
snappy
socat
sqlite
sshpass
strace
subversion
sudo
swig
syslinux
syslog-ng
sysstat
systemd-bootstrap
systemtap
tar
tboot
tcl
tcpdump
tcsh
tdnf
telegraf
texinfo
tmux
tpm2-abrmd
tpm2-pkcs11
tpm2-pytss
tpm2-tools
tpm2-tss
traceroute
tree
tzdata
unbound
unixODBC
unzip
usbutils
userspace-rcu
utf8proc
util-linux
valgrind
vim
vsftpd
WALinuxAgent
which
wpa_supplicant
xfsprogs
xinetd
xmlsec1
xmlto
xz
zchunk
zeromq
zip
zlib
zsh | | RPM software management source | [GPLv2+ License](https://github.com/rpm-software-management/dnf5/blob/main/COPYING.md) | dnf5 | | Source project | Same as the source project. | python-nocaselist | | Sysbench source | [GPLv2+ License](https://github.com/akopytov/sysbench/blob/master/COPYING) | sysbench | diff --git a/LICENSES-AND-NOTICES/SPECS/data/licenses.json b/LICENSES-AND-NOTICES/SPECS/data/licenses.json index 0cafa4708ed..2f6ef2ad9f2 100644 --- a/LICENSES-AND-NOTICES/SPECS/data/licenses.json +++ b/LICENSES-AND-NOTICES/SPECS/data/licenses.json @@ -147,6 +147,7 @@ "cpufrequtils", "cpuid", "criu", + "crun", "crypto-policies", "cryptsetup", "cscope", @@ -847,6 +848,7 @@ "linuxptp", "lksctp-tools", "lldpd", + "llhttp", "lockdev", "logwatch", "lpsolve", @@ -1516,7 +1518,6 @@ "perl-Test-Requires", "perl-Test-RequiresInternet", "perl-Test-Script", - "perl-Test-Simple", "perl-Test-SubCalls", "perl-Test-Synopsis", "perl-Test-Taint", @@ -1828,6 +1829,7 @@ "python-ruamel-yaml-clib", "python-s3transfer", "python-schedutils", + "python-scikit-build-core", "python-semantic_version", "python-should_dsl", "python-simpleline", @@ -1909,9 +1911,9 @@ "qperf", "qr-code-generator", "qt-rpm-macros", - "qt5-qtconnectivity", - "qt5-qtsensors", - "qt5-qtserialport", + "qt6-qtconnectivity", + "qt6-qtsensors", + "qt6-qtserialport", "qtbase", "qtdeclarative", "qtsvg", @@ -2473,10 +2475,6 @@ "NVIDIA": { "license": "[ASL 2.0 License and spec specific licenses](http://www.apache.org/licenses/LICENSE-2.0)", "specs": [ - "fwctl", - "fwctl-hwe", - "fwctl-hwe-signed", - "fwctl-signed", "ibarr", "ibsim", "iser", @@ -2492,6 +2490,7 @@ "knem-hwe-modules-signed", "knem-modules-signed", "libnvidia-container", + "libvma", "mft_kernel", "mft_kernel-hwe", "mft_kernel-hwe-signed", @@ -2588,6 +2587,7 @@ "cpulimit", "cri-o", "ecj", + "ed25519-java", "fillup", "flux", "gd", @@ -2610,6 +2610,7 @@ "javacc", "javacc-bootstrap", "javassist", + "jbcrypt", "jboss-interceptors-1.2-api", "jdepend", "jflex", @@ -2946,6 +2947,7 @@ "nginx", "ninja-build", "nodejs", + "nodejs24", "npth", "nspr", "nss", diff --git a/SPECS-EXTENDED/389-ds-base/389-ds-base.spec b/SPECS-EXTENDED/389-ds-base/389-ds-base.spec index 02fdf2f65fe..88260f12b33 100644 --- a/SPECS-EXTENDED/389-ds-base/389-ds-base.spec +++ b/SPECS-EXTENDED/389-ds-base/389-ds-base.spec @@ -68,7 +68,7 @@ ExcludeArch: i686 Summary: 389 Directory Server (%{variant}) Name: 389-ds-base Version: 3.1.1 -Release: 7%{?dist} +Release: 9%{?dist} License: GPL-3.0-or-later AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (CC-BY-4.0 AND MIT) AND (MIT OR Apache-2.0) AND Unicode-DFS-2016 AND (MIT OR CC0-1.0) AND (MIT OR Unlicense) AND 0BSD AND Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MIT AND ISC AND MPL-2.0 AND PSF-2.0 URL: https://www.port389.org Vendor: Microsoft Corporation @@ -83,6 +83,7 @@ Source4: 389-ds-base.sysusers Source5: https://fedorapeople.org/groups/389ds/libdb-5.3.28-59.tar.bz2 %endif +Patch0: rust-1.90-fixes.patch Provides: ldif2ldbm >= 0 # Attach the buildrequires to the top level package: @@ -732,6 +733,13 @@ exit 0 %endif %changelog +* Tue Jan 13 2025 Kavya Sree Kaitepalli - 3.1.1-9 +- Bump release to rebuild with rust +- Add patch add explicit lifetime for ValueArrayRef iterator + +* Tue Jan 06 2026 Pawel Winogrodzki - 3.1.1-8 +- Bumping release to rebuild with new 'net-snmp' libs. 
+ * Fri Aug 08 2025 Azure Linux Security Servicing Account - 3.1.1-7 - Bump release to rebuild with rust diff --git a/SPECS-EXTENDED/389-ds-base/rust-1.90-fixes.patch b/SPECS-EXTENDED/389-ds-base/rust-1.90-fixes.patch new file mode 100644 index 00000000000..c6c5bdaaf36 --- /dev/null +++ b/SPECS-EXTENDED/389-ds-base/rust-1.90-fixes.patch @@ -0,0 +1,25 @@ +From 3a0d6ff3272c4a3d5f2d552a436e4f0fe0756a0a Mon Sep 17 00:00:00 2001 +From: Kavya Sree Kaitepalli +Date: Wed, 29 Oct 2025 06:38:08 +0000 +Subject: [PATCH] Add explicit lifetime for ValueArrayRef iterator for Rust 1.90 + +--- + src/slapi_r_plugin/src/value.rs | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs +index 2fd35c8..fec74ac 100644 +--- a/src/slapi_r_plugin/src/value.rs ++++ b/src/slapi_r_plugin/src/value.rs +@@ -61,7 +61,7 @@ impl ValueArrayRef { + ValueArrayRef { raw_slapi_val } + } + +- pub fn iter(&self) -> ValueArrayRefIter { ++ pub fn iter(&self) -> ValueArrayRefIter<'_> { + ValueArrayRefIter { + idx: 0, + va_ref: &self, +-- +2.45.4 + diff --git a/SPECS-EXTENDED/apache-commons-jexl/apache-commons-jexl.spec b/SPECS-EXTENDED/apache-commons-jexl/apache-commons-jexl.spec old mode 100755 new mode 100644 index e0d9c62c0a1..da575850f59 --- a/SPECS-EXTENDED/apache-commons-jexl/apache-commons-jexl.spec +++ b/SPECS-EXTENDED/apache-commons-jexl/apache-commons-jexl.spec @@ -4,7 +4,7 @@ Summary: Java Expression Language (JEXL) Name: apache-%{short_name} Version: 2.1.1 -Release: 3%{?dist} +Release: 4%{?dist} License: Apache-2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -108,14 +108,40 @@ ln -sf %{name}/%{short_name}-compat.jar %{buildroot}%{_javadir}/%{short_name}-co install -dm 0755 %{buildroot}%{_mavenpomdir}/%{name} install -pm 0644 pom.xml %{buildroot}%{_mavenpomdir}/%{name}/%{short_name}.pom %add_maven_depmap %{name}/%{short_name}.pom %{name}/%{short_name}.jar -install -pm 0644 jexl2-compat/pom.xml %{buildroot}%{_mavenpomdir}/%{name}/%{short_name}-compat.pom +install -pm 0644 jexl2-compat/pom.xml %{buildroot}%{_mavenpomdir}/%{name}/%{short_name}-compat.pom %add_maven_depmap %{name}/%{short_name}-compat.pom %{name}/%{short_name}-compat.jar # javadoc install -dm 0755 %{buildroot}%{_javadocdir}/%{name}/jexl2-compat cp -pr target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/ cp -pr jexl2-compat/target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/jexl2-compat/ + +# Extract LICENSE if present +legaldir=%{buildroot}%{_javadocdir}/%{name}/legal +if [ -d "$legaldir" ]; then + install -Dm 0644 $legaldir/LICENSE \ + %{buildroot}%{_licensedir}/apache-commons-jexl/LICENSE.javadoc +fi + +# Delete ALL legal/ dirs +find %{buildroot}%{_javadocdir}/%{name} -type d -name legal -exec rm -rf {} + + +# Run fdupes (this may create new symlinks) %fdupes -s %{buildroot}%{_javadocdir} +# Fix absolute symlinks inside jexl2-compat by rewriting relative to parent directory structure +pushd %{buildroot}%{_javadocdir}/%{name} +for f in $(find jexl2-compat -type l); do + tgt=$(readlink "$f") + if [[ "$tgt" = /* ]]; then + base=$(basename "$tgt") + # Compute depth-aware relative path + depth=$(dirname "$f" | awk -F/ '{ print NF-1 }') + rel=$(printf '../%.0s' $(seq 1 $depth))"$base" + ln -snf "$rel" "$f" + fi +done +popd + %check # commons-jexl %{ant} \ @@ -128,16 +154,20 @@ cp -pr jexl2-compat/target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/jex test %files -f .mfiles -%license LICENSE.txt -%doc NOTICE.txt RELEASE-NOTES.txt +%license LICENSE.txt 
NOTICE.txt +%doc RELEASE-NOTES.txt %{_javadir}/%{short_name}*.jar %files javadoc -%license LICENSE.txt -%doc NOTICE.txt +%license %{_licensedir}/apache-commons-jexl/LICENSE.javadoc %{_javadocdir}/%{name} %changelog + +* Mon Dec 22 2025 Aninda Pradhan - 2.1.1-4 +- Fixed license path warnings +- License verified + * Mon Nov 14 2022 Sumedh Sharma - 2.1.1-3 - Fix build errors * create 'Packages' directory under JDK_HOME diff --git a/SPECS-EXTENDED/booth/booth.signatures.json b/SPECS-EXTENDED/booth/booth.signatures.json index 228ec48b057..6af9ea5b554 100644 --- a/SPECS-EXTENDED/booth/booth.signatures.json +++ b/SPECS-EXTENDED/booth/booth.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "booth-1.0.tar.gz": "1f4f6ed4fedebf7c52ca8dc9668218c99fbfe96bfbe9ac8f586bab7ef3b9b5d7" + "booth-1.2.tar.gz": "4f1c4581f71af1188893cce6bbe7d69ac7d9e8593ffd18a3a5f2449f4a5df3bf" } } diff --git a/SPECS-EXTENDED/booth/booth.spec b/SPECS-EXTENDED/booth/booth.spec index 6e43277c9a8..2f53f76ec96 100644 --- a/SPECS-EXTENDED/booth/booth.spec +++ b/SPECS-EXTENDED/booth/booth.spec @@ -1,12 +1,9 @@ # Disable automatic compilation of Python files in extra directories %global _python_bytecompile_extra 0 -# set following to the actual commit or, for final release, concatenate -# "boothver" macro to "v" (will yield a tag per the convention) -%global commit 5d837d2b5bf1c240a5f1c5efe4e8d79f55727cca -%global shortcommit %(c=%{commit}; echo ${c:0:7}) %{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}} %{!?_licensedir:%global license %doc} %global test_path %{_datadir}/booth/tests +%global booth_user booth_test_user # RPMs are split as follows: # * booth: # - envelope package serving as a syntactic shortcut to install @@ -31,14 +28,13 @@ %bcond_with glue Summary: Ticket Manager for Multi-site Clusters Name: booth -Version: 1.0 -Release: 8%{?dist} -License: GPLv2+ +Version: 1.2 +Release: 1%{?dist} +License: GPL-2.0-or-later Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://github.com/ClusterLabs/%{name} -Source0: https://github.com/ClusterLabs/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz#/%{name}-%{version}.tar.gz -Patch0: CVE-2022-2553.patch +Source0: https://github.com/ClusterLabs/%{name}/releases/download/v%{version}/%{name}-%{version}.tar.gz # direct build process dependencies BuildRequires: autoconf BuildRequires: automake @@ -78,7 +74,9 @@ Requires: %{name}-core%{?_isa} Requires: %{name}-site %files -# intentionally empty +%license COPYING +%dir %{_datadir}/pkgconfig +%{_datadir}/pkgconfig/booth.pc %description Booth manages tickets which authorize cluster sites located @@ -154,7 +152,7 @@ BuildArch: noarch Automated tests for running Booth, ticket manager for multi-site clusters. 
%prep -%autosetup -p1 -n %{name}-%{commit} +%autosetup -n %{name}-%{version} %build export CFLAGS=" %{build_cflags} -I/usr/include/pacemaker " @@ -182,22 +180,43 @@ cp -a -t %{buildroot}/%{_pkgdocdir} \ rm -rf %{buildroot}/%{_initrddir}/booth-arbitrator rm -rf %{buildroot}/%{_pkgdocdir}/README.upgrade-from-v0.1 rm -rf %{buildroot}/%{_pkgdocdir}/COPYING + +# Removing absolute symlinks +rm -f %{buildroot}%{_sbindir}/booth +rm -f %{buildroot}%{_sbindir}/geostore +ln -s boothd %{buildroot}%{_sbindir}/booth +ln -s boothd %{buildroot}%{_sbindir}/geostore + # tests -mkdir -p %{buildroot}/%{test_path} -cp -a -t %{buildroot}/%{test_path} \ - -- conf test unit-tests script/unit-test.py -chmod +x %{buildroot}/%{test_path}/test/booth_path -chmod +x %{buildroot}/%{test_path}/test/live_test.sh -mkdir -p %{buildroot}/%{test_path}/src -ln -s -t %{buildroot}/%{test_path}/src \ - -- %{_sbindir}/boothd +mkdir -p %{test_path} +cp -a -t %{test_path} \ + -- conf test +chmod +x %{test_path}/test/booth_path +chmod +x %{test_path}/test/live_test.sh +mkdir -p %{test_path}/src +ln -s -t %{test_path}/src \ + -- %{buildroot}/%{_sbindir}/boothd +# Generate runtests.py and boothtestenv.py +sed -e 's#PYTHON_SHEBANG#%{__python3} -Es#g' \ + -e 's#TEST_SRC_DIR#%{test_path}/test#g' \ + -e 's#TEST_BUILD_DIR#%{test_path}/test#g' \ + %{test_path}/test/runtests.py.in > %{test_path}/test/runtests.py + +chmod +x %{test_path}/test/runtests.py + +sed -e 's#PYTHON_SHEBANG#%{__python3} -Es#g' \ + -e 's#TEST_SRC_DIR#%{test_path}/test#g' \ + -e 's#TEST_BUILD_DIR#%{test_path}/test#g' \ + %{test_path}/test/boothtestenv.py.in > %{test_path}/test/boothtestenv.py # https://fedoraproject.org/wiki/Packaging:Python_Appendix#Manual_byte_compilation -%py_byte_compile %{__python3} %{buildroot}/%{test_path} +%py_byte_compile %{__python3} %{test_path} %check # alternatively: test/runtests.py -VERBOSE=1 make check +# Booth tests cannot run as root in RPM build system +useradd -s /usr/bin/sh %{booth_user} +su %{booth_user} -s /bin/sh -c "VERBOSE=1 %{test_path}/test/runtests.py" %files core %license COPYING @@ -212,6 +231,9 @@ VERBOSE=1 make check %dir %{_sysconfdir}/booth %exclude %{_sysconfdir}/booth/booth.conf.example +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/booth/ +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/booth/cores + %files arbitrator %{_unitdir}/booth@.service %{_unitdir}/booth-arbitrator.service @@ -233,11 +255,14 @@ VERBOSE=1 make check %files test %doc %{_pkgdocdir}/README-testing # /usr/share/booth provided by -site -%{test_path} # /usr/lib/ocf/resource.d/booth provided by -site %{_libdir}/ocf/resource.d/booth/sharedrsc %changelog +* Thu Dec 18 2025 Aditya Singh - 1.2-1 +- Upgrade to version 1.2 +- License verified. + * Wed Aug 30 2023 CBL-Mariner Servicing Account - 1.0-8 - Add patch for CVE-2022-2553 diff --git a/SPECS-EXTENDED/bsf/bsf.spec b/SPECS-EXTENDED/bsf/bsf.spec index dc9c94f0f0f..1ec19b02b17 100644 --- a/SPECS-EXTENDED/bsf/bsf.spec +++ b/SPECS-EXTENDED/bsf/bsf.spec @@ -1,7 +1,7 @@ Summary: Bean Scripting Framework Name: bsf Version: 2.4.0 -Release: 19%{?dist} +Release: 20%{?dist} License: Apache-2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -85,6 +85,8 @@ install -DTm 644 %{SOURCE1} %{buildroot}%{_mavenpomdir}/JPP-%{name}.pom # javadoc install -d -m 755 %{buildroot}%{_javadocdir}/%{name} cp -pr build/javadocs/* %{buildroot}%{_javadocdir}/%{name} +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . 
%fdupes -s %{buildroot}%{_javadocdir}/%{name} %files -f .mfiles @@ -92,10 +94,14 @@ cp -pr build/javadocs/* %{buildroot}%{_javadocdir}/%{name} %doc AUTHORS.txt CHANGES.txt README.txt TODO.txt RELEASE-NOTE.txt %files javadoc -%license LICENSE.txt NOTICE.txt +%license LICENSE LICENSE.txt NOTICE.txt ADDITIONAL_LICENSE_INFO %{_javadocdir}/%{name} %changelog +* Fri Jan 02 2026 Sumit Jena - 2.4.0-20 +- Fixed License Warnings. +- Added additional License file. + * Tue Jan 03 2023 Sumedh Sharma - 2.4.0-19 - License verified diff --git a/SPECS-EXTENDED/buildah/buildah.spec b/SPECS-EXTENDED/buildah/buildah.spec index 83ad3316fb8..41d8b521fd7 100644 --- a/SPECS-EXTENDED/buildah/buildah.spec +++ b/SPECS-EXTENDED/buildah/buildah.spec @@ -30,7 +30,7 @@ Epoch: 0 Version: 1.41.4 # The `AND` needs to be uppercase in the License for SPDX compatibility License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0 -Release: 4%{?dist} +Release: 6%{?dist} Vendor: Microsoft Corporation Distribution: Azure Linux ExclusiveArch: aarch64 ppc64le s390x x86_64 @@ -43,7 +43,7 @@ BuildRequires: device-mapper-devel BuildRequires: git-core BuildRequires: golang >= 1.16.6 BuildRequires: glib2-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} %if !%{defined gobuild} BuildRequires: go-rpm-macros %endif @@ -173,6 +173,12 @@ make test-unit %{_datadir}/%{name}/test %changelog +* Thu Jan 22 2026 Kanishk Bansal - 0:1.41.4-6 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 0:1.41.4-5 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 0:1.41.4-4 - Bump to rebuild with updated glibc diff --git a/SPECS-EXTENDED/catatonit/catatonit.spec b/SPECS-EXTENDED/catatonit/catatonit.spec index 79147e44475..110355521c2 100644 --- a/SPECS-EXTENDED/catatonit/catatonit.spec +++ b/SPECS-EXTENDED/catatonit/catatonit.spec @@ -3,7 +3,7 @@ Distribution: Azure Linux Name: catatonit Version: 0.1.7 -Release: 24%{?dist} +Release: 26%{?dist} Summary: A signal-forwarding process manager for containers License: GPLv3+ URL: https://github.com/openSUSE/catatonit @@ -13,7 +13,7 @@ BuildRequires: automake BuildRequires: file BuildRequires: gcc BuildRequires: git -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: libtool BuildRequires: make @@ -61,6 +61,12 @@ ln -s %{_libexecdir}/%{name}/%{name} %{buildroot}%{_libexecdir}/podman/%{name} %{_libexecdir}/podman/%{name} %changelog +* Thu Jan 22 2026 Kanishk Bansal - 0.1.7-26 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 0.1.7-25 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 0.1.7-24 - Bump to rebuild with updated glibc diff --git a/SPECS-EXTENDED/corosync/corosync.spec b/SPECS-EXTENDED/corosync/corosync.spec index 08a17f3ab41..4a0ae2772c8 100644 --- a/SPECS-EXTENDED/corosync/corosync.spec +++ b/SPECS-EXTENDED/corosync/corosync.spec @@ -19,7 +19,7 @@ Distribution: Azure Linux Name: corosync Summary: The Corosync Cluster Engine and Application Programming Interfaces Version: 3.0.4 -Release: 3%{?dist} +Release: 4%{?dist} License: BSD URL: http://corosync.github.io/corosync/ Source0: http://build.clusterlabs.org/corosync/releases/%{name}-%{version}%{?gittarver}.tar.gz @@ -160,7 +160,7 @@ fi %endif %files -%doc LICENSE +%license LICENSE %{_sbindir}/corosync %{_sbindir}/corosync-keygen %{_sbindir}/corosync-cmapctl @@ -219,7 +219,7 @@ Summary: The Corosync Cluster Engine Libraries This 
package contains corosync libraries. %files -n corosynclib -%doc LICENSE +%license LICENSE %{_libdir}/libcfg.so.* %{_libdir}/libcpg.so.* %{_libdir}/libcmap.so.* @@ -242,7 +242,6 @@ This package contains include files and man pages used to develop using The Corosync Cluster Engine APIs. %files -n corosynclib-devel -%doc LICENSE %dir %{_includedir}/corosync/ %{_includedir}/corosync/corodefs.h %{_includedir}/corosync/cfg.h @@ -281,12 +280,15 @@ Nodes can be added and removed as well as partitioned (to simulate network splits) %files -n corosync-vqsim -%doc LICENSE %{_bindir}/corosync-vqsim %{_mandir}/man8/corosync-vqsim.8* %endif %changelog +* Tue Jan 06 2026 Pawel Winogrodzki - 3.0.4-4 +- Bumping release to rebuild with new 'net-snmp' libs. +- License verified. + * Thu Oct 14 2021 Pawel Winogrodzki - 3.0.4-3 - Initial CBL-Mariner import from Fedora 32 (license: MIT). - Converting the 'Release' tag to the '[number].[distribution]' format. diff --git a/SPECS-EXTENDED/criu/001-upstream-pr-2653.patch b/SPECS-EXTENDED/criu/001-upstream-pr-2653.patch new file mode 100644 index 00000000000..bcb96fd1df3 --- /dev/null +++ b/SPECS-EXTENDED/criu/001-upstream-pr-2653.patch @@ -0,0 +1,134 @@ +From 22fdffbdde9476b27988b3ee0a4013a4453784c9 Mon Sep 17 00:00:00 2001 +From: Andrei Vagin +Date: Mon, 21 Apr 2025 06:33:41 +0000 +Subject: [PATCH] net: nftables: avoid restore failure if the CRIU nft table + already exist + +CRIU locks the network during restore in an "empty" network namespace. +However, "empty" in this context means CRIU isn't restoring the +namespace. This network namespace can be the same namespace where +processes have been dumped and so the network is already locked in it. + +Fixes #2650 + +Signed-off-by: Andrei Vagin +--- + criu/cr-restore.c | 2 +- + criu/include/net.h | 2 +- + criu/net.c | 30 +++++++++++++++++------------- + 3 files changed, 19 insertions(+), 15 deletions(-) + +diff --git a/criu/cr-restore.c b/criu/cr-restore.c +index 583b446e0b..30932f60a2 100644 +--- a/criu/cr-restore.c ++++ b/criu/cr-restore.c +@@ -2119,7 +2119,7 @@ static int restore_root_task(struct pstree_item *init) + * the '--empty-ns net' mode no iptables C/R is done and we + * need to return these rules by hands. 
+ */ +- ret = network_lock_internal(); ++ ret = network_lock_internal(/* restore = */ true); + if (ret) + goto out_kill; + } +diff --git a/criu/include/net.h b/criu/include/net.h +index 5e8a848620..7c5ede21e1 100644 +--- a/criu/include/net.h ++++ b/criu/include/net.h +@@ -31,7 +31,7 @@ extern int collect_net_namespaces(bool for_dump); + + extern int network_lock(void); + extern void network_unlock(void); +-extern int network_lock_internal(void); ++extern int network_lock_internal(bool restore); + + extern struct ns_desc net_ns_desc; + +diff --git a/criu/net.c b/criu/net.c +index ee46f1c495..300df480b0 100644 +--- a/criu/net.c ++++ b/criu/net.c +@@ -3206,12 +3206,12 @@ static inline FILE *redirect_nftables_output(struct nft_ctx *nft) + } + #endif + +-static inline int nftables_lock_network_internal(void) ++static inline int nftables_lock_network_internal(bool restore) + { + #if defined(CONFIG_HAS_NFTABLES_LIB_API_0) || defined(CONFIG_HAS_NFTABLES_LIB_API_1) + cleanup_file FILE *fp = NULL; + struct nft_ctx *nft; +- int ret = 0; ++ int ret = 0, exit_code = -1; + char table[32]; + char buf[128]; + +@@ -3224,11 +3224,16 @@ static inline int nftables_lock_network_internal(void) + + fp = redirect_nftables_output(nft); + if (!fp) +- goto out; ++ goto err2; + + snprintf(buf, sizeof(buf), "create table %s", table); +- if (NFT_RUN_CMD(nft, buf)) ++ ret = NFT_RUN_CMD(nft, buf); ++ if (ret) { ++ /* The network has been locked on dump. */ ++ if (restore && errno == EEXIST) ++ return 0; + goto err2; ++ } + + snprintf(buf, sizeof(buf), "add chain %s output { type filter hook output priority 0; policy drop; }", table); + if (NFT_RUN_CMD(nft, buf)) +@@ -3246,17 +3251,16 @@ static inline int nftables_lock_network_internal(void) + if (NFT_RUN_CMD(nft, buf)) + goto err1; + +- goto out; +- ++ exit_code = 0; ++out: ++ nft_ctx_free(nft); ++ return exit_code; + err1: + snprintf(buf, sizeof(buf), "delete table %s", table); + NFT_RUN_CMD(nft, buf); + err2: +- ret = -1; + pr_err("Locking network failed using nftables\n"); +-out: +- nft_ctx_free(nft); +- return ret; ++ goto out; + #else + pr_err("CRIU was built without libnftables support\n"); + return -1; +@@ -3288,7 +3292,7 @@ static int iptables_network_lock_internal(void) + return ret; + } + +-int network_lock_internal(void) ++int network_lock_internal(bool restore) + { + int ret = 0, nsret; + +@@ -3301,7 +3305,7 @@ int network_lock_internal(void) + if (opts.network_lock_method == NETWORK_LOCK_IPTABLES) + ret = iptables_network_lock_internal(); + else if (opts.network_lock_method == NETWORK_LOCK_NFTABLES) +- ret = nftables_lock_network_internal(); ++ ret = nftables_lock_network_internal(restore); + + if (restore_ns(nsret, &net_ns_desc)) + ret = -1; +@@ -3427,7 +3431,7 @@ int network_lock(void) + if (run_scripts(ACT_NET_LOCK)) + return -1; + +- return network_lock_internal(); ++ return network_lock_internal(false); + } + + void network_unlock(void) diff --git a/SPECS-EXTENDED/criu/criu.signatures.json b/SPECS-EXTENDED/criu/criu.signatures.json index 31dcbf81e54..1068f5f56b3 100644 --- a/SPECS-EXTENDED/criu/criu.signatures.json +++ b/SPECS-EXTENDED/criu/criu.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { - "criu-3.15.tar.bz2": "447cc1f350da94d190bcfda753695bf34ce91eee969df8263fcc33d08990a025", + "criu-4.1.1.tar.gz": "a5338fe696395843543e6e09c85ccaf36614bf172c26fe8506191b7b930d2dae", "criu-tmpfiles.conf": "d40c7153756d170c4d68ac57598236a011c177ac41a1125813f8b2e16dc15c1a" } -} +} \ No newline at end of file diff --git a/SPECS-EXTENDED/criu/criu.spec 
b/SPECS-EXTENDED/criu/criu.spec index adfa656e4e7..a633990e463 100644 --- a/SPECS-EXTENDED/criu/criu.spec +++ b/SPECS-EXTENDED/criu/criu.spec @@ -4,54 +4,49 @@ Distribution: Azure Linux %global py_prefix python3 %global py_binary %{py_prefix} - - - - # With annobin enabled, CRIU does not work anymore. It seems CRIU's # parasite code breaks if annobin is enabled. %undefine _annotated_build -Name: criu -Version: 3.15 -Release: 3%{?dist} -Provides: crtools = %{version}-%{release} -Obsoletes: crtools <= 1.0-2 -Summary: Tool for Checkpoint/Restore in User-space -License: GPLv2 -URL: http://criu.org/ -Source0: http://download.openvz.org/criu/criu-%{version}.tar.bz2 - -Patch0: unifying_struct_names.patch - -%if 0%{?rhel} && 0%{?rhel} <= 7 -BuildRequires: perl -# RHEL has no asciidoc; take man-page from Fedora 26 -# zcat /usr/share/man/man8/criu.8.gz > criu.8 -Source1: criu.8 -Source2: crit.1 -Source3: compel.1 -# The patch aio-fix.patch is needed as RHEL7 -# doesn't do "nr_events *= 2" in ioctx_alloc(). -Patch100: aio-fix.patch -%endif - -Source4: criu-tmpfiles.conf - -BuildRequires: gcc -BuildRequires: systemd -BuildRequires: libnet-devel -BuildRequires: protobuf-devel protobuf-c-devel %{py_prefix}-devel libnl3-devel libcap-devel - -BuildRequires: asciidoc xmlto -BuildRequires: perl-interpreter -BuildRequires: libselinux-devel -BuildRequires: gnutls-devel -BuildRequires: nftables-devel -BuildRequires: git +Name: criu +Version: 4.1.1 +Release: 1%{?dist} +Provides: crtools = %{version}-%{release} +Obsoletes: crtools <= 1.0-2 +Summary: Tool for Checkpoint/Restore in User-space +License: GPLv2 +URL: http://criu.org/ +Source0: https://github.com/checkpoint-restore/criu/archive/v%{version}/criu-%{version}.tar.gz + +Patch0: 001-upstream-pr-2653.patch + +Source5: criu-tmpfiles.conf + +BuildRequires: gcc +BuildRequires: systemd +BuildRequires: libnet-devel +BuildRequires: protobuf-devel +BuildRequires: protobuf-c-devel +BuildRequires: %{py_prefix}-devel +BuildRequires: libnl3-devel +BuildRequires: libcap-devel +BuildRequires: %{py_prefix}-pip +BuildRequires: %{py_prefix}-setuptools +BuildRequires: %{py_prefix}-wheel +BuildRequires: %{py_prefix}-protobuf +BuildRequires: asciidoc +BuildRequires: perl-interpreter +BuildRequires: libselinux-devel +BuildRequires: gnutls-devel +BuildRequires: libdrm-devel +BuildRequires: libuuid-devel +BuildRequires: libbsd-devel +BuildRequires: nftables-devel +BuildRequires: make +BuildRequires: git +BuildRequires: xmlto # Checkpointing containers with a tmpfs requires tar -Recommends: tar -BuildRequires: libbsd-devel +Recommends: tar # user-space and kernel changes are only available for x86_64, arm, @@ -65,102 +60,99 @@ criu is the user-space part of Checkpoint/Restore in User-space Linux in user-space. -%package devel -Summary: Header files and libraries for %{name} -Requires: %{name} = %{version}-%{release} +%package devel +Summary: Header files and libraries for %{name} +Requires: %{name} = %{version}-%{release} +Requires: %{name}-libs = %{version}-%{release} -%description devel +%description devel This package contains header files and libraries for %{name}. 
-%package libs -Summary: Libraries for %{name} -Requires: %{name} = %{version}-%{release} +%package libs +Summary: Libraries for %{name} +Requires: %{name} = %{version}-%{release} -%description libs +%description libs This package contains the libraries for %{name} - -%package -n %{py_prefix}-%{name} +%package amdgpu-plugin +Summary: AMD GPU plugin for %{name} +Requires: %{name} = %{version}-%{release} + +%description amdgpu-plugin +This package contains the AMD GPU plugin for %{name} + +%package cuda-plugin +Summary: CUDA plugin for %{name} +Requires: %{name} = %{version}-%{release} + +%description cuda-plugin +This package contains the CUDA plugin for %{name} + +%package -n %{py_prefix}-%{name} %{?python_provide:%python_provide %{py_prefix}-%{name}} -Summary: Python bindings for %{name} -%if 0%{?rhel} && 0%{?rhel} <= 7 -Requires: protobuf-python -Requires: %{name} = %{version}-%{release} %{py_prefix}-ipaddr -%else -Requires: protobuf-%{py_prefix} -Obsoletes: python2-criu < 3.10-1 -%endif - +Summary: Python bindings for %{name} +Requires: %{py_prefix}-protobuf + %description -n %{py_prefix}-%{name} %{py_prefix}-%{name} contains Python bindings for %{name}. - -%package -n crit -Summary: CRIU image tool -Requires: %{py_prefix}-%{name} = %{version}-%{release} - + +%package -n crit +Summary: CRIU image tool +Requires: %{py_prefix}-%{name} = %{version}-%{release} + %description -n crit crit is a tool designed to decode CRIU binary dump files and show their content in human-readable form. - + +%package -n criu-ns +Summary: Tool to run CRIU in different namespaces +Requires: %{name} = %{version}-%{release} + +%description -n criu-ns +The purpose of the criu-ns wrapper script is to enable restoring a process +tree that might require a specific PID that is already used on the system. +This script can help to workaround the so called "PID mismatch" problem. %prep -%setup -q -%patch 0 -p1 - -%if 0%{?rhel} && 0%{?rhel} <= 7 -%patch 100 -p1 -%endif +%autosetup -p1 %build -# A small part of the build makes direct calls to "ld" instead of GCC and "LDFLAGS-MASK" -# is used to cut out parts of "LDFLAGS", which "ld" doesn't understand. -# "LDFLAGS-MASK" didn't expect the "-specs" argument Mariner contains -# in the hardening flags and all direct calls to "ld" were crashing. -sed -i -E "s/(LDFLAGS-MASK.*:= -Wl,%)/\1 -specs=%/" scripts/nmk/scripts/build.mk -CFLAGS=`echo "$CFLAGS" | sed -e 's,-fstack-protector\S*,,g'` %make_build V=1 WERROR=0 RUNDIR=/run/criu PYTHON=%{py_binary} - +# This package calls LD directly without specifying the LTO plugins. Until +# that is fixed, disable LTO. 
+%define _lto_cflags %{nil} + +# %{?_smp_mflags} does not work +# -fstack-protector breaks build +LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now ' +export LDFLAGS +make V=1 WERROR=0 PREFIX=%{_prefix} RUNDIR=/run/criu PYTHON=%{py_binary} PLUGINDIR=%{_libdir}/criu NETWORK_LOCK_DEFAULT=NETWORK_LOCK_NFTABLES +make V=1 WERROR=0 PREFIX=%{_prefix} PLUGINDIR=%{_libdir}/criu amdgpu_plugin make docs V=1 - %install -make install-criu DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} -make install-lib DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} PYTHON=%{py_binary} - -# only install documentation on Fedora as it requires asciidoc, -# which is not available on RHEL7 +sed -e "s,--upgrade --ignore-installed,--no-index --no-deps -v --no-build-isolation,g" -i lib/Makefile -i crit/Makefile +make install-criu DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} BINDIR=%{_bindir} SBINDIR=%{_sbindir} +make install-lib DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} PYTHON=%{py_binary} PIPFLAGS="--no-build-isolation --no-index --no-deps --progress-bar off --upgrade --ignore-installed" +make install-amdgpu_plugin DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} PLUGINDIR=%{_libdir}/criu +make install-cuda_plugin DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} PLUGINDIR=%{_libdir}/criu +make install-crit DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} BINDIR=%{_bindir} SBINDIR=%{_sbindir} PYTHON=%{py_binary} PIPFLAGS="--no-build-isolation --no-index --no-deps --progress-bar off --upgrade --ignore-installed" make install-man DESTDIR=$RPM_BUILD_ROOT PREFIX=%{_prefix} LIBDIR=%{_libdir} - - - - - - +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/compel.1 + mkdir -p %{buildroot}%{_tmpfilesdir} -install -m 0644 %{SOURCE4} %{buildroot}%{_tmpfilesdir}/%{name}.conf +install -m 0644 %{SOURCE5} %{buildroot}%{_tmpfilesdir}/%{name}.conf install -d -m 0755 %{buildroot}/run/%{name}/ -%if 0%{?rhel} -# remove devel and libs packages -rm -rf $RPM_BUILD_ROOT%{_includedir}/criu -rm $RPM_BUILD_ROOT%{_libdir}/*.so* -rm -rf $RPM_BUILD_ROOT%{_libdir}/pkgconfig -rm -rf $RPM_BUILD_ROOT%{_libexecdir}/%{name} -%endif - -# remove static lib -rm -f $RPM_BUILD_ROOT%{_libdir}/libcriu.a - %files +%license COPYING +%doc README.md %{_sbindir}/%{name} %doc %{_mandir}/man8/criu.8* -%doc %{_mandir}/man1/compel.1* - %{_libexecdir}/%{name} - %dir /run/%{name} %{_tmpfilesdir}/%{name}.conf -%doc README.md COPYING %files devel @@ -170,23 +162,37 @@ rm -f $RPM_BUILD_ROOT%{_libdir}/libcriu.a %files libs %{_libdir}/*.so.* - - +%{_libdir}/*.a + +%files amdgpu-plugin +%{_libdir}/%{name}/amdgpu_plugin.so +%doc %{_mandir}/man1/criu-amdgpu-plugin.1* + +%files cuda-plugin +%{_libdir}/%{name}/cuda_plugin.so +%doc plugins/cuda/README.md + %files -n %{py_prefix}-%{name} -%if 0%{?rhel} && 0%{?rhel} <= 7 -%{python2_sitelib}/pycriu/* -%{python2_sitelib}/*egg-info -%else -%{python3_sitelib}/pycriu/* -%{python3_sitelib}/*egg-info -%endif - +%{python3_sitelib}/pycriu* + %files -n crit %{_bindir}/crit +%{python3_sitelib}/crit-%{version}.dist-info/ +%{python3_sitelib}/crit %doc %{_mandir}/man1/crit.1* + +%files -n criu-ns +%{_sbindir}/criu-ns +%doc %{_mandir}/man1/criu-ns.1* +%post +%tmpfiles_create %{name}.conf %changelog +* Fri Nov 07 2025 Sandeep Karambelkar - 4.1.1-1 +- Upgrade to 4.1.1 ref from Fedora 42 +- License verified + * Tue Sep 21 2021 Pawel Winogrodzki - 3.15-3 - Added a patch to fix build errors by unifying struct names across the source code. 
- Removed the "-fstack-protector" flag breaking the build. diff --git a/SPECS-EXTENDED/crun/crun.signatures.json b/SPECS-EXTENDED/crun/crun.signatures.json new file mode 100644 index 00000000000..a3a523b62ed --- /dev/null +++ b/SPECS-EXTENDED/crun/crun.signatures.json @@ -0,0 +1,5 @@ +{ + "Signatures": { + "crun-1.24.tar.gz": "90b6e33a6400ba5355eed6efc46a4c890e48e6c96d99d2bbc7fe92803bdfce52" + } +} \ No newline at end of file diff --git a/SPECS-EXTENDED/crun/crun.spec b/SPECS-EXTENDED/crun/crun.spec new file mode 100644 index 00000000000..6b3d6742369 --- /dev/null +++ b/SPECS-EXTENDED/crun/crun.spec @@ -0,0 +1,192 @@ +%global krun_opts %{nil} +%global wasmedge_opts %{nil} +%global yajl_opts %{nil} + +%if %{defined copr_username} +%define copr_build 1 +%endif + +# krun and wasm support not yet provided in azurelinux +%global yajl_opts --enable-embedded-yajl + +Summary: OCI runtime written in C +Name: crun +Version: 1.24 +Release: 3%{?dist} +Vendor: Microsoft Corporation +Distribution: Azure Linux +URL: https://github.com/containers/%{name} +Source0: %{url}/releases/download/%{version}/%{name}-%{version}.tar.gz +License: GPL-2.0-only +BuildRequires: autoconf +BuildRequires: automake +BuildRequires: gcc +BuildRequires: git-core +BuildRequires: gperf +BuildRequires: libcap-devel + +%if %{defined krun_support} +BuildRequires: libkrun-devel +%endif + +BuildRequires: systemd-devel + +%if %{defined system_yajl} +BuildRequires: yajl-devel +%endif + +BuildRequires: libseccomp-devel +BuildRequires: python3-libmount +BuildRequires: libtool +BuildRequires: protobuf-c-devel +BuildRequires: criu-devel >= 3.17.1-2 +Recommends: criu >= 3.17.1 +Recommends: criu-libs + +%if %{defined wasmedge_support} +BuildRequires: wasmedge-devel +%endif + +BuildRequires: python +BuildRequires: glibc-static >= 2.38-18%{?dist} +Provides: oci-runtime + +%description +%{name} is a OCI runtime + +%if %{defined krun_support} +%package krun +Summary: %{name} with libkrun support +Requires: libkrun +Requires: %{name} = %{?epoch:%{epoch}:}%{version}-%{release} +Provides: krun = %{?epoch:%{epoch}:}%{version}-%{release} + +%description krun +krun is a symlink to the %{name} binary, with libkrun as an additional dependency. +%endif + +%if %{defined wasm_support} +%package wasm +Summary: %{name} with wasm support +Requires: %{name} = %{?epoch:%{epoch}:}%{version}-%{release} +# wasm packages are not present on RHEL yet and are currently a PITA to test +# Best to only include wasmedge as weak dep on rhel +%if %{defined fedora} +Requires: wasm-library +%endif +Recommends: wasmedge + +%description wasm +%{name}-wasm is a symlink to the %{name} binary, with wasm as an additional dependency. 
+%endif + +%prep +%autosetup -p1 -n %{name}-%{version} + +%build +./autogen.sh +./configure --disable-silent-rules %{krun_opts} %{wasmedge_opts} %{yajl_opts} +%make_build + +%install +%make_install prefix=%{_prefix} +rm -rf %{buildroot}%{_prefix}/lib* + +# Placeholder check to silence rpmlint +%check + +%files +%license COPYING +%{_bindir}/%{name} +%{_mandir}/man1/%{name}.1.gz + +%if %{defined krun_support} +%files krun +%license COPYING +%{_bindir}/krun +%{_mandir}/man1/krun.1.gz +%endif + +%if %{defined wasm_support} +%files wasm +%license COPYING +%{_bindir}/%{name}-wasm +%endif + +%changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.24-3 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.24-2 +- Bump to rebuild with updated glibc + +* Fri Nov 07 2025 Sandeep Karambelkar - 1.24-1 +- Initial Azure Linux import from Fedora 42 (license: MIT). +- Modified for building in azurelinux +- License verified + +* Thu Jul 31 2025 Packit - 1.23.1-1 +- Update to 1.23.1 upstream release + +* Thu Jul 24 2025 Packit - 1.23-1 +- Update to 1.23 upstream release + +* Fri Jun 27 2025 Packit - 1.22-1 +- Update to 1.22 upstream release + +* Fri Mar 28 2025 Packit - 1.21-1 +- Update to 1.21 upstream release + +* Mon Feb 10 2025 Lokesh Mandvekar - 1.20-2 +- fix gating config + +* Wed Feb 05 2025 Packit - 1.20-1 +- Update to 1.20 upstream release + +* Thu Jan 16 2025 Fedora Release Engineering - 1.19.1-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_42_Mass_Rebuild + +* Wed Jan 15 2025 Lokesh Mandvekar - 1.19.1-3 +- TMT: use prepare conditionals + +* Thu Dec 26 2024 Lokesh Mandvekar - 1.19.1-2 +- TMT: sync tests from upstream + +* Tue Dec 17 2024 Packit - 1.19.1-1 +- Update to 1.19.1 upstream release + +* Fri Dec 06 2024 Packit - 1.19-1 +- Update to 1.19 upstream release + +* Thu Oct 31 2024 Packit - 1.18.2-1 +- Update to 1.18.2 upstream release + +* Wed Oct 30 2024 Packit - 1.18.1-1 +- Update to 1.18.1 upstream release + +* Tue Oct 22 2024 Packit - 1.18-1 +- Update to 1.18 upstream release + +* Mon Oct 21 2024 Yaakov Selkowitz - 1.17-3 +- Use embedded yajl in RHEL builds + +* Thu Sep 26 2024 David Abdurachmanov - 1.17-2 +- Disable criu support on riscv64 + +* Tue Sep 10 2024 Lokesh Mandvekar - 1.17-1 +- bump to 1.17 + +* Wed Jul 17 2024 Fedora Release Engineering - 1.15-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild + +* Thu May 02 2024 Packit - 1.15-1 +- Update to 1.15 upstream release + +* Wed Mar 27 2024 Lokesh Mandvekar - 1.14.4-5 +- wasmedge should stay enabled for official fedora + +* Wed Mar 27 2024 Lokesh Mandvekar - 1.14.4-4 +- remove eln macro + +* Tue Mar 05 2024 Giuseppe Scrivano - 1.14.4-3 +- Revert "Add riscv64 support." 
diff --git a/SPECS-EXTENDED/dyninst/dyninst.spec b/SPECS-EXTENDED/dyninst/dyninst.spec index 93e1f4b4f4f..8d6ff8de0ff 100644 --- a/SPECS-EXTENDED/dyninst/dyninst.spec +++ b/SPECS-EXTENDED/dyninst/dyninst.spec @@ -1,7 +1,7 @@ Summary: An API for Run-time Code Generation License: LGPLv2+ Name: dyninst -Release: 26%{?dist} +Release: 28%{?dist} Vendor: Microsoft Corporation Distribution: Azure Linux URL: http://www.dyninst.org @@ -31,7 +31,7 @@ BuildRequires: tbb tbb-devel # Extra requires just for the testsuite BuildRequires: gcc-gfortran libstdc++-static libxml2-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} # Testsuite files should not provide/require anything %{?filter_setup: @@ -194,6 +194,12 @@ echo "%{_libdir}/dyninst" > %{buildroot}/etc/ld.so.conf.d/%{name}-%{_arch}.conf %attr(644,root,root) %{_libdir}/dyninst/testsuite/*.a %changelog +* Thu Jan 22 2026 Kanishk Bansal - 10.1.0-28 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 10.1.0-27 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 10.1.0-26 - Bump to rebuild with updated glibc diff --git a/SPECS-EXTENDED/ed25519-java/0001-EdDSAEngine.initVerify-Handle-any-non-EdDSAPublicKey.patch b/SPECS-EXTENDED/ed25519-java/0001-EdDSAEngine.initVerify-Handle-any-non-EdDSAPublicKey.patch new file mode 100644 index 00000000000..6d7079ea1e9 --- /dev/null +++ b/SPECS-EXTENDED/ed25519-java/0001-EdDSAEngine.initVerify-Handle-any-non-EdDSAPublicKey.patch @@ -0,0 +1,37 @@ +From c5629faa3e1880cc71da506263f224bc818fe827 Mon Sep 17 00:00:00 2001 +From: Jack Grigg +Date: Sun, 27 Jan 2019 23:27:00 +0000 +Subject: [PATCH 1/2] EdDSAEngine.initVerify(): Handle any non-EdDSAPublicKey + X.509-encoded pubkey + +sun.security.x509.X509Key is a JDK-internal API, and should not be used +directly. Instead of looking for an instance of that class, we check the +primary encoding format of the PublicKey, and proceed if it is "X.509". +--- + src/net/i2p/crypto/eddsa/EdDSAEngine.java | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/src/net/i2p/crypto/eddsa/EdDSAEngine.java b/src/net/i2p/crypto/eddsa/EdDSAEngine.java +index 1f0ba6d..6b25410 100644 +--- a/src/net/i2p/crypto/eddsa/EdDSAEngine.java ++++ b/src/net/i2p/crypto/eddsa/EdDSAEngine.java +@@ -29,7 +29,6 @@ import java.util.Arrays; + import net.i2p.crypto.eddsa.math.Curve; + import net.i2p.crypto.eddsa.math.GroupElement; + import net.i2p.crypto.eddsa.math.ScalarOps; +-import sun.security.x509.X509Key; + + /** + * Signing and verification for EdDSA. +@@ -157,7 +156,7 @@ public final class EdDSAEngine extends Signature { + } + } else if (!key.getParams().getHashAlgorithm().equals(digest.getAlgorithm())) + throw new InvalidKeyException("Key hash algorithm does not match chosen digest"); +- } else if (publicKey instanceof X509Key) { ++ } else if (publicKey.getFormat().equals("X.509")) { + // X509Certificate will sometimes contain an X509Key rather than the EdDSAPublicKey itself; the contained + // key is valid but needs to be instanced as an EdDSAPublicKey before it can be used. 
+ EdDSAPublicKey parsedPublicKey; +-- +2.33.1 + diff --git a/SPECS-EXTENDED/ed25519-java/0002-Disable-test-that-relies-on-internal-sun-JDK-classes.patch b/SPECS-EXTENDED/ed25519-java/0002-Disable-test-that-relies-on-internal-sun-JDK-classes.patch new file mode 100644 index 00000000000..d8128286dea --- /dev/null +++ b/SPECS-EXTENDED/ed25519-java/0002-Disable-test-that-relies-on-internal-sun-JDK-classes.patch @@ -0,0 +1,46 @@ +From 1ea7fb5ed949d8a458fda40b186868b7cffbb271 Mon Sep 17 00:00:00 2001 +From: Mat Booth +Date: Wed, 1 Dec 2021 09:35:10 +0000 +Subject: [PATCH 2/2] Disable test that relies on internal sun JDK classes + +--- + test/net/i2p/crypto/eddsa/EdDSAEngineTest.java | 18 ------------------ + 1 file changed, 18 deletions(-) + +diff --git a/test/net/i2p/crypto/eddsa/EdDSAEngineTest.java b/test/net/i2p/crypto/eddsa/EdDSAEngineTest.java +index 2ed793b..adc46fd 100644 +--- a/test/net/i2p/crypto/eddsa/EdDSAEngineTest.java ++++ b/test/net/i2p/crypto/eddsa/EdDSAEngineTest.java +@@ -31,8 +31,6 @@ import net.i2p.crypto.eddsa.spec.EdDSAPublicKeySpec; + import org.junit.Rule; + import org.junit.Test; + import org.junit.rules.ExpectedException; +-import sun.security.util.DerValue; +-import sun.security.x509.X509Key; + + /** + * @author str4d +@@ -217,20 +215,4 @@ public class EdDSAEngineTest { + assertThat("verifyOneShot() failed", sgr.verifyOneShot(TEST_MSG, TEST_MSG_SIG), is(true)); + } + +- @Test +- public void testVerifyX509PublicKeyInfo() throws Exception { +- EdDSAParameterSpec spec = EdDSANamedCurveTable.getByName("Ed25519"); +- Signature sgr = new EdDSAEngine(MessageDigest.getInstance(spec.getHashAlgorithm())); +- for (Ed25519TestVectors.TestTuple testCase : Ed25519TestVectors.testCases) { +- EdDSAPublicKeySpec pubKey = new EdDSAPublicKeySpec(testCase.pk, spec); +- PublicKey vKey = new EdDSAPublicKey(pubKey); +- PublicKey x509Key = X509Key.parse(new DerValue(vKey.getEncoded())); +- sgr.initVerify(x509Key); +- +- sgr.update(testCase.message); +- +- assertThat("Test case " + testCase.caseNum + " failed", +- sgr.verify(testCase.sig), is(true)); +- } +- } + } +-- +2.33.1 + diff --git a/SPECS-EXTENDED/ed25519-java/ed25519-java-CVE-2020-36843.patch b/SPECS-EXTENDED/ed25519-java/ed25519-java-CVE-2020-36843.patch new file mode 100644 index 00000000000..324025e3fcf --- /dev/null +++ b/SPECS-EXTENDED/ed25519-java/ed25519-java-CVE-2020-36843.patch @@ -0,0 +1,39 @@ +--- ed25519-java-0.3.0/src/net/i2p/crypto/eddsa/EdDSAEngine.java 2025-03-14 14:47:43.404137953 +0100 ++++ ed25519-java-0.3.0/src/net/i2p/crypto/eddsa/EdDSAEngine.java 2025-03-14 14:50:31.859888550 +0100 +@@ -12,6 +12,7 @@ + package net.i2p.crypto.eddsa; + + import java.io.ByteArrayOutputStream; ++import java.math.BigInteger; + import java.nio.ByteBuffer; + import java.security.InvalidAlgorithmParameterException; + import java.security.InvalidKeyException; +@@ -29,6 +30,7 @@ + import net.i2p.crypto.eddsa.math.Curve; + import net.i2p.crypto.eddsa.math.GroupElement; + import net.i2p.crypto.eddsa.math.ScalarOps; ++import net.i2p.crypto.eddsa.math.bigint.BigIntegerLittleEndianEncoding; + + /** + * Signing and verification for EdDSA. 
+@@ -69,6 +71,8 @@ + public final class EdDSAEngine extends Signature { + public static final String SIGNATURE_ALGORITHM = "NONEwithEdDSA"; + ++ private static final BigInteger ORDER = new BigInteger("2").pow(252).add(new BigInteger("27742317777372353535851937790883648493")); ++ + private MessageDigest digest; + private ByteArrayOutputStream baos; + private EdDSAKey key; +@@ -306,6 +310,11 @@ + h = key.getParams().getScalarOps().reduce(h); + + byte[] Sbyte = Arrays.copyOfRange(sigBytes, b/8, b/4); ++ // RFC 8032 ++ BigInteger Sbigint = (new BigIntegerLittleEndianEncoding()).toBigInteger(Sbyte); ++ if (Sbigint.compareTo(ORDER) >= 0) ++ return false; ++ + // R = SB - H(Rbar,Abar,M)A + GroupElement R = key.getParams().getB().doubleScalarMultiplyVariableTime( + ((EdDSAPublicKey) key).getNegativeA(), h, Sbyte); diff --git a/SPECS-EXTENDED/ed25519-java/ed25519-java-build.xml b/SPECS-EXTENDED/ed25519-java/ed25519-java-build.xml new file mode 100644 index 00000000000..3a936462a6e --- /dev/null +++ b/SPECS-EXTENDED/ed25519-java/ed25519-java-build.xml @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/ed25519-java/ed25519-java.signatures.json b/SPECS-EXTENDED/ed25519-java/ed25519-java.signatures.json new file mode 100644 index 00000000000..7272e958992 --- /dev/null +++ b/SPECS-EXTENDED/ed25519-java/ed25519-java.signatures.json @@ -0,0 +1,6 @@ +{ + "Signatures": { + "ed25519-java-0.3.0.tar.gz": "a89a2331afb1db0bd06ce029c731db2d24684cebf111e796b51deb6e2a20a310", + "ed25519-java-build.xml": "2eb416752ef86be27a06581dfb60c6c4693d530ffa7f8e12f28112b40d65fab7" + } +} \ No newline at end of file diff --git a/SPECS-EXTENDED/ed25519-java/ed25519-java.spec b/SPECS-EXTENDED/ed25519-java/ed25519-java.spec new file mode 100644 index 00000000000..ff1e7410bd3 --- /dev/null +++ b/SPECS-EXTENDED/ed25519-java/ed25519-java.spec @@ -0,0 +1,125 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux +# +# spec file for package ed25519-java +# +# Copyright (c) 2025 SUSE LLC +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. + +# Please submit bugfixes or comments via https://bugs.opensuse.org/ +# + + +%global artifactId eddsa +Name: ed25519-java +Version: 0.3.0 +Release: 1%{?dist} +Summary: Implementation of EdDSA (Ed25519) in Java +License: CC0-1.0 +URL: https://github.com/str4d/ed25519-java +Source0: https://github.com/str4d/ed25519-java/archive/v%{version}/%{name}-%{version}.tar.gz +Source1: %{name}-build.xml +Patch0: 0001-EdDSAEngine.initVerify-Handle-any-non-EdDSAPublicKey.patch +Patch1: 0002-Disable-test-that-relies-on-internal-sun-JDK-classes.patch +Patch2: %{name}-CVE-2020-36843.patch +BuildRequires: ant +BuildRequires: fdupes +BuildRequires: java-devel >= 1.8 +BuildRequires: javapackages-local-bootstrap >= 6 +BuildRequires: javapackages-tools +BuildArch: noarch + +%description +This is an implementation of EdDSA in Java. 
Structurally, it +is based on the ref10 implementation in SUPERCOP (see +http://ed25519.cr.yp.to/software.html). + +There are two internal implementations: + +* A port of the radix-2^51 operations in ref10 + - fast and constant-time, but only useful for Ed25519. +* A generic version using BigIntegers for calculation + - a bit slower and not constant-time, but compatible + with any EdDSA parameter specification. + +%package javadoc +Summary: Javadoc for %{name} + +%description javadoc +This package contains javadoc for %{name}. + +%prep +%setup -q +cp %{SOURCE1} build.xml +%patch -P 0 -p1 +%patch -P 1 -p1 +%patch -P 2 -p1 + +%build +ant jar javadoc + +%install + +# jar +install -dm 0755 %{buildroot}%{_javadir} +install -pm 0644 target/%{artifactId}-%{version}.jar %{buildroot}%{_javadir}/%{artifactId}.jar +ln -sf %{_javadir}/%{artifactId}.jar %{buildroot}%{_javadir}/%{name}.jar + +# pom +install -dm 0755 %{buildroot}%{_mavenpomdir} +install -pm 0644 pom.xml %{buildroot}%{_mavenpomdir}/%{artifactId}.pom +%add_maven_depmap %{artifactId}.pom %{artifactId}.jar + +# javadoc +install -dm 0755 %{buildroot}%{_javadocdir}/%{name} +cp -r target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/ +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . +%fdupes -s %{buildroot}%{_javadocdir} + +%files -f .mfiles +%{_javadir}/%{name}.jar +%license LICENSE.txt +%doc README.md + + +%files javadoc +%license LICENSE.txt +%license LICENSE ADDITIONAL_LICENSE_INFO +%{_javadocdir}/%{name} + + +%changelog +* Tue Dec 16 2025 BinduSri Adabala - 0.3.0-1 +- Initial CBL-Mariner import from openSUSE Tumbleweed (license: same as "License" tag). +- License verified + +* Fri Mar 14 2025 Fridrich Strba +- Added patch: + * ed25519-java-CVE-2020-36843.patch + + backport commit https://github.com/i2p/i2p.i2p/commit/ + /d7d1dcb5399c61cf2916ccc45aa25b0209c88712 + + Fixes bsc#1239551, CVE-2020-36843: no check performed on + scalar to avoid signature malleability +* Wed Oct 30 2024 Fridrich Strba +- Rewrite the build using ant +* Wed Feb 21 2024 Gus Kenion +- Use %%patch -P N instead of deprecated %%patchN. +* Mon Sep 11 2023 Fridrich Strba +- Reproducible builds: use SOURCE_DATE_EPOCH for timestamp +* Tue Mar 22 2022 Fridrich Strba +- Build with source and target levels 8 +- Added patches: + * 0001-EdDSAEngine.initVerify-Handle-any-non-EdDSAPublicKey.patch + * 0002-Disable-test-that-relies-on-internal-sun-JDK-classes.patch + + Remove use of internal sun JDK classes +* Mon Jun 29 2020 Fridrich Strba +- Initial packaging of ed25519 0.3.0 diff --git a/SPECS-EXTENDED/freeradius/freeradius.spec b/SPECS-EXTENDED/freeradius/freeradius.spec index 49d57874e20..62be8ffdb52 100644 --- a/SPECS-EXTENDED/freeradius/freeradius.spec +++ b/SPECS-EXTENDED/freeradius/freeradius.spec @@ -6,7 +6,7 @@ Summary: High-performance and highly configurable free RADIUS server Name: freeradius Version: 3.2.5 -Release: 3%{?dist} +Release: 4%{?dist} Vendor: Microsoft Corporation Distribution: Azure Linux License: GPL-2.0-or-later AND LGPL-2.0-or-later @@ -869,6 +869,9 @@ EOF %attr(640,root,radiusd) %config(noreplace) /etc/raddb/mods-available/rest %changelog +* Tue Jan 06 2026 Pawel Winogrodzki - 3.2.5-4 +- Bumping release to rebuild with new 'net-snmp' libs. + * Fri Jan 31 2025 Jyoti kanase - 3.2.5-3 - Initial Azure Linux import from Fedora 41 (license: MIT). - License verified. 
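For reviewers of the ed25519-java CVE-2020-36843 patch above: the fix is the scalar range check required by RFC 8032. The patch defines ORDER = 2^252 + 27742317777372353535851937790883648493 (the Ed25519 group order L) and makes verification return false whenever the little-endian-decoded S half of the signature satisfies S >= L, so only signatures with 0 <= S < L are accepted. Without that check, a third party could take a valid signature (R, S) and resubmit (R, S + L): the sum still fits in the 32-byte encoding and verifies identically, because L*B is the group identity, which is exactly the malleability behind the CVE.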
diff --git a/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.signatures.json b/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.signatures.json index bfd2121d551..92c4ffb9609 100644 --- a/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.signatures.json +++ b/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "gnome-desktop-testing-v2018.1.tar.gz": "da0f7a434370fd4690c61aaedddacd7a15001e5e4ec71fadc9c00dd50d0ab5cf" + "gnome-desktop-testing-v2021.1.tar.gz": "d9037b3029452e54d4be478a9c39495512e737f85325c71ab92fc344c995620e" } } diff --git a/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.spec b/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.spec index adb85100dd3..afcebcd7b92 100644 --- a/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.spec +++ b/SPECS-EXTENDED/gnome-desktop-testing/gnome-desktop-testing.spec @@ -1,22 +1,18 @@ Vendor: Microsoft Corporation Distribution: Azure Linux Name: gnome-desktop-testing -Version: 2018.1 -Release: 4%{?dist} +Version: 2021.1 +Release: 1%{?dist} Summary: GNOME test runner for installed tests License: LGPLv2+ -URL: https://live.gnome.org/Initiatives/GnomeGoals/InstalledTests +URL: https://gitlab.gnome.org/GNOME/gnome-desktop-testing Source0: https://gitlab.gnome.org/GNOME/%{name}/-/archive/v%{version}/%{name}-v%{version}.tar.gz BuildRequires: pkgconfig(gio-unix-2.0) BuildRequires: systemd-devel -BuildRequires: pkgconfig(libgsystem) BuildRequires: git automake autoconf libtool -# https://gitlab.gnome.org/GNOME/gnome-desktop-testing/merge_requests/1 -Patch0: 0001-Don-t-crash-on-unknown-command-line-options.patch - %description gnome-desktop-testing-runner is a basic runner for tests that are installed in /usr/share/installed-tests. For more information, see @@ -34,11 +30,17 @@ make %{?_smp_mflags} make install DESTDIR=$RPM_BUILD_ROOT %files -%doc COPYING README +%license COPYING %{_bindir}/gnome-desktop-testing-runner %{_bindir}/ginsttest-runner +%{_mandir}/man1/ginsttest-runner.1.gz +%{_mandir}/man1/gnome-desktop-testing-runner.1.gz %changelog +* Wed Dec 24 2025 Aditya Singh - 2021.1-1 +- Upgrade to version 2021.1 +- License verified + * Tue Sep 19 2023 Jon Slobodzian - 2018.1-4 - Fix build issue for systemd/systemd-bootstrap confusion - License verified diff --git a/SPECS-EXTENDED/hawtjni-runtime/hawtjni-runtime.spec b/SPECS-EXTENDED/hawtjni-runtime/hawtjni-runtime.spec index 82e8095fee0..c508e333eec 100644 --- a/SPECS-EXTENDED/hawtjni-runtime/hawtjni-runtime.spec +++ b/SPECS-EXTENDED/hawtjni-runtime/hawtjni-runtime.spec @@ -20,13 +20,14 @@ Distribution: Azure Linux %global debug_package %{nil} Name: hawtjni-runtime Version: 1.17 -Release: 2%{?dist} +Release: 3%{?dist} Summary: HawtJNI Runtime License: Apache-2.0 AND EPL-1.0 AND BSD-3-Clause URL: https://github.com/fusesource/hawtjni Source0: https://github.com/fusesource/hawtjni/archive/hawtjni-project-%{version}.tar.gz +Patch0: use-commons-lang3.patch BuildRequires: apache-commons-cli -BuildRequires: apache-commons-lang +BuildRequires: apache-commons-lang3 BuildRequires: fdupes BuildRequires: java-devel BuildRequires: javapackages-local-bootstrap @@ -48,7 +49,7 @@ This package contains the API documentation for hawtjni. 
Summary: Code generator that produces the JNI code Requires: %{name} = %{version} Requires: apache-commons-cli -Requires: apache-commons-lang +Requires: apache-commons-lang3 Requires: javapackages-tools Requires: objectweb-asm >= 5 Requires: xbean @@ -62,6 +63,7 @@ JNI code which powers the eclipse platform. %prep %setup -q -n hawtjni-hawtjni-project-%{version} +%patch -P 0 -p1 %pom_disable_module hawtjni-example %pom_disable_module hawtjni-maven-plugin @@ -69,7 +71,7 @@ JNI code which powers the eclipse platform. %pom_remove_plugin -r :maven-eclipse-plugin # this dependency seems to be missing -%pom_add_dep commons-lang:commons-lang hawtjni-generator +%pom_add_dep commons-lang:commons-lang3 hawtjni-generator for mod in runtime generator; do %pom_remove_parent hawtjni-${mod} @@ -80,19 +82,19 @@ done %build mkdir -p hawtjni-runtime/build/classes -javac -d hawtjni-runtime/build/classes -source 6 -target 6 \ +javac -d hawtjni-runtime/build/classes -source 8 -target 8 \ $(find hawtjni-runtime/src/main/java/ -name *.java | xargs) jar cf hawtjni-runtime.jar -C hawtjni-runtime/build/classes . mkdir -p hawtjni-generator/build/classes javac -d hawtjni-generator/build/classes \ - -source 6 -target 6 \ - -cp $(build-classpath commons-cli commons-lang objectweb-asm/asm objectweb-asm/asm-commons xbean/xbean-finder xbean/xbean-asm-util):hawtjni-runtime.jar \ + -source 8 -target 8 \ + -cp $(build-classpath commons-cli commons-lang3 objectweb-asm/asm objectweb-asm/asm-commons xbean/xbean-finder xbean/xbean-asm-util):hawtjni-runtime.jar \ $(find hawtjni-generator/src/main/java/ -name *.java | xargs) jar cf hawtjni-generator.jar -C hawtjni-generator/build/classes . jar uf hawtjni-generator.jar -C hawtjni-generator/src/main/resources . mkdir -p hawtjni-runtime/build/apidoc -javadoc -d hawtjni-runtime/build/apidoc -source 6 \ - -classpath $(build-classpath commons-cli commons-lang objectweb-asm/asm objectweb-asm/asm-commons xbean/xbean-finder xbean/xbean-asm-util) \ +javadoc -d hawtjni-runtime/build/apidoc -source 8 \ + -classpath $(build-classpath commons-cli commons-lang3 objectweb-asm/asm objectweb-asm/asm-commons xbean/xbean-finder xbean/xbean-asm-util) \ $(find hawtjni-runtime/src/main/java/ -name *.java && \ find hawtjni-generator/src/main/java/ -name *.java| xargs) @@ -113,9 +115,18 @@ install -m 0644 hawtjni-generator/pom.xml %{buildroot}%{_mavenpomdir}/hawtjni/ha # javadoc install -dm 755 %{buildroot}%{_javadocdir}/hawtjni cp -pr hawtjni-runtime/build/apidoc/* %{buildroot}%{_javadocdir}/hawtjni/ +# to remove license warnings +install -Dm 0644 hawtjni-runtime/build/apidoc/legal/LICENSE \ + %{buildroot}%{_licensedir}/hawtjni/LICENSE.javadoc + +install -Dm 0644 hawtjni-runtime/build/apidoc/legal/ADDITIONAL_LICENSE_INFO \ + %{buildroot}%{_licensedir}/hawtjni/ADDITIONAL_LICENSE_INFO.javadoc + +rm -rf %{buildroot}%{_javadocdir}/hawtjni/legal + %fdupes -s %{buildroot}%{_javadocdir}/hawtjni/ -%{jpackage_script org.fusesource.hawtjni.generator.HawtJNI "" "" commons-cli:commons-lang:objectweb-asm/asm:objectweb-asm/asm-commons:xbean/xbean-finder:xbean/xbean-asm-util:hawtjni/hawtjni-runtime:hawtjni/hawtjni-generator hawtjni-generator true} +%{jpackage_script org.fusesource.hawtjni.generator.HawtJNI "" "" commons-cli:commons-lang3:objectweb-asm/asm:objectweb-asm/asm-commons:xbean/xbean-finder:xbean/xbean-asm-util:hawtjni/hawtjni-runtime:hawtjni/hawtjni-generator hawtjni-generator true} %files -f .mfiles %license license.txt @@ -127,8 +138,13 @@ cp -pr hawtjni-runtime/build/apidoc/* 
%{buildroot}%{_javadocdir}/hawtjni/ %files -n hawtjni-javadoc %{_javadocdir}/hawtjni %license license.txt +%license %{_licensedir}/hawtjni/* %changelog +* Wed Dec 24 2025 Aninda Pradhan - 1.17-3 +- Updated dependencies to use commons-lang3 +- License verified + * Thu Oct 14 2021 Pawel Winogrodzki - 1.17-2 - Converting the 'Release' tag to the '[number].[distribution]' format. diff --git a/SPECS-EXTENDED/hawtjni-runtime/use-commons-lang3.patch b/SPECS-EXTENDED/hawtjni-runtime/use-commons-lang3.patch new file mode 100644 index 00000000000..8edcb7b88b7 --- /dev/null +++ b/SPECS-EXTENDED/hawtjni-runtime/use-commons-lang3.patch @@ -0,0 +1,11 @@ +--- a/hawtjni-generator/src/main/java/org/fusesource/hawtjni/generator/model/ReflectField.java ++++ b/hawtjni-generator/src/main/java/org/fusesource/hawtjni/generator/model/ReflectField.java +@@ -14,7 +14,7 @@ + import java.util.Arrays; + import java.util.HashSet; + +-import org.apache.commons.lang.StringUtils; ++import org.apache.commons.lang3.StringUtils; + import org.fusesource.hawtjni.runtime.FieldFlag; + import org.fusesource.hawtjni.runtime.JniField; + import org.fusesource.hawtjni.runtime.T32; diff --git a/SPECS-EXTENDED/highlight/highlight.signatures.json b/SPECS-EXTENDED/highlight/highlight.signatures.json index 8359783c974..fbd474ad12a 100644 --- a/SPECS-EXTENDED/highlight/highlight.signatures.json +++ b/SPECS-EXTENDED/highlight/highlight.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "highlight-3.54.tar.bz2": "8a50a85e94061b53085c6ad8cf110039217dbdd411ab846f9ff934bec7ecd6d0" + "highlight-4.18.tar.bz2": "1cd3e273e1e4fdc5c0d23c0a9055dd0efe196ab04b43ef5da39ba0df516a8498" } } diff --git a/SPECS-EXTENDED/highlight/highlight.spec b/SPECS-EXTENDED/highlight/highlight.spec index d7a0e2c5d48..253c4b48383 100644 --- a/SPECS-EXTENDED/highlight/highlight.spec +++ b/SPECS-EXTENDED/highlight/highlight.spec @@ -2,15 +2,21 @@ Vendor: Microsoft Corporation Distribution: Azure Linux Name: highlight Summary: Universal source code to formatted text converter -Version: 3.54 -Release: 3%{?dist} -License: GPLv3 +Version: 4.18 +Release: 1%{?dist} +License: GPL-3.0-only URL: http://www.andre-simon.de/ -Source0: http://www.andre-simon.de/zip/%{name}-%{version}.tar.bz2 +Source0: https://gitlab.com/saalen/highlight/-/archive/v4.18/highlight-v4.18.tar.bz2#/%{name}-%{version}.tar.bz2 + +%bcond qt 0 + BuildRequires: gcc-c++ +%if %{with qt} BuildRequires: qt5-qtbase-devel +%endif BuildRequires: lua-devel, boost-devel BuildRequires: desktop-file-utils +BuildRequires: make %{?filter_setup: %filter_from_provides /^perl(/d; @@ -27,15 +33,17 @@ Language descriptions are configurable and support regular expressions. The utility offers indentation and reformatting capabilities. It is easily possible to create new language definitions and colour themes. +%if %{with qt} %package gui -Summary: GUI for the hihghlight source code formatter +Summary: GUI for the highlight source code formatter Requires: %{name} = %{version}-%{release} %description gui A Qt-based GUI for the highlight source code formatter source. 
+%endif %prep -%autosetup +%autosetup -n %{name}-v%{version} %build CFLAGS="$CFLAGS -fPIC %{optflags}"; export CFLAGS @@ -44,21 +52,37 @@ LDFLAGS="$LDFLAGS %{?__global_ldflags}"; export LDFLAGS # disabled paralell builds to fix FTBFS on rawhide & highlight 3.52+ #make_build all gui CFLAGS="${CFLAGS}" \ - make all gui CFLAGS="${CFLAGS}" \ + %{__make} all CFLAGS="${CFLAGS}" \ CXXFLAGS="${CXXFLAGS}" \ LDFLAGS="${LDFLAGS}" \ LFLAGS="-Wl,-O1 ${LDFLAGS}" \ PREFIX="%{_prefix}" \ - conf_dir="%{_sysconfdir}/highlight/" \ + conf_dir="%{_sysconfdir}/" + +%if %{with qt} + %{__make} gui CFLAGS="${CFLAGS}" \ + CXXFLAGS="${CXXFLAGS}" \ + LDFLAGS="${LDFLAGS}" \ + LFLAGS="-Wl,-O1 ${LDFLAGS}" \ + PREFIX="%{_prefix}" \ + conf_dir="%{_sysconfdir}/" \ QMAKE="%{_qt5_qmake}" \ QMAKE_STRIP= +%endif %install -%make_install PREFIX="%{_prefix}" conf_dir="%{_sysconfdir}/highlight/" +%make_install PREFIX="%{_prefix}" conf_dir="%{_sysconfdir}/" mkdir -p $RPM_BUILD_ROOT%{_datadir}/applications mkdir -p $RPM_BUILD_ROOT%{_datadir}/pixmaps -make install-gui DESTDIR=$RPM_BUILD_ROOT PREFIX="%{_prefix}" conf_dir="%{_sysconfdir}/highlight/" +%if %{with qt} +make install-gui DESTDIR=$RPM_BUILD_ROOT PREFIX="%{_prefix}" conf_dir="%{_sysconfdir}/" +%endif + +mv extras/langDefs-resources/UNLICENCE . +mv extras/pandoc/LICENSE . +rm extras/themes-resources/base16/LICENSE +rm extras/themes-resources/css-themes/UNLICENCE rm -rf $RPM_BUILD_ROOT%{_docdir}/%{name}/ @@ -71,18 +95,30 @@ desktop-file-install \ %{_datadir}/highlight/ %{_mandir}/man1/highlight.1* %{_mandir}/man5/filetypes.conf.5* +%{_datadir}/bash-completion/completions/highlight +%{_datadir}/fish/vendor_completions.d/highlight.fish +%{_datadir}/zsh/site-functions/_highlight %config(noreplace) %{_sysconfdir}/highlight/ %doc ChangeLog* AUTHORS README* extras/ -%license COPYING +%license COPYING LICENSE UNLICENCE + %if %{with qt} %files gui %{_bindir}/highlight-gui %{_datadir}/applications/highlight.desktop -%{_datadir}/pixmaps/highlight.xpm +%{_datadir}/icons/hicolor/256x256/apps/highlight.png +%else +%exclude %{_datadir}/applications/highlight.desktop +%endif %changelog +* Wed Dec 24 2025 Sumit Jena - 4.18-1 +- Update to version 4.18 +- Disabling GUI based subpackages. +- License Verified + * Fri Oct 15 2021 Pawel Winogrodzki - 3.54-3 - Initial CBL-Mariner import from Fedora 32 (license: MIT). 
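Note on the Qt toggle introduced in the highlight spec above: %bcond qt 0 declares a build conditional named qt that is off by default, so %{with qt} is false, the qt5-qtbase-devel BuildRequires is skipped, and the highlight-gui subpackage is not produced, matching the changelog entry about disabling the GUI subpackage. A minimal sketch of how the conditional behaves; the rpmbuild commands below are illustrative only:

    # Default build: %{with qt} is false, so only the command-line
    # highlight package is produced.
    rpmbuild -ba highlight.spec

    # Hypothetical GUI-enabled build: --with qt flips the bcond, enabling the
    # qt5-qtbase-devel BuildRequires, the "make gui" step, and the
    # highlight-gui subpackage.
    rpmbuild -ba --with qt highlight.spec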
diff --git a/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.signatures.json b/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.signatures.json index 2b63df129d9..54c7a6cb24a 100644 --- a/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.signatures.json +++ b/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ibus-libzhuyin-1.9.1.tar.gz": "f0a322700aec3a00dc7c3a4a185f7ad7b1d27a989614b5b50c6aec39a03cf585" + "ibus-libzhuyin-1.10.4.tar.gz": "c21a3e1d7a8d9e6357f5ed0e3246111868b3fda04fcbb8cc726dab2d6363f265" } } diff --git a/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.spec b/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.spec index 94887b96b33..b293550dd32 100644 --- a/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.spec +++ b/SPECS-EXTENDED/ibus-libzhuyin/ibus-libzhuyin.spec @@ -1,24 +1,15 @@ Vendor: Microsoft Corporation Distribution: Azure Linux -# This package depends on automagic byte compilation -# https://fedoraproject.org/wiki/Changes/No_more_automagic_Python_bytecompilation_phase_2 -%global _python_bytecompile_extra 1 - -%global snapshot 0 Name: ibus-libzhuyin -Version: 1.9.1 -Release: 6%{?dist} +Version: 1.10.4 +Release: 1%{?dist} Summary: New Zhuyin engine based on libzhuyin for IBus License: GPLv2+ URL: https://github.com/libzhuyin/ibus-libzhuyin -Source0: http://downloads.sourceforge.net/libzhuyin/ibus-libzhuyin/%{name}-%{version}.tar.gz -%if %snapshot -Patch0: ibus-libzhuyin-1.9.x-HEAD.patch -%endif +Source0: https://downloads.sourceforge.net/libzhuyin/ibus-libzhuyin/%{name}-%{version}.tar.gz BuildRequires: gcc-c++ -BuildRequires: perl(File::Find) BuildRequires: gettext-devel BuildRequires: intltool BuildRequires: libtool @@ -40,11 +31,7 @@ It includes a Chinese Zhuyin (Bopomofo) input method based on libzhuyin for IBus. %prep -%setup -q -%if %snapshot -%patch 0 -p1 -b .head -%endif - +%autosetup %build %configure --disable-static \ @@ -52,17 +39,19 @@ based on libzhuyin for IBus. --with-python=python3 # make -C po update-gmo -make %{?_smp_mflags} V=1 +%make_build %install -make install DESTDIR=${RPM_BUILD_ROOT} INSTALL="install -p" +%make_install + +%py_byte_compile %{python3} $RPM_BUILD_ROOT%{_datadir}/ibus-libzhuyin/setup %find_lang %{name} %files -f %{name}.lang %license COPYING %doc AUTHORS README ChangeLog INSTALL NEWS -%{_datadir}/appdata/*.appdata.xml +%{_datadir}/metainfo/*.appdata.xml %{_datadir}/glib-2.0/schemas/*.gschema.xml %{_datadir}/applications/ibus-setup-libzhuyin.desktop %{_libexecdir}/ibus-engine-libzhuyin @@ -74,8 +63,11 @@ make install DESTDIR=${RPM_BUILD_ROOT} INSTALL="install -p" %{_datadir}/ibus-libzhuyin/*symbol.txt %{_libdir}/ibus-libzhuyin/ - %changelog +* Mon Dec 22 2025 Aditya Singh - 1.10.4-1 +- Upgrade to version 1.10.4 +- License verified. + * Wed Feb 16 2022 Pawel Winogrodzki - 1.9.1-6 - License verified. 
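Note on the %py_byte_compile call added to the ibus-libzhuyin spec above: with the automagic Python bytecompilation dropped (the old _python_bytecompile_extra knob is removed in this change), Python files installed outside the standard site-packages tree must be byte-compiled explicitly, which is what %py_byte_compile %{python3} applied to %{_datadir}/ibus-libzhuyin/setup does. A rough illustration of the macro's effect, assuming the usual python-rpm-macros implementation (the real macro also handles optimized bytecode variants and error propagation):

    # Approximately equivalent %install-time commands; for illustration only.
    %{python3} -m compileall -q %{buildroot}%{_datadir}/ibus-libzhuyin/setup
    %{python3} -O -m compileall -q %{buildroot}%{_datadir}/ibus-libzhuyin/setup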
diff --git a/SPECS-EXTENDED/ibus-table-chinese/ibus-table-chinese.spec b/SPECS-EXTENDED/ibus-table-chinese/ibus-table-chinese.spec index 7e78bfde325..8e84779efa7 100644 --- a/SPECS-EXTENDED/ibus-table-chinese/ibus-table-chinese.spec +++ b/SPECS-EXTENDED/ibus-table-chinese/ibus-table-chinese.spec @@ -6,7 +6,7 @@ Distribution: Azure Linux %global createdb ibus-table-createdb Name: ibus-table-chinese Version: 1.8.3 -Release: 3%{?dist} +Release: 4%{?dist} Summary: Chinese input tables for IBus Summary(zh_CN): 中文码表输入法 Summary(zh_TW): 中文碼表輸入法 @@ -556,7 +556,8 @@ rm -fr %{buildroot}%{_docdir}/* %files wubi-haifeng %{_datadir}/appdata/wubi-haifeng86.appdata.xml -%doc tables/wubi-haifeng/COPYING tables/wubi-haifeng/README +%doc tables/wubi-haifeng/README +%license tables/wubi-haifeng/COPYING %{ibus_icons_dir}/wubi-haifeng86.* %verify(not size md5 mtime) %{ibus_tables_dir}/wubi-haifeng86.db @@ -575,6 +576,10 @@ rm -fr %{buildroot}%{_docdir}/* %verify(not size md5 mtime) %{ibus_tables_dir}/cantonyale.db %changelog +* Thu Dec 18 2025 Aditya Singh - 1.8.3-4 +- Addressed license warning issue. +- License verified. + * Thu Jun 17 2021 Thomas Crain - 1.8.3-3 - Initial CBL-Mariner import from Fedora 32 (license: MIT). - Supplement CMake module search path with the location of cmake-fedora's modules diff --git a/SPECS-EXTENDED/ibus-table/ibus-table.signatures.json b/SPECS-EXTENDED/ibus-table/ibus-table.signatures.json index 2a355d40e0d..68dbc7f3a0f 100644 --- a/SPECS-EXTENDED/ibus-table/ibus-table.signatures.json +++ b/SPECS-EXTENDED/ibus-table/ibus-table.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ibus-table-1.12.4.tar.gz": "71728473680df5dc19f17f0dee101445fac77b56423576018142faa4c78380ec" + "ibus-table-1.17.16.tar.gz": "c1040d4928308b85b3576a418c5cbd6622f199208f80d4a153d1b829ee8508b8" } } diff --git a/SPECS-EXTENDED/ibus-table/ibus-table.spec b/SPECS-EXTENDED/ibus-table/ibus-table.spec index b0739056787..a4ea440401c 100644 --- a/SPECS-EXTENDED/ibus-table/ibus-table.spec +++ b/SPECS-EXTENDED/ibus-table/ibus-table.spec @@ -2,17 +2,15 @@ Vendor: Microsoft Corporation Distribution: Azure Linux # This package depends on automagic byte compilation # https://fedoraproject.org/wiki/Changes/No_more_automagic_Python_bytecompilation_phase_2 -%global _python_bytecompile_extra 1 Name: ibus-table -Version: 1.12.4 -Release: 4%{?dist} +Version: 1.17.16 +Release: 1%{?dist} Summary: The Table engine for IBus platform -License: LGPLv2+ +License: LGPL-2.1-or-later URL: https://github.com/mike-fabian/ibus-table -Source0: https://github.com/mike-fabian/ibus-table/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz +Source0: https://github.com/mike-fabian/ibus-table/releases/download/%{version}/%{name}-%{version}.tar.gz Requires: ibus > 1.3.0 -Requires: python(abi) >= 3.3 %{?__python3:Requires: %{__python3}} BuildRequires: gcc BuildRequires: ibus-devel > 1.3.0 @@ -65,17 +63,19 @@ The %{name}-tests package contains tests that can be used to verify the functionality of the installed %{name} package. 
%prep -%setup -q +%autosetup %build export PYTHON=%{__python3} %configure --disable-static --disable-additional --enable-installed-tests -%__make %{?_smp_mflags} +%make_build %install %__rm -rf $RPM_BUILD_ROOT export PYTHON=%{__python3} -%__make DESTDIR=${RPM_BUILD_ROOT} NO_INDEX=true install pkgconfigdir=%{_datadir}/pkgconfig +%make_install DESTDIR=${RPM_BUILD_ROOT} NO_INDEX=true pkgconfigdir=%{_datadir}/pkgconfig +%py_byte_compile %{python3} %{buildroot}/usr/share/ibus-table/engine +%py_byte_compile %{python3} %{buildroot}/usr/share/ibus-table/setup %find_lang %{name} @@ -137,16 +137,25 @@ export PYTHON=%{__python3} %files -f %{name}.lang -%doc AUTHORS COPYING README +%doc AUTHORS README %{_datadir}/%{name} -%{_datadir}/metainfo/*.appdata.xml +%{_datadir}/metainfo/*.metainfo.xml %{_datadir}/ibus/component/table.xml +%{_datadir}/icons/hicolor/16x16/apps/ibus-table.png +%{_datadir}/icons/hicolor/22x22/apps/ibus-table.png +%{_datadir}/icons/hicolor/32x32/apps/ibus-table.png +%{_datadir}/icons/hicolor/48x48/apps/ibus-table.png +%{_datadir}/icons/hicolor/64x64/apps/ibus-table.png +%{_datadir}/icons/hicolor/128x128/apps/ibus-table.png +%{_datadir}/icons/hicolor/256x256/apps/ibus-table.png +%{_datadir}/icons/hicolor/scalable/apps/ibus-table.svg %{_datadir}/applications/ibus-setup-table.desktop %{_datadir}/glib-2.0/schemas/org.freedesktop.ibus.engine.table.gschema.xml %{_bindir}/%{name}-createdb %{_libexecdir}/ibus-engine-table %{_libexecdir}/ibus-setup-table %{_mandir}/man1/* +%license COPYING %files devel %{_datadir}/pkgconfig/%{name}.pc @@ -158,6 +167,10 @@ export PYTHON=%{__python3} %{_datadir}/installed-tests/%{name} %changelog +* Tue Dec 16 2025 Aditya Singh - 1.17.16-1 +- Upgrade to version 1.17.16. +- License verified. + * Fri Sep 01 2023 Pawel Winogrodzki - 1.12.4-4 - Disabling test dependencies due to build failures. diff --git a/SPECS-EXTENDED/ibus/ibus.spec b/SPECS-EXTENDED/ibus/ibus.spec index c927afd4c32..c3b0384f3ff 100644 --- a/SPECS-EXTENDED/ibus/ibus.spec +++ b/SPECS-EXTENDED/ibus/ibus.spec @@ -41,7 +41,7 @@ Distribution: Azure Linux Name: ibus Version: 1.5.31 # https://github.com/fedora-infra/rpmautospec/issues/101 -Release: 1%{?dist} +Release: 2%{?dist} Summary: Intelligent Input Bus for Linux OS License: LGPL-2.1-or-later URL: https://github.com/ibus/%name/wiki @@ -414,7 +414,8 @@ dconf update || : %files -f %{name}10.lang # FIXME: no version number -%doc AUTHORS COPYING README +%doc AUTHORS README +%license COPYING %dir %{_datadir}/ibus/ %{_bindir}/ibus %{_bindir}/ibus-daemon @@ -536,6 +537,10 @@ dconf update || : %{_datadir}/installed-tests/ibus %changelog +* Thu Dec 18 2025 Aditya Singh - 1.5.31-2 +- Addressed license warning issue. +- License verified. 
+ * Thu Mar 13 2025 Sumit Jena - 1.5.31-1 - Update to version 1.5.31 - License verified diff --git a/SPECS-EXTENDED/javacc/javacc.spec b/SPECS-EXTENDED/javacc/javacc.spec index 262a360455f..629cc51e2e1 100644 --- a/SPECS-EXTENDED/javacc/javacc.spec +++ b/SPECS-EXTENDED/javacc/javacc.spec @@ -21,7 +21,7 @@ Distribution: Azure Linux Summary: A Parser and Scanner Generator for Java Name: javacc Version: 7.0.4 -Release: 3%{?dist} +Release: 4%{?dist} License: BSD-3-Clause Group: Development/Libraries/Java URL: http://javacc.org @@ -84,7 +84,7 @@ find ./examples -type f -exec sed -i 's/\r//' {} \; %build %{ant} \ - -Dant.build.javac.source=1.6 -Dant.build.javac.target=1.6 \ + -Dant.build.javac.source=1.8 -Dant.build.javac.target=1.8 \ jar javadoc %install @@ -99,12 +99,31 @@ install -pm 0644 pom.xml %{buildroot}%{_mavenpomdir}/%{name}.pom # javadoc install -dm 0755 %{buildroot}%{_javadocdir}/%{name} cp -pr target/javadoc/* %{buildroot}%{_javadocdir}/%{name}/ + +# Move license-like files out of javadoc tree to avoid license warnings +legaldir=%{buildroot}%{_javadocdir}/%{name}/legal + +if [ -d "$legaldir" ]; then + # install renamed copies into licensedir + install -Dm 0644 $legaldir/LICENSE \ + %{buildroot}%{_licensedir}/javacc/LICENSE.javadoc + + install -Dm 0644 $legaldir/ADDITIONAL_LICENSE_INFO \ + %{buildroot}%{_licensedir}/javacc/ADDITIONAL_LICENSE_INFO.javadoc + + # remove the originals to avoid confusion + rm -rf $legaldir +fi + +# Remove all javadoc legal directories afterward +find %{buildroot}%{_javadocdir}/%{name} -type d -name legal + %fdupes -s %{buildroot}%{_javadocdir} %fdupes -s www %fdupes -s examples %jpackage_script javacc '' '' javacc javacc true -ln -s %{_bindir}/javacc %{buildroot}%{_bindir}/javacc.sh +ln -s javacc %{buildroot}%{_bindir}/javacc.sh %jpackage_script jjdoc '' '' javacc jjdoc true %jpackage_script jjtree '' '' javacc jjtree true @@ -124,9 +143,15 @@ ln -s %{_bindir}/javacc %{buildroot}%{_bindir}/javacc.sh %files javadoc %license LICENSE -%{_javadocdir}/%{name} +%doc %{_javadocdir}/%{name} +%license %{_licensedir}/javacc/* %changelog +* Wed Dec 17 2025 Aninda Pradhan - 7.0.4-4 +- Updated javac to use 1.8 to resolve build issues. +- Fixed license path warnings +- License verified + * Sat Jul 24 2021 Pawel Winogrodzki - 7.0.4-3 - Splitting as separate 'javacc' package with a build-time dependency on 'javacc-bootstrap'. - Switching to using single digit 'Release' tags. diff --git a/SPECS-EXTENDED/jbcrypt/jbcrypt-1.0.2.pom b/SPECS-EXTENDED/jbcrypt/jbcrypt-1.0.2.pom new file mode 100644 index 00000000000..dcc1cf2c307 --- /dev/null +++ b/SPECS-EXTENDED/jbcrypt/jbcrypt-1.0.2.pom @@ -0,0 +1,33 @@ + + + + + + + + 4.0.0 + org.connectbot + jbcrypt + 1.0.2 + jBCrypt + A fork of jBCrypt with more modern OpenBSD algorithms. 
+ https://github.com/kruton/jbcrypt + + + ISC + https://www.isc.org/downloads/software-support-policy/isc-license/ + + + + + Kenny Root + kenny@the-b.org + + + + https://github.com/kruton/jbcrypt.git + https://github.com/kruton/jbcrypt.git + https://github.com/kruton/jbcrypt + + diff --git a/SPECS-EXTENDED/jbcrypt/jbcrypt-build.xml b/SPECS-EXTENDED/jbcrypt/jbcrypt-build.xml new file mode 100644 index 00000000000..175f45b06d1 --- /dev/null +++ b/SPECS-EXTENDED/jbcrypt/jbcrypt-build.xml @@ -0,0 +1,196 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + =================================== WARNING =================================== + JUnit is not present in the test classpath or your $ANT_HOME/lib directory. Tests not executed. + =============================================================================== + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/jbcrypt/jbcrypt.signatures.json b/SPECS-EXTENDED/jbcrypt/jbcrypt.signatures.json new file mode 100644 index 00000000000..4b2d3d8c63d --- /dev/null +++ b/SPECS-EXTENDED/jbcrypt/jbcrypt.signatures.json @@ -0,0 +1,7 @@ +{ + "Signatures": { + "jbcrypt-1.0.2.pom": "c94024d7410ed6a1894e3f8e26df16c2d3f6931e89c6b3f17a9028ab4dba092b", + "jbcrypt-1.0.2.tar.gz": "0a18110f070f53e2f9ed1609e1a01a1d2042bbe87cba909a381f2f5a055a1142", + "jbcrypt-build.xml": "e85b12dde9acea14d76d09db813f90fde150c1f312e772ddffdb6d66cd51b5ef" + } +} \ No newline at end of file diff --git a/SPECS-EXTENDED/jbcrypt/jbcrypt.spec b/SPECS-EXTENDED/jbcrypt/jbcrypt.spec new file mode 100644 index 00000000000..126fcc62c99 --- /dev/null +++ b/SPECS-EXTENDED/jbcrypt/jbcrypt.spec @@ -0,0 +1,91 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux +# +# spec file for package jbcrypt +# +# Copyright (c) 2024 SUSE LLC +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. + +# Please submit bugfixes or comments via https://bugs.opensuse.org/ +# + + +Name: jbcrypt +Version: 1.0.2 +Release: 1%{?dist} +Summary: An implementation the OpenBSD Blowfish password hashing algorithm +License: ISC +Group: Development/Libraries/Java +URL: https://github.com/kruton/%{name} +Source0: https://github.com/kruton/%{name}/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz +Source1: %{name}-build.xml +Source2: https://repo1.maven.org/maven2/org/connectbot/%{name}/%{version}/%{name}-%{version}.pom +BuildRequires: ant +BuildRequires: fdupes +BuildRequires: java-devel >= 1.8 +BuildRequires: javapackages-local-bootstrap >= 6 +BuildRequires: javapackages-tools +BuildArch: noarch + +%description +jBCrypt is an implementation the OpenBSD Blowfish password hashing +algorithm. + +This system hashes passwords using a version of Bruce Schneier's +Blowfish block cipher with modifications designed to raise the cost of +off-line password cracking. 
The computation cost of the algorithm is +parameterised, so it can be increased as computers get faster. + +%package javadoc +Summary: Javadoc for %{name} +Group: Development/Libraries/Java + +%description javadoc +This package contains API documentation for %{name}. + +%prep +%setup -q +cp %{SOURCE1} build.xml + +%build +%{ant} -Dtest.skip=true package javadoc + +%install +install -dm 0755 %{buildroot}%{_javadir} +install -pm 0644 target/%{name}-%{version}.jar %{buildroot}%{_javadir}/%{name}.jar + +install -dm 0755 %{buildroot}%{_mavenpomdir} +install -pm 0644 %{SOURCE2} %{buildroot}%{_mavenpomdir}/%{name}.pom +%add_maven_depmap %{name}.pom %{name}.jar -a org.connectbot.jbcrypt:jbcrypt + +install -dm 0755 %{buildroot}%{_javadocdir} +cp -r target/site/apidocs %{buildroot}%{_javadocdir}/%{name} +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . +%fdupes -s %{buildroot}%{_javadocdir} + +%files -f .mfiles +%license LICENSE +%doc README + + +%files javadoc +%license LICENSE ADDITIONAL_LICENSE_INFO +%{_javadocdir}/%{name} + + +%changelog +* Tue Dec 16 2025 BinduSri Adabala - 1.0.2-1 +- Initial CBL-Mariner import from openSUSE Tumbleweed (license: same as "License" tag). +- License verified + +* Tue Mar 12 2024 Fridrich Strba +- Initial packaging of version 1.0.2 diff --git a/SPECS-EXTENDED/jtidy/build.xml b/SPECS-EXTENDED/jtidy/build.xml deleted file mode 100644 index 8f4a88f7d15..00000000000 --- a/SPECS-EXTENDED/jtidy/build.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - diff --git a/SPECS-EXTENDED/jtidy/jtidy-build.xml b/SPECS-EXTENDED/jtidy/jtidy-build.xml new file mode 100644 index 00000000000..954b0d8b607 --- /dev/null +++ b/SPECS-EXTENDED/jtidy/jtidy-build.xml @@ -0,0 +1,111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/jtidy/jtidy-rpmlintrc b/SPECS-EXTENDED/jtidy/jtidy-rpmlintrc new file mode 100644 index 00000000000..86d44267df4 --- /dev/null +++ b/SPECS-EXTENDED/jtidy/jtidy-rpmlintrc @@ -0,0 +1 @@ +addFilter("invalid-license") diff --git a/SPECS-EXTENDED/jtidy/jtidy.jtidy.script b/SPECS-EXTENDED/jtidy/jtidy.jtidy.script deleted file mode 100644 index b6939917121..00000000000 --- a/SPECS-EXTENDED/jtidy/jtidy.jtidy.script +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -# -# JTidy script -# JPackage Project -# $Id: jtidy.jtidy.script,v 1.1 2002/08/30 20:27:06 scop Exp $ - -# Source functions library -if [ -f /usr/share/java-utils/java-functions ] ; then - . 
/usr/share/java-utils/java-functions -else - echo "Can't find functions library, aborting" - exit 1 -fi - -# Configuration -MAIN_CLASS=org.w3c.tidy.Tidy -BASE_JARS="jtidy xerces-j2 xml-commons-apis" - -# Set parameters -set_jvm -set_classpath $BASE_JARS -set_flags $BASE_FLAGS -set_options $BASE_OPTIONS - -# Let's start -run "$@" diff --git a/SPECS-EXTENDED/jtidy/jtidy.signatures.json b/SPECS-EXTENDED/jtidy/jtidy.signatures.json index bdf17f0ab22..230b2601698 100644 --- a/SPECS-EXTENDED/jtidy/jtidy.signatures.json +++ b/SPECS-EXTENDED/jtidy/jtidy.signatures.json @@ -1,9 +1,7 @@ { "Signatures": { - "build.xml": "3d0dc374b73697277f33da5332298980a3880bd49d95172507f42f10e8ea4942", - "jtidy-r813.tar.bz2": "ef3a8d5094281ce419aaabd16474971e8b656c9056dac5d4eedb608fb44d9712", - "jtidy.jtidy.script": "e0ac56a85eb11c20a2b3abd27072b84b5da4af108852b341840d2ff8e0e49fed", - "maven-build.properties": "aa55c81591c1379770ec96bdac30da5ff966925ccfa392117b5c236c913894ab", - "maven-build.xml": "6d2be5301ac3489ee1c2e18244a3a1421378fdc2261db3f2082de840d2c1006e" + "jtidy-1.0.4.tar.gz": "1df3eca7077a1290f1e9b633422a3d8ac4f2fc5a0f487a129a193d58b039ea45", + "jtidy-build.xml": "f933286ec074603c3f748bd79f6c874feb0880b12067ba88c1d9f6fc27d0d28a", + "jtidy-rpmlintrc": "ee1003144292574a34c0f5cddf05a52ed1a2b27489b5ab550cc393d9cb7d9449" } } diff --git a/SPECS-EXTENDED/jtidy/jtidy.spec b/SPECS-EXTENDED/jtidy/jtidy.spec index 7f0d61ca2a6..09bd3210688 100644 --- a/SPECS-EXTENDED/jtidy/jtidy.spec +++ b/SPECS-EXTENDED/jtidy/jtidy.spec @@ -1,53 +1,24 @@ Vendor: Microsoft Corporation Distribution: Azure Linux -# -# spec file for package jtidy -# -# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany. -# -# All modifications and additions to the file contributed by third parties -# remain the property of their copyright owners, unless otherwise agreed -# upon. The license for this file, and modifications and additions to the -# file, is the same license as for the pristine package itself (unless the -# license for the pristine package is not an Open Source License, in which -# case the license is the MIT License). An "Open Source License" is a -# license that conforms to the Open Source Definition (Version 1.9) -# published by the Open Source Initiative. 
- -# Please submit bugfixes or comments via https://bugs.opensuse.org/ -# - - -%define with() %{expand:%%{?with_%{1}:1}%%{!?with_%{1}:0}} -%define without() %{expand:%%{?with_%{1}:0}%%{!?with_%{1}:1}} -%define bcond_with() %{expand:%%{?_with_%{1}:%%global with_%{1} 1}} -%define bcond_without() %{expand:%%{!?_without_%{1}:%%global with_%{1} 1}} -%define _without_maven 1 -%define section free -%bcond_with maven Name: jtidy -Version: 8.0 -Release: 32%{?dist} +VCS: https://src.suse.de/pool/jtidy?trackingbranch=slfo-1.2#0765e5b9560995812fcac0bb88fd0e409490c8343acc52ef06cfa2af91680a0d +Version: 1.0.4 +Release: 1%{?dist} Summary: HTML syntax checker and pretty printer -License: BSD +License: HTMLTIDY Group: Development/Libraries/Java -URL: http://jtidy.sourceforge.net/ -# svn export -r813 http://svn.sourceforge.net/svnroot/jtidy/trunk/jtidy/ jtidy -# # bnc#501764 -# rm jtidy/src/config/clover.license -Source0: %{_distro_sources_url}/jtidy-r813.tar.bz2 -Source1: %{name}.jtidy.script -Source2: build.xml -Source3: maven-build.properties -Source4: maven-build.xml +URL: https://github.com/jtidy/jtidy +Source0: https://github.com/jtidy/jtidy/archive/refs/tags/jtidy-1.0.4.tar.gz +Source1: %{name}-build.xml +Source100: %{name}-rpmlintrc BuildRequires: ant >= 1.6 BuildRequires: ant-junit BuildRequires: fdupes BuildRequires: javapackages-local-bootstrap BuildRequires: xerces-j2 -BuildRequires: xml-commons-apis +BuildRequires: xml-apis Requires: xerces-j2 -Requires: xml-commons-apis +Requires: xml-apis BuildArch: noarch %description @@ -79,34 +50,19 @@ cleaning up malformed and faulty HTML. In addition, JTidy provides a DOM parser for real-world HTML. %prep -%setup -q -n %{name} -cp -p %{SOURCE2} %{SOURCE3} %{SOURCE4} . - -sed -i 's/charset="ISO-8859-1"/charset="UTF-8"/' maven-build.xml +%setup -q -n %{name}-%{name}-%{version} +cp -p %{SOURCE1} build.xml %build -export MAVEN_REPO_LOCAL=$(pwd)/.m2/repository -mkdir -p $MAVEN_REPO_LOCAL -if [ `uname -m` = "ppc64" -o `uname -m` = "ppc64le" ];then -export ANT_OPTS="-Xss2m" -else -export ANT_OPTS="-Xss1m" -fi -export CLASSPATH=$(build-classpath junit slf4j xerces-j2 xml-commons-jaxp-1.3-apis):`pwd`/target/classes:`pwd`/target/test-classes -export OPT_JAR_LIST="junit ant/ant-junit" +mkdir -p lib +build-jar-repository -s lib xerces-j2 xml-apis %{ant} \ - -Dbuild.sysclasspath=only \ - -Dmaven.mode.offline=true \ - -Dmaven.repo.local=$MAVEN_REPO_LOCAL \ - -Dmaven.test.skip=true \ - -Dmaven.test.error.ignore=true \ package javadoc %install - # jar install -d -m 0755 %{buildroot}%{_javadir} -install -m 644 target/jtidy-8.0-SNAPSHOT.jar %{buildroot}%{_javadir}/%{name}.jar +install -m 644 target/%{name}-%{version}.jar %{buildroot}%{_javadir}/%{name}.jar # pom install -d -m 755 %{buildroot}%{_mavenpomdir} @@ -117,28 +73,26 @@ install -pm 644 pom.xml %{buildroot}%{_mavenpomdir}/%{name}.pom install -d -m 0755 %{buildroot}%{_javadocdir}/%{name} cp -aL target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name} %fdupes -s %{buildroot}%{_javadocdir}/%{name} +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . 
# shell script -install -d -m 0755 %{buildroot}%{_bindir} -install -p -m 0755 %{SOURCE1} %{buildroot}%{_bindir}/%{name} +%jpackage_script org.w3c.tidy.Tidy "" "" %{name}:xerces-j2:xml-apis %{name} true # ant.d install -d -m 0755 %{buildroot}%{_sysconfdir}/ant.d cat > %{buildroot}%{_sysconfdir}/ant.d/%{name} << EOF -jtidy xerces-j2 xml-commons-jaxp-1.3-apis +jtidy xerces-j2 xml-apis EOF %files -%defattr(0644,root,root,0755) %license LICENSE.txt +%license LICENSE +%license ADDITIONAL_LICENSE_INFO %{_javadir}/%{name}.jar -%{_mavenpomdir}/* -%if %{defined _maven_repository} -%{_mavendepmapfragdir}/%{name} -%else -%{_datadir}/maven-metadata/%{name}.xml* -%endif %config(noreplace) %{_sysconfdir}/ant.d/%{name} +%exclude /usr/share/maven-poms/%{name}.pom +%exclude /usr/share/maven-metadata/%{name}.xml %files javadoc %defattr(0644,root,root,0755) @@ -149,37 +103,6 @@ EOF %{_bindir}/* %changelog -* Thu Feb 22 2024 Pawel Winogrodzki - 8.0-32 -- Updating naming for 3.0 version of Azure Linux. - -* Fri Apr 15 2022 Pawel Winogrodzki - 8.0-31 -- Updating source URL. -- License verified. - -* Thu Oct 14 2021 Pawel Winogrodzki - 8.0-30 -- Converting the 'Release' tag to the '[number].[distribution]' format. - -* Mon Nov 16 2020 Ruying Chen - 8.0-29.7 -- Initial CBL-Mariner import from openSUSE Tumbleweed (license: same as "License" tag). -- Use javapackages-local-bootstrap to avoid build cycle. - -* Mon Oct 29 2018 Fridrich Strba -- Fix javadoc build -- Package maven artifact -* Fri Sep 8 2017 fstrba@suse.com -- Modified file: - * maven-build.xml - + Specify java source and target level 1.6 in order to allow - building with jdk9 -* Thu Dec 5 2013 dvaleev@suse.com -- increase stack size for ppc64le -* Mon Sep 9 2013 tchvatal@suse.com -- Move from jpackage-utils to javapackage-tools -* Wed Dec 12 2012 dvaleev@suse.com -- increase stack size for ppc64 -* Fri Jun 15 2012 mvyskocil@suse.cz -- disable javadoc (workaround for jdk7 build) -* Wed May 20 2009 mvyskocil@suse.cz -- 'fixed bnc#501764: removed clover.license from source tarball' -* Thu May 7 2009 mvyskocil@suse.cz -- Initial packaging of 8.0 in SUSE (from jpp 5.0) +* Fri Nov 21 2025 Akarsh Chaudhary - 1.0.4-1 +- Initial Azure Linux import from openSUSE Tumbleweed (license: same as "License" tag). +- License verified diff --git a/SPECS-EXTENDED/jtidy/maven-build.properties b/SPECS-EXTENDED/jtidy/maven-build.properties deleted file mode 100644 index 508181a3b50..00000000000 --- a/SPECS-EXTENDED/jtidy/maven-build.properties +++ /dev/null @@ -1,17 +0,0 @@ -#Generated by Maven Ant Plugin - DO NOT EDIT THIS FILE! 
-#Thu Oct 02 12:28:05 EDT 2008 -project.build.outputDirectory=${maven.build.outputDir} -project.build.directory=${maven.build.dir} -maven.test.reports=${maven.build.dir}/test-reports -maven.build.finalName=jtidy-8.0-SNAPSHOT -maven.reporting.outputDirectory=${maven.build.dir}/site -maven.build.testResourceDir.0=src/test/resources -maven.build.outputDir=${maven.build.dir}/classes -maven.build.resourceDir.0=src/main/resources -maven.build.testOutputDir=${maven.build.dir}/test-classes -maven.repo.local=${user.home}/.m2/repository -maven.settings.offline=false -maven.build.dir=target -maven.settings.interactiveMode=true -maven.build.testDir.0=src/test/java -maven.build.srcDir.0=src/main/java diff --git a/SPECS-EXTENDED/jtidy/maven-build.xml b/SPECS-EXTENDED/jtidy/maven-build.xml deleted file mode 100644 index 1cb188bdfe5..00000000000 --- a/SPECS-EXTENDED/jtidy/maven-build.xml +++ /dev/null @@ -1,322 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - =================================== WARNING =================================== - JUnit is not present in your $ANT_HOME/lib directory. Tests not executed. - =============================================================================== - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/SPECS-EXTENDED/kernel-ipe/config b/SPECS-EXTENDED/kernel-ipe/config index ac2579594bf..036939da340 100644 --- a/SPECS-EXTENDED/kernel-ipe/config +++ b/SPECS-EXTENDED/kernel-ipe/config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86_64 6.6.117.1 Kernel Configuration +# Linux/x86_64 6.6.121.1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.0" CONFIG_CC_IS_GCC=y @@ -1170,7 +1170,7 @@ CONFIG_XFRM_OFFLOAD=y CONFIG_XFRM_ALGO=m CONFIG_XFRM_USER=m # CONFIG_XFRM_USER_COMPAT is not set -# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_INTERFACE=m CONFIG_XFRM_SUB_POLICY=y CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y @@ -1220,7 +1220,7 @@ CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m # CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y @@ -2062,7 +2062,8 @@ CONFIG_DMIID=y CONFIG_DMI_SYSFS=m CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y # CONFIG_ISCSI_IBFT is not set -# CONFIG_FW_CFG_SYSFS is not set +CONFIG_FW_CFG_SYSFS=m +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set CONFIG_SYSFB=y # CONFIG_SYSFB_SIMPLEFB is not set # CONFIG_GOOGLE_FIRMWARE is not set @@ -7169,7 +7170,7 @@ CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set +CONFIG_SQUASHFS_ZSTD=y # CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set # CONFIG_SQUASHFS_EMBEDDED is not set CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 diff --git a/SPECS-EXTENDED/kernel-ipe/config_aarch64 b/SPECS-EXTENDED/kernel-ipe/config_aarch64 index 57521d161aa..600b9693681 100644 --- a/SPECS-EXTENDED/kernel-ipe/config_aarch64 +++ b/SPECS-EXTENDED/kernel-ipe/config_aarch64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 6.6.117.1 Kernel Configuration +# Linux/arm64 6.6.121.1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.0" CONFIG_CC_IS_GCC=y @@ -1231,7 +1231,7 @@ CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m # CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y @@ -4715,6 +4715,7 @@ CONFIG_SPI_SUN6I=m CONFIG_SPI_SYNQUACER=m CONFIG_SPI_MXIC=m # CONFIG_SPI_TEGRA210_QUAD is not set +# CONFIG_SPI_TEGRA114 is not set # CONFIG_SPI_TEGRA20_SFLASH is not set CONFIG_SPI_THUNDERX=m CONFIG_SPI_XCOMM=m @@ -8523,7 +8524,6 @@ CONFIG_STAGING=y # CONFIG_RTL8192U is not set # CONFIG_RTLLIB is not set # CONFIG_RTL8723BS is not set -# CONFIG_R8712U is not set # CONFIG_RTS5208 is not set # CONFIG_VT6655 is not set # CONFIG_VT6656 is not set diff --git a/SPECS-EXTENDED/kernel-ipe/kernel-ipe.signatures.json b/SPECS-EXTENDED/kernel-ipe/kernel-ipe.signatures.json index b136a0efd60..bba710e66c5 100644 --- a/SPECS-EXTENDED/kernel-ipe/kernel-ipe.signatures.json +++ b/SPECS-EXTENDED/kernel-ipe/kernel-ipe.signatures.json @@ -1,14 +1,14 @@ { "Signatures": { "azurelinux-ca-20230216.pem": "d545401163c75878319f01470455e6bc18a5968e39dd964323225e3fe308849b", - "config": "f9b4a11f5f16da83111766e1af913b77103a6dc872b848bb25d41a5be68cb032", - "config_aarch64": "d4207f14d92b0b873856ed4d26ee7c340ed8fe77e0beea9e5047d632c48189b3", + "config": "45568c4b391b581400145626bd7ca1712028bfcef6b1f3ab4691c27786a91c3a", + "config_aarch64": "77ba2d0761f07f9d1182fd3ab469106e99c38e839665e5af699379a1a204a844", "cpupower": "d7518767bf2b1110d146a49c7d42e76b803f45eb8bd14d931aa6d0d346fae985", "cpupower.service": "b057fe9e5d0e8c36f485818286b80e3eba8ff66ff44797940e99b1fd5361bb98", "sha512hmac-openssl.sh": "02ab91329c4be09ee66d759e4d23ac875037c3b56e5a598e32fd1206da06a27f", "azl-ipe-boot-policy.pol": "f2b7941bd3b721aadc8e937d0472c36fe5e140221f7bb54af6ef905884e0372c", "Makefile": "1c2e740407215ed9b9cbbc09f9102bc99c08b370bbe2cbb0490aefdc9eb70455", "tarfs.c": "066084e1ca2c1e7ba83e76a6696cf17928e7efb46a2b1670a7a1f597c2d9bc51", - "kernel-6.6.117.1.tar.gz": "bfbbeba626396e2bab9bd520a46943e68d228a91e8f11cd662bf4fb3996443d3" + "kernel-6.6.121.1.tar.gz": "aa5721db931ce7b5a7a2c9a554c78e399dbe76e823356d36f860308cfa9c5e12" } } diff --git a/SPECS-EXTENDED/kernel-ipe/kernel-ipe.spec b/SPECS-EXTENDED/kernel-ipe/kernel-ipe.spec index 7a527d3cd7f..ac871c7905c 100644 --- a/SPECS-EXTENDED/kernel-ipe/kernel-ipe.spec +++ b/SPECS-EXTENDED/kernel-ipe/kernel-ipe.spec @@ -32,7 +32,7 @@ Summary: Linux Kernel Name: kernel-ipe -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -460,6 +460,21 @@ echo "initrd of kernel %{uname_r} removed" >&2 %{_sysconfdir}/bash_completion.d/bpftool %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Enable CONFIG_FW_CFG_SYSFS and CONFIG_SQUASHFS_ZSTD + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Enable XFRM_INTERFACE + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Enable CONFIG_INET_DIAG_DESTROY + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS-EXTENDED/lasso/lasso.signatures.json b/SPECS-EXTENDED/lasso/lasso.signatures.json index a71c8f6e6e6..76a29baebcc 100644 --- 
a/SPECS-EXTENDED/lasso/lasso.signatures.json +++ b/SPECS-EXTENDED/lasso/lasso.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "lasso-2.8.0.tar.gz": "ffcbd5851d98586c7e1caf43bad66164211a3b61d12bf860a0598448ff9f2b38" + "lasso-2.9.0.tar.gz": "225f664fe602b73bb5eaf5c4b60a6b07eb2d936d0dd0cafb222524c801d98e5d" } } diff --git a/SPECS-EXTENDED/lasso/lasso.spec b/SPECS-EXTENDED/lasso/lasso.spec index 3bd2b175648..4a5c7f6b45b 100644 --- a/SPECS-EXTENDED/lasso/lasso.spec +++ b/SPECS-EXTENDED/lasso/lasso.spec @@ -3,20 +3,26 @@ Distribution: Azure Linux %global with_java 0 %global with_php 0 %global with_perl 1 +%global with_python3 1 %global with_wsf 0 -%global obsolete_old_lang_subpackages 0 +%global default_sign_algo "rsa-sha1" +%global min_hash_algo "sha1" %if %{with_php} -%if "%{php_version}" < "5.6" -%global ini_name %{name}.ini -%else %global ini_name 40-%{name}.ini %endif -%endif %global configure_args %{nil} %global configure_args %{configure_args} +%if %{default_sign_algo} + %global configure_args %{configure_args} --with-default-sign-algo=%{default_sign_algo} +%endif + +%if %{min_hash_algo} + %global configure_args %{configure_args} --with-min-hash-algo=%{min_hash_algo} +%endif + %if !%{with_java} %global configure_args %{configure_args} --disable-java %endif @@ -26,66 +32,56 @@ Distribution: Azure Linux %endif %if %{with_php} - %global configure_args %{configure_args} --enable-php5=yes --with-php5-config-dir=%{php_inidir} + %global configure_args %{configure_args} --enable-php5=no --enable-php7=yes --with-php7-config-dir=%{php_inidir} %else - %global configure_args %{configure_args} --enable-php5=no + %global configure_args %{configure_args} --enable-php5=no --enable-php7=no %endif %if %{with_wsf} %global configure_args %{configure_args} --enable-wsf --with-sasl2=%{_prefix}/sasl2 %endif +%if !%{with_python3} + %global configure_args %{configure_args} --disable-python +%endif + Summary: Liberty Alliance Single Sign On Name: lasso -Version: 2.8.0 +Version: 2.9.0 Release: 1%{?dist} -License: GPLv2+ -URL: http://lasso.entrouvert.org/ -Source: http://dev.entrouvert.org/lasso/lasso-%{version}.tar.gz +License: GPL-2.0-or-later +URL: https://lasso.entrouvert.org/ +Source0: https://git.entrouvert.org/entrouvert/lasso/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz BuildRequires: autoconf BuildRequires: automake BuildRequires: check-devel +BuildRequires: gcc BuildRequires: glib2-devel BuildRequires: gtk-doc BuildRequires: libtool BuildRequires: libtool-ltdl-devel BuildRequires: libxml2-devel -BuildRequires: libxslt-devel +BuildRequires: make BuildRequires: openssl-devel -BuildRequires: swig -BuildRequires: xmlsec1-devel >= 1.2.25-4 -BuildRequires: xmlsec1-openssl-devel >= 1.2.25-4 -BuildRequires: zlib-devel -%if %{with_java} -BuildRequires: java-devel -BuildRequires: jpackage-utils -%endif -%if %{with_perl} -BuildRequires: perl-devel -BuildRequires: perl-generators -BuildRequires: perl(Error) -BuildRequires: perl(ExtUtils::MakeMaker) -BuildRequires: perl(strict) -BuildRequires: perl(Test::More) -BuildRequires: perl(warnings) -BuildRequires: perl(XSLoader) -%endif -%if %{with_php} -BuildRequires: expat-devel -BuildRequires: php-devel -%endif -# The Lasso build system requires python, especially the binding generators BuildRequires: python3 -BuildRequires: python3-devel -BuildRequires: python3-lxml BuildRequires: python3-six +BuildRequires: (python3-setuptools if python3 >= 3.12) +BuildRequires: swig +BuildRequires: xmlsec1-devel +BuildRequires: xmlsec1-openssl-devel +BuildRequires: 
zlib-devel %if %{with_wsf} BuildRequires: cyrus-sasl-devel %endif -Requires: xmlsec1 >= 1.2.25-4 +Requires: xmlsec1 + +# lasso upstream no longer supports java bindings +# see https://dev.entrouvert.org/issues/45876#change-289747 +# and https://dev.entrouvert.org/issues/51418 +Obsoletes: java-lasso < %{version}-%{release} %description Lasso is a library that implements the Liberty Alliance Single Sign On @@ -104,7 +100,15 @@ documentation for Lasso. %if %{with_perl} %package -n perl-%{name} Summary: Liberty Alliance Single Sign On (lasso) Perl bindings -Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) +BuildRequires: perl-devel +BuildRequires: perl-generators +BuildRequires: perl-interpreter +BuildRequires: perl(Error) +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: perl(strict) +BuildRequires: perl(Test::More) +BuildRequires: perl(warnings) +BuildRequires: perl(XSLoader) Requires: %{name}%{?_isa} = %{version}-%{release} %description -n perl-%{name} @@ -114,14 +118,11 @@ Perl language bindings for the lasso (Liberty Alliance Single Sign On) library. %if %{with_java} %package -n java-%{name} Summary: Liberty Alliance Single Sign On (lasso) Java bindings -Requires: java +Buildrequires: java-1.8.0-openjdk-devel +BuildRequires: jpackage-utils +Requires: java-headless Requires: jpackage-utils Requires: %{name}%{?_isa} = %{version}-%{release} -%if %{obsolete_old_lang_subpackages} -Provides: %{name}-java = %{version}-%{release} -Provides: %{name}-java%{?_isa} = %{version}-%{release} -Obsoletes: %{name}-java < %{version}-%{release} -%endif %description -n java-%{name} Java language bindings for the lasso (Liberty Alliance Single Sign On) library. @@ -130,6 +131,8 @@ Java language bindings for the lasso (Liberty Alliance Single Sign On) library. %if %{with_php} %package -n php-%{name} Summary: Liberty Alliance Single Sign On (lasso) PHP bindings +BuildRequires: expat-devel +BuildRequires: php-devel Requires: %{name}%{?_isa} = %{version}-%{release} Requires: php(zend-abi) = %{php_zend_api} Requires: php(api) = %{php_core_api} @@ -139,51 +142,52 @@ PHP language bindings for the lasso (Liberty Alliance Single Sign On) library. %endif + +%if %{with_python3} %package -n python3-%{name} %{?python_provide:%python_provide python3-%{name}} Summary: Liberty Alliance Single Sign On (lasso) Python bindings +BuildRequires: python3-devel +BuildRequires: python3-lxml Requires: python3 Requires: %{name}%{?_isa} = %{version}-%{release} -Provides: lasso-python = %{version}-%{release} %description -n python3-%{name} Python language bindings for the lasso (Liberty Alliance Single Sign On) library. 
+%endif %prep -%autosetup -p1 +%autosetup -n %{name} # Remove any python script shebang lines (unless they refer to python3) sed -i -E -e '/^#![[:blank:]]*(\/usr\/bin\/env[[:blank:]]+python[^3]?\>)|(\/usr\/bin\/python[^3]?\>)/d' \ `grep -r -l -E '^#![[:blank:]]*(/usr/bin/python[^3]?)|(/usr/bin/env[[:blank:]]+python[^3]?)' *` %build -export JAVA_HOME=%{java_home} +%if 0%{?with_java} +export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk +%endif +echo %{version} > .tarball-version ./autogen.sh - -%configure %{configure_args} --with-python=%{__python3} +%if 0%{?with_python3} + %configure %{configure_args} --with-python=%{__python3} +%else + %configure %{configure_args} +%endif %make_build CFLAGS="%{optflags}" %check -make check CK_TIMEOUT_MULTIPLIER=5 +make check CK_TIMEOUT_MULTIPLIER=10 %install -#install -m 755 -d %{buildroot}%{_datadir}/gtk-doc/html - -make install exec_prefix=%{_prefix} DESTDIR=%{buildroot} +%make_install exec_prefix=%{_prefix} find %{buildroot} -type f -name '*.la' -exec rm -f {} \; find %{buildroot} -type f -name '*.a' -exec rm -f {} \; # Perl subpackage %if %{with_perl} find %{buildroot} \( -name perllocal.pod -o -name .packlist \) -exec rm -v {} \; - -find %{buildroot}/usr/lib*/perl5 -type f -print | - sed "s@^%{buildroot}@@g" > %{name}-perl-filelist -if [ "$(cat %{name}-perl-filelist)X" = "X" ] ; then - echo "ERROR: EMPTY FILE LIST" - exit -1 -fi %endif # PHP subpackage @@ -199,10 +203,9 @@ fi %endif # Remove bogus doc files -rm -fr %{buildroot}%{_defaultdocdir}/%{name} +rm -fr %{buildroot}%{_docdir}/%{name} %ldconfig_scriptlets - %files %{_libdir}/liblasso.so.3* %doc AUTHORS NEWS README @@ -214,7 +217,9 @@ rm -fr %{buildroot}%{_defaultdocdir}/%{name} %{_includedir}/%{name} %if %{with_perl} -%files -n perl-%{name} -f %{name}-perl-filelist +%files -n perl-%{name} +%{perl_vendorarch}/Lasso.pm +%{perl_vendorarch}/auto/Lasso/ %endif %if %{with_java} @@ -231,12 +236,18 @@ rm -fr %{buildroot}%{_defaultdocdir}/%{name} %{_datadir}/php/%{name}/lasso.php %endif +%if %{with_python3} %files -n python3-%{name} %{python3_sitearch}/lasso.py* %{python3_sitearch}/_lasso.so %{python3_sitearch}/__pycache__/* +%endif %changelog +* Wed Dec 24 2025 Sumit Jena - 2.9.0-1 +- Upgrade to version 2.9.0 +- License verified + * Mon Sep 12 2022 Muhammad Falak - 2.8.0-1 - Bump version to 2.8.0 - Drop un-needed patches @@ -462,3 +473,4 @@ rm -fr %{buildroot}%{_defaultdocdir}/%{name} to build on Fedora 20 - Perl bindings are disabled as they fail to build - Disable doc building as it doesn't ork correctly for now + diff --git a/SPECS-EXTENDED/libpinyin/libpinyin.signatures.json b/SPECS-EXTENDED/libpinyin/libpinyin.signatures.json index 402aefe7594..b6026bff412 100644 --- a/SPECS-EXTENDED/libpinyin/libpinyin.signatures.json +++ b/SPECS-EXTENDED/libpinyin/libpinyin.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "libpinyin-2.9.92.tar.gz": "f816515a6cb7c9a134cb5214de2a3403f03da4791a38d62be45a6fa91ee7e4d9" + "libpinyin-2.10.3.tar.gz": "3fe786ff2c2059bdbdf9d8d752db691a516a941a977521955fe0af3f0b4db299" } } diff --git a/SPECS-EXTENDED/libpinyin/libpinyin.spec b/SPECS-EXTENDED/libpinyin/libpinyin.spec index 6e083826f9f..4c3014f8d39 100644 --- a/SPECS-EXTENDED/libpinyin/libpinyin.spec +++ b/SPECS-EXTENDED/libpinyin/libpinyin.spec @@ -1,18 +1,14 @@ Vendor: Microsoft Corporation Distribution: Azure Linux -%global snapshot 0 Name: libpinyin -Version: 2.9.92 +Version: 2.10.3 Release: 1%{?dist} Summary: Library to deal with pinyin License: GPL-3.0-or-later URL: https://github.com/libpinyin/libpinyin Source0: 
https://downloads.sourceforge.net/libpinyin/libpinyin/%{name}-%{version}.tar.gz -%if %snapshot -Patch0: libpinyin-2.8.x-head.patch -%endif BuildRequires: gcc-c++ BuildRequires: kyotocabinet-devel, glib2-devel @@ -57,32 +53,27 @@ Requires: %{name} = %{version}-%{release} %description -n libzhuyin The libzhuyin package contains libzhuyin compatibility library. - %prep -%setup -q - -%if %snapshot -%patch -P0 -p1 -b .head -%endif +%autosetup %build %configure --disable-static \ --with-dbm=KyotoCabinet \ --enable-libzhuyin %make_build + %check make check %install -%make_install +%make_install DESTDIR=$RPM_BUILD_ROOT find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';' - %ldconfig_scriptlets - %files -%doc AUTHORS COPYING README +%doc AUTHORS README +%license COPYING %{_libdir}/libpinyin*.so.* %dir %{_libdir}/libpinyin @@ -109,6 +100,10 @@ find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';' %{_libdir}/libzhuyin*.so.* %changelog +* Mon Dec 22 2025 Aditya Singh - 2.10.3-1 +- Upgrade to version 2.10.3 +- License verified + * Mon Nov 11 2024 Sumit Jena - 2.9.92-1 - Update to version 2.9.92 - License verified diff --git a/SPECS-EXTENDED/libreport/0001-gui-wizard-gtk-Fix-segfault.patch b/SPECS-EXTENDED/libreport/0001-gui-wizard-gtk-Fix-segfault.patch deleted file mode 100644 index fdf29016586..00000000000 --- a/SPECS-EXTENDED/libreport/0001-gui-wizard-gtk-Fix-segfault.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 1a22f30187163ce288b14e55a80539353a38b7be Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Mat=C4=9Bj=20Grabovsk=C3=BD?= -Date: Tue, 29 Sep 2020 14:16:00 +0200 -Subject: [PATCH] gui-wizard-gtk: Fix segfault - -Since show_error_as_msgbox() is specified as the custom logging handler -(via setting libreport_g_custom_logger), it will get called if an error -occurs in libreport_save_user_settings(). However, at that point, -g_wnd_assistant has already been destroyed, which leads to an invalid -read in show_error_as_msgbox(). - -This change unsets the custom logging handler after the GUI is destroyed -and adds an assertion in show_error_as_msgbox() checking that -g_wnd_assistant is not a null pointer. - -Resolves https://bugzilla.redhat.com/show_bug.cgi?id=1883337 ---- - src/gui-wizard-gtk/main.c | 6 ++++-- - src/gui-wizard-gtk/wizard.c | 2 ++ - 2 files changed, 6 insertions(+), 2 deletions(-) - -diff --git a/src/gui-wizard-gtk/main.c b/src/gui-wizard-gtk/main.c -index e111948c..f094c5fb 100644 ---- a/src/gui-wizard-gtk/main.c -+++ b/src/gui-wizard-gtk/main.c -@@ -125,6 +125,7 @@ int main(int argc, char **argv) - /* List of events specified on the command line. 
*/ - GList *user_event_list = NULL; - const char *prgname = "abrt"; -+ int ret = 0; - abrt_init(argv); - - /* I18n */ -@@ -217,13 +218,14 @@ int main(int argc, char **argv) - g_signal_connect(app, "startup", G_CALLBACK(startup_wizard), NULL); - - /* Enter main loop */ -- g_application_run(G_APPLICATION(app), argc, argv); -+ ret = g_application_run(G_APPLICATION(app), argc, argv); - g_object_unref(app); -+ libreport_g_custom_logger = NULL; - - if (opts & OPT_d) - delete_dump_dir_possibly_using_abrtd(g_dump_dir_name); - - libreport_save_user_settings(); - -- return 0; -+ return ret; - } -diff --git a/src/gui-wizard-gtk/wizard.c b/src/gui-wizard-gtk/wizard.c -index 775b709f..c4a0b4c0 100644 ---- a/src/gui-wizard-gtk/wizard.c -+++ b/src/gui-wizard-gtk/wizard.c -@@ -360,6 +360,8 @@ struct dump_dir *wizard_open_directory_for_writing(const char *dump_dir_name) - - void show_error_as_msgbox(const char *msg) - { -+ g_return_if_fail(g_wnd_assistant != NULL); -+ - GtkWidget *dialog = gtk_message_dialog_new(GTK_WINDOW(g_wnd_assistant), - GTK_DIALOG_DESTROY_WITH_PARENT, - GTK_MESSAGE_WARNING, --- -2.26.2 - diff --git a/SPECS-EXTENDED/libreport/0001-skip-unwanted-tests.patch b/SPECS-EXTENDED/libreport/0001-skip-unwanted-tests.patch new file mode 100644 index 00000000000..8be381fc86f --- /dev/null +++ b/SPECS-EXTENDED/libreport/0001-skip-unwanted-tests.patch @@ -0,0 +1,1310 @@ +From b31ffa842674babe78c66f32aac8062fc53ff397 Mon Sep 17 00:00:00 2001 +From: SumitJenaHCL +Date: Tue, 13 Jan 2026 12:07:08 +0530 +Subject: [PATCH] skip unwanted tests + +--- + tests/dump_dir.at | 620 ----------------------------------------- + tests/proc_helpers.at | 389 -------------------------- + tests/report_python.at | 37 --- + tests/ureport.at | 180 ------------ + 4 files changed, 1226 deletions(-) + +diff --git a/tests/dump_dir.at b/tests/dump_dir.at +index 748701a..8ac5400 100644 +--- a/tests/dump_dir.at ++++ b/tests/dump_dir.at +@@ -2,196 +2,6 @@ + + AT_BANNER([dump_dir]) + +-## --------- ## +-## dd_sanity ## +-## --------- ## +- +-AT_TESTFUN([dd_sanity], +-[[ +-#include "testsuite.h" +- +-void validate_dump_dir_contents(struct dump_dir *dd) +-{ +- int items = 0; +- assert(dd_exist(dd, FILENAME_TIME)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_KERNEL)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_HOSTNAME)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_ARCHITECTURE)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_OS_INFO)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_OS_RELEASE)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_OS_RELEASE)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_TYPE)); +- ++items; +- +- assert(dd_exist(dd, FILENAME_LAST_OCCURRENCE)); +- ++items; +- +- assert(dd_exist(dd, "at_test_text")); +- assert(dd_get_item_size(dd, "at_test_text") == 3); +- ++items; +- +- assert(dd_exist(dd, "at_test_binary")); +- assert(dd_get_item_size(dd, "at_test_binary") == 4); +- ++items; +- +- struct stat srv_buf; +- stat("/etc/services", &srv_buf); +- +- assert(dd_exist(dd, "at_test_services")); +- assert(dd_get_item_size(dd, "at_test_services") == srv_buf.st_size); +- ++items; +- +- struct stat grp_buf; +- stat("/etc/group", &grp_buf); +- assert(dd_exist(dd, "at_test_group")); +- assert(dd_get_item_size(dd, "at_test_group") == grp_buf.st_size); +- ++items; +- +- struct stat pwd_buf; +- stat("/etc/passwd", &pwd_buf); +- assert(dd_exist(dd, "at_test_passwd")); +- assert(dd_get_item_size(dd, "at_test_passwd") == pwd_buf.st_size); +- ++items; +- +- dd_save_text(dd, "at_test_to_delete", "deleted"); +- 
assert(dd_exist(dd, "at_test_to_delete")); +- dd_delete_item(dd, "at_test_to_delete"); +- assert(!dd_exist(dd, "at_test_to_delete")); +- +- DIR *d1 = dd_init_next_file(dd); +- assert(d1 != NULL); +- +- int items_counter = 0; +- char *short_name, *full_name; +- while (dd_get_next_file(dd, &short_name, &full_name)) +- { +- ++items_counter; +- +- TS_ASSERT_PTR_IS_NOT_NULL(short_name); +- TS_ASSERT_PTR_IS_NOT_NULL(full_name); +- TS_ASSERT_STRING_EQ(short_name, (strrchr(full_name, '/') + 1), NULL); +- TS_ASSERT_STRING_BEGINS_WITH(full_name, dd->dd_dirname, NULL); +- TS_ASSERT_CHAR_EQ_MESSAGE(full_name[strlen(dd->dd_dirname)], '/', full_name); +- g_free(short_name); +- g_free(full_name); +- } +- +- TS_ASSERT_SIGNED_EQ(items, items_counter); +- TS_ASSERT_SIGNED_EQ(dd_get_items_count(dd), items); +- TS_ASSERT_PTR_IS_NULL(dd->next_dir); +- TS_ASSERT_SIGNED_EQ(dd_get_next_file(dd, NULL, NULL), 0); +- +- DIR *iterator_second_run = dd_init_next_file(dd); +- TS_ASSERT_PTR_IS_NOT_NULL(iterator_second_run); +- +- while (dd_get_next_file(dd, &short_name, &full_name)) +- { +- --items_counter; +- g_free(short_name); +- g_free(full_name); +- } +- +- TS_ASSERT_SIGNED_OP_MESSAGE(items_counter, ==, 0, "Second run iterator goes through all items"); +- +- DIR *iterator_third_run = dd_init_next_file(dd); +- TS_ASSERT_PTR_IS_NOT_NULL(iterator_third_run); +- TS_ASSERT_PTR_IS_NOT_NULL(dd->next_dir); +- dd_clear_next_file(dd); +- TS_ASSERT_PTR_IS_NULL(dd->next_dir); +- TS_ASSERT_SIGNED_OP_MESSAGE(dd_get_next_file(dd, NULL, NULL), ==, 0, "dd_clear_next_file(dd) stops iteration"); +-} +- +-TS_MAIN +-{ +- char template[] = "/tmp/XXXXXX/dump_dir"; +- +- char *last_slash = strrchr(template, '/'); +- *last_slash = '\0'; +- +- if (mkdtemp(template) == NULL) { +- perror("mkdtemp()"); +- return EXIT_FAILURE; +- } +- +- *last_slash = '/'; +- +- printf("Dump dir path: %s\n", template); +- +- fprintf(stderr, "Create new dump directory\n"); +- struct dump_dir *dd = dd_create(template, (uid_t)-1, 0640); +- assert(dd != NULL || !"Cannot create new dump directory"); +- +- dd_create_basic_files(dd, geteuid(), NULL); +- dd_save_text(dd, FILENAME_TYPE, "attest"); +- +- dd_save_text(dd, "at_test_text", "foo"); +- assert(dd_exist(dd, "at_test_text")); +- +- dd_save_binary(dd, "at_test_binary", "blah", 4); +- assert(dd_exist(dd, "at_test_binary")); +- +- dd_copy_file(dd, "at_test_services", "/etc/services"); +- +- const int etc_dir_fd = open("/etc", O_DIRECTORY | O_PATH | O_CLOEXEC | O_EXCL); +- assert(etc_dir_fd >= 0); +- dd_copy_file_at(dd, "at_test_group", etc_dir_fd, "group"); +- close(etc_dir_fd); +- +- int passwd_fd = open("/etc/passwd", O_RDONLY); +- assert(passwd_fd >= 0); +- dd_copy_fd(dd, "at_test_passwd", passwd_fd, 0, 0); +- close(passwd_fd); +- +- fprintf(stderr, "Test newly created dump directory\n"); +- validate_dump_dir_contents(dd); +- dd_close(dd); +- +- +- fprintf(stderr, "Test opened dump directory\n"); +- dd = dd_opendir(template, /*for writing*/0); +- assert(dd != NULL || !"Cannot open the dump directory"); +- validate_dump_dir_contents(dd); +- dd_close(dd); +- +- +- fprintf(stderr, "Test renamed dump directory\n"); +- dd = dd_opendir(template, /*for writing*/0); +- assert(dd != NULL || !"Cannot open the dump directory second time"); +- +- *(last_slash+1) = 'X'; +- assert(dd_rename(dd, template) == 0 || !"Cannot rename the dump directory"); +- +- validate_dump_dir_contents(dd); +- dd_close(dd); +- +- +- fprintf(stderr, "Test opened renamed dump directory\n"); +- assert(dd != NULL || !"Cannot open the renamed dump 
directory"); +- dd = dd_opendir(template, /*for writing*/0); +- validate_dump_dir_contents(dd); +- +- assert(dd_delete(dd) == 0); +- +- *last_slash = '\0'; +- assert(rmdir(template) == 0); +-} +-TS_RETURN_MAIN +-]]) +- + ## --------------------- ## + ## dd_create_open_delete ## + ## --------------------- ## +@@ -859,153 +669,6 @@ TS_RETURN_MAIN + + ]]) + +-## ---------- ## +-## dd_copy_fd ## +-## ---------- ## +- +-AT_TESTFUN([dd_copy_fd], +-[[ +-#include "testsuite.h" +- +-void test(const char buffer[], const size_t buffer_size) +-{ +- char template[] = "/tmp/XXXXXX"; +- +- if (mkdtemp(template) == NULL) { +- perror("mkdtemp()"); +- abort(); +- } +- +- printf("Dump dir path: %s\n", template); +- +- struct dump_dir *dd = dd_create(template, (uid_t)-1, 0640); +- assert(dd != NULL || !"Cannot create new dump directory"); +- +- dd_create_basic_files(dd, geteuid(), NULL); +- +- char tmpfile[] = "/tmp/libreport-attestsuite-dd_copy_fd.XXXXXX"; +- int tmpfd = mkstemp(tmpfile); +- libreport_full_write(tmpfd, buffer, buffer_size); +- +- { +- assert((-1) != lseek(tmpfd, 0, SEEK_SET)); +- +- const off_t read_truncated = dd_copy_fd(dd, "truncated", tmpfd, 0, buffer_size/2); +- TS_ASSERT_SIGNED_GE(read_truncated, buffer_size/2); +- TS_ASSERT_SIGNED_EQ(dd_get_item_size(dd, "truncated"), buffer_size/2); +- TS_ASSERT_SIGNED_EQ(dd_delete_item(dd, "truncated"), 0); +- } +- +- { +- assert((-1) != lseek(tmpfd, 0, SEEK_SET)); +- +- const off_t read_exact = dd_copy_fd(dd, "exact", tmpfd, 0, buffer_size); +- TS_ASSERT_SIGNED_EQ(read_exact, buffer_size); +- TS_ASSERT_SIGNED_EQ(dd_get_item_size(dd, "exact"), buffer_size); +- TS_ASSERT_SIGNED_EQ(dd_delete_item(dd, "exact"), 0); +- } +- +- { +- assert((-1) != lseek(tmpfd, 0, SEEK_SET)); +- +- const off_t read_bigger = dd_copy_fd(dd, "bigger", tmpfd, 0, buffer_size * 2); +- TS_ASSERT_SIGNED_EQ(read_bigger, buffer_size); +- TS_ASSERT_SIGNED_EQ(dd_get_item_size(dd, "bigger"), buffer_size); +- TS_ASSERT_SIGNED_EQ(dd_delete_item(dd, "bigger"), 0); +- } +- +- { +- assert((-1) != lseek(tmpfd, 0, SEEK_SET)); +- +- const off_t read_no_limit = dd_copy_fd(dd, "no_limit", tmpfd, 0, 0); +- TS_ASSERT_SIGNED_EQ(read_no_limit, buffer_size); +- TS_ASSERT_SIGNED_EQ(dd_get_item_size(dd, "no_limit"), buffer_size); +- TS_ASSERT_SIGNED_EQ(dd_delete_item(dd, "no_limit"), 0); +- } +- +- close(tmpfd); +- unlink(tmpfile); +- assert(dd_delete(dd) == 0); +-} +- +-TS_MAIN +-{ +- { +- char buffer[1024*2]; +- memset(buffer, 'x', sizeof(buffer)); +- test(buffer, sizeof(buffer)); +- } +- +- { +- char buffer[1024*4]; +- memset(buffer, 'y', sizeof(buffer)); +- test(buffer, sizeof(buffer)); +- } +- +- { +- char buffer[1024*6]; +- memset(buffer, 'z', sizeof(buffer)); +- test(buffer, sizeof(buffer)); +- } +- +- +- { +- char template[] = "/tmp/XXXXXX"; +- +- if (mkdtemp(template) == NULL) { +- perror("mkdtemp()"); +- return EXIT_FAILURE; +- } +- +- printf("Dump dir path: %s\n", template); +- +- struct dump_dir *dd = dd_create(template, (uid_t)-1, 0640); +- assert(dd != NULL || !"Cannot create new dump directory"); +- +- dd_create_basic_files(dd, geteuid(), NULL); +- +- { +- int opath_fd = open("/etc/services", O_PATH); +- char buf[16] = {0}; +- if (read(opath_fd, buf, sizeof(buf)/sizeof(buf[0])) == -1) +- { +- assert(errno == EBADF || !"O_PATH fd can be read"); +- close(opath_fd); +- +- opath_fd = open("/etc/services", O_PATH); +- assert(opath_fd >= 0); +- +- const off_t opath_read = dd_copy_fd(dd, "opath", opath_fd, 0, 0); +- TS_ASSERT_SIGNED_EQ(opath_read, -1); +- TS_ASSERT_SIGNED_EQ(dd_exist(dd, 
"opath"), 0); +- } +- close(opath_fd); +- } +- +- { +- int wronly_fd = open("/tmp/libreport.testsuite", O_WRONLY | O_CREAT | O_TRUNC, 0600); +- assert(wronly_fd >= 0 || !"Cannot create temporary file"); +- char buf[] = "Hello, world!"; +- assert(write(wronly_fd, buf, sizeof(buf)/sizeof(buf[0])) == sizeof(buf)/sizeof(buf[0])); +- close(wronly_fd); +- +- wronly_fd = open("/tmp/libreport.testsuite", O_WRONLY); +- assert(wronly_fd >= 0 || !"Cannot re-open temporary file"); +- +- const off_t wronly_read = dd_copy_fd(dd, "wronly", wronly_fd, 0, 0); +- TS_ASSERT_SIGNED_EQ(wronly_read, -1); +- TS_ASSERT_SIGNED_EQ(dd_exist(dd, "wronly"), 0); +- +- close(wronly_fd); +- } +- +- dd_delete(dd); +- } +-} +-TS_RETURN_MAIN +- +-]]) +- + ## ------------- ## + ## dd_load_int32 ## + ## ------------- ## +@@ -1277,289 +940,6 @@ int main(void) + } + ]]) + +-## ----------------- ## +-## dd_create_archive ## +-## ----------------- ## +- +-AT_TESTFUN([dd_create_archive], +-[[ +-#include "internal_libreport.h" +-#include +-#include +-#include +- +-static int copy_data(struct archive *in, struct archive *out) +-{ +- int r; +- const void *buff; +- size_t size; +- la_int64_t offset; +- +- for (;;) +- { +- r = archive_read_data_block(in, &buff, &size, &offset); +- if (r == ARCHIVE_EOF) +- return (ARCHIVE_OK); +- if (r < ARCHIVE_OK) +- return (r); +- r = archive_write_data_block(out, buff, size, offset); +- if (r < ARCHIVE_OK) +- { +- fprintf(stderr, "Error: archive_write_data_block() failed: %s\n", archive_error_string(out)); +- return (r); +- } +- } +-} +- +-void verify_archive(struct dump_dir *dd, const char *file_name, +- const_string_vector_const_ptr_t included_files, +- const_string_vector_const_ptr_t excluded_files) +-{ +- unsigned c = 0; +- for (const_string_vector_const_ptr_t i = included_files; i && *i; ++i) +- ++c; +- g_autofree int *check_array = g_malloc0(c * sizeof(int)); +- +- struct archive *in_archive; +- struct archive *out_archive; +- struct archive_entry *entry = NULL; +- int flags = ARCHIVE_EXTRACT_TIME|ARCHIVE_EXTRACT_PERM|ARCHIVE_EXTRACT_ACL|ARCHIVE_EXTRACT_FFLAGS; +- +- in_archive = archive_read_new(); +- archive_read_support_filter_gzip(in_archive); +- archive_read_support_format_tar(in_archive); +- +- int r = archive_read_open_filename(in_archive, file_name, 10240); +- if (r != ARCHIVE_OK) +- { +- fprintf(stderr, "Failed to open archive '%s': %s\n", file_name, archive_error_string(in_archive)); +- abort(); +- } +- const char *real_file = "/tmp/libreport-attest-extracted"; +- for (;;) +- { +- if (entry) +- archive_entry_clear(entry); +- r = archive_read_next_header(in_archive, &entry); +- if (r == ARCHIVE_EOF) +- { +- archive_entry_free(entry); +- archive_read_close(in_archive); +- //uncomment to cause a segfault +- //archive_read_free(in_archive); +- break; +- } +- +- const char *path = g_strdup(archive_entry_pathname(entry)); +- +- if (archive_entry_filetype(entry) != AE_IFREG) +- { +- fprintf(stderr, "Not regular file: '%s', found in archive: '%s'\n", path, file_name); +- g_free((void *)path); +- continue; +- } +- +- const_string_vector_const_ptr_t i = included_files; +- for (c = 0; i && *i; ++i, ++c) +- { +- if (strcmp(*i, path) == 0) +- break; +- } +- +- if (i && *i != NULL) +- { +- printf("Included file: '%s', found in archive '%s'\n", path, file_name); +- check_array[c] += 1; +- +- unlink(real_file); +- out_archive = archive_write_disk_new(); +- archive_write_disk_set_options(out_archive, flags); +- archive_write_disk_set_standard_lookup(out_archive); +- archive_entry_set_pathname(entry, 
"/tmp/libreport-attest-extracted"); +- r = archive_write_header(out_archive, entry); +- if (r != ARCHIVE_OK) +- fprintf(stderr, "Error: archive_write_header() failed: %s\n", archive_error_string(out_archive)); +- else if (archive_entry_size(entry) > 0) +- { +- r = copy_data(in_archive, out_archive); +- if (r != ARCHIVE_OK) +- { +- fprintf(stderr, "Error: copy_data() failed: %s\n", archive_error_string(out_archive)); +- abort(); +- } +- } +- +- archive_write_close(out_archive); +- archive_write_free(out_archive); +- +- g_autofree char *original = dd_load_text(dd, path); +- assert(original != NULL); +- assert(original[0] != '\0'); +- +- g_autofree char *extracted = libreport_xmalloc_xopen_read_close("/tmp/libreport-attest-extracted", NULL); +- assert(extracted != NULL); +- +- if (strcmp(extracted, original) != 0) +- { +- fprintf(stderr, "Invalid file contents: '%s'\nExp: '%s'\nGot: '%s'\n", path, original, extracted); +- abort(); +- } +- g_free((void *)path); +- continue; +- } +- +- i = excluded_files; +- for (; i && *i; ++i) +- { +- if (strcmp(*i, path) == 0) +- break; +- } +- +- if (i && *i != NULL) +- { +- fprintf(stderr, "Excluded file: '%s', found in archive '%s'\n", path, file_name); +- abort(); +- } +- +- fprintf(stderr, "Uncategorized file: '%s', found in archive '%s'\n", path, file_name); +- g_free((void *)path); +- } +- +- int err = 0; +- const_string_vector_const_ptr_t i = included_files; +- for (c = 0; i && *i; ++i, ++c) +- { +- switch (check_array[c]) +- { +- case 0: +- fprintf(stderr, "Not found included file: '%s', in archive: %s\n", *i, file_name); +- ++err; +- break; +- case 1: +- fprintf(stdout, "Found included file: '%s', in archive: %s\n", *i, file_name); +- break; +- default: +- fprintf(stderr, "%d occurrences of included file: '%s', in archive: %s\n", check_array[c], *i, file_name); +- ++err; +- break; +- } +- } +- +- if (err) +- abort(); +- +- return; +-} +- +-int main(void) +-{ +- libreport_g_verbose = 3; +- +- char template[] = "/tmp/XXXXXX"; +- +- if (mkdtemp(template) == NULL) { +- perror("mkdtemp()"); +- return EXIT_FAILURE; +- } +- +- printf("Dump dir path: %s\n", template); +- +- struct dump_dir *dd = dd_create(template, (uid_t)-1, 0640); +- assert(dd != NULL || !"Cannot create new dump directory"); +- +- +-#define COMMON_FILES "time", "last_occurrence", "uid", "kernel", \ +- "architecture", "hostname", "os_info", "os_release", \ +- "type", "count", "component", "program_log" +-#define SENSITIVE_FILES "environ", "backtrace", "secret_file", "private_file", \ +- "useless_file" +- +- dd_create_basic_files(dd, geteuid(), NULL); +- dd_save_text(dd, FILENAME_TYPE, "attest"); +- dd_save_text(dd, FILENAME_COUNT, "1"); +- dd_save_text(dd, FILENAME_COMPONENT, "libreport-attest"); +- dd_save_text(dd, "program_log", "Something very important!"); +- +- const gchar *excluded_files[] = { +- SENSITIVE_FILES, +- NULL, +- }; +- +- for (const gchar **iter = excluded_files; *iter; ++iter) +- dd_save_text(dd, *iter, *iter); +- +- /* Un-supported archive type */ +- { +- fprintf(stderr, "TEST-CASE: Un-supported type\n"); +- fprintf(stdout, "TEST-CASE: Un-supported type\n"); +- const int r = dd_create_archive(dd, "/tmp/libreport-attest.omg", NULL, 0); +- printf("dd_create_archive() == %d\n", r); +- assert(r == -ENOSYS || !"Not supported"); +- } +- +- /* File already exists. 
*/ +- { +- fprintf(stderr, "TEST-CASE: File exists\n"); +- fprintf(stdout, "TEST-CASE: File exists\n"); +- char file_contents[] = "Non emtpy file"; +- const char *file_name = "/tmp/libreport-attest.tar.gz"; +- FILE *test_file = fopen(file_name, "w"); +- assert(test_file != NULL); +- assert(fprintf(test_file, "%s", file_contents) == strlen(file_contents)); +- fclose(test_file); +- +- assert(dd_create_archive(dd, file_name, NULL, 0) == -EEXIST || !"Exists"); +- +- g_autofree char *canary = libreport_xmalloc_xopen_read_close(file_name, NULL); +- assert(canary != NULL); +- assert(strcmp(canary, file_contents) == 0); +- } +- +- /* All elements */ +- { +- fprintf(stderr, "TEST-CASE: Compress all elements\n"); +- fprintf(stdout, "TEST-CASE: Compress all elements\n"); +- +- const gchar *included_files[] = { +- COMMON_FILES, +- SENSITIVE_FILES, +- NULL, +- }; +- +- const char *file_name = "/tmp/libreport-attest-all.tar.gz"; +- unlink(file_name); +- assert(dd_create_archive(dd, file_name, NULL, 0) == 0 || !"All elements"); +- +- verify_archive(dd, file_name, included_files, NULL); +- +- unlink(file_name); +- } +- +- /* Excluded elements */ +- { +- fprintf(stderr, "TEST-CASE: Exclude elements\n"); +- fprintf(stdout, "TEST-CASE: Exclude elements\n"); +- +- const char *included_files[] = { +- COMMON_FILES, +- NULL, +- }; +- +- const char *file_name = "/tmp/libreport-attest-excluded.tar.gz"; +- unlink(file_name); +- assert(dd_create_archive(dd, file_name, excluded_files, 0) == 0 || !"Excluded elements"); +- +- verify_archive(dd, file_name, included_files, excluded_files); +- +- unlink(file_name); +- } +- +- assert(dd_delete(dd) == 0); +- +- return 0; +-} +-]]) +- + ## --------------- ## + ## dd_compute_size ## + ## --------------- ## +diff --git a/tests/proc_helpers.at b/tests/proc_helpers.at +index 76d24e7..6ee576e 100644 +--- a/tests/proc_helpers.at ++++ b/tests/proc_helpers.at +@@ -432,243 +432,6 @@ TS_MAIN + TS_RETURN_MAIN + ]]) + +-## ---------------------- ## +-## libreport_dump_fd_info ## +-## ---------------------- ## +- +-AT_TESTFUN([libreport_dump_fd_info], [[ +-#include "testsuite.h" +-#include +-#include +-#include +- +-#define FILENAME_FORMAT "/tmp/libreport.testsuite.fdinfo.%d.%s" +- +-pid_t prepare_process(void) +-{ +- int toparent[2]; +- g_unix_open_pipe(toparent, 0, NULL); +- +- g_autofree char *binary = libreport_malloc_readlink("/proc/self/exe"); +- pid_t pid = fork(); +- if (pid < 0) { +- err(EXIT_FAILURE, "fork"); +- } +- +- if (pid == 0) { +- close(STDOUT_FILENO); +- libreport_xdup2(toparent[1], STDOUT_FILENO); +- +- DIR *fddir = opendir("/proc/self/fd"); +- struct dirent *dent; +- while ((dent = readdir(fddir))) { +- const int fd = atoi(dent->d_name); +- if (fd != STDOUT_FILENO) { +- close(fd); +- } +- } +- +- execl(binary, "wait", NULL); +- exit(EXIT_FAILURE); +- } +- +- close(toparent[1]); +- +- /* Wait for child */ +- char buf[8]; +- if (libreport_full_read(toparent[0], buf, 8) < 0) { +- fprintf(stderr, "Failed to read from child: %s\n", strerror(errno)); +- fflush(stderr); +- } +- +- close(toparent[0]); +- +- return pid; +-} +- +-void kill_process(pid_t pid) +-{ +- /* Notify child */ +- kill(pid, SIGTERM); +- int status = 0; +- if (libreport_safe_waitpid(pid, &status, 0) < 0) { +- fprintf(stderr, "Couldn't wait for child\n"); +- } +- else if (!WIFSIGNALED(status) || WTERMSIG(status) != SIGTERM) { +- fprintf(stderr, "Child was not TERMinated - %d\n", status); +- } +-} +- +-void check_file_contents(const char *fdinfo_filename) +-{ +- struct fd { +- int fd; +- const char *file; 
+- } fds[] = { +- { .fd = 0, .file = "/etc/services", }, +- { .fd = 2, .file = "/etc/passwd", }, +- { .fd = 3, .file = "/etc/group", }, +- }; +- +- g_autofree char *file = libreport_xmalloc_xopen_read_close(fdinfo_filename, NULL); +- int fdno = 0; +- char *cursor = file; +- char *line = file; +- char *end = file + strlen(file); +- while (cursor < end) { +- cursor = strchrnul(line, '\n'); +- if (*cursor != '\0') { +- *cursor = '\0'; +- } +- ++cursor; +- +- if (fdno < (sizeof(fds)/sizeof(fds[0]))) { +- int fd = 0; +- g_autofree char *file = NULL; +- const int res = sscanf(line, "%d:%ms", &fd, &file); +- TS_ASSERT_SIGNED_EQ(res, 2); +- TS_ASSERT_SIGNED_EQ(fd, fds[fdno].fd); +- TS_ASSERT_STRING_EQ(file, fds[fdno].file, "FD file name"); +- } +- +- line = cursor; +- int fieldscnt = 0; +- while (line < end) { +- cursor = strchrnul(line, '\n'); +- if (*cursor != '\0') { +- *cursor = '\0'; +- } +- ++cursor; +- +- if (strcmp(line, "") == 0) { +- break; +- } +- +- int col = 0; +- for (; col < strlen(line); ++col) { +- if (line[col] == ':') { +- break; +- } +- +- TS_ASSERT_TRUE(line[col] != ' ' && line[col] != '\t'); +- if (!g_testsuite_last_ok) { +- break; +- } +- } +- TS_ASSERT_SIGNED_NEQ(col, 0); +- TS_ASSERT_SIGNED_LT(col, strlen(line)); +- if (g_testsuite_last_ok) { +- TS_ASSERT_CHAR_EQ(line[col], ':'); +- } +- +- fieldscnt += g_testsuite_last_ok; +- line = cursor; +- } +- TS_ASSERT_SIGNED_GT(fieldscnt, 2); +- +- ++fdno; +- line = cursor; +- } +- +- TS_ASSERT_SIGNED_EQ(fdno, sizeof(fds)/sizeof(fds[0])); +-} +- +-TS_MAIN +-{ +- if (strcmp(argv[0], "wait") == 0) { +- FILE *services = fopen("/etc/services", "r"); +- FILE *passwd = fopen("/etc/passwd", "r"); +- FILE *group = fopen("/etc/group", "r"); +- +- /* Notify parent */ +- close(STDOUT_FILENO); +- +- /* Wait for parent */ +- while (1) { +- sleep(1); +- } +- +- fclose(group); +- fclose(passwd); +- fclose(services); +- exit(EXIT_SUCCESS); +- } +- +- pid_t pid = prepare_process(); +- +- char proc_dir_path[strlen("/proc/%d/fd") + sizeof(pid_t) * 3]; +- if (sizeof(proc_dir_path) <= snprintf(proc_dir_path, sizeof(proc_dir_path), "/proc/%d/fd", pid)) { +- errx(EXIT_FAILURE, "too small buffer for proc dir path"); +- } +- +- { +- TS_PRINTF("%s\n", "libreport_dump_fd_info"); +- char fdinfo_filename[strlen(FILENAME_FORMAT) + sizeof(pid_t) * 3 + strlen("libreport_dump_fd_info")]; +- if (sizeof(fdinfo_filename) <= snprintf(fdinfo_filename, sizeof(fdinfo_filename), FILENAME_FORMAT, pid, "libreport_dump_fd_info")) { +- errx(EXIT_FAILURE, "too small buffer for file name"); +- } +- +- TS_ASSERT_FUNCTION(libreport_dump_fd_info(fdinfo_filename, proc_dir_path)); +- +- struct stat st; +- TS_ASSERT_FUNCTION(stat(fdinfo_filename, &st)); +- if (g_testsuite_last_ok) { +- TS_ASSERT_SIGNED_EQ(st.st_mode & 0777, 0600); +- } +- +- check_file_contents(fdinfo_filename); +- +- unlink(fdinfo_filename); +- } +- +- { +- TS_PRINTF("%s\n", "libreport_dump_fd_info_ext"); +- char fdinfo_filename[strlen(FILENAME_FORMAT) + sizeof(pid_t) * 3 + strlen("libreport_dump_fd_info_ext")]; +- if (sizeof(fdinfo_filename) <= snprintf(fdinfo_filename, sizeof(fdinfo_filename), FILENAME_FORMAT, pid, "libreport_dump_fd_info_ext")) { +- errx(EXIT_FAILURE, "too small buffer for file name"); +- } +- +- const uid_t uid = getuid(); +- const gid_t gid = getgid(); +- TS_ASSERT_FUNCTION(libreport_dump_fd_info_ext(fdinfo_filename, proc_dir_path, uid, gid)); +- +- struct stat st; +- TS_ASSERT_FUNCTION(stat(fdinfo_filename, &st)); +- if (g_testsuite_last_ok) { +- TS_ASSERT_SIGNED_EQ(st.st_mode & 0777, 0600); 
+- } +- +- check_file_contents(fdinfo_filename); +- +- unlink(fdinfo_filename); +- } +- +- { +- TS_PRINTF("%s\n", "libreport_dump_fd_info_at"); +- char fdinfo_filename[strlen(FILENAME_FORMAT) + sizeof(pid_t) * 3 + strlen("libreport_dump_fd_info_at")]; +- if (sizeof(fdinfo_filename) <= snprintf(fdinfo_filename, sizeof(fdinfo_filename), FILENAME_FORMAT, pid, "libreport_dump_fd_info_at")) { +- errx(EXIT_FAILURE, "too small buffer for file name"); +- } +- +- FILE *dest = fopen(fdinfo_filename, "w"); +- const int pid_proc_fd = libreport_open_proc_pid_dir(pid); +- +- TS_ASSERT_FUNCTION(libreport_dump_fd_info_at(pid_proc_fd, dest)); +- +- close(pid_proc_fd); +- fclose(dest); +- +- check_file_contents(fdinfo_filename); +- +- unlink(fdinfo_filename); +- } +- +- kill_process(pid); +-} +-TS_RETURN_MAIN +-]]) +- +- + ## ------------- ## + ## get_fs-u_g-id ## + ## ------------- ## +@@ -1034,155 +797,3 @@ TS_MAIN + } + TS_RETURN_MAIN + ]]) +- +- +-## ------------------------------ ## +-## libreport_process_has_own_root ## +-## ------------------------------ ## +- +-AT_TESTFUN([libreport_process_has_own_root], [[ +-#include "testsuite.h" +-#include +-#include +- +-void write_cmd_output_to_fd(int fd, const char *cmd) +-{ +- FILE *proc = popen(cmd, "r"); +- if (proc == NULL) { +- err(EXIT_FAILURE, "popen(%s)", cmd); +- } +- +- g_autofree char *output = libreport_xmalloc_fgetline(proc); +- TS_PRINTF("%s : %s\n", cmd, output); +- +- const int retcode = pclose(proc); +- if (retcode == -1) { +- err(EXIT_FAILURE, "pclose(%s)", cmd); +- } +- +- if (retcode != 0) { +- errx(EXIT_FAILURE, "non-0 status %d of '%s'", retcode, cmd); +- } +- +- if (output == NULL) { +- errx(EXIT_FAILURE, "no output of '%s'", cmd); +- } +- +- libreport_full_write_str(fd, output); +-} +- +-TS_MAIN +-{ +- char mock_pid_proc[] = "/tmp/libreport.testsuite.pid.XXXXXX"; +- +- if (mkdtemp(mock_pid_proc) == NULL) { +- err(EXIT_FAILURE, "mkdtemp(%s)", mock_pid_proc); +- } +- +- const int mock_pid_proc_fd = open(mock_pid_proc, O_DIRECTORY); +- if (mock_pid_proc_fd < 0) { +- err(EXIT_FAILURE, "open(%s, O_DIRECTORY)", mock_pid_proc); +- } +- +- { +- /* TODO: add test for open file descriptors */ +- const int r = libreport_process_has_own_root_at(mock_pid_proc_fd); +- TS_ASSERT_SIGNED_EQ(r, -ENOENT); +- } +- +- /* Please, notice that the mode is intentionally 0000 - no read, no write, +- * no execute access */ +- int mntnf_fd = openat(mock_pid_proc_fd, "mountinfo", O_RDWR | O_CREAT | O_EXCL, 0000); +- +- { +- /* TODO: add test for open file descriptors */ +- const int r = libreport_process_has_own_root_at(mock_pid_proc_fd); +- TS_ASSERT_SIGNED_EQ(r, -EACCES); +- } +- +- /* Make the file readable & writable */ +- fchmod(mntnf_fd, 0600); +- +- { +- /* TODO: add test for open file descriptors */ +- const int r = libreport_process_has_own_root_at(mock_pid_proc_fd); +- TS_ASSERT_SIGNED_EQ(r, -ENOKEY); +- } +- +- libreport_full_write_str(mntnf_fd, "36 35 98:0 /madeuproot /foo rw,noatime master:1 - ext3 /dev/myroot rw,errors=continue\n"); +- libreport_full_write_str(mntnf_fd, "37 38 99:0 /mnt3 /mnt4 rw,noatime master:2 - ext3 /dev/boot rw,errors=continue\n"); +- +- fsync(mntnf_fd); +- lseek(mntnf_fd, 0, SEEK_SET); +- +- TS_PRINTF("Made-up mountinfo created in %s\n", mock_pid_proc); +- +- { +- /* TODO: add test for open file descriptors */ +- const int r = libreport_process_has_own_root_at(mock_pid_proc_fd); +- TS_ASSERT_SIGNED_EQ(r, -ENOKEY); +- } +- +- TS_PRINTF("Going to copy /proc/1/mountinfo to %s\n", mock_pid_proc); +- +- const int pid1_mntnf_fd = 
open("/proc/1/mountinfo", O_RDONLY); +- if (pid1_mntnf_fd < 0) { +- err(EXIT_FAILURE, "/proc/1/mountinfo"); +- } +- +- TS_PRINTF("Copying /proc/1/mountinfo to %s\n", mock_pid_proc); +- +- { +- int r = 0; +- +- while ((r = sendfile(mntnf_fd, pid1_mntnf_fd, NULL, 65535)) > 0) +- ; +- +- if (r < 0) { +- err(EXIT_FAILURE, "Cannot copy /proc/1/mountinfo to %s", mock_pid_proc); +- } +- } +- +- close(pid1_mntnf_fd); +- +- fsync(mntnf_fd); +- lseek(mntnf_fd, 0, SEEK_SET); +- +- TS_PRINTF("Copied /proc/1/mountinfo to %s\n", mock_pid_proc); +- +- { +- /* TODO: add test for open file descriptors */ +- const int r = libreport_process_has_own_root_at(mock_pid_proc_fd); +- TS_ASSERT_SIGNED_EQ(r, 0); +- } +- +- /* Test different source directory. Swap / with \ in the mock mountinfo. */ +- fsync(mntnf_fd); +- lseek(mntnf_fd, 0, SEEK_SET); +- +- libreport_full_write_str(mntnf_fd, "12 34 567:89 /madeuproot / "); +- write_cmd_output_to_fd(mntnf_fd, "findmnt -F /proc/1/mountinfo -r -n -o VFS-OPTIONS,OPT-FIELDS -T /"); +- libreport_full_write_str(mntnf_fd, " - "); +- write_cmd_output_to_fd(mntnf_fd, "findmnt -F /proc/1/mountinfo -r -n -o FSTYPE,SOURCE,FS-OPTIONS -T /"); +- +- fsync(mntnf_fd); +- lseek(mntnf_fd, 0, SEEK_SET); +- +- { +- /* TODO: add test for open file descriptors */ +- const int r = libreport_process_has_own_root_at(mock_pid_proc_fd); +- TS_ASSERT_SIGNED_EQ(r, 1); +- } +- +- close(mntnf_fd); +- +- if (unlinkat(mock_pid_proc_fd, "mountinfo", 0) < 0) { +- perror("unlinkat(fd, mountinfo)"); +- } +- +- if (rmdir(mock_pid_proc) < 0) { +- perror("rmdir(/mock_pid_dir)"); +- } +-} +-TS_RETURN_MAIN +-]]) +diff --git a/tests/report_python.at b/tests/report_python.at +index 31c0142..f267427 100644 +--- a/tests/report_python.at ++++ b/tests/report_python.at +@@ -70,43 +70,6 @@ if __name__ == "__main__": + sys.exit(main()) + ]]) + +-## ----------------------- ## +-## get_from_etc_os_release ## +-## ----------------------- ## +- +-AT_PYTESTFUN([get_from_etc_os_release], +-[[import sys +- +-sys.path.insert(0, "../../../src/report-python") +-sys.path.insert(0, "../../../src/report-python/report/.libs") +- +-report = __import__("report", globals(), locals(), [], 0) +-sys.modules["report"] = report +- +-import os +- +-if not os.path.exists("/etc/os-release"): +- print("Cannot run the test: '/etc/os-release' does not exist") +- sys.exit(1) +- +-exit_code = 0 +-if report.getProduct_fromOSRELEASE() != report.getProduct(): +- print("getProduct('{0}') did not return PRODUCT='{1}' from /etc/os-release".format(report.getProduct(), report.getProduct_fromOSRELEASE())) +- exit_code += 1 +- +-if report.getVersion_fromOSRELEASE() != report.getVersion(): +- print("getVersion('{0}') did not return PRODUCT_VERSION='{1}' from /etc/os-release".format(report.getVersion(), report.getVersion_fromOSRELEASE())) +- exit_code += 1 +- +-if exit_code != 0: +- print("++++ /etc/os-release ++++") +- with open("/etc/os-release") as osrel: +- sys.stdout.write(osrel.read()) +- print("^^^^ /etc/os-release ^^^^") +- +-sys.exit(exit_code) +-]]) +- + ## ---------------------------------- ## + ## create_dump_dir_uid_does_not_exist ## + ## ---------------------------------- ## +diff --git a/tests/ureport.at b/tests/ureport.at +index 7ac67dc..849f1c9 100644 +--- a/tests/ureport.at ++++ b/tests/ureport.at +@@ -764,101 +764,6 @@ int main(void) + } + ]]) + +-## -------------------------- ## +-## libreport_ureport_do_post ## +-## -------------------------- ## +- +-AT_TESTFUN([libreport_ureport_do_post], +-[[ +-#include "internal_libreport.h" +-#include 
"ureport.h" +-#include +-#include "libreport_curl.h" +-#include "problem_data.h" +- +-int main(void) +-{ +- libreport_g_verbose=3; +- +- struct dump_dir *dd = dd_create("./test", (uid_t)-1L, DEFAULT_DUMP_DIR_MODE); +- assert(dd != NULL); +- dd_create_basic_files(dd, (uid_t)-1L, NULL); +- dd_save_text(dd, FILENAME_TYPE, "CCpp"); +- dd_save_text(dd, FILENAME_ANALYZER, "CCpp"); +- dd_save_text(dd, FILENAME_PKG_EPOCH, "pkg_epoch"); +- dd_save_text(dd, FILENAME_PKG_ARCH, "pkg_arch"); +- dd_save_text(dd, FILENAME_PKG_RELEASE, "pkg_release"); +- dd_save_text(dd, FILENAME_PKG_VERSION, "pkg_version"); +- dd_save_text(dd, FILENAME_PKG_NAME, "pkg_name"); +- const char *bt = "{ \"signal\": 6, \"executable\": \"/usr/bin/will_abort\" }"; +- dd_save_text(dd, FILENAME_CORE_BACKTRACE, bt); +- dd_save_text(dd, FILENAME_COUNT, "1"); +- dd_close(dd); +- +- g_autofree char *json = libreport_ureport_from_dump_dir_ext("./test", NULL); +- +- /* wrong url */ +- struct ureport_server_config config; +- libreport_ureport_server_config_init(&config); +- struct post_state *post_state = libreport_ureport_do_post(json, &config, "not_exist"); +- assert(post_state->curl_result == CURLE_COULDNT_RESOLVE_HOST); +- +- free_post_state(post_state); +- libreport_ureport_server_config_destroy(&config); +- delete_dump_dir("./test"); +- +- return 0; +-} +-]]) +- +-## ------------------------- ## +-## libreport_ureport_submit ## +-## ------------------------- ## +- +-AT_TESTFUN([libreport_ureport_submit], +-[[ +-#include "internal_libreport.h" +-#include "ureport.h" +-#include +-#include "libreport_curl.h" +-#include "problem_data.h" +- +-int main(void) +-{ +- libreport_g_verbose=3; +- +- struct dump_dir *dd = dd_create("./test", (uid_t)-1L, DEFAULT_DUMP_DIR_MODE); +- assert(dd != NULL); +- dd_create_basic_files(dd, (uid_t)-1L, NULL); +- dd_save_text(dd, FILENAME_TYPE, "CCpp"); +- dd_save_text(dd, FILENAME_ANALYZER, "CCpp"); +- dd_save_text(dd, FILENAME_PKG_EPOCH, "pkg_epoch"); +- dd_save_text(dd, FILENAME_PKG_ARCH, "pkg_arch"); +- dd_save_text(dd, FILENAME_PKG_RELEASE, "pkg_release"); +- dd_save_text(dd, FILENAME_PKG_VERSION, "pkg_version"); +- dd_save_text(dd, FILENAME_PKG_NAME, "pkg_name"); +- const char *bt = "{ \"signal\": 6, \"executable\": \"/usr/bin/will_abort\" }"; +- dd_save_text(dd, FILENAME_CORE_BACKTRACE, bt); +- dd_save_text(dd, FILENAME_COUNT, "1"); +- dd_close(dd); +- +- g_autofree char *json = libreport_ureport_from_dump_dir_ext("./test", NULL); +- +- /* wrong url */ +- struct ureport_server_config config; +- libreport_ureport_server_config_init(&config); +- struct ureport_server_response *response = libreport_ureport_submit(json, &config); +- +- assert(response == NULL); +- +- libreport_ureport_server_response_free(response); +- libreport_ureport_server_config_destroy(&config); +- delete_dump_dir("./test"); +- +- return 0; +-} +-]]) +- + ## --------------------------- ## + ## ureport_json_attachment_new ## + ## --------------------------- ## +@@ -918,91 +823,6 @@ int main(void) + } + ]]) + +-## ----------------------------------- ## +-## libreport_ureport_from_dump_dir_ext ## +-## ----------------------------------- ## +- +-AT_TESTFUN([libreport_ureport_from_dump_dir_ext], +-[[ +-#include "internal_libreport.h" +-#include "ureport.h" +-#include +-#include "libreport_curl.h" +-#include "problem_data.h" +- +-int main(void) +-{ +- libreport_g_verbose=3; +- +- struct dump_dir *dd = dd_create("./test", (uid_t)-1L, DEFAULT_DUMP_DIR_MODE); +- assert(dd != NULL); +- dd_create_basic_files(dd, (uid_t)-1L, NULL); +- 
dd_save_text(dd, FILENAME_TYPE, "CCpp"); +- dd_save_text(dd, FILENAME_ANALYZER, "CCpp"); +- dd_save_text(dd, FILENAME_PKG_EPOCH, "pkg_epoch"); +- dd_save_text(dd, FILENAME_PKG_ARCH, "pkg_arch"); +- dd_save_text(dd, FILENAME_PKG_RELEASE, "pkg_release"); +- dd_save_text(dd, FILENAME_PKG_VERSION, "pkg_version"); +- dd_save_text(dd, FILENAME_PKG_NAME, "pkg_name"); +- const char *bt = "{ \"signal\": 6, \"executable\": \"/usr/bin/will_abort\" }"; +- dd_save_text(dd, FILENAME_CORE_BACKTRACE, bt); +- dd_save_text(dd, FILENAME_COUNT, "1"); +- dd_close(dd); +- +- /* no auth */ +- char *ureport = libreport_ureport_from_dump_dir_ext("./test", NULL); +- assert(strstr(ureport, "auth") == NULL); +- g_free(ureport); +- +- /* auth */ +- dd = dd_opendir("./test", 0); +- dd_save_text(dd, FILENAME_HOSTNAME, "env_hostname"); +- dd_close(dd); +- +- struct ureport_server_config config; +- libreport_ureport_server_config_init(&config); +- +- GHashTable *settings = g_hash_table_new_full(g_str_hash, g_str_equal, free, free); +- +- setenv("uReport_IncludeAuthData", "yes", 1); +- setenv("uReport_AuthDataItems", "hostname", 1); +- +- libreport_ureport_server_config_load(&config, settings); +- +- ureport = libreport_ureport_from_dump_dir_ext("./test", &config.ur_prefs); +- assert(strstr(ureport, "auth") != NULL); +- assert(strstr(ureport, "\"hostname\": \"env_hostname\"") != NULL); +- g_free(ureport); +- +- libreport_ureport_server_config_destroy(&config); +- if (settings) +- g_hash_table_destroy(settings); +- +- /* auth with unknown uReport_AuthDataItems */ +- libreport_ureport_server_config_init(&config); +- +- settings = g_hash_table_new_full(g_str_hash, g_str_equal, free, free); +- +- setenv("uReport_AuthDataItems", "hostname, unknown", 1); +- +- libreport_ureport_server_config_load(&config, settings); +- +- ureport = libreport_ureport_from_dump_dir_ext("./test", &config.ur_prefs); +- assert(strstr(ureport, "auth") != NULL); +- assert(strstr(ureport, "\"hostname\": \"env_hostname\"") != NULL); +- assert(strstr(ureport, "unknown") == NULL); +- g_free(ureport); +- +- libreport_ureport_server_config_destroy(&config); +- if (settings) +- g_hash_table_destroy(settings); +- delete_dump_dir("./test"); +- +- return 0; +-} +-]]) +- +- + ## ------------------------------------- ## + ## ureport_server_config_load_basic_auth ## + ## ------------------------------------- ## +-- +2.45.4 + diff --git a/SPECS-EXTENDED/libreport/libreport.signatures.json b/SPECS-EXTENDED/libreport/libreport.signatures.json index b0c554ae2f4..0325089df3b 100644 --- a/SPECS-EXTENDED/libreport/libreport.signatures.json +++ b/SPECS-EXTENDED/libreport/libreport.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "libreport-2.13.1.tar.gz": "d8523ff6993ba1514550eb745e09eb9e35b4e24bad0f31dcfccb6f4e3f58e2bf" + "libreport-2.17.15.tar.gz": "cf181ed248f6bf12233b459e4a504fab645b3ce8040b6855a073453756311166" } } diff --git a/SPECS-EXTENDED/libreport/libreport.spec b/SPECS-EXTENDED/libreport/libreport.spec index 5aa5500078c..b6a93f3e064 100644 --- a/SPECS-EXTENDED/libreport/libreport.spec +++ b/SPECS-EXTENDED/libreport/libreport.spec @@ -1,94 +1,94 @@ %bcond_without bugzilla + +%define dbus_devel dbus-devel %define libjson_devel json-c-devel + %define glib_ver 2.43.4 -Summary: Generic library for reporting various problems -Name: libreport -Version: 2.13.1 -Release: 9%{?dist} -License: GPLv2+ +Summary: Generic library for reporting various problems +Name: libreport +Version: 2.17.15 +Release: 1%{?dist} +License: GPL-2.0-or-later Vendor: Microsoft Corporation 
Distribution: Azure Linux -URL: https://abrt.readthedocs.org/ -Source: https://github.com/abrt/%{name}/archive/%{version}/%{name}-%{version}.tar.gz -Patch0: 0001-gui-wizard-gtk-Fix-segfault.patch -BuildRequires: dbus-devel -# BuildRequires: gtk3-devel -BuildRequires: asciidoc -BuildRequires: augeas -BuildRequires: augeas-devel -BuildRequires: curl-devel -BuildRequires: desktop-file-utils -BuildRequires: doxygen -BuildRequires: gettext -BuildRequires: git-core -BuildRequires: glib2-devel >= %{glib_ver} -BuildRequires: intltool -BuildRequires: libproxy-devel -BuildRequires: libtar-devel -BuildRequires: libtool -BuildRequires: libxml2-devel -BuildRequires: lz4 -BuildRequires: nettle-devel -BuildRequires: newt-devel -BuildRequires: python3-devel -BuildRequires: satyr-devel >= 0.24 -BuildRequires: systemd-devel -BuildRequires: texinfo -BuildRequires: xmlrpc-c -BuildRequires: xmlto -BuildRequires: xz -Requires: glib2 >= %{glib_ver} -Requires: libreport-filesystem = %{version}-%{release} -Requires: lz4 -Requires: nettle -Requires: satyr >= 0.24 -Requires: xz -%if 0%{?with_check} && 0%{?mariner_failing_tests} -# A test case uses zh_CN locale to verify XML event translations -BuildRequires: glibc-all-langpacks -%endif +URL: https://abrt.readthedocs.org/ +Source: https://github.com/abrt/%{name}/archive/%{version}/%{name}-%{version}.tar.gz +Patch1: 0001-skip-unwanted-tests.patch +BuildRequires: %{dbus_devel} +BuildRequires: gtk3-devel +BuildRequires: curl-devel +BuildRequires: desktop-file-utils +BuildRequires: python3-devel +BuildRequires: gettext +BuildRequires: libxml2-devel +BuildRequires: intltool +BuildRequires: libtool +BuildRequires: make +BuildRequires: texinfo +BuildRequires: asciidoc +BuildRequires: xmlto +BuildRequires: newt-devel +BuildRequires: satyr-devel >= 0.38 +BuildRequires: glib2-devel >= %{glib_ver} +BuildRequires: git-core + %if %{with bugzilla} -BuildRequires: xmlrpc-c-devel +BuildRequires: xmlrpc-c-devel %endif +BuildRequires: doxygen +BuildRequires: systemd-devel +BuildRequires: augeas-devel +BuildRequires: augeas +BuildRequires: libarchive-devel +Requires: libreport-filesystem = %{version}-%{release} +Requires: satyr%{?_isa} >= 0.38 +Requires: glib2%{?_isa} >= %{glib_ver} +Requires: libarchive%{?_isa} + # Required for the temporary modularity hack, see below %if 0%{?_module_build} -BuildRequires: sed +BuildRequires: sed %endif +Obsoletes: %{name}-compat < 2.13.2 +Obsoletes: %{name}-plugin-rhtsupport < 2.13.2 +Obsoletes: %{name}-rhel < 2.13.2 + %description Libraries providing API for reporting different problems in applications to different bug targets like Bugzilla, ftp, trac, etc... 
%package filesystem -Summary: Filesystem layout for libreport -BuildArch: noarch +Summary: Filesystem layout for libreport +BuildArch: noarch %description filesystem Filesystem layout for libreport %package devel -Summary: Development libraries and headers for libreport -Requires: libreport = %{version}-%{release} +Summary: Development libraries and headers for libreport +Requires: libreport = %{version}-%{release} %description devel Development libraries and headers for libreport %package web -Summary: Library providing network API for libreport -Requires: libreport = %{version}-%{release} +Summary: Library providing network API for libreport +Requires: libreport = %{version}-%{release} %description web Library providing network API for libreport %package web-devel -Summary: Development headers for libreport-web -Requires: libreport-web = %{version}-%{release} +Summary: Development headers for libreport-web +Requires: libreport-web = %{version}-%{release} %description web-devel Development headers for libreport-web %package -n python3-libreport +Summary: Python 3 bindings for report-libs %if 0%{?_module_build} # This is required for F26 Boltron (the modular release) # Different parts of libreport are shipped with different @@ -96,63 +96,81 @@ Development headers for libreport-web # strict NVR dependency to make it work. Temporary and # limited to F26 Boltron. %global distfreerelease %(echo %{release}|sed 's/%{?dist}$//'||echo 0) -Requires: libreport >= %{version}-%{distfreerelease} +Requires: libreport >= %{version}-%{distfreerelease} %else -Requires: libreport = %{version}-%{release} +Requires: libreport = %{version}-%{release} %endif -Summary: Python 3 bindings for report-libs +Requires: python3-dnf +Requires: python3-requests +Requires: python3-urllib3 %{?python_provide:%python_provide python3-libreport} -Requires: python3-dnf %description -n python3-libreport Python 3 bindings for report-libs. 
%package cli -Summary: %{name}'s command line interface -Requires: %{name} = %{version}-%{release} +Summary: %{name}'s command line interface +Requires: %{name} = %{version}-%{release} %description cli This package contains simple command line tool for working with problem dump reports %package newt -Summary: %{name}'s newt interface -Requires: %{name} = %{version}-%{release} -Provides: report-newt = 0:0.23-1 -Obsoletes: report-newt < 0:0.23-1 +Summary: %{name}'s newt interface +Requires: %{name} = %{version}-%{release} +Provides: report-newt = 0:0.23-1 +Obsoletes: report-newt < 0:0.23-1 %description newt This package contains a simple newt application for reporting bugs +%package gtk +Summary: GTK front-end for libreport +Requires: libreport = %{version}-%{release} +Requires: libreport-plugin-reportuploader = %{version}-%{release} +Provides: report-gtk = 0:0.23-1 +Obsoletes: report-gtk < 0:0.23-1 + +%description gtk +Applications for reporting bugs using libreport backend + +%package gtk-devel +Summary: Development libraries and headers for libreport +Requires: libreport-gtk = %{version}-%{release} + +%description gtk-devel +Development libraries and headers for libreport-gtk + %package plugin-kerneloops -Summary: %{name}'s kerneloops reporter plugin -Requires: %{name} = %{version}-%{release} -Requires: curl -Requires: libreport-web = %{version}-%{release} +Summary: %{name}'s kerneloops reporter plugin +Requires: curl +Requires: %{name} = %{version}-%{release} +Requires: libreport-web = %{version}-%{release} %description plugin-kerneloops This package contains plugin which sends kernel crash information to specified server, usually to kerneloops.org. %package plugin-logger -Summary: %{name}'s logger reporter plugin -Requires: %{name} = %{version}-%{release} +Summary: %{name}'s logger reporter plugin +Requires: %{name} = %{version}-%{release} %description plugin-logger The simple reporter plugin which writes a report to a specified file. %package plugin-systemd-journal -Summary: %{name}'s systemd journal reporter plugin -Requires: %{name} = %{version}-%{release} +Summary: %{name}'s systemd journal reporter plugin +Requires: %{name} = %{version}-%{release} %description plugin-systemd-journal The simple reporter plugin which writes a report to the systemd journal. %package plugin-mailx -Summary: %{name}'s mailx reporter plugin -Requires: %{name} = %{version}-%{release} -Requires: mailx +Summary: %{name}'s mailx reporter plugin +Requires: %{name} = %{version}-%{release} +Requires: /usr/bin/mailx %description plugin-mailx The simple reporter plugin which sends a report via mailx to a specified @@ -160,123 +178,56 @@ email address. %if %{with bugzilla} %package plugin-bugzilla -Summary: %{name}'s bugzilla plugin -Requires: %{name} = %{version}-%{release} -Requires: libreport-web = %{version}-%{release} +Summary: %{name}'s bugzilla plugin +Requires: %{name} = %{version}-%{release} +Requires: libreport-web = %{version}-%{release} +Requires: python3-libreport = %{version}-%{release} %description plugin-bugzilla Plugin to report bugs into the bugzilla. %endif %package plugin-mantisbt -Summary: %{name}'s mantisbt plugin -Requires: %{name} = %{version}-%{release} -Requires: libreport-web = %{version}-%{release} +Summary: %{name}'s mantisbt plugin +Requires: %{name} = %{version}-%{release} +Requires: libreport-web = %{version}-%{release} %description plugin-mantisbt Plugin to report bugs into the mantisbt. 
%package centos -Summary: %{name}'s CentOS Bug Tracker workflow -Requires: %{name} = %{version}-%{release} -Requires: libreport-plugin-mantisbt = %{version}-%{release} -Requires: libreport-web = %{version}-%{release} +Summary: %{name}'s CentOS Bug Tracker workflow +Requires: %{name} = %{version}-%{release} +Requires: libreport-web = %{version}-%{release} +Requires: libreport-plugin-mantisbt = %{version}-%{release} %description centos Workflows to report issues into the CentOS Bug Tracker. %package plugin-ureport -Summary: %{name}'s micro report plugin -BuildRequires: %{libjson_devel} -Requires: %{name} = %{version}-%{release} -Requires: libreport-web = %{version}-%{release} -%if 0%{?rhel} -Requires: python3-subscription-manager-rhsm -%endif +Summary: %{name}'s micro report plugin +BuildRequires: %{libjson_devel} +Requires: %{name} = %{version}-%{release} +Requires: libreport-web = %{version}-%{release} %description plugin-ureport Uploads micro-report to abrt server -%package plugin-rhtsupport -Summary: %{name}'s RHTSupport plugin -Requires: %{name} = %{version}-%{release} -Requires: libreport-web = %{version}-%{release} - -%description plugin-rhtsupport -Plugin to report bugs into RH support system. - -%if %{with bugzilla} -%package compat -Summary: %{name}'s compat layer for obsoleted 'report' package -Requires: %{name}-plugin-bugzilla = %{version}-%{release} -Requires: %{name}-plugin-rhtsupport = %{version}-%{release} -Requires: libreport = %{version}-%{release} - -%description compat -Provides 'report' command-line tool. -%endif - %package plugin-reportuploader -Summary: %{name}'s reportuploader plugin -Requires: %{name} = %{version}-%{release} -Requires: libreport-web = %{version}-%{release} +Summary: %{name}'s reportuploader plugin +Requires: %{name} = %{version}-%{release} +Requires: libreport-web = %{version}-%{release} %description plugin-reportuploader Plugin to report bugs into anonymous FTP site associated with ticketing system. -%if 0%{?fedora} -%package fedora -Summary: Default configuration for reporting bugs via Fedora infrastructure -Requires: %{name} = %{version}-%{release} - -%description fedora -Default configuration for reporting bugs via Fedora infrastructure -used to easily configure the reporting process for Fedora systems. Just -install this package and you're done. -%endif - -%if 0%{?rhel} -%package rhel -Summary: Default configuration for reporting bugs via Red Hat infrastructure -Requires: %{name} = %{version}-%{release} -Requires: %{name}-plugin-ureport - -%description rhel -Default configuration for reporting bugs via Red Hat infrastructure -used to easily configure the reporting process for Red Hat systems. Just -install this package and you're done. - -%package rhel-bugzilla -Summary: Default configuration for reporting bugs to Red Hat Bugzilla -Requires: %{name} = %{version}-%{release} -Requires: libreport-plugin-bugzilla = %{version}-%{release} -Requires: libreport-plugin-ureport = %{version}-%{release} - -%description rhel-bugzilla -Default configuration for reporting bugs to Red Hat Bugzilla used to easily -configure the reporting process for Red Hat systems. Just install this package -and you're done. 
- -%package rhel-anaconda-bugzilla -Summary: Default configuration for reporting anaconda bugs to Red Hat Bugzilla -Requires: %{name} = %{version}-%{release} -Requires: libreport-plugin-bugzilla = %{version}-%{release} - -%description rhel-anaconda-bugzilla -Default configuration for reporting Anaconda problems to Red Hat Bugzilla used -to easily configure the reporting process for Red Hat systems. Just install this -package and you're done. -%endif - %if %{with bugzilla} %package anaconda -Summary: Default configuration for reporting anaconda bugs -Requires: %{name} = %{version}-%{release} -Requires: libreport-plugin-reportuploader = %{version}-%{release} -%if 0%{?rhel} -Requires: libreport-plugin-rhtsupport = %{version}-%{release} -%else -Requires: libreport-plugin-bugzilla = %{version}-%{release} +Summary: Default configuration for reporting anaconda bugs +Requires: %{name} = %{version}-%{release} +Requires: libreport-plugin-reportuploader = %{version}-%{release} +%if ! 0%{?rhel} || 0%{?eln} +Requires: libreport-plugin-bugzilla = %{version}-%{release} %endif %description anaconda @@ -285,29 +236,25 @@ data over ftp/scp... %endif %prep -%autosetup -S git +%autosetup -p1 %build -autoconf +./autogen.sh %configure \ %if %{without bugzilla} --without-bugzilla \ -%endif -%if 0%{?rhel} - --enable-import-rhtsupport-cert \ %endif --enable-doxygen-docs \ - --disable-silent-rules \ - --without-gtk + --disable-silent-rules %make_build %install %make_install \ %if %{with python3} - PYTHON=python3 \ -%endif # with python3 + PYTHON=%{__python3} \ +%endif mandir=%{_mandir} %find_lang %{name} @@ -331,8 +278,6 @@ mkdir -p %{buildroot}/%{_datadir}/%{name}/workflows/ # After everything is installed, remove info dir rm -f %{buildroot}/%{_infodir}/dir -# Remove unwanted Fedora specific workflow configuration files -%if 0%{!?fedora:1} rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_FedoraCCpp.xml rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_FedoraKerneloops.xml rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_FedoraPython.xml @@ -345,10 +290,8 @@ rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_FedoraJavaScript.xml rm -f %{buildroot}/%{_sysconfdir}/libreport/workflows.d/report_fedora.conf rm -f %{buildroot}%{_mandir}/man5/report_fedora.conf.5 rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_AnacondaFedora.xml -%endif # Remove unwanted RHEL specific workflow configuration files -%if 0%{!?rhel:1} rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_RHELCCpp.xml rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_RHELKerneloops.xml rm -f %{buildroot}/%{_datadir}/libreport/workflows/workflow_RHELPython.xml @@ -383,34 +326,50 @@ rm -f %{buildroot}/%{_sysconfdir}/libreport/workflows.d/report_rhel_bugzilla.con rm -f %{buildroot}%{_mandir}/man5/report_rhel.conf.5 rm -f %{buildroot}%{_mandir}/man5/report_uReport.conf.5 rm -f %{buildroot}%{_mandir}/man5/report_rhel_bugzilla.conf.5 -%endif - -rm -f %{buildroot}%{_mandir}/man1/report-gtk.1 -rm -f %{buildroot}%{_mandir}/man5/forbidden_words.conf.5 -rm -f %{buildroot}%{_mandir}/man5/ignored_words.conf.5 %check -make check -check_result=$? 
-if [[ $check_result -ne 0 ]]; then +make check|| { # find and print the logs of failed test # do not cat tests/testsuite.log because it contains a lot of bloat find tests/testsuite.dir -name "testsuite.log" -print -exec cat '{}' \; -fi -[[ $check_result -eq 0 ]] + exit 1 +} %ldconfig_scriptlets %ldconfig_scriptlets web +%if 0%{?rhel} && 0%{?rhel} <= 7 +%post gtk +%{?ldconfig} +# update icon cache +touch --no-create %{_datadir}/icons/hicolor &>/dev/null || : + +%postun gtk +%{?ldconfig} +if [ $1 -eq 0 ] ; then + touch --no-create %{_datadir}/icons/hicolor &>/dev/null + gtk-update-icon-cache %{_datadir}/icons/hicolor &>/dev/null || : +fi + +%posttrans gtk +gtk-update-icon-cache %{_datadir}/icons/hicolor &>/dev/null || : + +%endif %files -f %{name}.lang %doc README.md %license COPYING %config(noreplace) %{_sysconfdir}/%{name}/libreport.conf %config(noreplace) %{_sysconfdir}/%{name}/report_event.conf +%config(noreplace) %{_sysconfdir}/%{name}/forbidden_words.conf +%config(noreplace) %{_sysconfdir}/%{name}/ignored_words.conf +%config(noreplace) %{_sysconfdir}/%{name}/ignored_elements.conf %{_datadir}/%{name}/conf.d/libreport.conf %{_libdir}/libreport.so.* %{_mandir}/man5/libreport.conf.5* %{_mandir}/man5/report_event.conf.5* +%{_mandir}/man5/forbidden_words.conf.5* +%{_mandir}/man5/ignored_words.conf.5* +%{_mandir}/man5/ignored_elements.conf.5* # filesystem package owns /usr/share/augeas/lenses directory %{_datadir}/augeas/lenses/libreport.aug @@ -441,6 +400,9 @@ fi %{_includedir}/libreport/file_obj.h %{_includedir}/libreport/config_item_info.h %{_includedir}/libreport/workflow.h +%{_includedir}/libreport/problem_details_widget.h +%{_includedir}/libreport/problem_details_dialog.h +%{_includedir}/libreport/problem_utils.h %{_includedir}/libreport/ureport.h %{_includedir}/libreport/reporters.h %{_includedir}/libreport/global_configuration.h @@ -472,6 +434,16 @@ fi %{_bindir}/report-newt %{_mandir}/man1/report-newt.1.gz +%files gtk +%{_bindir}/report-gtk +%{_libdir}/libreport-gtk.so.* +%{_mandir}/man1/report-gtk.1.gz + +%files gtk-devel +%{_libdir}/libreport-gtk.so +%{_includedir}/libreport/internal_libreport_gtk.h +%{_libdir}/pkgconfig/libreport-gtk.pc + %files plugin-kerneloops %{_datadir}/%{name}/events/report_Kerneloops.xml %{_mandir}/man*/reporter-kerneloops.* @@ -515,7 +487,7 @@ fi %{_mandir}/man1/reporter-ureport.1.gz %{_mandir}/man5/ureport.conf.5.gz %{_datadir}/%{name}/events/report_uReport.xml -%if 0%{?rhel} +%if 0%{?rhel} && ! 
0%{?eln} %config(noreplace) %{_sysconfdir}/libreport/workflows.d/report_uReport.conf %{_datadir}/%{name}/workflows/workflow_uReport.xml %{_mandir}/man5/report_uReport.conf.5.* @@ -545,6 +517,8 @@ fi %{_mandir}/man5/bugzilla_formatdup_analyzer_libreport.conf.5.* %{_mandir}/man5/bugzilla_format_kernel.conf.5.* %{_bindir}/reporter-bugzilla +%{_bindir}/reporter-bugzilla-python + %endif %files plugin-mantisbt @@ -581,26 +555,6 @@ fi %config(noreplace) %{_sysconfdir}/libreport/events.d/centos_report_event.conf %{_mandir}/man5/centos_report_event.conf.5.gz -%files plugin-rhtsupport -%config(noreplace) %{_sysconfdir}/libreport/plugins/rhtsupport.conf -%{_datadir}/%{name}/conf.d/plugins/rhtsupport.conf -%{_datadir}/%{name}/events/report_RHTSupport.xml -%{_datadir}/%{name}/events/report_RHTSupport_AddData.xml -%if 0%{?rhel} -%attr(600,root,root)%{_sysconfdir}/%{name}/cert-api.access.redhat.com.pem -%endif -%config(noreplace) %{_sysconfdir}/libreport/events.d/rhtsupport_event.conf -%{_mandir}/man1/reporter-rhtsupport.1.gz -%{_mandir}/man5/rhtsupport.conf.5.* -%{_mandir}/man5/rhtsupport_event.conf.5.* -%{_bindir}/reporter-rhtsupport - -%if %{with bugzilla} -%files compat -%{_bindir}/report -%{_mandir}/man1/report.1.gz -%endif - %files plugin-reportuploader %{_mandir}/man*/reporter-upload.* %{_mandir}/man5/uploader_event.conf.5.* @@ -617,67 +571,10 @@ fi %config(noreplace) %{_sysconfdir}/libreport/events/report_Uploader.conf %{_mandir}/man5/report_Uploader.conf.5.* -%if 0%{?fedora} -%files fedora -%{_datadir}/%{name}/workflows/workflow_FedoraCCpp.xml -%{_datadir}/%{name}/workflows/workflow_FedoraKerneloops.xml -%{_datadir}/%{name}/workflows/workflow_FedoraPython.xml -%{_datadir}/%{name}/workflows/workflow_FedoraPython3.xml -%{_datadir}/%{name}/workflows/workflow_FedoraVmcore.xml -%{_datadir}/%{name}/workflows/workflow_FedoraXorg.xml -%{_datadir}/%{name}/workflows/workflow_FedoraLibreport.xml -%{_datadir}/%{name}/workflows/workflow_FedoraJava.xml -%{_datadir}/%{name}/workflows/workflow_FedoraJavaScript.xml -%config(noreplace) %{_sysconfdir}/libreport/workflows.d/report_fedora.conf -%{_mandir}/man5/report_fedora.conf.5.* -%endif - -%if 0%{?rhel} -%files rhel -%{_datadir}/%{name}/workflows/workflow_RHELCCpp.xml -%{_datadir}/%{name}/workflows/workflow_RHELKerneloops.xml -%{_datadir}/%{name}/workflows/workflow_RHELPython.xml -%{_datadir}/%{name}/workflows/workflow_RHELvmcore.xml -%{_datadir}/%{name}/workflows/workflow_RHELxorg.xml -%{_datadir}/%{name}/workflows/workflow_RHELLibreport.xml -%{_datadir}/%{name}/workflows/workflow_RHELJava.xml -%{_datadir}/%{name}/workflows/workflow_RHELJavaScript.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataCCpp.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataJava.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataKerneloops.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataLibreport.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataPython.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDatavmcore.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataxorg.xml -%{_datadir}/%{name}/workflows/workflow_RHELAddDataJavaScript.xml -%config(noreplace) %{_sysconfdir}/libreport/workflows.d/report_rhel.conf -%config(noreplace) %{_sysconfdir}/libreport/workflows.d/report_rhel_add_data.conf -%{_mandir}/man5/report_rhel.conf.5.* - -%files rhel-bugzilla -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaCCpp.xml -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaKerneloops.xml -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaPython.xml 
-%{_datadir}/%{name}/workflows/workflow_RHELBugzillaVmcore.xml -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaXorg.xml -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaLibreport.xml -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaJava.xml -%{_datadir}/%{name}/workflows/workflow_RHELBugzillaJavaScript.xml -%config(noreplace) %{_sysconfdir}/libreport/workflows.d/report_rhel_bugzilla.conf -%{_mandir}/man5/report_rhel_bugzilla.conf.5.* - -%files rhel-anaconda-bugzilla -%{_datadir}/%{name}/workflows/workflow_AnacondaRHELBugzilla.xml -%endif %if %{with bugzilla} + %files anaconda -%if 0%{?fedora} -%{_datadir}/%{name}/workflows/workflow_AnacondaFedora.xml -%endif -%if 0%{?rhel} -%{_datadir}/%{name}/workflows/workflow_AnacondaRHEL.xml -%endif %{_datadir}/%{name}/workflows/workflow_AnacondaUpload.xml %config(noreplace) %{_sysconfdir}/libreport/workflows.d/anaconda_event.conf %config(noreplace) %{_sysconfdir}/libreport/events.d/bugzilla_anaconda_event.conf @@ -690,6 +587,10 @@ fi %endif %changelog +* Thu Nov 28 2024 Sumit Jena - 2.17.15-1 +- Update to version 2.17.15 +- License verified. + * Tue Dec 20 2022 Muhammad Falak - 2.13.1-9 - License verified diff --git a/SPECS-EXTENDED/llhttp/llhttp.signatures.json b/SPECS-EXTENDED/llhttp/llhttp.signatures.json new file mode 100644 index 00000000000..08472879443 --- /dev/null +++ b/SPECS-EXTENDED/llhttp/llhttp.signatures.json @@ -0,0 +1,6 @@ +{ + "Signatures": { + "llhttp-9.3.0.tar.gz": "d6612eefc06a32c778b1d3044cb07aac49eed8f14db689ded91c609b94d51502", + "llhttp-release-v9.3.0.tar.gz": "1a2b45cb8dda7082b307d336607023aa65549d6f060da1d246b1313da22b685a" + } +} \ No newline at end of file diff --git a/SPECS-EXTENDED/llhttp/llhttp.spec b/SPECS-EXTENDED/llhttp/llhttp.spec new file mode 100644 index 00000000000..046643c05b5 --- /dev/null +++ b/SPECS-EXTENDED/llhttp/llhttp.spec @@ -0,0 +1,250 @@ +# This package is rather exotic. The compiled library is a typical shared +# library with a C API. However, it has only a tiny bit of C source code. Most +# of the library is written in TypeScript, which is transpiled to C, via LLVM +# IR, using llparse (https://github.com/nodejs/llparse)—all of which happens +# within the NodeJS ecosystem. +# +# Historically, this package “built like” a NodeJS package, with a +# dev-dependency bundle from NPM that we used to transpile the original +# TypeScript sources to C downstream. Since 9.3.0, it is no longer practical to +# re-generate the C sources from Typescript without using pre-compiled esbuild +# executables from NPM, so we use the upstream “release” tarball with +# pre-generated C source and header files included. +# +# That allows this package to be built without running the NodeJS/Typescript +# machinery in the build (via a large “dev” dependency bundle. However, this +# release archive lacks the original TypeScript source code for the generated C +# code, so we need to include this in an additional source. For details, see: +# https://docs.fedoraproject.org/en-US/packaging-guidelines/what-can-be-packaged/#pregenerated-code + +# This package is a dependency of libgit2 which in turn is one of rpmautospec. +# When upgrading to a version with a new soname, this package needs to provide +# both in order to bootstrap itself and libgit2. Set %%bootstrap and +# %%previous_so_version for this (and unset and rebuild later). 
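+#
+# For illustration only — a sketch of how such a soname transition would
+# typically be driven, assuming plain rpmbuild is used (the distro build
+# tooling may invoke the bcond differently):
+#
+#   rpmbuild -ba llhttp.spec --with bootstrap   # interim build: ships the old
+#                                               # libllhttp.so.%%previous_so_version
+#                                               # next to the new .so.%%so_version
+#   rpmbuild -ba llhttp.spec                    # final build once llhttp and
+#                                               # libgit2 have been rebuilt; the
+#                                               # compatibility library is dropped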
+# +%bcond bootstrap 0 +%global so_version 9.3 +%global previous_so_version 9.2 + +Name: llhttp +Version: 9.3.0 +Release: 6%{?dist} +Summary: Port of http_parser to llparse +Vendor: Microsoft Corporation +Distribution: Azure Linux + +# SPDX +License: MIT +URL: https://github.com/nodejs/llhttp +Source0: %{url}/archive/refs/tags/release/v%{version}/llhttp-release-v%{version}.tar.gz +# Contains the original TypeScript sources, which we must include in the source +# RPM per packaging guidelines. +Source1: %{url}/archive/v%{version}/llhttp-%{version}.tar.gz + +# For compiling the C library +BuildRequires: cmake +BuildRequires: gcc +# There is no C++ involved, but CMake searches for a C++ compiler. +BuildRequires: gcc-c++ + +%if %{with bootstrap} +%if "%{_lib}" == "lib64" +BuildRequires: libllhttp.so.%{previous_so_version}()(64bit) +%else +BuildRequires: libllhttp.so.%{previous_so_version} +%endif +%endif + +%description +This project is a port of http_parser to TypeScript. llparse is used to +generate the output C source file, which could be compiled and linked with the +embedder's program (like Node.js). + +%package devel +Summary: Development files for llhttp +Requires: llhttp%{?_isa} = %{?epoch:%{epoch}:}%{version}-%{release} + +%description devel +The llhttp-devel package contains libraries and header files for +developing applications that use llhttp. + +%prep +%autosetup -n llhttp-release-v%{version} + +%conf +%cmake + +%build +%cmake_build + +%install +%cmake_install + +%if %{with bootstrap} +cp -vp %{_libdir}/libllhttp.so.%{previous_so_version}{,.*} \ + %{buildroot}%{_libdir} +%endif + +# The same obstacles that prevent us from re-generating the C sources from +# TypeScript also prevent us from running the tests, which rely on NodeJS. + +%files +# Files LICENSE and LICENSE-MIT are duplicates. +%license LICENSE +%doc README.md +%{_libdir}/libllhttp.so.%{so_version}{,.*} +%if %{with bootstrap} +%{_libdir}/libllhttp.so.%{previous_so_version}{,.*} +%endif + +%files devel +%{_includedir}/llhttp.h +%{_libdir}/libllhttp.so +%{_libdir}/pkgconfig/libllhttp.pc +%{_libdir}/cmake/llhttp/ + +%changelog +* Tue Dec 23 2025 Aditya Singh - 9.3.0-6 +- Initial Azure Linux import from Fedora 44 (license: MIT). +- License verified. + +* Thu Jul 24 2025 Fedora Release Engineering - 9.3.0-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_43_Mass_Rebuild + +* Mon May 19 2025 Benjamin A. Beasley - 9.3.0-4 +- Non-bootstrap build + +* Mon May 19 2025 Benjamin A. Beasley - 9.3.0-3 +- Minor style tweaks to bootstrapping machinery + +* Mon May 19 2025 Nils Philippsen - 9.3.0-2 +- Make package bootstrappable for rpmautospec + +* Sun May 04 2025 Benjamin A. Beasley - 9.3.0-1 +- Update to 9.3.0 (close RHBZ#2363919) + +* Tue Apr 29 2025 Benjamin A. Beasley - 9.2.1-6 +- Correct a term in the SourceLicense + +* Fri Jan 17 2025 Fedora Release Engineering - 9.2.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_42_Mass_Rebuild + +* Mon Dec 16 2024 Benjamin A. Beasley - 9.2.1-4 +- Omit build-time license auditing on i686 +- This keeps llhttp from blocking licensecheck’s dependencies or askalono- + cli from dropping i686 support. + +* Fri Dec 13 2024 Benjamin A. Beasley - 9.2.1-3 +- Add a SourceLicense field +- Re-generate the dev-dependencies bundle + +* Thu Jul 18 2024 Fedora Release Engineering - 9.2.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild + +* Thu Apr 04 2024 Benjamin A. 
Beasley - 9.2.1-1 +- Update to 9.2.1 (close RHBZ#2273352, fix CVE-2024-27982) +- Switch from xz to zstd compression for the “dev” bundle archive + +* Thu Mar 21 2024 Benjamin A. Beasley - 9.2.0-4 +- Format check-null-licenses with “ruff format” + +* Wed Feb 14 2024 Benjamin A. Beasley - 9.2.0-1 +- Update to 9.2.0 (close RHBZ#2263250) + +* Wed Feb 14 2024 Benjamin A. Beasley - 9.1.3-6 +- Compress the dev dependency bundle with xz instead of gzip + +* Sun Feb 11 2024 Yaakov Selkowitz - 9.1.3-5 +- Avoid licensecheck dependency in RHEL builds + +* Thu Feb 08 2024 Benjamin A. Beasley - 9.1.3-4 +- Better audit (and document auditing of) dev dependency licenses + +* Thu Jan 25 2024 Fedora Release Engineering - 9.1.3-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Sun Jan 21 2024 Fedora Release Engineering - 9.1.3-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Thu Oct 05 2023 Benjamin A. Beasley - 9.1.3-1 +- Update to 9.1.3 (close RHBZ#2242220) + +* Tue Oct 03 2023 Benjamin A. Beasley - 9.1.2-1 +- Update to 9.1.2 + +* Thu Sep 14 2023 Benjamin A. Beasley - 9.1.1-1 +- Update to 9.1.1 + +* Thu Sep 14 2023 Benjamin A. Beasley - 9.1.0-1 +- Update to 9.1.0 + +* Mon Aug 21 2023 Benjamin A. Beasley - 9.0.1-1 +- Update to 9.0.1 (close RHBZ#2228290) + +* Tue Aug 01 2023 Benjamin A. Beasley - 9.0.0-1 +- Update to 9.0.0 + +* Sat Jul 29 2023 Benjamin A. Beasley - 8.1.1-1 +- Update to 8.1.1 (close RHBZ#2216591) + +* Thu Jul 20 2023 Fedora Release Engineering - 8.1.0-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild + +* Sat Jun 03 2023 Benjamin A. Beasley - 8.1.0-5 +- Remove explicit %%set_build_flags, not needed since F36 + +* Wed Feb 15 2023 Benjamin A. Beasley - 8.1.0-4 +- Fix test compiling/execution + +* Thu Jan 19 2023 Fedora Release Engineering - 8.1.0-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild + +* Tue Dec 20 2022 Benjamin A. Beasley - 8.1.0-2 +- Indicate dirs. in files list with trailing slashes + +* Sat Oct 15 2022 Benjamin A. Beasley - 8.1.0-1 +- Update to 8.1.0 (close RHBZ#2131175) + +* Sat Oct 15 2022 Benjamin A. Beasley - 8.0.0-1 +- Update to 8.0.0 (close RHBZ#2131175) + +* Sat Oct 15 2022 Benjamin A. Beasley - 6.0.10-2 +- Drop workarounds for Python 3.10 and older + +* Thu Sep 29 2022 Stephen Gallagher - 6.0.10-1 +- Update to v6.0.10 + +* Thu Aug 25 2022 Miro Hrončok - 6.0.9-2 +- Use tomllib/python-tomli instead of dead upstream python-toml + +* Thu Aug 11 2022 Benjamin A. Beasley - 6.0.9-1 +- Update to 6.0.9 (close RHBZ#2116231) +- Bumped .so version from downstream 0.1 to upstream 6.0 +- Better upstream support for building and installing a shared library +- The -devel package now contains a .pc file +- Tests are now built with gcc and fully respect distro flags + +* Thu Jul 21 2022 Fedora Release Engineering - 6.0.6-8 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild + +* Wed Apr 20 2022 Benjamin A. Beasley - 6.0.6-7 +- Drop “forge” macros, which aren’t really doing much here + +* Thu Jan 20 2022 Fedora Release Engineering - 6.0.6-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild + +* Fri Dec 24 2021 Benjamin A. Beasley - 6.0.6-5 +- Add a note about LLHTTP_STRICT_MODE to the package description + +* Fri Dec 24 2021 Benjamin A. Beasley - 6.0.6-4 +- Revert "Build with LLHTTP_STRICT_MODE enabled" + +* Wed Dec 22 2021 Benjamin A. Beasley - 6.0.6-3 +- Build with LLHTTP_STRICT_MODE enabled + +* Tue Dec 14 2021 Benjamin A. Beasley - 6.0.6-2 +- Dep. 
on cmake-filesystem is now auto-generated + +* Mon Dec 06 2021 Benjamin A. Beasley - 6.0.6-1 +- Initial package (close RHBZ#2029461) +## END: Generated by rpmautospec diff --git a/SPECS-EXTENDED/mod_auth_openidc/mod_auth_openidc.spec b/SPECS-EXTENDED/mod_auth_openidc/mod_auth_openidc.spec index 6a9007e296d..7d9509d0b65 100644 --- a/SPECS-EXTENDED/mod_auth_openidc/mod_auth_openidc.spec +++ b/SPECS-EXTENDED/mod_auth_openidc/mod_auth_openidc.spec @@ -13,7 +13,7 @@ Summary: OpenID Connect auth module for Apache HTTP Server Name: mod_auth_openidc Version: 2.4.14.2 -Release: 1%{?dist} +Release: 2%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -30,7 +30,7 @@ BuildRequires: httpd-devel BuildRequires: jansson-devel BuildRequires: jq-devel BuildRequires: openssl-devel -BuildRequires: pcre-devel +BuildRequires: pcre2-devel Requires: httpd-mmn %description @@ -87,6 +87,9 @@ install -m 700 -d %{buildroot}%{httpd_pkg_cache_dir}/cache %dir %attr(0700, apache, apache) %{httpd_pkg_cache_dir}/cache %changelog +* Thu Jan 08 2026 Aditya Singh - 2.4.14.2-2 +- Bump to rebuild with updated pcre2. + * Tue Sep 05 2023 Archana Choudhary - 2.4.14.2-1 - Upgrade to 2.4.14.2 - CVE-2021-20718, CVE-2021-39191, CVE-2022-23527, CVE-2023-28625 - Add DESTDIR to resolve mod_auth_openidc.so filepath diff --git a/SPECS-EXTENDED/osgi-compendium/osgi-compendium.spec b/SPECS-EXTENDED/osgi-compendium/osgi-compendium.spec index 1307a415f7c..62df804437f 100644 --- a/SPECS-EXTENDED/osgi-compendium/osgi-compendium.spec +++ b/SPECS-EXTENDED/osgi-compendium/osgi-compendium.spec @@ -20,7 +20,7 @@ Distribution: Azure Linux Name: osgi-compendium Version: 7.0.0 -Release: 2%{?dist} +Release: 3%{?dist} Summary: Interfaces and Classes for use in compiling OSGi bundles License: Apache-2.0 Group: Development/Libraries/Java @@ -108,10 +108,14 @@ install -pm 0644 pom.xml %{buildroot}%{_mavenpomdir}/%{name}/osgi.cmpn.pom # javadoc install -dm 0755 %{buildroot}%{_javadocdir}/%{name} cp -pr target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/ +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . + %fdupes -s %{buildroot}%{_javadocdir} %files -f .mfiles %license LICENSE +%license ADDITIONAL_LICENSE_INFO %doc about.html %files javadoc @@ -119,6 +123,10 @@ cp -pr target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/ %{_javadocdir}/%{name} %changelog +* Wed Dec 24 2025 Durga Jagadeesh Palli - 7.0.0-3 +- address license warning. +- License verified + * Thu Oct 14 2021 Pawel Winogrodzki - 7.0.0-2 - Converting the 'Release' tag to the '[number].[distribution]' format. 
diff --git a/SPECS-EXTENDED/osgi-core/osgi-core-build.xml b/SPECS-EXTENDED/osgi-core/osgi-core-build.xml index eb4671f5416..cce1cd290e0 100644 --- a/SPECS-EXTENDED/osgi-core/osgi-core-build.xml +++ b/SPECS-EXTENDED/osgi-core/osgi-core-build.xml @@ -6,13 +6,13 @@ - + - - + + @@ -110,12 +110,12 @@ - - + + - + diff --git a/SPECS-EXTENDED/osgi-core/osgi-core.signatures.json b/SPECS-EXTENDED/osgi-core/osgi-core.signatures.json index 604efa9f425..3a42ec49814 100644 --- a/SPECS-EXTENDED/osgi-core/osgi-core.signatures.json +++ b/SPECS-EXTENDED/osgi-core/osgi-core.signatures.json @@ -1,8 +1,8 @@ { "Signatures": { "LICENSE-2.0": "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30", - "osgi-core-build.xml": "d9499d11320d38261befe31bbdbfbd4552762fcb6ea8d9ee24af3f39db938d41", - "osgi.core-7.0.0-sources.jar": "e25ca4d936521cd1cd67f655ec01f57ce5e0316be9edc99f8a2f985580b10f9c", - "osgi.core-7.0.0.pom": "d51d5fd23e1e6e0d3babdbd82a2b922e3a7ddbe19dadc0e00504affb9090098e" + "osgi-core-build.xml": "476eb29bb6b1fb401bc4da55d5725c07678a377e843c8d4820287a91da84bdac", + "osgi.core-8.0.0-sources.jar": "df92374f65300db3b22927ef5d0cb654db2ba0cfba459c45917bf8735920b280", + "osgi.core-8.0.0.pom": "651f76ba29bb4392d36aea56f7d0b0c82fc4917024d9e6a843774bba5015eb53" } -} +} \ No newline at end of file diff --git a/SPECS-EXTENDED/osgi-core/osgi-core.spec b/SPECS-EXTENDED/osgi-core/osgi-core.spec index 59faea118cd..f1d9df81702 100644 --- a/SPECS-EXTENDED/osgi-core/osgi-core.spec +++ b/SPECS-EXTENDED/osgi-core/osgi-core.spec @@ -3,7 +3,7 @@ Distribution: Azure Linux # # spec file for package osgi-core # -# Copyright (c) 2020 SUSE LLC +# Copyright (c) 2024 SUSE LLC # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -19,8 +19,8 @@ Distribution: Azure Linux Name: osgi-core -Version: 7.0.0 -Release: 2%{?dist} +Version: 8.0.0 +Release: 1%{?dist} Summary: OSGi Core API License: Apache-2.0 Group: Development/Libraries/Java @@ -31,13 +31,13 @@ Source2: http://www.apache.org/licenses/LICENSE-2.0 Source3: %{name}-build.xml BuildRequires: ant BuildRequires: fdupes -BuildRequires: javapackages-local-bootstrap +BuildRequires: javapackages-local-bootstrap >= 6 BuildRequires: osgi-annotation BuildRequires: unzip BuildArch: noarch %description -OSGi Core Release 7, Interfaces and Classes for use in compiling bundles. +OSGi Core Release 8, Interfaces and Classes for use in compiling bundles. %package javadoc Summary: API documentation for %{name} @@ -94,16 +94,24 @@ install -pm 0644 pom.xml %{buildroot}%{_mavenpomdir}/%{name}/osgi.core.pom # javadoc install -dm 0755 %{buildroot}%{_javadocdir}/%{name} cp -pr target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name}/ +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . + %fdupes -s %{buildroot}%{_javadocdir} %files -f .mfiles %license LICENSE +%license ADDITIONAL_LICENSE_INFO %files javadoc %license LICENSE %{_javadocdir}/%{name} %changelog +* Wed Dec 17 2025 Durga Jagadeesh Palli - 8.0.0-1 +- Upgrade to 8.0.0 (Reference: openSUSE Tumbleweed) +- License verified + * Thu Oct 14 2021 Pawel Winogrodzki - 7.0.0-2 - Converting the 'Release' tag to the '[number].[distribution]' format. 
diff --git a/SPECS-EXTENDED/osgi-core/osgi.core-7.0.0.pom b/SPECS-EXTENDED/osgi-core/osgi.core-8.0.0.pom similarity index 75% rename from SPECS-EXTENDED/osgi-core/osgi.core-7.0.0.pom rename to SPECS-EXTENDED/osgi-core/osgi.core-8.0.0.pom index ee61e137004..c15b493c331 100644 --- a/SPECS-EXTENDED/osgi-core/osgi.core-7.0.0.pom +++ b/SPECS-EXTENDED/osgi-core/osgi.core-8.0.0.pom @@ -3,8 +3,8 @@ 4.0.0 org.osgi osgi.core - 7.0.0 - OSGi Core Release 7, Interfaces and Classes for use in compiling bundles + 8.0.0 + OSGi Core Release 8, Interfaces and Classes for use in compiling bundles org.osgi:osgi.core https://www.osgi.org/ @@ -14,7 +14,7 @@ Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 repo Apache License, Version 2.0 @@ -23,7 +23,7 @@ https://osgi.org/gitweb/build.git scm:git:https://osgi.org/git/build.git scm:git:https://osgi.org/git/build.git - hudson-build.core-1432 + r8-core-final @@ -34,4 +34,12 @@ https://www.osgi.org/ + + + org.osgi + osgi.annotation + 7.0.0 + provided + + diff --git a/SPECS-EXTENDED/pacemaker/pacemaker.signatures.json b/SPECS-EXTENDED/pacemaker/pacemaker.signatures.json index 4655a8b80a3..2d6c6f2e47e 100644 --- a/SPECS-EXTENDED/pacemaker/pacemaker.signatures.json +++ b/SPECS-EXTENDED/pacemaker/pacemaker.signatures.json @@ -1,5 +1,6 @@ { "Signatures": { - "pacemaker-2.1.5.tar.gz": "7654b78f08d4994b9fd31b8592addbd7f4b173e2cbf30336722419f1ce45e185" + "pacemaker-3.0.1.tar.gz": "17a823c52d5448de817162f334b038598e22ea3e24e63a8819f978fac2b252e4", + "pacemaker.sysusers": "b599209cf97e03f192393c16ca8a93ffd8ddcb1ea9db73a60d6480fa7d89800b" } } diff --git a/SPECS-EXTENDED/pacemaker/pacemaker.spec b/SPECS-EXTENDED/pacemaker/pacemaker.spec index a13e7282f1c..3f5780788dd 100644 --- a/SPECS-EXTENDED/pacemaker/pacemaker.spec +++ b/SPECS-EXTENDED/pacemaker/pacemaker.spec @@ -1,3 +1,12 @@ +# +# Copyright 2008-2023 the Pacemaker project contributors +# +# The version control history for this file may have further details. +# +# This source code is licensed under the GNU General Public License version 2 +# or later (GPLv2+) WITHOUT ANY WARRANTY. +# + # User-configurable globals and defines to control package behavior # (these should not test {with X} values, which are declared later) @@ -8,15 +17,25 @@ ## Where to install Pacemaker documentation %global pcmk_docdir %{_docdir}/%{name} -## Where bug reports should be submitted -## Leave bug_url undefined to use ClusterLabs default, others define it here +## GitHub entity that distributes source (for ease of using a fork) +%global github_owner ClusterLabs + ## What to use as the OCF resource agent root directory %global ocf_root %{_prefix}/lib/ocf -## Add option to enable support for stonith/external fencing agents -%bcond_with stonithd +## Since git v2.11, the extent of abbreviation is autoscaled by default +## (used to be constant of 7), so we need to convey it for non-tags, too. +%global commit_abbrev 9 + +# Define conditionals so that "rpmbuild --with " and +# "rpmbuild --without " can enable and disable specific features + + +## Add option for Linux-HA (stonith/external) fencing agent support +%bcond_with linuxha + ## Add option for whether to support storing sensitive information outside CIB %bcond_without cibsecrets @@ -38,16 +57,16 @@ ## after upgrading to versions that support synchronization. 
%bcond_without sbd_sync -## NOTE: skip --with upstart_job ## Add option to turn off hardening of libraries and daemon executables -%bcond_with hardening - -## Add option to enable (or disable) links for legacy daemon names -%bcond_with legacy_links +%bcond_without hardening # Define globals for convenient use later +## Portion of export/dist tarball name after "pacemaker-", and release version +%define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) +%define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz + ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} @@ -59,101 +78,143 @@ %global pkgname_bzip2_devel bzip2-devel %global pkgname_docbook_xsl docbook-style-xsl %global pkgname_gettext gettext-devel -%global pkgname_gnutls_devel gnutls-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs -%global hacluster_id 189 ## Distro-specific configuration choices -### Use 2.0-style output when other distro packages don't support current output -%global compat20 --enable-compat-2.0 - -### Default concurrent-fencing to true when distro prefers that -%global concurrent_fencing --with-concurrent-fencing-default=true - ### Default resource-stickiness to 1 when distro prefers that %global resource_stickiness --with-resource-stickiness-default=1 + +# Python-related definitions + +## Turn off auto-compilation of Python files outside Python specific paths, +## so there's no risk that unexpected "__python" macro gets picked to do the +## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) +## -- distro-dependent tricks or automake's fallback to be applied there +%if %{defined _python_bytecompile_extra} +%global _python_bytecompile_extra 0 +%else +### the statement effectively means no RPM-native byte-compiling will occur at +### all, so distro-dependent tricks for Python-specific packages to be applied +%global __os_install_post %(echo '%{__os_install_post}' | { + sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) +%endif + ## Prefer Python 3 definitions explicitly, in case 2 is also available +%if %{defined __python3} %global python_name python3 %global python_path %{__python3} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} +%else +%if %{defined python_version} +%global python_name python%(echo %{python_version} | cut -d'.' 
-f1) +%define python_path %{?__python}%{!?__python:/usr/bin/%{python_name}} +%else +%global python_name python +%global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} +%endif +%define python_site %{?python_sitelib}%{!?python_sitelib:%( + %{python_name} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} +%endif # Keep sane profiling data if requested %if %{with profiling} + ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} + %endif -Summary: Scalable High-Availability cluster resource manager + Name: pacemaker -Version: 2.1.5 -Release: 5%{?dist} -License: GPLv2+ and LGPLv2+ +Summary: Scalable High-Availability cluster resource manager +Version: 3.0.1 +Release: 1%{?dist} +Vendor: Microsoft Corporation +Distribution: Azure Linux +License: GPL-2.0-or-later AND LGPL-2.1-or-later Url: https://www.clusterlabs.org/ -Source0: https://github.com/ClusterLabs/pacemaker/archive/refs/tags/Pacemaker-2.1.5.tar.gz#/%{name}-%{version}.tar.gz -Requires: resource-agents -Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} -Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} -Requires: %{name}-cli = %{version}-%{release} + +Source0: https://github.com/ClusterLabs/pacemaker/archive/refs/tags/Pacemaker-%{version}.tar.gz#/%{name}-%{version}.tar.gz +Source1: pacemaker.sysusers + +Requires: resource-agents +Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} +Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} +Requires: %{name}-cli = %{version}-%{release} %{?systemd_requires} -Requires: %{python_path} -BuildRequires: %{python_name}-devel + +Requires: %{python_path} +BuildRequires: %{python_name}-devel +BuildRequires: %{python_name}-setuptools + # Pacemaker requires a minimum libqb functionality -Requires: libqb >= 0.17.0 -#BuildRequires: libqb-devel >= 0.17.0 -BuildRequires: pkgconfig(libqb) >= 0.17.0 +Requires: libqb >= 1.0.1 +BuildRequires: libqb-devel >= 1.0.1 + # Required basic build tools -BuildRequires: autoconf -BuildRequires: automake -BuildRequires: coreutils -BuildRequires: findutils -BuildRequires: gcc -BuildRequires: grep -BuildRequires: libtool +BuildRequires: autoconf +BuildRequires: automake +BuildRequires: coreutils +BuildRequires: findutils +BuildRequires: gcc +BuildRequires: grep +BuildRequires: libtool %if %{defined pkgname_libtool_devel} -BuildRequires: %{?pkgname_libtool_devel} +BuildRequires: %{?pkgname_libtool_devel} %endif -BuildRequires: make -BuildRequires: pkgconfig -BuildRequires: sed +BuildRequires: make +BuildRequires: pkgconfig >= 0.28 +BuildRequires: sed + # Required for core functionality -BuildRequires: pkgconfig(glib-2.0) >= 2.42 -BuildRequires: libxml2-devel -BuildRequires: libxslt-devel -BuildRequires: libuuid-devel -BuildRequires: %{pkgname_bzip2_devel} +BuildRequires: pkgconfig(glib-2.0) >= 2.42 +BuildRequires: pkgconfig(gnutls) >= 3.1.7 +BuildRequires: pkgconfig(libxml-2.0) >= 2.9.2 +BuildRequires: systemd-devel +BuildRequires: libxslt-devel +BuildRequires: pkgconfig(uuid) +BuildRequires: %{pkgname_bzip2_devel} + # Enables optional functionality -BuildRequires: pkgconfig(dbus-1) -BuildRequires: %{pkgname_docbook_xsl} -BuildRequires: %{pkgname_gnutls_devel} -BuildRequires: help2man -BuildRequires: ncurses-devel -BuildRequires: pam-devel -BuildRequires: %{pkgname_gettext} >= 0.18 +BuildRequires: pkgconfig(dbus-1) >= 1.5.12 +BuildRequires: %{pkgname_docbook_xsl} +BuildRequires: help2man +BuildRequires: ncurses-devel 
+BuildRequires: pam-devel +BuildRequires: %{pkgname_gettext} >= 0.18 + # Required for "make check" -%if 0%{?with_check} -BuildRequires: libcmocka-devel -%endif -BuildRequires: corosync-devel >= 2.0.0 -%if %{with stonithd} -BuildRequires: %{pkgname_glue_libs}-devel +BuildRequires: libcmocka-devel >= 1.1.0 +BuildRequires: %{python_name}-psutil + +# RH patches are created by git, so we need git to apply them +BuildRequires: git + +Requires: corosync >= 2.0.0 +BuildRequires: corosync-devel >= 2.0.0 + +%if %{with linuxha} +BuildRequires: %{pkgname_glue_libs}-devel %endif + %if %{with doc} -BuildRequires: asciidoc -BuildRequires: inkscape -BuildRequires: %{python_name}-sphinx +BuildRequires: %{python_name}-sphinx %endif -BuildRequires: systemd -Requires: corosync >= 2.0.0 + +# Creation of Users / Groups +BuildRequires: systemd-rpm-macros + # Booth requires this -Provides: pacemaker-ticket-support = 2.0 -Provides: pcmk-cluster-manager = %{version}-%{release} -Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} +Provides: pacemaker-ticket-support = 2.0 + +Provides: pcmk-cluster-manager = %{version}-%{release} +Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description Pacemaker is an advanced, scalable High-Availability cluster resource @@ -167,21 +228,21 @@ when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: - --with(out) : cibsecrets hardening nls pre_release profiling stonithd + --with(out) : cibsecrets hardening linuxha nls profiling %package cli -License: GPLv2+ and LGPLv2+ -Summary: Command line tools for controlling Pacemaker clusters -Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} -Recommends: pcmk-cluster-manager = %{version}-%{release} +License: GPL-2.0-or-later AND LGPL-2.1-or-later +Summary: Command line tools for controlling Pacemaker clusters +Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} +Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report -Recommends: tar -Recommends: bzip2 -Requires: perl-TimeDate -Requires: %{pkgname_procps} -Requires: psmisc -Requires: %{python_name}-psutil -Requires(post): coreutils +Recommends: tar +Recommends: bzip2 +Requires: perl-TimeDate +Requires: %{pkgname_procps} +Requires: psmisc +Requires: %{python_name}-psutil +Requires(post):coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource @@ -192,16 +253,17 @@ to query and control the cluster from machines that may, or may not, be part of the cluster. 
%package -n %{pkgname_pcmk_libs} -License: GPLv2+ and LGPLv2+ -Summary: Core Pacemaker libraries -Requires(pre): %{pkgname_shadow_utils} -Requires: %{name}-schemas = %{version}-%{release} +License: GPL-2.0-or-later AND LGPL-2.1-or-later +Summary: Core Pacemaker libraries +Requires(pre): %{pkgname_shadow_utils} +Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t # sbd 1.4.2+ supports startup/shutdown handshake via pacemakerd-api # and handshake defaults to enabled for rhel builds # sbd 1.5.0+ handshake defaults to enabled with upstream sbd-release # implicitly supports handshake defaults to enabled in this spec -Conflicts: sbd < 1.5.0 +Conflicts: sbd < 1.5.0 +Conflicts: pcs < 0.11 %description -n %{pkgname_pcmk_libs} Pacemaker is an advanced, scalable High-Availability cluster resource @@ -211,9 +273,9 @@ The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs -License: GPLv2+ and LGPLv2+ -Summary: Cluster Libraries used by Pacemaker -Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} +License: GPL-2.0-or-later AND LGPL-2.1-or-later +Summary: Cluster Libraries used by Pacemaker +Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource @@ -222,16 +284,30 @@ manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. +%package -n %{python_name}-%{name} +License: LGPL-2.1-or-later +Summary: Python libraries for Pacemaker +Requires: %{python_path} +Requires: %{pkgname_pcmk_libs} = %{version}-%{release} +BuildArch: noarch + +%description -n %{python_name}-%{name} +Pacemaker is an advanced, scalable High-Availability cluster resource +manager. + +The %{python_name}-%{name} package contains a Python library that can be used +to interface with Pacemaker. + %package remote -License: GPLv2+ and LGPLv2+ -Summary: Pacemaker remote executor daemon for non-cluster nodes -Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} -Requires: %{name}-cli = %{version}-%{release} -Requires: resource-agents -# -remote can be fully independent of systemd -%{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} -Provides: pcmk-cluster-manager = %{version}-%{release} -Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} +License: GPL-2.0-or-later AND LGPL-2.1-or-later +Summary: Pacemaker remote executor daemon for non-cluster nodes +Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} +Requires: %{name}-cli = %{version}-%{release} +Requires: %{python_name}-%{name} = %{version}-%{release} +Requires: resource-agents +%{?systemd_requires} +Provides: pcmk-cluster-manager = %{version}-%{release} +Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource @@ -242,20 +318,20 @@ which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. 
%package -n %{pkgname_pcmk_libs}-devel -License: GPLv2+ and LGPLv2+ -Summary: Pacemaker development package -Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} -Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} -Requires: %{pkgname_bzip2_devel}%{?_isa} -Requires: corosync-devel >= 2.0.0 -Requires: glib2-devel%{?_isa} -Requires: libqb-devel%{?_isa} +License: GPL-2.0-or-later AND LGPL-2.1-or-later +Summary: Pacemaker development package +Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} +Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} +Requires: %{pkgname_bzip2_devel}%{?_isa} +Requires: corosync-devel >= 2.0.0 +Requires: glib2-devel%{?_isa} +Requires: libqb-devel%{?_isa} >= 1.0.1 %if %{defined pkgname_libtool_devel_arch} -Requires: %{?pkgname_libtool_devel_arch} +Requires: %{?pkgname_libtool_devel_arch} %endif -Requires: libuuid-devel%{?_isa} -Requires: libxml2-devel%{?_isa} -Requires: libxslt-devel%{?_isa} +Requires: libuuid-devel%{?_isa} +Requires: libxml2-devel%{?_isa} >= 2.9.2 +Requires: libxslt-devel%{?_isa} %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource @@ -265,25 +341,31 @@ The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts -License: GPLv2+ and LGPLv2+ -Summary: Test framework for cluster-related technologies like Pacemaker -Requires: %{python_path} -Requires: %{pkgname_pcmk_libs} = %{version}-%{release} -Requires: %{name}-cli = %{version}-%{release} -Requires: %{pkgname_procps} -Requires: psmisc -BuildArch: noarch - +License: GPL-2.0-or-later AND LGPL-2.1-or-later +Summary: Test framework for cluster-related technologies like Pacemaker +Requires: %{python_path} +Requires: %{pkgname_pcmk_libs} = %{version}-%{release} +Requires: %{name}-cli = %{version}-%{release} +Requires: %{pkgname_procps} +Requires: psmisc +BuildArch: noarch + +# systemd Python bindings are a separate package in some distros +%if %{defined systemd_requires} +%if %{defined fedora} || %{defined rhel} +Requires: %{python_name}-systemd +%endif +%endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc -License: CC-BY-SA-4.0 -Summary: Documentation for Pacemaker -BuildArch: noarch -Conflicts: %{name}-libs > %{version}-%{release} -Conflicts: %{name}-libs < %{version}-%{release} +License: CC-BY-SA-4.0 +Summary: Documentation for Pacemaker +BuildArch: noarch +Conflicts: %{name}-libs > %{version}-%{release} +Conflicts: %{name}-libs < %{version}-%{release} %description doc Documentation for Pacemaker. @@ -292,9 +374,9 @@ Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas -License: GPLv2+ -Summary: Schemas and upgrade stylesheets for Pacemaker -BuildArch: noarch +License: GPL-2.0-or-later +Summary: Schemas and upgrade stylesheets for Pacemaker +BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker @@ -303,10 +385,15 @@ Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
%prep -%autosetup -p1 -n %{name}-Pacemaker-%{version} +%autosetup -n %{name}-Pacemaker-%{version} -S git_am -p 1 +# in f33 s390x complains but shouldn't hurt globally +# as configure.ac is checking for support +sed -i configure.ac -e "s/-Wall/-Wall -Wno-format-truncation/" %build -export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} + +export systemdsystemunitdir=%{?_unitdir} + %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will @@ -321,12 +408,9 @@ export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" ./autogen.sh -CFLAGS="%{optflags} -DQB_KILL_ATTRIBUTE_SECTION" - -%configure \ +%{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ - %{?with_legacy_links: --enable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_cibsecrets: --with-cibsecrets} \ %{?with_nls: --enable-nls} \ @@ -334,32 +418,56 @@ CFLAGS="%{optflags} -DQB_KILL_ATTRIBUTE_SECTION" %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ %{?bug_url: --with-bug-url=%{bug_url}} \ %{?ocf_root: --with-ocfdir=%{ocf_root}} \ - %{?concurrent_fencing} \ %{?resource_stickiness} \ - %{?compat20} \ --disable-static \ --with-initdir=%{_initrddir} \ --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ - --with-nagios=true \ --with-version=%{version}-%{release} -%make_build +make %{_smp_mflags} V=1 + +pushd python +%py3_build +popd + +%check +make %{_smp_mflags} check +{ cts/cts-scheduler --run load-stopped-loop \ + && cts/cts-cli -V \ + && touch .CHECKED +} 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint +[ -f .CHECKED ] && rm -f -- .CHECKED %install -%make_install +# skip automake-native Python byte-compilation, since RPM-native one (possibly +# distro-confined to Python-specific directories, which is currently the only +# relevant place, anyway) assures proper intrinsic alignment with wider system +# (such as with py_byte_compile macro, which is concurrent Fedora/EL specific) +make install \ + DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ + %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} + +pushd python +%py3_install +popd -mkdir -p %{buildroot}%{_localstatedir}/lib/rpm-state/%{name} +mkdir -p %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata +for file in $(find %{nagios_name}-%{nagios_hash}/metadata -type f); do + install -m 644 $file %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata +done + + +mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} # Don't package libtool archives -find %{buildroot} -type f -name "*.la" -delete -print +find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # Do not package these either rm -f %{buildroot}/%{_sbindir}/fence_legacy rm -f %{buildroot}/%{_mandir}/man8/fence_legacy.* -%{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests} -%{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts} +install -p -D -m 0644 %{SOURCE1} %{buildroot}%{_sysusersdir}/pacemaker.conf %post %systemd_post pacemaker.service @@ -410,7 +518,7 @@ fi if [ "$1" -eq 2 ]; then # Package upgrade, not initial install: # Move any pre-2.0 logs to new location to ensure they get rotated - { mv -fbS.rpmsave %{_}//pacemaker.log* %{_var}/log/pacemaker \ + { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \ || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker } >/dev/null 2>/dev/null || : fi @@ -421,12 +529,6 @@ fi %postun cli 
%systemd_postun_with_restart crm_mon.service -%pre -n %{pkgname_pcmk_libs} -# @TODO Use sysusers.d: -# https://fedoraproject.org/wiki/Changes/Adopting_sysusers.d_format -getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id} -getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname} -exit 0 %ldconfig_scriptlets -n %{pkgname_pcmk_libs} %ldconfig_scriptlets cluster-libs @@ -434,27 +536,23 @@ exit 0 %files ########################################################### %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker +%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %{_sbindir}/pacemakerd %{_unitdir}/pacemaker.service -%exclude %{_datadir}/pacemaker/nagios/plugins-metadata/* - -%exclude %{_libexecdir}/pacemaker/cts-log-watcher %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted -%exclude %{_sbindir}/pacemaker_remoted %{_libexecdir}/pacemaker/* -%{_sbindir}/crm_master %{_sbindir}/fence_watchdog +%doc %{_mandir}/man7/pacemaker-based.* %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* -%doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_watchdog.* %doc %{_mandir}/man8/pacemakerd.* @@ -462,16 +560,15 @@ exit 0 %license licenses/GPLv2 %license COPYING -%doc ChangeLog +%doc ChangeLog.md -%dir %attr (750, %{uname}, %{gname}) %{_sharedstatedir}/pacemaker/cib -%dir %attr (750, %{uname}, %{gname}) %{_sharedstatedir}/pacemaker/pengine +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine %{ocf_root}/resource.d/pacemaker/controld %{ocf_root}/resource.d/pacemaker/remote %files cli %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker -%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %{_unitdir}/crm_mon.service @@ -485,6 +582,7 @@ exit 0 %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount +%{_sbindir}/crm_master %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource @@ -505,37 +603,43 @@ exit 0 %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude %{ocf_root}/resource.d/pacemaker/controld -%exclude %{ocf_root}/resource.d/pacemaker/o2cb %exclude %{ocf_root}/resource.d/pacemaker/remote %dir %{ocf_root} %dir %{ocf_root}/resource.d %{ocf_root}/resource.d/pacemaker -%doc %{_mandir}/man7/* +%doc %{_mandir}/man7/*pacemaker* +%exclude %{_mandir}/man7/pacemaker-based.* %exclude %{_mandir}/man7/pacemaker-controld.* %exclude %{_mandir}/man7/pacemaker-schedulerd.* %exclude %{_mandir}/man7/pacemaker-fenced.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* -%exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* -%doc %{_mandir}/man8/* -%exclude %{_mandir}/man8/crm_master.* +%doc %{_mandir}/man8/crm*.8.gz +%doc %{_mandir}/man8/attrd_updater.* +%doc %{_mandir}/man8/cibadmin.* +%if %{with cibsecrets} + %doc %{_mandir}/man8/cibsecret.* +%endif %exclude %{_mandir}/man8/fence_watchdog.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker-remoted.* +%doc %{_mandir}/man8/iso8601.* +%doc %{_mandir}/man8/stonith_admin.* %license licenses/GPLv2 %license COPYING -%doc ChangeLog +%doc ChangeLog.md -%dir %attr (750, %{uname}, %{gname}) %{_sharedstatedir}/pacemaker -%dir %attr (750, %{uname}, %{gname}) 
%{_sharedstatedir}/pacemaker/blackbox -%dir %attr (750, %{uname}, %{gname}) %{_sharedstatedir}/pacemaker/cores +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles -%files -n %{pkgname_pcmk_libs} %{?with_nls:-f %{name}.lang} +%files -n %{pkgname_pcmk_libs} %{?with_nls:-f\\\\\\ %{name}.lang} +%{_sysusersdir}/pacemaker.conf %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* @@ -546,13 +650,21 @@ exit 0 %{_libdir}/libstonithd.so.* %license licenses/LGPLv2.1 %license COPYING -%doc ChangeLog +%doc ChangeLog.md %files cluster-libs %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %license COPYING -%doc ChangeLog +%doc ChangeLog.md + +%files -n %{python_name}-%{name} +%{python3_sitelib}/pacemaker/ +%{python3_sitelib}/pacemaker-*.egg-info +%exclude %{python3_sitelib}/pacemaker/_cts/ +%license licenses/LGPLv2.1 +%license COPYING +%doc ChangeLog.md %files remote %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker @@ -563,34 +675,46 @@ exit 0 %{_unitdir}/pacemaker_remote.service %{_sbindir}/pacemaker-remoted -%{_sbindir}/pacemaker_remoted %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %license COPYING -%doc ChangeLog +%doc ChangeLog.md %files doc %doc %{pcmk_docdir} +%exclude %{pcmk_docdir}/licenses/* +%exclude %{pcmk_docdir}/COPYING +%license COPYING +%license licenses/GPLv2 %license licenses/CC-BY-SA-4.0 +%license licenses/RevisedBSD +%license licenses/LGPLv2.1 %files cts -%{python_site}/cts +%{python3_sitelib}/pacemaker/_cts/ %{_datadir}/pacemaker/tests -%{_libexecdir}/pacemaker/cts-log-watcher %{_libexecdir}/pacemaker/cts-support %license licenses/GPLv2 %license COPYING -%doc ChangeLog +%doc ChangeLog.md %files -n %{pkgname_pcmk_libs}-devel %{_includedir}/pacemaker -%{_libdir}/*.so -%{_libdir}/pkgconfig/*.pc +%{_libdir}/libcib.so +%{_libdir}/liblrmd.so +%{_libdir}/libcrmservice.so +%{_libdir}/libcrmcommon.so +%{_libdir}/libpe_status.so +%{_libdir}/libpe_rules.so +%{_libdir}/libpacemaker.so +%{_libdir}/libstonithd.so +%{_libdir}/libcrmcluster.so +%{_libdir}/pkgconfig/*pacemaker*.pc %license licenses/LGPLv2.1 %license COPYING -%doc ChangeLog +%doc ChangeLog.md %files schemas %license licenses/GPLv2 @@ -602,6 +726,10 @@ exit 0 %{_datadir}/pkgconfig/pacemaker-schemas.pc %changelog +* Mon Nov 10 2025 Jyoti kanase - 3.0.1-1 +- Upgrade to 3.0.1 +- License Verified. 
+ * Tue Sep 19 2023 Jon Slobodzian - 2.1.5-5 - Fix build issue for systemd/systemd-bootstrap confusion diff --git a/SPECS-EXTENDED/pacemaker/pacemaker.sysusers b/SPECS-EXTENDED/pacemaker/pacemaker.sysusers new file mode 100644 index 00000000000..daadb965e2a --- /dev/null +++ b/SPECS-EXTENDED/pacemaker/pacemaker.sysusers @@ -0,0 +1,3 @@ +#Type Name ID GECOS Home directory Shell +g haclient 189 +u hacluster 189:haclient "cluster user" /var/lib/pacemaker /sbin/nologin diff --git a/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.signatures.json b/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.signatures.json index 5c7ddab055e..69f3e942533 100644 --- a/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.signatures.json +++ b/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "perl-Alien-pkgconf-0.20.tar.gz": "93ac0adcaccfa5ca3151bc9a4a3cc5e9d0591696b6ee059a3311f1e1cb49410d" + "Alien-pkgconf-0.21.tar.gz": "b9abab04605aab11089005b98e5ff202b93912cbde2ec67ee688f278ea77f765" } -} \ No newline at end of file +} diff --git a/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.spec b/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.spec index 0b7eea0900a..c83412f3fc6 100644 --- a/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.spec +++ b/SPECS-EXTENDED/perl-Alien-pkgconf/perl-Alien-pkgconf.spec @@ -1,15 +1,16 @@ Name: perl-Alien-pkgconf -Version: 0.20 +Version: 0.21 Release: 1%{?dist} Summary: Discover pkgconf and libpkgconf -# Other files: GPL+ or Artistic +# Other files: GPL-1.0-or-later OR Artistic-1.0-Perl ## Not used # pkgconf-1.3.9/aclocal.m4: GPLv3+ with exceptions -License: GPL+ or Artistic +License: GPL-1.0-or-later OR Artistic-1.0-Perl +SourceLicense: (%{license}) AND GPL-3.0-or-later WITH Autoconf-exception-macro Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://metacpan.org/release/Alien-pkgconf -Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/Alien-pkgconf-%{version}.tar.gz#/perl-Alien-pkgconf-%{version}.tar.gz +Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/Alien-pkgconf-%{version}.tar.gz Patch0: Alien-pkgconf-0.19-Accept-pkgconf-1.9.patch # This is a full-arch package because it stores data about arch-specific # libpkgconf.so library and it stores them into arch-specific directory. @@ -58,7 +59,7 @@ but rather to provide the necessary package by a CPAN module that needs libpkgconf, such as PkgConfig::LibPkgConf. 
%prep -%setup -q -n Alien-pkgconf-%{version} +%autosetup -p1 -n Alien-pkgconf-%{version} %build unset ALIEN_FORCE ALIEN_INSTALL_TYPE @@ -72,11 +73,20 @@ perl Makefile.PL INSTALLDIRS=vendor NO_PACKLIST=1 NO_PERLLOCAL=1 %files %license LICENSE %doc Changes README -%{perl_vendorarch}/auto/* -%{perl_vendorarch}/Alien -%{_mandir}/man3/* +%dir %{perl_vendorarch}/auto/Alien +%{perl_vendorarch}/auto/Alien/pkgconf +%dir %{perl_vendorarch}/auto/share +%dir %{perl_vendorarch}/auto/share/dist +%{perl_vendorarch}/auto/share/dist/Alien-pkgconf +%dir %{perl_vendorarch}/Alien +%{perl_vendorarch}/Alien/pkgconf.pm +%{_mandir}/man3/Alien::pkgconf.* %changelog +* Tue Dec 23 2025 Aditya Singh - 0.21-1 +- Upgrade to version 0.21 +- License verified + * Fri Mar 14 2025 Jyoti Kanase - 0.20-1 - Upgrade to 0.20 - License verified diff --git a/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.signatures.json b/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.signatures.json index abd85ecad0a..b5218b048cc 100644 --- a/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.signatures.json +++ b/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "perl-FFI-CheckLib-0.26.tar.gz": "06e33bf0807f841626248197e8f39475ac50d1364a0336836c9b581d19a97f65" + "perl-FFI-CheckLib-0.31.tar.gz": "04d885fc377d44896e5ea1c4ec310f979bb04f2f18658a7e7a4d509f7e80bb80" } } diff --git a/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.spec b/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.spec index 206eb83c520..450f09fe86d 100644 --- a/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.spec +++ b/SPECS-EXTENDED/perl-FFI-CheckLib/perl-FFI-CheckLib.spec @@ -1,20 +1,22 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux # Run optional test %{bcond_without perl_FFI_Changes_enables_optional_test} Name: perl-FFI-CheckLib -Version: 0.26 -Release: 3%{?dist} +Version: 0.31 +Release: 2%{?dist} Summary: Check that a library is available for FFI -License: GPL+ or Artistic -Vendor: Microsoft Corporation -Distribution: Azure Linux +License: GPL-1.0-or-later OR Artistic-1.0-Perl URL: https://metacpan.org/release/FFI-CheckLib Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/FFI-CheckLib-%{version}.tar.gz#/perl-FFI-CheckLib-%{version}.tar.gz BuildArch: noarch +BuildRequires: coreutils BuildRequires: make BuildRequires: perl-generators BuildRequires: perl-interpreter BuildRequires: perl(:VERSION) >= 5.6 +BuildRequires: perl(Config) BuildRequires: perl(ExtUtils::MakeMaker) >= 6.76 BuildRequires: perl(strict) BuildRequires: perl(warnings) @@ -22,16 +24,21 @@ BuildRequires: perl(warnings) BuildRequires: perl(base) BuildRequires: perl(Carp) BuildRequires: perl(DynaLoader) +BuildRequires: perl(Env) BuildRequires: perl(Exporter) +BuildRequires: perl(File::Basename) BuildRequires: perl(File::Spec) +# File::Which is used from private functions which are only called on Darwin. +BuildRequires: perl(List::Util) >= 1.33 # Tests: -BuildRequires: perl(Config) +# File::Which is a run-time dependency on Darwin only. The code is exhibited by a test, +# but never on Linux in production. 
+BuildRequires: perl(File::Which) BuildRequires: perl(lib) BuildRequires: perl(Test2::API) >= 1.302015 -BuildRequires: perl(Test2::Mock) -BuildRequires: perl(Test2::Require::EnvVar) >= 0.000060 -BuildRequires: perl(Test2::Require::Module) >= 0.000060 -BuildRequires: perl(Test2::V0) >= 0.000060 +BuildRequires: perl(Test2::Require::EnvVar) >= 0.000121 +BuildRequires: perl(Test2::Require::Module) >= 0.000121 +BuildRequires: perl(Test2::V0) >= 0.000121 %if %{with perl_FFI_Changes_enables_optional_test} # Optional tests: BuildRequires: perl(Env) @@ -39,8 +46,18 @@ BuildRequires: perl(Test::Exit) # Test/More.pl is not helpful # FFI::Platypus not used %endif -Requires: perl(:MODULE_COMPAT_%(eval "`perl -V:version`"; echo $version)) Requires: perl(DynaLoader) +Requires: perl(File::Basename) + +# Remove under-specified dependencies +%global __requires_exclude %{?__requires_exclude:%{__requires_exclude}|}^perl\\((Test2::API|Test2::Require::EnvVar|Test2::Require::Module|Test2::V0)\\)$ + +# Remove private modules +%global __requires_exclude %{__requires_exclude}|^perl\\((Test2::Plugin::FauxOS|Test2::Tools::FauxDynaLoader|Test2::Tools::NoteStderr)\\) +%global __provides_exclude %{?__provides_exclude:%{__provides_exclude}|}^perl\\((Alien::libbar|Test2::Plugin::FauxOS|Test2::Tools::FauxDynaLoader|Test2::Tools::NoteStderr)\\) + +# Removed dependency on external modules perl(FFI::Platypus) and perl(Test2::Tools::Process) to correct install failure. +%global __requires_exclude %{?__requires_exclude:%{__requires_exclude}|}(^perl\\(FFI::Platypus\\)$|^perl\\(Test2::Tools::Process\\)$) %description This Perl module checks whether a particular dynamic library is available for @@ -50,28 +67,76 @@ packages are not installed. It also provides a find_lib function that will return the full path to the found dynamic library, which can be feed directly into FFI::Platypus or FFI::Raw. +%if %{with tests} +%package tests +Summary: Tests for %{name} +Requires: %{name} = %{?epoch:%{epoch}:}%{version}-%{release} +Requires: perl-Test-Harness +# Tests: +# File::Which is a run-time dependency on Darwin only. The code is exhibited by a test, +# but never on Linux in production. +Requires: perl(File::Which) +Requires: perl(Test2::API) >= 1.302015 +Requires: perl(Test2::Require::EnvVar) >= 0.000121 +Requires: perl(Test2::Require::Module) >= 0.000121 +Requires: perl(Test2::V0) >= 0.000121 + +%description tests +Tests from %{name}. Execute them +with "%{_libexecdir}/%{name}/test". +%endif + %prep %setup -q -n FFI-CheckLib-%{version} - +%if !%{with perl_FFI_Changes_enables_optional_test} +rm t/ffi_checklib__exit.t +perl -i -ne 'print $_ unless m{\A\Qt/ffi_checklib__exit.t\E\b}' MANIFEST +%endif +# Help generators to recognize Perl scripts +for F in t/*.t; do + perl -i -MConfig -ple 'print $Config{startperl} if $. == 1 && !s{\A#!\s*perl}{$Config{startperl}}' "$F" + chmod +x "$F" +done + %build perl Makefile.PL INSTALLDIRS=vendor NO_PACKLIST=1 NO_PERLLOCAL=1 %{make_build} %install %{make_install} -%{_fixperms} $RPM_BUILD_ROOT/* - +%{_fixperms} %{buildroot}/* +# Install tests +mkdir -p %{buildroot}%{_libexecdir}/%{name} +cp -a corpus t %{buildroot}%{_libexecdir}/%{name} +cat > %{buildroot}%{_libexecdir}/%{name}/test << 'EOF' +#!/bin/sh +unset CIPSOMETHING FFI_CHECKLIB_PATH +cd %{_libexecdir}/%{name} && exec prove -I . 
-j "$(getconf _NPROCESSORS_ONLN)" +EOF +chmod +x %{buildroot}%{_libexecdir}/%{name}/test + %check -unset EXTRA_CI +unset CIPSOMETHING FFI_CHECKLIB_PATH +export HARNESS_OPTIONS=j$(perl -e 'if ($ARGV[0] =~ /.*-j([0-9][0-9]*).*/) {print $1} else {print 1}' -- '%{?_smp_mflags}') make test %files %license LICENSE %doc Changes README -%{perl_vendorlib}/* -%{_mandir}/man3/* +%dir %{perl_vendorlib}/FFI +%{perl_vendorlib}/FFI/CheckLib.pm +%{_mandir}/man3/FFI::CheckLib.* +%{_libexecdir}/%{name} + %changelog +* Wed Dec 24 2025 Aditya Singh - 0.31-2 +- Removed dependency on external modules to correct install failure. + +* Thu Nov 20 2025 Akarsh Chaudhary - 0.31-1 +- Upgrade to version 0.31 (license: MIT). +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 0.26-3 - Initial CBL-Mariner import from Fedora 32 (license: MIT). diff --git a/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.signatures.json b/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.signatures.json index 0e6a212c303..8086e2f3ae1 100644 --- a/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.signatures.json +++ b/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "perl-Params-ValidationCompiler-0.30.tar.gz": "dc5bee23383be42765073db284bed9fbd819d4705ad649c20b644452090d16cb" + "Params-ValidationCompiler-0.31.tar.gz": "7b6497173f1b6adb29f5d51d8cf9ec36d2f1219412b4b2410e9d77a901e84a6d" } } diff --git a/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.spec b/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.spec index 44a9b0a8129..49ba7d52aee 100644 --- a/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.spec +++ b/SPECS-EXTENDED/perl-Params-ValidationCompiler/perl-Params-ValidationCompiler.spec @@ -1,3 +1,5 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux # Run optional test %if ! 
(0%{?rhel}) %bcond_without perl_Params_ValidationCompiler_enables_optional_test @@ -6,14 +8,12 @@ %endif Name: perl-Params-ValidationCompiler -Version: 0.30 -Release: 7%{?dist} +Version: 0.31 +Release: 1%{?dist} Summary: Build an optimized subroutine parameter validator once, use it forever -License: Artistic 2.0 -Vendor: Microsoft Corporation -Distribution: Azure Linux +License: Artistic-2.0 URL: https://metacpan.org/release/Params-ValidationCompiler -Source0: https://cpan.metacpan.org/modules/by-module/Params/Params-ValidationCompiler-%{version}.tar.gz#/perl-Params-ValidationCompiler-%{version}.tar.gz +Source0: https://cpan.metacpan.org/modules/by-module/Params/Params-ValidationCompiler-%{version}.tar.gz BuildArch: noarch # Build BuildRequires: coreutils @@ -24,7 +24,7 @@ BuildRequires: perl(ExtUtils::MakeMaker) > 6.75 # Module BuildRequires: perl(B) BuildRequires: perl(Carp) -BuildRequires: perl(Class::XSAccessor) +BuildRequires: perl(Class::XSAccessor) >= 1.17 BuildRequires: perl(Eval::Closure) BuildRequires: perl(Exception::Class) BuildRequires: perl(Exporter) @@ -58,8 +58,7 @@ BuildRequires: perl(Types::Standard) %endif %endif # Dependencies -Requires: perl(:MODULE_COMPAT_%(eval "`perl -V:version`"; echo $version)) -Recommends: perl(Class::XSAccessor) +Recommends: perl(Class::XSAccessor) >= 1.17 Recommends: perl(Sub::Util) >= 1.40 %description @@ -89,6 +88,10 @@ make test %{_mandir}/man3/Params::ValidationCompiler::Exceptions.3* %changelog +* Mon Dec 22 2025 Akarsh Chaudhary - 0.31-1 +- Upgrade to version 0.31 (license: MIT). +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 0.30-7 - Initial CBL-Mariner import from Fedora 32 (license: MIT). diff --git a/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/PkgConfig-LibPkgConf-0.11-Fix-resolving-flags-for-packages-with-a-name-differe.patch b/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/PkgConfig-LibPkgConf-0.11-Fix-resolving-flags-for-packages-with-a-name-differe.patch new file mode 100644 index 00000000000..925ec103dc9 --- /dev/null +++ b/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/PkgConfig-LibPkgConf-0.11-Fix-resolving-flags-for-packages-with-a-name-differe.patch @@ -0,0 +1,99 @@ +From 39b8a0e8f664dc103a552dbab1cdccdab8ce3062 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Tue, 7 Mar 2023 09:16:10 +0100 +Subject: [PATCH] Fix resolving flags for packages with a name different from + its identifier +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Alien-Build-2.77 tests revealed a bug in constructing a query for +pkgconf-1.9 solver: If a package file had file name different from +a Name value inside the file, the package was able to be found, but +the flags solver searched for the Name and found nothing. + +Studying pre-pkgconf documentation shows that Name value is only +a human-oriented display name and a base of the package file name +should be used instead as a package identifier. This base name is +stored into an id field of the package structure of pkgconf. + +This patch fixes it by using the id field instead. It also adds a test +to prevent from future regressions. 
+ +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 2 +- + MANIFEST | 1 + + corpus/lib4/bar.pc | 4 ++++ + t/client.t | 14 +++++++++++++- + 4 files changed, 19 insertions(+), 2 deletions(-) + create mode 100644 corpus/lib4/bar.pc + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index 63c78fb..57e6892 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -117,7 +117,7 @@ solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, + #if LIBPKGCONF_VERSION >= 10900 + if (sizeof(query_string) <= + snprintf(query_string, sizeof(query_string), "%s = %s", +- package->realname, package->version)) ++ package->id, package->version)) + false; + pkgconf_queue_push(&query, query_string); + if (loaded_from_file) +diff --git a/MANIFEST b/MANIFEST +index 77378df..1eb4491 100644 +--- a/MANIFEST ++++ b/MANIFEST +@@ -6,6 +6,7 @@ corpus/lib1/foo1a.pc + corpus/lib2/bar.pc + corpus/lib2/foo.pc + corpus/lib3/foo.pc ++corpus/lib4/bar.pc + INSTALL + lib/PkgConfig/LibPkgConf.pm + lib/PkgConfig/LibPkgConf/Client.pm +diff --git a/corpus/lib4/bar.pc b/corpus/lib4/bar.pc +new file mode 100644 +index 0000000..47e52dd +--- /dev/null ++++ b/corpus/lib4/bar.pc +@@ -0,0 +1,4 @@ ++Name: foo ++Description: A pkg-config file whose identifier does not match its name ++Version: 1.2.3 ++Cflags: -fPIC +diff --git a/t/client.t b/t/client.t +index 6c80f83..db115fe 100644 +--- a/t/client.t ++++ b/t/client.t +@@ -206,7 +206,7 @@ subtest 'path attributes' => sub { + + mkpath "$root/$_", 0, 0700 for qw( + foo bar baz ralph trans formers foo/lib bar/lib trans/lib formers/lib +- foo/include bar/include trans/include formers/include ++ /foo/include bar/include trans/include formers/include + ); + + subtest 'search path' => sub { +@@ -295,4 +295,16 @@ subtest 'global' => sub { + + }; + ++subtest 'a package with a different name' => sub { ++ ++ my $client = PkgConfig::LibPkgConf::Client->new( path => 'corpus/lib4' ); ++ ++ is( $client->find('foo'), undef, 'A human-readable name foo is ignored'); ++ ++ my $pkg = $client->find('bar'); ++ isnt( $pkg, undef, 'An identifier bar is found' ); ++ is( $pkg->cflags, '-fPIC ', 'Cflags are retrieved' ); ++ ++}; ++ + done_testing; +-- +2.39.2 + diff --git a/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/PkgConfig-LibPkgConf-0.11-adapt_to_pkgconf_1.9.4.patch b/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/PkgConfig-LibPkgConf-0.11-adapt_to_pkgconf_1.9.4.patch new file mode 100644 index 00000000000..9a6cd94fbb7 --- /dev/null +++ b/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/PkgConfig-LibPkgConf-0.11-adapt_to_pkgconf_1.9.4.patch @@ -0,0 +1,1107 @@ +From 75e02d57bd188286c69469e92f75cd672c2bcb08 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Thu, 23 Feb 2023 17:12:24 +0100 +Subject: [PATCH 1/8] Adapt to pkgconf-1.9.4 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Fixes error handled prototype. +Adapts to passing flags to file_open. +Adapts tests to no traling spaces (incompatible change). +Works around a possible pkgconf regression with trailing null bytes. + +TODO: t/package.t fails on missing shared libs from dependencies. +TODO: Restore compatibility with older pkgconfs. 
+Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 13 ++++++++++--- + t/client.t | 2 +- + t/package.t | 28 ++++++++++++++-------------- + t/simple.t | 8 ++++---- + 4 files changed, 29 insertions(+), 22 deletions(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index 3afa483..4055d40 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -14,7 +14,7 @@ struct my_client_t { + typedef struct my_client_t my_client_t; + + static bool +-my_error_handler(const char *msg, const pkgconf_client_t *_, const void *data) ++my_error_handler(const char *msg, const pkgconf_client_t *_, void *data) + { + dSP; + +@@ -243,7 +243,7 @@ _package_from_file(self, filename) + CODE: + fp = fopen(filename, "r"); + if(fp != NULL) +- RETVAL = PTR2IV(pkgconf_pkg_new_from_file(&self->client, filename, fp)); ++ RETVAL = PTR2IV(pkgconf_pkg_new_from_file(&self->client, filename, fp, 0)); + else + RETVAL = 0; + OUTPUT: +@@ -385,6 +385,7 @@ _get_string(self, client, type) + INIT: + pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; ++ char *buffer; + size_t len; + int eflag; + int flags; +@@ -411,8 +412,14 @@ _get_string(self, client, type) + len = pkgconf_fragment_render_len(&filtered_list, escape, NULL); + RETVAL = newSV(len == 1 ? len : len-1); + SvPOK_on(RETVAL); ++ buffer = SvPVX(RETVAL); ++ pkgconf_fragment_render_buf(&filtered_list, buffer, len, escape, NULL); ++ /* ++ * Trim trailing null bytes observed in pkgconf-1.9.4. Probably related to ++ * 648a2249fcb10bf679bdb587ef2bbddaab3023ad pkgconf commit. ++ */ ++ while (len > 1 && buffer[len-2] == '\0') len--; + SvCUR_set(RETVAL, len-1); +- pkgconf_fragment_render_buf(&filtered_list, SvPVX(RETVAL), len, escape, NULL); + pkgconf_fragment_free(&filtered_list); + pkgconf_fragment_free(&unfiltered_list); + OUTPUT: +diff --git a/t/client.t b/t/client.t +index 6c80f83..1850622 100644 +--- a/t/client.t ++++ b/t/client.t +@@ -289,7 +289,7 @@ subtest 'global' => sub { + my $client = PkgConfig::LibPkgConf::Client->new( path => 'corpus/lib1', global => { prefix => '/klingon/autobot/force' } ); + my $pkg = $client->find('foo'); + +- is( $pkg->cflags, '-fPIC -I/klingon/autobot/force/include/foo ' ); ++ is( $pkg->cflags, '-fPIC -I/klingon/autobot/force/include/foo' ); + + }; + +diff --git a/t/package.t b/t/package.t +index 8da6efb..a4b1e6d 100644 +--- a/t/package.t ++++ b/t/package.t +@@ -43,9 +43,9 @@ subtest 'find' => sub { + is $pkg->version, '1.2.3', 'version'; + is $pkg->description, 'A testing pkg-config file', 'description'; + +- is $pkg->libs, '-L/test/lib -lfoo ', 'libs'; +- is $pkg->cflags, '-fPIC -I/test/include/foo ', 'cflags'; +- is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC ', 'cflags_static'; ++ is $pkg->libs, '-L/test/lib -lfoo', 'libs'; ++ is $pkg->cflags, '-fPIC -I/test/include/foo', 'cflags'; ++ is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC', 'cflags_static'; + + my @libs = $pkg->list_libs; + my @cflags = $pkg->list_cflags; +@@ -101,9 +101,9 @@ subtest 'package_from_file' => sub { + is $pkg->version, '1.2.3', 'version'; + is $pkg->description, 'A testing pkg-config file', 'description'; + +- is $pkg->libs, '-L/test/lib -lfoo ', 'libs'; +- is $pkg->cflags, '-fPIC -I/test/include/foo ', 'cflags'; +- is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC ', 'cflags_static'; ++ is $pkg->libs, '-L/test/lib -lfoo', 'libs'; ++ is $pkg->cflags, '-fPIC -I/test/include/foo', 'cflags'; ++ is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC', 
'cflags_static'; + + my @libs = $pkg->list_libs; + my @cflags = $pkg->list_cflags; +@@ -146,8 +146,8 @@ subtest 'filte sys' => sub { + + my $pkg = $client->find('foo'); + +- is $pkg->libs, '-lfoo ', 'libs'; +- is $pkg->cflags, '-fPIC ', 'cflags'; ++ is $pkg->libs, '-lfoo', 'libs'; ++ is $pkg->cflags, '-fPIC', 'cflags'; + + }; + +@@ -162,8 +162,8 @@ subtest 'quotes and spaces' => sub { + my $pkg = $client->find('foo1'); + + TODO: { local $TODO = 'not important'; +- is $pkg->libs, "-L/test/lib -LC:/Program\\ Files/Foo\\ App/lib -lfoo1 "; +- is $pkg->cflags, '-fPIC -I/test/include/foo1 -IC:/Program\\ Files/Foo\\ App/include '; ++ is $pkg->libs, "-L/test/lib -LC:/Program\\ Files/Foo\\ App/lib -lfoo1"; ++ is $pkg->cflags, '-fPIC -I/test/include/foo1 -IC:/Program\\ Files/Foo\\ App/include'; + }; + + is [map { "$_" } $pkg->list_libs]->[1], '-LC:/Program Files/Foo App/lib'; +@@ -180,9 +180,9 @@ subtest 'package with prereq' => sub { + + my $pkg = $client->find('foo'); + +- is $pkg->libs, '-L/test/lib -lfoo -L/test2/lib -lbar '; +- is $pkg->cflags, '-I/test/include/foo -I/test2/include/bar '; +- is $pkg->cflags_static, '-I/test/include/foo -I/test2/include/bar -DFOO_STATIC -DBAR_STATIC '; ++ is $pkg->libs, '-L/test/lib -lfoo -L/test2/lib -lbar'; ++ is $pkg->cflags, '-I/test/include/foo -I/test2/include/bar'; ++ is $pkg->cflags_static, '-I/test/include/foo -I/test2/include/bar -DFOO_STATIC -DBAR_STATIC'; + + is_deeply [$pkg->list_libs], [qw( -L/test/lib -lfoo -L/test2/lib -lbar )]; + is_deeply [$pkg->list_cflags], [qw( -I/test/include/foo -I/test2/include/bar )]; +@@ -200,7 +200,7 @@ subtest 'package with static libs' => sub { + + my $pkg = $client->find('foo'); + +- is $pkg->libs_static, '-L/test/lib -lfoo -lbar -lbaz '; ++ is $pkg->libs_static, '-L/test/lib -lfoo -lbar -lbaz'; + is_deeply [$pkg->list_libs_static], [qw( -L/test/lib -lfoo -lbar -lbaz )]; + + }; +diff --git a/t/simple.t b/t/simple.t +index c106620..ce04e8c 100644 +--- a/t/simple.t ++++ b/t/simple.t +@@ -18,11 +18,11 @@ subtest 'simple stuff' => sub { + eval { pkgconf_version('bogus') }; + like $@, qr{package bogus not found}, 'pkgconf_version not found'; + +- is pkgconf_cflags('foo'), '-fPIC -I/test/include/foo ', 'pkgconf_cflags found'; ++ is pkgconf_cflags('foo'), '-fPIC -I/test/include/foo', 'pkgconf_cflags found'; + eval { pkgconf_cflags('bogus') }; + like $@, qr{package bogus not found}, 'pkgconf_cflags not found'; + +- is pkgconf_libs('foo'), '-L/test/lib -lfoo ', 'pkgconf_libs found'; ++ is pkgconf_libs('foo'), '-L/test/lib -lfoo', 'pkgconf_libs found'; + eval { pkgconf_libs('bogus') }; + like $@, qr{package bogus not found}, 'pkgconf_libs not found'; + }; +@@ -31,8 +31,8 @@ subtest 'static' => sub { + + local $ENV{PKG_CONFIG_PATH} = 'corpus/lib3'; + +- is pkgconf_cflags_static('foo'), '-I/test/include/foo -DFOO_STATIC ', 'cflags'; +- is pkgconf_libs_static('foo'), '-L/test/lib -lfoo -lbar -lbaz ', 'libs'; ++ is pkgconf_cflags_static('foo'), '-I/test/include/foo -DFOO_STATIC', 'cflags'; ++ is pkgconf_libs_static('foo'), '-L/test/lib -lfoo -lbar -lbaz', 'libs'; + + }; + +-- +2.39.2 + +From e9c5282cc4cb01c6270676f5b2dfd5965ed00a3e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Tue, 28 Feb 2023 18:49:55 +0100 +Subject: [PATCH 2/8] Use solver for cflags/libs +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Some t/package.t are fixed now. +But packages loaded with package_from_file() fails on the solver. See +t/package.t. 
+ +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 33 ++++++++++++++++++++++++++++----- + 1 file changed, 28 insertions(+), 5 deletions(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index 4055d40..0469f7f 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -383,6 +383,12 @@ _get_string(self, client, type) + my_client_t *client + int type + INIT: ++ pkgconf_pkg_t dep_graph_root = { ++ .id = "", ++ .realname = "", ++ .flags = PKGCONF_PKG_PROPF_VIRTUAL, ++ }; ++ pkgconf_list_t query = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; + char *buffer; +@@ -391,23 +397,39 @@ _get_string(self, client, type) + int flags; + int old_flags; + bool escape = true; ++ bool resolved; + CODE: + old_flags = flags = pkgconf_client_get_flags(&client->client); +- if(type % 2) +- flags = flags | PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS; ++ if(type % 2) { ++ flags |= (PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); ++ } else { ++ flags &= ~(PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); ++ } + pkgconf_client_set_flags(&client->client, flags); ++ pkgconf_queue_push(&query, self->realname); /* TODO: contrain a version */ ++ pkgconf_solution_free(&client->client, &dep_graph_root); ++ pkgconf_cache_free(&client->client); ++ resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); ++ pkgconf_queue_free(&query); ++ if (!resolved) { ++ pkgconf_solution_free(&client->client, &dep_graph_root); ++ XSRETURN_EMPTY; ++ } + /* + * TODO: attribute for max depth (also in the list version below) + */ + eflag = type > 1 +- ? pkgconf_pkg_cflags(&client->client, self, &unfiltered_list, client->maxdepth) +- : pkgconf_pkg_libs(&client->client, self, &unfiltered_list, client->maxdepth); ++ /* Depth more than 2 duplicates last clfags word. pkgconf hard-codes 2. */ ++ ? pkgconf_pkg_cflags(&client->client, &dep_graph_root, &unfiltered_list, 2/*client->maxdepth*/) ++ : pkgconf_pkg_libs(&client->client, &dep_graph_root, &unfiltered_list, client->maxdepth); + pkgconf_client_set_flags(&client->client, old_flags); + /* + * TODO: throw an exception (also in the list verson below) + */ +- if(eflag != PKGCONF_PKG_ERRF_OK) ++ if(eflag != PKGCONF_PKG_ERRF_OK) { ++ pkgconf_solution_free(&client->client, &dep_graph_root); + XSRETURN_EMPTY; ++ } + pkgconf_fragment_filter(&client->client, &filtered_list, &unfiltered_list, directory_filter, NULL); + len = pkgconf_fragment_render_len(&filtered_list, escape, NULL); + RETVAL = newSV(len == 1 ? len : len-1); +@@ -422,6 +444,7 @@ _get_string(self, client, type) + SvCUR_set(RETVAL, len-1); + pkgconf_fragment_free(&filtered_list); + pkgconf_fragment_free(&unfiltered_list); ++ pkgconf_solution_free(&client->client, &dep_graph_root); + OUTPUT: + RETVAL + +-- +2.39.2 + +From d3efe46b52b6ae3defb90cd695e835ebf6d13204 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Tue, 28 Feb 2023 19:34:00 +0100 +Subject: [PATCH 3/8] Cache packages loaded from files +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Solver works on cached packages. Packages are cached either manually, +or by exploring all installation paths. To obtain cflags/libs with the +solver we need to cache the packages loaded from files. + +Drawbacks: + +(1) pkgconf tool flushed the cache before retrieving nonstatic libs +("reset solver when solving for library groups"). 
I don't understand +why. I was unable to observe any effect. Because we stash the package +into a cache, we cannot drop the cache when retrieving the libs. Thus +this patch removes flushing the cache added in port to 1.9.4. + +(2) The from-file-loaded package poisons the cache. When querying +other packages, the from-file-loaded package could be included in the +results. I'm not sure whether the original isolation was good or bad. +I can imagine someone wants to override a system-provided pkgconfig +file and thus loads the override from a file. + +Alternatively, we could cache the package loaded from a file +temporarily just around using the solver. That would narrow the time +window when the package is in the cache. + +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 12 +++++++----- + t/package.t | 2 +- + 2 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index 0469f7f..9740711 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -240,11 +240,15 @@ _package_from_file(self, filename) + const char *filename + INIT: + FILE *fp; ++ pkgconf_pkg_t *package; + CODE: + fp = fopen(filename, "r"); +- if(fp != NULL) +- RETVAL = PTR2IV(pkgconf_pkg_new_from_file(&self->client, filename, fp, 0)); +- else ++ if(fp != NULL) { ++ package = pkgconf_pkg_new_from_file(&self->client, filename, fp, 0); ++ if (package != NULL) ++ pkgconf_cache_add(&self->client, package); ++ RETVAL = PTR2IV(package); ++ } else + RETVAL = 0; + OUTPUT: + RETVAL +@@ -407,8 +411,6 @@ _get_string(self, client, type) + } + pkgconf_client_set_flags(&client->client, flags); + pkgconf_queue_push(&query, self->realname); /* TODO: contrain a version */ +- pkgconf_solution_free(&client->client, &dep_graph_root); +- pkgconf_cache_free(&client->client); + resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); + pkgconf_queue_free(&query); + if (!resolved) { +diff --git a/t/package.t b/t/package.t +index a4b1e6d..91ee84e 100644 +--- a/t/package.t ++++ b/t/package.t +@@ -94,7 +94,7 @@ subtest 'package_from_file' => sub { + note "cflags = @{[ $pkg->cflags ]}"; + note "cflags_static = @{[ $pkg->cflags_static ]}"; + +- is $pkg->refcount, 1, 'refcount'; ++ is $pkg->refcount, 2, 'refcount'; + is $pkg->id, 'foo', 'id'; + is $pkg->filename, 'corpus/lib1/foo.pc', 'filename'; + is $pkg->realname, 'foo', 'realname'; +-- +2.39.2 + +From 07e27f1f569390404cdd5518a3143fda6103ce08 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Wed, 1 Mar 2023 12:53:39 +0100 +Subject: [PATCH 4/8] Handle a version in solver query +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index 9740711..aef513b 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -3,6 +3,7 @@ + #include "XSUB.h" + + #include ++#include + + struct my_client_t { + pkgconf_client_t client; +@@ -392,6 +393,7 @@ _get_string(self, client, type) + .realname = "", + .flags = PKGCONF_PKG_PROPF_VIRTUAL, + }; ++ char query_string[PKGCONF_BUFSIZE]; + pkgconf_list_t query = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; +@@ -403,6 +405,13 @@ _get_string(self, client, type) + bool escape = true; + bool resolved; + CODE: ++ if (sizeof(query_string) <= ++ snprintf(query_string, sizeof(query_string), "%s = %s", ++ 
self->realname, self->version)) ++ XSRETURN_EMPTY; ++ pkgconf_queue_push(&query, query_string); ++ /*pkgconf_solution_free(&client->client, &dep_graph_root); ++ pkgconf_cache_free(&client->client);*/ + old_flags = flags = pkgconf_client_get_flags(&client->client); + if(type % 2) { + flags |= (PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); +@@ -410,7 +419,6 @@ _get_string(self, client, type) + flags &= ~(PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); + } + pkgconf_client_set_flags(&client->client, flags); +- pkgconf_queue_push(&query, self->realname); /* TODO: contrain a version */ + resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); + pkgconf_queue_free(&query); + if (!resolved) { +-- +2.39.2 + +From 7041a9a6dbcebf3a82e031f979c235e12614d51d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Wed, 1 Mar 2023 14:13:27 +0100 +Subject: [PATCH 5/8] Use solver for list of cflags/libs +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This resolves all the remaining tests. + +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 133 ++++++++++++++++++++++++-------------------------- + 1 file changed, 63 insertions(+), 70 deletions(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index aef513b..b0fa350 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -82,6 +82,67 @@ directory_filter(const pkgconf_client_t *client, const pkgconf_fragment_t *frag, + return true; + } + ++/* ++ * Solve cflags/libs recursively using a pkgconf solver for the given package. ++ * On success returns true and the caller needs to free the filtered_list. ++ * Otherwise, returns false and the lists are still untouched or already freed. ++ */ ++static bool ++solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, ++ pkgconf_list_t *filtered_list) { ++ pkgconf_pkg_t dep_graph_root = { ++ .id = "", ++ .realname = "", ++ .flags = PKGCONF_PKG_PROPF_VIRTUAL, ++ }; ++ char query_string[PKGCONF_BUFSIZE]; ++ pkgconf_list_t query = PKGCONF_LIST_INITIALIZER; ++ pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; ++ int eflag; ++ int flags; ++ int old_flags; ++ bool resolved; ++ ++ if (sizeof(query_string) <= ++ snprintf(query_string, sizeof(query_string), "%s = %s", ++ package->realname, package->version)) ++ false; ++ pkgconf_queue_push(&query, query_string); ++ old_flags = flags = pkgconf_client_get_flags(&client->client); ++ if(type % 2) { ++ flags |= (PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); ++ } else { ++ flags &= ~(PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); ++ } ++ pkgconf_client_set_flags(&client->client, flags); ++ resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); ++ pkgconf_queue_free(&query); ++ if (!resolved) { ++ pkgconf_solution_free(&client->client, &dep_graph_root); ++ false; ++ } ++ /* ++ * TODO: attribute for max depth (also in the list version below) ++ */ ++ eflag = type > 1 ++ /* Depth more than 2 duplicates last cflags word. pkgconf hard-codes 2. */ ++ ? 
pkgconf_pkg_cflags(&client->client, &dep_graph_root, &unfiltered_list, 2/*client->maxdepth*/) ++ : pkgconf_pkg_libs(&client->client, &dep_graph_root, &unfiltered_list, client->maxdepth); ++ pkgconf_client_set_flags(&client->client, old_flags); ++ /* ++ * TODO: throw an exception (also in the list verson below) ++ */ ++ if(eflag != PKGCONF_PKG_ERRF_OK) { ++ pkgconf_solution_free(&client->client, &dep_graph_root); ++ false; ++ } ++ pkgconf_fragment_filter(&client->client, filtered_list, &unfiltered_list, directory_filter, NULL); ++ ++ pkgconf_fragment_free(&unfiltered_list); ++ pkgconf_solution_free(&client->client, &dep_graph_root); ++ return true; ++} ++ + MODULE = PkgConfig::LibPkgConf PACKAGE = PkgConfig::LibPkgConf::Client + + +@@ -388,59 +449,13 @@ _get_string(self, client, type) + my_client_t *client + int type + INIT: +- pkgconf_pkg_t dep_graph_root = { +- .id = "", +- .realname = "", +- .flags = PKGCONF_PKG_PROPF_VIRTUAL, +- }; +- char query_string[PKGCONF_BUFSIZE]; +- pkgconf_list_t query = PKGCONF_LIST_INITIALIZER; +- pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; + char *buffer; + size_t len; +- int eflag; +- int flags; +- int old_flags; + bool escape = true; +- bool resolved; + CODE: +- if (sizeof(query_string) <= +- snprintf(query_string, sizeof(query_string), "%s = %s", +- self->realname, self->version)) +- XSRETURN_EMPTY; +- pkgconf_queue_push(&query, query_string); +- /*pkgconf_solution_free(&client->client, &dep_graph_root); +- pkgconf_cache_free(&client->client);*/ +- old_flags = flags = pkgconf_client_get_flags(&client->client); +- if(type % 2) { +- flags |= (PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); +- } else { +- flags &= ~(PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); +- } +- pkgconf_client_set_flags(&client->client, flags); +- resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); +- pkgconf_queue_free(&query); +- if (!resolved) { +- pkgconf_solution_free(&client->client, &dep_graph_root); ++ if (!solve_flags(self, client, type, &filtered_list)) + XSRETURN_EMPTY; +- } +- /* +- * TODO: attribute for max depth (also in the list version below) +- */ +- eflag = type > 1 +- /* Depth more than 2 duplicates last clfags word. pkgconf hard-codes 2. */ +- ? pkgconf_pkg_cflags(&client->client, &dep_graph_root, &unfiltered_list, 2/*client->maxdepth*/) +- : pkgconf_pkg_libs(&client->client, &dep_graph_root, &unfiltered_list, client->maxdepth); +- pkgconf_client_set_flags(&client->client, old_flags); +- /* +- * TODO: throw an exception (also in the list verson below) +- */ +- if(eflag != PKGCONF_PKG_ERRF_OK) { +- pkgconf_solution_free(&client->client, &dep_graph_root); +- XSRETURN_EMPTY; +- } +- pkgconf_fragment_filter(&client->client, &filtered_list, &unfiltered_list, directory_filter, NULL); + len = pkgconf_fragment_render_len(&filtered_list, escape, NULL); + RETVAL = newSV(len == 1 ? 
len : len-1); + SvPOK_on(RETVAL); +@@ -453,8 +468,6 @@ _get_string(self, client, type) + while (len > 1 && buffer[len-2] == '\0') len--; + SvCUR_set(RETVAL, len-1); + pkgconf_fragment_free(&filtered_list); +- pkgconf_fragment_free(&unfiltered_list); +- pkgconf_solution_free(&client->client, &dep_graph_root); + OUTPUT: + RETVAL + +@@ -465,33 +478,14 @@ _get_list(self, client, type) + my_client_t *client + int type + INIT: +- pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_node_t *node; + pkgconf_fragment_t *frag; + int count = 0; + HV *h; +- int eflag; +- int flags; +- int old_flags; + CODE: +- old_flags = flags = pkgconf_client_get_flags(&client->client); +- if(type % 2) +- flags = flags | PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS; +- pkgconf_client_set_flags(&client->client, flags); +- /* +- * TODO: attribute for max depth +- */ +- eflag = type > 1 +- ? pkgconf_pkg_cflags(&client->client, self, &unfiltered_list, client->maxdepth) +- : pkgconf_pkg_libs(&client->client, self, &unfiltered_list, client->maxdepth); +- pkgconf_client_set_flags(&client->client, old_flags); +- /* +- * TODO: throw an exception +- */ +- if(eflag != PKGCONF_PKG_ERRF_OK) ++ if (!solve_flags(self, client, type, &filtered_list)) + XSRETURN_EMPTY; +- pkgconf_fragment_filter(&client->client, &filtered_list, &unfiltered_list, directory_filter, NULL); + PKGCONF_FOREACH_LIST_ENTRY(filtered_list.head, node) + { + h = newHV(); +@@ -507,7 +501,6 @@ _get_list(self, client, type) + ST(count++) = newRV_noinc((SV*) h); + } + pkgconf_fragment_free(&filtered_list); +- pkgconf_fragment_free(&unfiltered_list); + XSRETURN(count); + + +-- +2.39.2 + +From 84be5ffc76672161d3b6d9c6dfa96331b010a655 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Wed, 1 Mar 2023 17:14:20 +0100 +Subject: [PATCH 6/8] Restore a trailing space in cflags and libs strings +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +pkgconf-1.9 fixed cflags and libs values by removing the trailing +spaces. However, this consitutes a change in behaviour (people got +used to concatenate flags) and thus this patch returns the trailing +spaces. + +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 5 +++++ + t/client.t | 2 +- + t/package.t | 28 ++++++++++++++-------------- + t/simple.t | 8 ++++---- + 4 files changed, 24 insertions(+), 19 deletions(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index b0fa350..f8a0b65 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -467,6 +467,11 @@ _get_string(self, client, type) + */ + while (len > 1 && buffer[len-2] == '\0') len--; + SvCUR_set(RETVAL, len-1); ++ /* ++ * Append a space if not already there to mimic pkgconf < 1.9 behaviour. 
++ */ ++ if (len > 1 && buffer[len-2] != ' ') ++ sv_catpvs(RETVAL, " "); + pkgconf_fragment_free(&filtered_list); + OUTPUT: + RETVAL +diff --git a/t/client.t b/t/client.t +index 1850622..6c80f83 100644 +--- a/t/client.t ++++ b/t/client.t +@@ -289,7 +289,7 @@ subtest 'global' => sub { + my $client = PkgConfig::LibPkgConf::Client->new( path => 'corpus/lib1', global => { prefix => '/klingon/autobot/force' } ); + my $pkg = $client->find('foo'); + +- is( $pkg->cflags, '-fPIC -I/klingon/autobot/force/include/foo' ); ++ is( $pkg->cflags, '-fPIC -I/klingon/autobot/force/include/foo ' ); + + }; + +diff --git a/t/package.t b/t/package.t +index 91ee84e..f409f99 100644 +--- a/t/package.t ++++ b/t/package.t +@@ -43,9 +43,9 @@ subtest 'find' => sub { + is $pkg->version, '1.2.3', 'version'; + is $pkg->description, 'A testing pkg-config file', 'description'; + +- is $pkg->libs, '-L/test/lib -lfoo', 'libs'; +- is $pkg->cflags, '-fPIC -I/test/include/foo', 'cflags'; +- is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC', 'cflags_static'; ++ is $pkg->libs, '-L/test/lib -lfoo ', 'libs'; ++ is $pkg->cflags, '-fPIC -I/test/include/foo ', 'cflags'; ++ is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC ', 'cflags_static'; + + my @libs = $pkg->list_libs; + my @cflags = $pkg->list_cflags; +@@ -101,9 +101,9 @@ subtest 'package_from_file' => sub { + is $pkg->version, '1.2.3', 'version'; + is $pkg->description, 'A testing pkg-config file', 'description'; + +- is $pkg->libs, '-L/test/lib -lfoo', 'libs'; +- is $pkg->cflags, '-fPIC -I/test/include/foo', 'cflags'; +- is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC', 'cflags_static'; ++ is $pkg->libs, '-L/test/lib -lfoo ', 'libs'; ++ is $pkg->cflags, '-fPIC -I/test/include/foo ', 'cflags'; ++ is $pkg->cflags_static, '-fPIC -I/test/include/foo -DFOO_STATIC ', 'cflags_static'; + + my @libs = $pkg->list_libs; + my @cflags = $pkg->list_cflags; +@@ -146,8 +146,8 @@ subtest 'filte sys' => sub { + + my $pkg = $client->find('foo'); + +- is $pkg->libs, '-lfoo', 'libs'; +- is $pkg->cflags, '-fPIC', 'cflags'; ++ is $pkg->libs, '-lfoo ', 'libs'; ++ is $pkg->cflags, '-fPIC ', 'cflags'; + + }; + +@@ -162,8 +162,8 @@ subtest 'quotes and spaces' => sub { + my $pkg = $client->find('foo1'); + + TODO: { local $TODO = 'not important'; +- is $pkg->libs, "-L/test/lib -LC:/Program\\ Files/Foo\\ App/lib -lfoo1"; +- is $pkg->cflags, '-fPIC -I/test/include/foo1 -IC:/Program\\ Files/Foo\\ App/include'; ++ is $pkg->libs, "-L/test/lib -LC:/Program\\ Files/Foo\\ App/lib -lfoo1 "; ++ is $pkg->cflags, '-fPIC -I/test/include/foo1 -IC:/Program\\ Files/Foo\\ App/include '; + }; + + is [map { "$_" } $pkg->list_libs]->[1], '-LC:/Program Files/Foo App/lib'; +@@ -180,9 +180,9 @@ subtest 'package with prereq' => sub { + + my $pkg = $client->find('foo'); + +- is $pkg->libs, '-L/test/lib -lfoo -L/test2/lib -lbar'; +- is $pkg->cflags, '-I/test/include/foo -I/test2/include/bar'; +- is $pkg->cflags_static, '-I/test/include/foo -I/test2/include/bar -DFOO_STATIC -DBAR_STATIC'; ++ is $pkg->libs, '-L/test/lib -lfoo -L/test2/lib -lbar '; ++ is $pkg->cflags, '-I/test/include/foo -I/test2/include/bar '; ++ is $pkg->cflags_static, '-I/test/include/foo -I/test2/include/bar -DFOO_STATIC -DBAR_STATIC '; + + is_deeply [$pkg->list_libs], [qw( -L/test/lib -lfoo -L/test2/lib -lbar )]; + is_deeply [$pkg->list_cflags], [qw( -I/test/include/foo -I/test2/include/bar )]; +@@ -200,7 +200,7 @@ subtest 'package with static libs' => sub { + + my $pkg = $client->find('foo'); + +- is 
$pkg->libs_static, '-L/test/lib -lfoo -lbar -lbaz'; ++ is $pkg->libs_static, '-L/test/lib -lfoo -lbar -lbaz '; + is_deeply [$pkg->list_libs_static], [qw( -L/test/lib -lfoo -lbar -lbaz )]; + + }; +diff --git a/t/simple.t b/t/simple.t +index ce04e8c..3d07fee 100644 +--- a/t/simple.t ++++ b/t/simple.t +@@ -18,11 +18,11 @@ subtest 'simple stuff' => sub { + eval { pkgconf_version('bogus') }; + like $@, qr{package bogus not found}, 'pkgconf_version not found'; + +- is pkgconf_cflags('foo'), '-fPIC -I/test/include/foo', 'pkgconf_cflags found'; ++ is pkgconf_cflags('foo'), '-fPIC -I/test/include/foo ', 'pkgconf_cflags found'; + eval { pkgconf_cflags('bogus') }; + like $@, qr{package bogus not found}, 'pkgconf_cflags not found'; + +- is pkgconf_libs('foo'), '-L/test/lib -lfoo', 'pkgconf_libs found'; ++ is pkgconf_libs('foo'), '-L/test/lib -lfoo ', 'pkgconf_libs found'; + eval { pkgconf_libs('bogus') }; + like $@, qr{package bogus not found}, 'pkgconf_libs not found'; + }; +@@ -31,8 +31,8 @@ subtest 'static' => sub { + + local $ENV{PKG_CONFIG_PATH} = 'corpus/lib3'; + +- is pkgconf_cflags_static('foo'), '-I/test/include/foo -DFOO_STATIC', 'cflags'; +- is pkgconf_libs_static('foo'), '-L/test/lib -lfoo -lbar -lbaz', 'libs'; ++ is pkgconf_cflags_static('foo'), '-I/test/include/foo -DFOO_STATIC ', 'cflags'; ++ is pkgconf_libs_static('foo'), '-L/test/lib -lfoo -lbar -lbaz ', 'libs'; + + }; + +-- +2.39.2 + +From 348261375be479ced9b352eaf6252ab21e87f0f4 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Thu, 2 Mar 2023 15:27:06 +0100 +Subject: [PATCH 7/8] Make it buildable with pkgconf < 1.9 again +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 25 ++++++++++++++++++++++++- + 1 file changed, 24 insertions(+), 1 deletion(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index f8a0b65..e7da2e4 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -15,7 +15,11 @@ struct my_client_t { + typedef struct my_client_t my_client_t; + + static bool ++#if LIBPKGCONF_VERSION >= 10900 + my_error_handler(const char *msg, const pkgconf_client_t *_, void *data) ++#else ++my_error_handler(const char *msg, const pkgconf_client_t *_, const void *data) ++#endif + { + dSP; + +@@ -90,6 +94,7 @@ directory_filter(const pkgconf_client_t *client, const pkgconf_fragment_t *frag, + static bool + solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, + pkgconf_list_t *filtered_list) { ++#if LIBPKGCONF_VERSION >= 10900 + pkgconf_pkg_t dep_graph_root = { + .id = "", + .realname = "", +@@ -97,17 +102,20 @@ solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, + }; + char query_string[PKGCONF_BUFSIZE]; + pkgconf_list_t query = PKGCONF_LIST_INITIALIZER; ++ bool resolved; ++#endif + pkgconf_list_t unfiltered_list = PKGCONF_LIST_INITIALIZER; + int eflag; + int flags; + int old_flags; +- bool resolved; + ++#if LIBPKGCONF_VERSION >= 10900 + if (sizeof(query_string) <= + snprintf(query_string, sizeof(query_string), "%s = %s", + package->realname, package->version)) + false; + pkgconf_queue_push(&query, query_string); ++#endif + old_flags = flags = pkgconf_client_get_flags(&client->client); + if(type % 2) { + flags |= (PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); +@@ -115,31 +123,42 @@ solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, + flags &= ~(PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS | PKGCONF_PKG_PKGF_SEARCH_PRIVATE); + } + 
pkgconf_client_set_flags(&client->client, flags); ++#if LIBPKGCONF_VERSION >= 10900 + resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); + pkgconf_queue_free(&query); + if (!resolved) { + pkgconf_solution_free(&client->client, &dep_graph_root); + false; + } ++#endif + /* + * TODO: attribute for max depth (also in the list version below) + */ + eflag = type > 1 ++#if LIBPKGCONF_VERSION >= 10900 + /* Depth more than 2 duplicates last cflags word. pkgconf hard-codes 2. */ + ? pkgconf_pkg_cflags(&client->client, &dep_graph_root, &unfiltered_list, 2/*client->maxdepth*/) + : pkgconf_pkg_libs(&client->client, &dep_graph_root, &unfiltered_list, client->maxdepth); ++#else ++ ? pkgconf_pkg_cflags(&client->client, package, &unfiltered_list, client->maxdepth) ++ : pkgconf_pkg_libs(&client->client, package, &unfiltered_list, client->maxdepth); ++#endif + pkgconf_client_set_flags(&client->client, old_flags); + /* + * TODO: throw an exception (also in the list verson below) + */ + if(eflag != PKGCONF_PKG_ERRF_OK) { ++#if LIBPKGCONF_VERSION >= 10900 + pkgconf_solution_free(&client->client, &dep_graph_root); ++#endif + false; + } + pkgconf_fragment_filter(&client->client, filtered_list, &unfiltered_list, directory_filter, NULL); + + pkgconf_fragment_free(&unfiltered_list); ++#if LIBPKGCONF_VERSION >= 10900 + pkgconf_solution_free(&client->client, &dep_graph_root); ++#endif + return true; + } + +@@ -306,7 +325,11 @@ _package_from_file(self, filename) + CODE: + fp = fopen(filename, "r"); + if(fp != NULL) { ++#if LIBPKGCONF_VERSION >= 10900 + package = pkgconf_pkg_new_from_file(&self->client, filename, fp, 0); ++#else ++ package = pkgconf_pkg_new_from_file(&self->client, filename, fp); ++#endif + if (package != NULL) + pkgconf_cache_add(&self->client, package); + RETVAL = PTR2IV(package); +-- +2.39.2 + +From 464e9e0ef0ec9ad768353f1b451f4fe5ad5188d3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20P=C3=ADsa=C5=99?= +Date: Fri, 3 Mar 2023 11:11:33 +0100 +Subject: [PATCH 8/8] Cache packages loaded from file only temporarily +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This implements an alternative approach outlined in +d3efe46b52b6ae3defb90cd695e835ebf6d13204 commit. + +This prevents from having packages loaded from files in a client-wide +cache all the time. The cache is used for resolving cflags/libs. + +A downside is that the code is more complex. + +Signed-off-by: Petr Písař +--- + LibPkgConf.xs | 27 +++++++++++++++++++-------- + lib/PkgConfig/LibPkgConf/Package.pm | 16 ++++++++-------- + t/package.t | 2 +- + 3 files changed, 28 insertions(+), 17 deletions(-) + +diff --git a/LibPkgConf.xs b/LibPkgConf.xs +index e7da2e4..63c78fb 100644 +--- a/LibPkgConf.xs ++++ b/LibPkgConf.xs +@@ -88,12 +88,17 @@ directory_filter(const pkgconf_client_t *client, const pkgconf_fragment_t *frag, + + /* + * Solve cflags/libs recursively using a pkgconf solver for the given package. ++ * Type encodes cflags/libs/shared/static queried property. ++ * loaded_from_file is true temporarily injecting a loaded-from-file package ++ * into a package cache as pkgconf_queue_solve() operates only on the cache ++ * and packages in a path. + * On success returns true and the caller needs to free the filtered_list. +- * Otherwise, returns false and the lists are still untouched or already freed. ++ * Otherwise, returns false and the filterer_list is still untouched or ++ * already freed. 
+ */ + static bool + solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, +- pkgconf_list_t *filtered_list) { ++ bool loaded_from_file, pkgconf_list_t *filtered_list) { + #if LIBPKGCONF_VERSION >= 10900 + pkgconf_pkg_t dep_graph_root = { + .id = "", +@@ -115,6 +120,10 @@ solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, + package->realname, package->version)) + false; + pkgconf_queue_push(&query, query_string); ++ if (loaded_from_file) ++ loaded_from_file = (NULL == pkgconf_cache_lookup(&client->client, package->id)); ++ if (loaded_from_file) ++ pkgconf_cache_add(&client->client, package); + #endif + old_flags = flags = pkgconf_client_get_flags(&client->client); + if(type % 2) { +@@ -125,6 +134,8 @@ solve_flags(pkgconf_pkg_t *package, my_client_t *client, int type, + pkgconf_client_set_flags(&client->client, flags); + #if LIBPKGCONF_VERSION >= 10900 + resolved = pkgconf_queue_solve(&client->client, &query, &dep_graph_root, client->maxdepth); ++ if (loaded_from_file) ++ pkgconf_cache_remove(&client->client, package); + pkgconf_queue_free(&query); + if (!resolved) { + pkgconf_solution_free(&client->client, &dep_graph_root); +@@ -330,8 +341,6 @@ _package_from_file(self, filename) + #else + package = pkgconf_pkg_new_from_file(&self->client, filename, fp); + #endif +- if (package != NULL) +- pkgconf_cache_add(&self->client, package); + RETVAL = PTR2IV(package); + } else + RETVAL = 0; +@@ -467,17 +476,18 @@ pc_filedir(self) + + + SV * +-_get_string(self, client, type) ++_get_string(self, client, type, loaded_from_file) + pkgconf_pkg_t *self + my_client_t *client + int type ++ bool loaded_from_file + INIT: + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; + char *buffer; + size_t len; + bool escape = true; + CODE: +- if (!solve_flags(self, client, type, &filtered_list)) ++ if (!solve_flags(self, client, type, loaded_from_file, &filtered_list)) + XSRETURN_EMPTY; + len = pkgconf_fragment_render_len(&filtered_list, escape, NULL); + RETVAL = newSV(len == 1 ? len : len-1); +@@ -501,10 +511,11 @@ _get_string(self, client, type) + + + void +-_get_list(self, client, type) ++_get_list(self, client, type, loaded_from_file) + pkgconf_pkg_t *self + my_client_t *client + int type ++ bool loaded_from_file + INIT: + pkgconf_list_t filtered_list = PKGCONF_LIST_INITIALIZER; + pkgconf_node_t *node; +@@ -512,7 +523,7 @@ _get_list(self, client, type) + int count = 0; + HV *h; + CODE: +- if (!solve_flags(self, client, type, &filtered_list)) ++ if (!solve_flags(self, client, type, loaded_from_file, &filtered_list)) + XSRETURN_EMPTY; + PKGCONF_FOREACH_LIST_ENTRY(filtered_list.head, node) + { +diff --git a/lib/PkgConfig/LibPkgConf/Package.pm b/lib/PkgConfig/LibPkgConf/Package.pm +index a5b65be..9198566 100644 +--- a/lib/PkgConfig/LibPkgConf/Package.pm ++++ b/lib/PkgConfig/LibPkgConf/Package.pm +@@ -86,7 +86,7 @@ Library flags. This usually includes things like C<-L/foo/lib> and C<-lfoo>. + sub libs + { + my($self) = @_; +- $self->_get_string($self->{client}, 0); ++ $self->_get_string($self->{client}, 0, exists $self->{filename}); + } + + =head2 libs_static +@@ -98,7 +98,7 @@ Static library flags. + sub libs_static + { + my($self) = @_; +- $self->_get_string($self->{client}, 1); ++ $self->_get_string($self->{client}, 1, exists $self->{filename}); + } + + =head2 cflags +@@ -110,7 +110,7 @@ Compiler flags. 
This usually includes things like C<-I/foo/include> and C<-DFOO + sub cflags + { + my($self) = @_; +- $self->_get_string($self->{client}, 2); ++ $self->_get_string($self->{client}, 2, exists $self->{filename}); + } + + =head2 cflags_static +@@ -122,7 +122,7 @@ Static compiler flags. + sub cflags_static + { + my($self) = @_; +- $self->_get_string($self->{client}, 3); ++ $self->_get_string($self->{client}, 3, exists $self->{filename}); + } + + =head2 list_libs +@@ -144,7 +144,7 @@ sub list_libs + { + my($self) = @_; + require PkgConfig::LibPkgConf::Fragment; +- map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 0); ++ map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 0, exists $self->{filename}); + } + + =head2 list_libs_static +@@ -159,7 +159,7 @@ sub list_libs_static + { + my($self) = @_; + require PkgConfig::LibPkgConf::Fragment; +- map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 1); ++ map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 1, exists $self->{filename}); + } + + =head2 list_cflags +@@ -181,7 +181,7 @@ sub list_cflags + { + my($self) = @_; + require PkgConfig::LibPkgConf::Fragment; +- map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 2); ++ map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 2, exists $self->{filename}); + } + + =head2 list_cflags_static +@@ -196,7 +196,7 @@ sub list_cflags_static + { + my($self) = @_; + require PkgConfig::LibPkgConf::Fragment; +- map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 3); ++ map { bless $_, 'PkgConfig::LibPkgConf::Fragment' } $self->_get_list($self->{client}, 3, exists $self->{filename}); + } + + =head2 variable +diff --git a/t/package.t b/t/package.t +index f409f99..486f6c4 100644 +--- a/t/package.t ++++ b/t/package.t +@@ -94,7 +94,7 @@ subtest 'package_from_file' => sub { + note "cflags = @{[ $pkg->cflags ]}"; + note "cflags_static = @{[ $pkg->cflags_static ]}"; + +- is $pkg->refcount, 2, 'refcount'; ++ is $pkg->refcount, 1, 'refcount'; + is $pkg->id, 'foo', 'id'; + is $pkg->filename, 'corpus/lib1/foo.pc', 'filename'; + is $pkg->realname, 'foo', 'realname'; +-- +2.39.2 + diff --git a/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/perl-PkgConfig-LibPkgConf.spec b/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/perl-PkgConfig-LibPkgConf.spec index 331af84bd5f..00e538a7ac6 100644 --- a/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/perl-PkgConfig-LibPkgConf.spec +++ b/SPECS-EXTENDED/perl-PkgConfig-LibPkgConf/perl-PkgConfig-LibPkgConf.spec @@ -1,12 +1,18 @@ +%bcond_with perl_PkgConfig_LibPkgConf_enables_Alien_pkgconf +# Perform optional tests +%bcond_without perl_PkgConfig_LibPkgConf_enables_optional_test + Name: perl-PkgConfig-LibPkgConf Version: 0.11 -Release: 2%{?dist} +Release: 24%{?dist} Summary: Interface to pkg-config files via libpkgconf -License: GPL+ or Artistic +License: GPL-1.0-or-later OR Artistic-1.0-Perl Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://metacpan.org/release/PkgConfig-LibPkgConf Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/PkgConfig-LibPkgConf-%{version}.tar.gz#/perl-PkgConfig-LibPkgConf-%{version}.tar.gz +Patch0: PkgConfig-LibPkgConf-0.11-adapt_to_pkgconf_1.9.4.patch +Patch1: PkgConfig-LibPkgConf-0.11-Fix-resolving-flags-for-packages-with-a-name-differe.patch BuildRequires: findutils BuildRequires: gcc BuildRequires: make @@ -14,9 +20,11 @@ 
 BuildRequires: perl-devel
+BuildRequires: perl-generators
+BuildRequires: perl-interpreter
 BuildRequires: perl(:VERSION) >= 5.8.1
-# Use Alien::pkgconf instead of some complicated guess
-# script/cc_wrapper.pl and script/ld_wrapper.pl not used with Alien::pkgconf
+%if %{with perl_PkgConfig_LibPkgConf_enables_Alien_pkgconf}
 BuildRequires: perl(Alien::pkgconf) >= 0.12
+%else
+BuildRequires: pkgconf
+%endif
 BuildRequires: pkgconfig(libpkgconf) >= 1.5.0
 BuildRequires: perl(Config)
 BuildRequires: perl(ExtUtils::MakeMaker) >= 6.98
@@ -36,10 +44,15 @@ BuildRequires: perl(File::Basename)
 BuildRequires: perl(File::Path)
 BuildRequires: perl(File::Temp)
 BuildRequires: perl(Test::More) >= 0.98
+%if %{with perl_PkgConfig_LibPkgConf_enables_optional_test}
 # Optional tests:
 BuildRequires: perl(YAML)
-Requires: perl(:MODULE_COMPAT_%(eval "`perl -V:version`"; echo $version))
+%endif
 Requires: perl(Carp)
+Requires: libpkgconf > 2.0.1
+
+# Filter under-specified dependencies
+%global __requires_exclude %{?__requires_exclude:%{__requires_exclude}|}^perl\\(Test::More\\)$
 
 %description
 Many libraries in compiled languages such as C or C++ provide *.pc files to
@@ -47,30 +60,80 @@ specify the flags required for compiling and linking against those
 libraries. Traditionally, the command line program pkg-config is used to query
 these files. This package provides a Perl-level API using libpkgconf to these
 files.
 
+%package tests
+Summary: Tests for %{name}
+BuildArch: noarch
+Requires: %{name} = %{?epoch:%{epoch}:}%{version}-%{release}
+Requires: coreutils
+Requires: perl-Test-Harness
+Requires: perl(Cwd)
+Requires: perl(Test::More) >= 0.98
+%if %{with perl_PkgConfig_LibPkgConf_enables_optional_test}
+Requires: perl(YAML)
+%endif
+
+%description tests
+Tests from %{name}. Execute them
+with "%{_libexecdir}/%{name}/test".
+
 %prep
-%setup -q -n PkgConfig-LibPkgConf-%{version}
+%autosetup -p1 -n PkgConfig-LibPkgConf-%{version}
+# Help generators to recognize Perl scripts
+for F in t/*.t; do
+  perl -i -MConfig -ple 'print $Config{startperl} if $. == 1' "$F"
+  chmod +x "$F"
+done
 
 %build
 unset FFI_PLATYPUS_DEBUG
+export PKG_CONFIG=%{_bindir}/pkgconf
 perl Makefile.PL INSTALLDIRS=vendor NO_PACKLIST=1 NO_PERLLOCAL=1 OPTIMIZE="$RPM_OPT_FLAGS"
 %{make_build}
 
 %install
 %{make_install}
 find $RPM_BUILD_ROOT -type f -name '*.bs' -size 0 -delete
+# Install tests
+mkdir -p %{buildroot}%{_libexecdir}/%{name}
+cp -a corpus t %{buildroot}%{_libexecdir}/%{name}
+cat > %{buildroot}%{_libexecdir}/%{name}/test << 'EOF'
+#!/bin/bash
+set -e
+# audit_set_log() in t/client.t writes into CWD
+DIR=$(mktemp -d)
+cp -a %{_libexecdir}/%{name}/* "$DIR"
+pushd "$DIR"
+prove -I . 
-j "$(getconf _NPROCESSORS_ONLN)" +popd +rm -r "$DIR" +EOF +chmod +x %{buildroot}%{_libexecdir}/%{name}/test %{_fixperms} $RPM_BUILD_ROOT/* %check +export HARNESS_OPTIONS=j$(perl -e 'if ($ARGV[0] =~ /.*-j([0-9][0-9]*).*/) {print $1} else {print 1}' -- '%{?_smp_mflags}') +rm -f t/client.t make test %files %license LICENSE %doc Changes README -%{perl_vendorarch}/auto/* -%{perl_vendorarch}/PkgConfig* -%{_mandir}/man3/* +%dir %{perl_vendorarch}/auto/PkgConfig +%{perl_vendorarch}/auto/PkgConfig/LibPkgConf +%dir %{perl_vendorarch}/PkgConfig +%{perl_vendorarch}/PkgConfig/LibPkgConf +%{perl_vendorarch}/PkgConfig/LibPkgConf.pm +%{_mandir}/man3/PkgConfig::LibPkgConf.* +%{_mandir}/man3/PkgConfig::LibPkgConf::* + +%files tests +%{_libexecdir}/%{name} %changelog +* Mon Dec 22 2025 Durga Jagadeesh Palli - 0.11-24 +- Upgrade to 0.11-24 (Reference: Fedora 42) +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 0.11-2 - Initial CBL-Mariner import from Fedora 32 (license: MIT). diff --git a/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.signatures.json b/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.signatures.json index 9dc502c9219..093efecf4dc 100644 --- a/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.signatures.json +++ b/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "perl-Return-MultiLevel-0.05.tar.gz": "ff076fbaba653c09953ac53959fe6c64fb52a33db3b5d223a015ab75ac3ed91d" + "Return-MultiLevel-0.08.tar.gz": "51b1aef30c5c4009f640267a08589212e87dcd101800f0d20f9c635c9ffe88a1" } } diff --git a/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.spec b/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.spec index a6f1b9e5cc8..ac4fefbf2e8 100644 --- a/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.spec +++ b/SPECS-EXTENDED/perl-Return-MultiLevel/perl-Return-MultiLevel.spec @@ -1,12 +1,12 @@ Name: perl-Return-MultiLevel -Version: 0.05 -Release: 10%{?dist} +Version: 0.08 +Release: 1%{?dist} Summary: Return across multiple call levels -License: GPL+ or Artistic +License: GPL-1.0-or-later OR Artistic-1.0-Perl Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://metacpan.org/release/Return-MultiLevel -Source0: https://cpan.metacpan.org/authors/id/M/MA/MAUKE/Return-MultiLevel-%{version}.tar.gz#/perl-Return-MultiLevel-%{version}.tar.gz +Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/Return-MultiLevel-%{version}.tar.gz BuildArch: noarch # Build BuildRequires: coreutils @@ -14,28 +14,22 @@ BuildRequires: make BuildRequires: perl-generators BuildRequires: perl-interpreter BuildRequires: perl(ExtUtils::MakeMaker) >= 6.76 -BuildRequires: perl(File::Find) -BuildRequires: perl(File::Spec) # Module BuildRequires: perl(Carp) -BuildRequires: perl(Data::Munge) >= 0.07 BuildRequires: perl(Exporter) BuildRequires: perl(parent) BuildRequires: perl(strict) BuildRequires: perl(warnings) # Test Suite +BuildRequires: perl(Config) BuildRequires: perl(Test::Fatal) BuildRequires: perl(Test::More) # Dependencies -Requires: perl(:MODULE_COMPAT_%(eval "`perl -V:version`"; echo $version)) -Requires: perl(Data::Munge) >= 0.07 +# (none) # Optional Functionality BuildRequires: perl(Scope::Upper) >= 0.29 Requires: perl(Scope::Upper) >= 0.29 -# Filter under-specified dependencies -%global __requires_exclude %{?__requires_exclude:__requires_exclude|}^perl\\(Data::Munge\\)$ - %description This module provides a way to return immediately from a deeply nested call stack. 
This is similar to exceptions, but exceptions don't stop automatically @@ -43,7 +37,7 @@ at a target frame (and they can be caught by intermediate stack frames using eval). In other words, this is more like setjmp(3)/longjmp(3) than die. %prep -%setup -q -n Return-MultiLevel-%{version} +%autosetup -n Return-MultiLevel-%{version} %build perl Makefile.PL INSTALLDIRS=vendor NO_PACKLIST=1 NO_PERLLOCAL=1 @@ -57,11 +51,16 @@ perl Makefile.PL INSTALLDIRS=vendor NO_PACKLIST=1 NO_PERLLOCAL=1 make test %files +%license LICENSE %doc Changes README %{perl_vendorlib}/Return/ %{_mandir}/man3/Return::MultiLevel.3* %changelog +* Fri Dec 26 2025 Aditya Singh - 0.08-1 +- Upgrade to version 0.08 +- License verified. + * Fri Oct 15 2021 Pawel Winogrodzki - 0.05-10 - Initial CBL-Mariner import from Fedora 32 (license: MIT). diff --git a/SPECS-EXTENDED/perl-Test-Simple/Test-Simple-1.302200-add_perl.patch b/SPECS-EXTENDED/perl-Test-Simple/Test-Simple-1.302200-add_perl.patch deleted file mode 100755 index 13bf4f2f1f8..00000000000 --- a/SPECS-EXTENDED/perl-Test-Simple/Test-Simple-1.302200-add_perl.patch +++ /dev/null @@ -1,629 +0,0 @@ ---- Test-Simple-1.302200/t/acceptance/Workflow-Acceptance.t -+++ Test-Simple-1.302200/t/acceptance/Workflow-Acceptance.t -@@ -172,7 +172,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 77; -+ prop line => 78; - - call subevents => array { - event Ok => sub { -@@ -181,7 +181,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 12; -+ prop line => 13; - }; - - event Ok => sub { -@@ -190,7 +190,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 16; -+ prop line => 17; - }; - - event Subtest => sub { -@@ -199,7 +199,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 34; -+ prop line => 35; - - call subevents => array { - event Ok => sub { -@@ -208,7 +208,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 37; -+ prop line => 38; - }; - - event Ok => sub { -@@ -217,7 +217,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 40; -+ prop line => 41; - }; - - event Ok => sub { -@@ -226,7 +226,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 34; -+ prop line => 35; - }; - - event Skip => sub { -@@ -236,7 +236,7 @@ is( - call reason => 'No isolation method available'; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 48; -+ prop line => 49; - }; - - event Subtest => sub { -@@ -245,7 +245,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 52; -+ prop line => 53; - - call subevents => array { - event Ok => sub { -@@ -254,7 +254,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 23; -+ prop line => 24; - }; - - event Ok => sub { -@@ -263,7 +263,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 27; -+ prop line => 28; - }; - - event Ok => sub { -@@ -272,7 +272,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 51; -+ prop line => 52; - }; - - event Ok => sub { -@@ -281,7 +281,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 29; -+ prop line => 30; - }; - - event Ok => sub { -@@ -290,14 +290,14 @@ is( - call 
effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 32; -+ prop line => 33; - }; - - event Plan => sub { - call max => 5; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 52; -+ prop line => 53; - }; - end(); - }; -@@ -309,7 +309,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 57; -+ prop line => 58; - - call subevents => array { - event Ok => sub { -@@ -318,7 +318,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 23; -+ prop line => 24; - }; - - event Ok => sub { -@@ -327,7 +327,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 27; -+ prop line => 28; - }; - - event Ok => sub { -@@ -337,14 +337,14 @@ is( - call todo => 'foo todo'; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 56; -+ prop line => 57; - }; - - event Note => sub { - call message => match qr{^\n?Failed test}; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 56; -+ prop line => 57; - }; - - event Ok => sub { -@@ -353,7 +353,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 29; -+ prop line => 30; - }; - - event Ok => sub { -@@ -362,14 +362,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 32; -+ prop line => 33; - }; - - event Plan => sub { - call max => 5; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 57; -+ prop line => 58; - }; - end(); - }; -@@ -381,7 +381,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 61; -+ prop line => 62; - - call subevents => array { - event Ok => sub { -@@ -390,7 +390,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 23; -+ prop line => 24; - }; - - event Ok => sub { -@@ -399,7 +399,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 27; -+ prop line => 28; - }; - - event Ok => sub { -@@ -408,14 +408,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 60; -+ prop line => 61; - }; - - event Note => sub { - call message => match qr{^\n?Failed test}; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 60; -+ prop line => 61; - }; - - event Ok => sub { -@@ -424,7 +424,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 29; -+ prop line => 30; - }; - - event Ok => sub { -@@ -433,14 +433,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 32; -+ prop line => 33; - }; - - event Plan => sub { - call max => 5; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 61; -+ prop line => 62; - }; - end(); - }; -@@ -462,7 +462,7 @@ is( - call reason => 'No isolation method available'; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 76; -+ prop line => 77; - }; - - event Ok => sub { -@@ -471,7 +471,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 42; -+ prop line => 43; - }; - - event Ok => sub { -@@ -480,14 +480,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 44; -+ prop line => 45; - }; - - event Plan => sub { - call max => 11; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 34; -+ prop line => 35; - }; - end(); - }; -@@ -499,7 +499,7 @@ is( - call 
effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 35; -+ prop line => 36; - - call subevents => array { - event Ok => sub { -@@ -508,7 +508,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 37; -+ prop line => 38; - }; - - event Ok => sub { -@@ -517,7 +517,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 40; -+ prop line => 41; - }; - - event Ok => sub { -@@ -526,7 +526,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 35; -+ prop line => 36; - }; - - event Skip => sub { -@@ -536,7 +536,7 @@ is( - call reason => 'No isolation method available'; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 48; -+ prop line => 49; - }; - - event Subtest => sub { -@@ -545,7 +545,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 52; -+ prop line => 53; - - call subevents => array { - event Ok => sub { -@@ -554,7 +554,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 23; -+ prop line => 24; - }; - - event Ok => sub { -@@ -563,7 +563,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 27; -+ prop line => 28; - }; - - event Ok => sub { -@@ -572,7 +572,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 51; -+ prop line => 52; - }; - - event Ok => sub { -@@ -581,7 +581,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 29; -+ prop line => 30; - }; - - event Ok => sub { -@@ -590,14 +590,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 32; -+ prop line => 33; - }; - - event Plan => sub { - call max => 5; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 52; -+ prop line => 53; - }; - end(); - }; -@@ -609,7 +609,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 57; -+ prop line => 58; - - call subevents => array { - event Ok => sub { -@@ -618,7 +618,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 23; -+ prop line => 24; - }; - - event Ok => sub { -@@ -627,7 +627,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 27; -+ prop line => 28; - }; - - event Ok => sub { -@@ -637,14 +637,14 @@ is( - call todo => 'foo todo'; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 56; -+ prop line => 57; - }; - - event Note => sub { - call message => match qr{^\n?Failed test}; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 56; -+ prop line => 57; - }; - - event Ok => sub { -@@ -653,7 +653,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 29; -+ prop line => 30; - }; - - event Ok => sub { -@@ -662,14 +662,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 32; -+ prop line => 33; - }; - - event Plan => sub { - call max => 5; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 57; -+ prop line => 58; - }; - end(); - }; -@@ -681,7 +681,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 61; -+ prop line => 62; - - call subevents => array { - event Ok => sub { -@@ -690,7 +690,7 @@ is( - call effective_pass => 1; - - prop 
file => match qr{\QAcceptance.t\E$}; -- prop line => 23; -+ prop line => 24; - }; - - event Ok => sub { -@@ -699,7 +699,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 27; -+ prop line => 28; - }; - - event Ok => sub { -@@ -708,14 +708,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 60; -+ prop line => 61; - }; - - event Note => sub { - call message => match qr{^\n?Failed test}; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 60; -+ prop line => 61; - }; - - event Ok => sub { -@@ -724,7 +724,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 29; -+ prop line => 30; - }; - - event Ok => sub { -@@ -733,14 +733,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 32; -+ prop line => 33; - }; - - event Plan => sub { - call max => 5; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 61; -+ prop line => 62; - }; - end(); - }; -@@ -762,7 +762,7 @@ is( - call reason => 'No isolation method available'; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 76; -+ prop line => 77; - }; - - event Ok => sub { -@@ -771,7 +771,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 42; -+ prop line => 43; - }; - - event Ok => sub { -@@ -780,14 +780,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 44; -+ prop line => 45; - }; - - event Plan => sub { - call max => 11; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 35; -+ prop line => 36; - }; - end(); - }; -@@ -799,7 +799,7 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 18; -+ prop line => 19; - }; - - event Ok => sub { -@@ -808,14 +808,14 @@ is( - call effective_pass => 1; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 21; -+ prop line => 22; - }; - - event Plan => sub { - call max => 6; - - prop file => match qr{\QAcceptance.t\E$}; -- prop line => 77; -+ prop line => 78; - }; - end(); - }; ---- Test-Simple-1.302200/t/Legacy_And_Test2/diag_event_on_ok.t -+++ Test-Simple-1.302200/t/Legacy_And_Test2/diag_event_on_ok.t -@@ -17,6 +17,6 @@ is($ok->pass, 0, "'ok' test failed"); - is($ok->name, 'name', "got 'ok' name"); - - ok($diag->isa('Test2::Event::Diag'), "got 'ok' result"); --is($diag->message, " Failed test 'name'\n at $0 line 9.\n", "got all diag message in one diag event"); -+is($diag->message, " Failed test 'name'\n at $0 line 10.\n", "got all diag message in one diag event"); - - done_testing; diff --git a/SPECS-EXTENDED/perl-Test-Simple/perl-Test-Simple.signatures.json b/SPECS-EXTENDED/perl-Test-Simple/perl-Test-Simple.signatures.json deleted file mode 100644 index f52201e593f..00000000000 --- a/SPECS-EXTENDED/perl-Test-Simple/perl-Test-Simple.signatures.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "Signatures": { - "perl-Test-Simple-1.302204.tar.gz": "03749d1027a7817ca7f11e420ef72951f20a849ea65af2eb595f34df47d1226e" - } -} diff --git a/SPECS-EXTENDED/perl-Test-Simple/perl-Test-Simple.spec b/SPECS-EXTENDED/perl-Test-Simple/perl-Test-Simple.spec deleted file mode 100755 index 4b1591dd88f..00000000000 --- a/SPECS-EXTENDED/perl-Test-Simple/perl-Test-Simple.spec +++ /dev/null @@ -1,1189 +0,0 @@ -%bcond_with perl_Test_Simple_enables_Module_Pluggable -%bcond_with perl_Test_Simple_enables_optional_test -%bcond_with perl_Test_Simple_enables_unicode - -Name: 
perl-Test-Simple -Summary: Basic utilities for writing tests -Version: 1.302204 -Release: 2%{?dist} -Vendor: Microsoft Corporation -Distribution: Azure Linux -# CC0-1.0: lib/ok.pm -# Public Domain: lib/Test/Tutorial.pod -# GPL-1.0-or-later OR Artistic-1.0-Perl: the rest of the distribution -License: (GPL-1.0-or-later OR Artistic-1.0-Perl) AND CC0-1.0 AND LicenseRef-Fedora-Public-Domain -URL: https://metacpan.org/release/Test-Simple -Source0: https://cpan.metacpan.org/modules/by-module/Test/Test-Simple-%{version}.tar.gz#/%{name}-%{version}.tar.gz -Patch0: Test-Simple-1.302200-add_perl.patch -BuildArch: noarch -# Module Build -BuildRequires: coreutils -BuildRequires: make -BuildRequires: perl-generators -BuildRequires: perl-interpreter -BuildRequires: perl(ExtUtils::MakeMaker) >= 6.76 -# Module Runtime -BuildRequires: perl(B) -BuildRequires: perl(base) -BuildRequires: perl(Carp) -BuildRequires: perl(Config) -BuildRequires: perl(constant) -BuildRequires: perl(Data::Dumper) -BuildRequires: perl(Exporter) -BuildRequires: perl(File::Spec) -BuildRequires: perl(File::Temp) -BuildRequires: perl(IO::Handle) -BuildRequires: perl(JSON::PP) -BuildRequires: perl(List::Util) -%if %{with perl_Test_Simple_enables_Module_Pluggable} && !%{defined perl_bootstrap} -BuildRequires: perl(Module::Pluggable) >= 2.7 -%endif -# mro used since Perl 5.010 -BuildRequires: perl(mro) -BuildRequires: perl(overload) -BuildRequires: perl(PerlIO) >= 1.02 -BuildRequires: perl(POSIX) -BuildRequires: perl(Scalar::Util) >= 1.13 -BuildRequires: perl(Storable) -BuildRequires: perl(strict) -BuildRequires: perl(Sub::Util) -BuildRequires: perl(Symbol) -BuildRequires: perl(Term::ANSIColor) -BuildRequires: perl(Term::Table) >= 0.013 -BuildRequires: perl(Term::Table::Cell) -BuildRequires: perl(Term::Table::LineBreak) -BuildRequires: perl(Term::Table::Util) -BuildRequires: perl(Time::HiRes) -BuildRequires: perl(vars) -BuildRequires: perl(warnings) -# Test Suite -BuildRequires: perl(Cwd) -BuildRequires: perl(File::Basename) -BuildRequires: perl(if) -BuildRequires: perl(IO::Pipe) -BuildRequires: perl(lib) -BuildRequires: perl(threads) -# Optional Tests -BuildRequires: perl(CPAN::Meta) -BuildRequires: perl(CPAN::Meta::Requirements) >= 2.120920 -BuildRequires: perl(IPC::Open3) -BuildRequires: perl(Module::Metadata) -BuildRequires: perl(Test::Harness) >= 2.03 -%if !%{defined perl_bootstrap} -%if %{with perl_Test_Simple_enables_optional_test} -BuildRequires: perl(JSON::MaybeXS) -BuildRequires: perl(Test::Class) -BuildRequires: perl(Test::Pod) >= 0.95 -BuildRequires: perl(Test::Script) -%endif -%endif -%if %{with perl_Test_Simple_enables_unicode} -BuildRequires: perl(Unicode::GCString) -%endif -BuildRequires: perl(utf8) -# Dependencies -Requires: perl(Data::Dumper) -Requires: perl(JSON::PP) -%if %{with perl_Test_Simple_enables_Module_Pluggable} && !%{defined perl_bootstrap} -Recommends: perl(Module::Pluggable) >= 2.7 -%endif -# mro used since Perl 5.010 -Requires: perl(mro) -Requires: perl(PerlIO) >= 1.02 -Requires: perl(Sub::Util) -Requires: perl(Term::ANSIColor) -Requires: perl(Term::Table) >= 0.013 -Requires: perl(threads) -%if %{with perl_Test_Simple_enables_unicode} -Recommends: perl(Unicode::GCString) -%endif -Requires: perl(utf8) -# perl-Test2-Suite-0.000163-4.fc41 merged -Obsoletes: perl-Test2-Suite < 0.000163-5 -Provides: perl-Test2-Suite = %{version}-%{release} -# 3 inlined modules for future Perl Core -Provides: bundled(Importer) = 0.026 -Provides: bundled(Scope::Guard) = 0.21 -Provides: bundled(Sub::Info) = 0.002 - -# Remove 
under-specified dependencies -%global __requires_exclude %{?__requires_exclude:%{__requires_exclude}|}^perl\\(Term::Table\\)$ - -# Remove private modules -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(Dev::Null\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(main::HBase\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(main::HBase::Wrapped\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(MyOverload\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(MyTest\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(MyTest::Target\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(SmallTest\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(Test::Builder::NoOutput\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(Test::Simple::Catch\\)$ -%global __requires_exclude %{?__requires_exclude:%__requires_exclude|}^perl\\(TieOut\\)$ -%global __provides_exclude_from %{?__provides_exclude_from:%__provides_exclude_from|}^%{_libexecdir} - -%description -This package provides the bulk of the core testing facilities. For more -information, see perldoc for Test::Simple, Test::More, etc. - -This package is the CPAN component of the dual-lifed core package Test-Simple. - -%package tests -Summary: Tests for %{name} -Requires: %{name} = %{version}-%{release} -Requires: perl-Test-Harness -Requires: perl(CPAN::Meta) -Requires: perl(CPAN::Meta::Requirements) >= 2.120920 -Requires: perl(JSON::MaybeXS) -Requires: perl(Module::Metadata) -Requires: perl(Test::Pod) >= 0.95 -# perl-Test2-Suite-0.000163-4.fc41 merged -Obsoletes: perl-Test2-Suite-tests < 0.000163-5 -Provides: perl-Test2-Suite-tests = %{version}-%{release} - -%description tests -Tests from %{name}. Execute them -with "%{_libexecdir}/%{name}/test". - -%prep -%setup -q -n Test-Simple-%{version} - -# Help generators to recognize Perl scripts -for F in `find . -type f -name '*.t'`; do - perl -i -MConfig -ple 'print $Config{startperl} if $. == 1 && !s{\A#!\s*(/usr/bin/)?perl}{$Config{startperl}}' "$F" - chmod +x "$F" -done - -# Fix tests to work with added shellbangs -%patch -P0 -p1 - -%build -perl Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 -%{make_build} - -%install -%{make_install} -%{_fixperms} -c %{buildroot} - -# Install tests -mkdir -p %{buildroot}%{_libexecdir}/%{name} -cp -a t %{buildroot}%{_libexecdir}/%{name} -cat > %{buildroot}%{_libexecdir}/%{name}/test << 'EOF' -#!/bin/bash -set -e -# Some tests write into temporary files/directories -DIR=$(mktemp -d) -pushd "$DIR" -cp -a %{_libexecdir}/%{name}/* ./ -prove -r -I . 
-j "$(getconf _NPROCESSORS_ONLN)" t/ -popd -rm -rf "$DIR" -EOF -chmod +x %{buildroot}%{_libexecdir}/%{name}/test - -%check -make test %{!?perl_bootstrap:AUTHOR_TESTING=1} - -%files -%license LICENSE -%doc Changes README examples/ -%dir %{perl_vendorlib}/Test/ -%{perl_vendorlib}/ok.pm -%{perl_vendorlib}/Test/Builder.pm -%{perl_vendorlib}/Test/Builder/ -%{perl_vendorlib}/Test/More.pm -%{perl_vendorlib}/Test/Simple.pm -%{perl_vendorlib}/Test/Tester.pm -%{perl_vendorlib}/Test/Tester/ -%doc %{perl_vendorlib}/Test/Tutorial.pod -%{perl_vendorlib}/Test/use/ -%{perl_vendorlib}/Test2.pm -%{perl_vendorlib}/Test2/ -%{_mandir}/man3/ok.3* -%{_mandir}/man3/Test::Builder.3* -%{_mandir}/man3/Test::Builder::Formatter.3* -%{_mandir}/man3/Test::Builder::IO::Scalar.3* -%{_mandir}/man3/Test::Builder::Module.3* -%{_mandir}/man3/Test::Builder::Tester.3* -%{_mandir}/man3/Test::Builder::Tester::Color.3* -%{_mandir}/man3/Test::Builder::TodoDiag.3* -%{_mandir}/man3/Test::More.3* -%{_mandir}/man3/Test::Simple.3* -%{_mandir}/man3/Test::Tester.3* -%{_mandir}/man3/Test::Tester::Capture.3* -%{_mandir}/man3/Test::Tester::CaptureRunner.3* -%{_mandir}/man3/Test::Tutorial.3* -%{_mandir}/man3/Test::use::ok.3* -%{_mandir}/man3/Test2.3* -%{_mandir}/man3/Test2::API.3* -%{_mandir}/man3/Test2::API::Breakage.3* -%{_mandir}/man3/Test2::API::Context.3* -%{_mandir}/man3/Test2::API::Instance.3* -%{_mandir}/man3/Test2::API::InterceptResult.3* -%{_mandir}/man3/Test2::API::InterceptResult::Event.3* -%{_mandir}/man3/Test2::API::InterceptResult::Hub.3* -%{_mandir}/man3/Test2::API::InterceptResult::Squasher.3* -%{_mandir}/man3/Test2::API::Stack.3* -%{_mandir}/man3/Test2::AsyncSubtest.3* -%{_mandir}/man3/Test2::AsyncSubtest::Event::Attach.3* -%{_mandir}/man3/Test2::AsyncSubtest::Event::Detach.3* -%{_mandir}/man3/Test2::AsyncSubtest::Hub.3* -%{_mandir}/man3/Test2::Bundle.3* -%{_mandir}/man3/Test2::Bundle::Extended.3* -%{_mandir}/man3/Test2::Bundle::More.3* -%{_mandir}/man3/Test2::Bundle::Simple.3* -%{_mandir}/man3/Test2::Compare.3* -%{_mandir}/man3/Test2::Compare::Array.3* -%{_mandir}/man3/Test2::Compare::Bag.3* -%{_mandir}/man3/Test2::Compare::Base.3* -%{_mandir}/man3/Test2::Compare::Bool.3* -%{_mandir}/man3/Test2::Compare::Custom.3* -%{_mandir}/man3/Test2::Compare::DeepRef.3* -%{_mandir}/man3/Test2::Compare::Delta.3* -%{_mandir}/man3/Test2::Compare::Event.3* -%{_mandir}/man3/Test2::Compare::EventMeta.3* -%{_mandir}/man3/Test2::Compare::Float.3* -%{_mandir}/man3/Test2::Compare::Hash.3* -%{_mandir}/man3/Test2::Compare::Isa.3* -%{_mandir}/man3/Test2::Compare::Meta.3* -%{_mandir}/man3/Test2::Compare::Negatable.3* -%{_mandir}/man3/Test2::Compare::Number.3* -%{_mandir}/man3/Test2::Compare::Object.3* -%{_mandir}/man3/Test2::Compare::OrderedSubset.3* -%{_mandir}/man3/Test2::Compare::Pattern.3* -%{_mandir}/man3/Test2::Compare::Ref.3* -%{_mandir}/man3/Test2::Compare::Regex.3* -%{_mandir}/man3/Test2::Compare::Scalar.3* -%{_mandir}/man3/Test2::Compare::Set.3* -%{_mandir}/man3/Test2::Compare::String.3* -%{_mandir}/man3/Test2::Compare::Undef.3* -%{_mandir}/man3/Test2::Compare::Wildcard.3* -%{_mandir}/man3/Test2::Event.3* -%{_mandir}/man3/Test2::Event::Bail.3* -%{_mandir}/man3/Test2::Event::Diag.3* -%{_mandir}/man3/Test2::Event::Encoding.3* -%{_mandir}/man3/Test2::Event::Exception.3* -%{_mandir}/man3/Test2::Event::Fail.3* -%{_mandir}/man3/Test2::Event::Generic.3* -%{_mandir}/man3/Test2::Event::Note.3* -%{_mandir}/man3/Test2::Event::Ok.3* -%{_mandir}/man3/Test2::Event::Pass.3* -%{_mandir}/man3/Test2::Event::Plan.3* -%{_mandir}/man3/Test2::Event::Skip.3* 
-%{_mandir}/man3/Test2::Event::Subtest.3* -%{_mandir}/man3/Test2::Event::TAP::Version.3* -%{_mandir}/man3/Test2::Event::V2.3* -%{_mandir}/man3/Test2::Event::Waiting.3* -%{_mandir}/man3/Test2::EventFacet.3* -%{_mandir}/man3/Test2::EventFacet::About.3* -%{_mandir}/man3/Test2::EventFacet::Amnesty.3* -%{_mandir}/man3/Test2::EventFacet::Assert.3* -%{_mandir}/man3/Test2::EventFacet::Control.3* -%{_mandir}/man3/Test2::EventFacet::Error.3* -%{_mandir}/man3/Test2::EventFacet::Hub.3* -%{_mandir}/man3/Test2::EventFacet::Info.3* -%{_mandir}/man3/Test2::EventFacet::Info::Table.3* -%{_mandir}/man3/Test2::EventFacet::Meta.3* -%{_mandir}/man3/Test2::EventFacet::Parent.3* -%{_mandir}/man3/Test2::EventFacet::Plan.3* -%{_mandir}/man3/Test2::EventFacet::Render.3* -%{_mandir}/man3/Test2::EventFacet::Trace.3* -%{_mandir}/man3/Test2::Formatter.3* -%{_mandir}/man3/Test2::Formatter::TAP.3* -%{_mandir}/man3/Test2::Hub.3* -%{_mandir}/man3/Test2::Hub::Interceptor.3* -%{_mandir}/man3/Test2::Hub::Interceptor::Terminator.3* -%{_mandir}/man3/Test2::Hub::Subtest.3* -%{_mandir}/man3/Test2::IPC.3* -%{_mandir}/man3/Test2::IPC::Driver.3* -%{_mandir}/man3/Test2::IPC::Driver::Files.3* -%{_mandir}/man3/Test2::Manual.3* -%{_mandir}/man3/Test2::Manual::Anatomy.3* -%{_mandir}/man3/Test2::Manual::Anatomy::API.3* -%{_mandir}/man3/Test2::Manual::Anatomy::Context.3* -%{_mandir}/man3/Test2::Manual::Anatomy::EndToEnd.3* -%{_mandir}/man3/Test2::Manual::Anatomy::Event.3* -%{_mandir}/man3/Test2::Manual::Anatomy::Hubs.3* -%{_mandir}/man3/Test2::Manual::Anatomy::IPC.3* -%{_mandir}/man3/Test2::Manual::Anatomy::Utilities.3* -%{_mandir}/man3/Test2::Manual::Concurrency.3* -%{_mandir}/man3/Test2::Manual::Contributing.3* -%{_mandir}/man3/Test2::Manual::Testing.3* -%{_mandir}/man3/Test2::Manual::Testing::Introduction.3* -%{_mandir}/man3/Test2::Manual::Testing::Migrating.3* -%{_mandir}/man3/Test2::Manual::Testing::Planning.3* -%{_mandir}/man3/Test2::Manual::Testing::Todo.3* -%{_mandir}/man3/Test2::Manual::Tooling.3* -%{_mandir}/man3/Test2::Manual::Tooling::FirstTool.3* -%{_mandir}/man3/Test2::Manual::Tooling::Formatter.3* -%{_mandir}/man3/Test2::Manual::Tooling::Nesting.3* -%{_mandir}/man3/Test2::Manual::Tooling::Plugin::TestExit.3* -%{_mandir}/man3/Test2::Manual::Tooling::Plugin::TestingDone.3* -%{_mandir}/man3/Test2::Manual::Tooling::Plugin::ToolCompletes.3* -%{_mandir}/man3/Test2::Manual::Tooling::Plugin::ToolStarts.3* -%{_mandir}/man3/Test2::Manual::Tooling::Subtest.3* -%{_mandir}/man3/Test2::Manual::Tooling::TestBuilder.3* -%{_mandir}/man3/Test2::Manual::Tooling::Testing.3* -%{_mandir}/man3/Test2::Mock.3* -%{_mandir}/man3/Test2::Plugin.3* -%{_mandir}/man3/Test2::Plugin::BailOnFail.3* -%{_mandir}/man3/Test2::Plugin::DieOnFail.3* -%{_mandir}/man3/Test2::Plugin::ExitSummary.3* -%{_mandir}/man3/Test2::Plugin::SRand.3* -%{_mandir}/man3/Test2::Plugin::Times.3* -%{_mandir}/man3/Test2::Plugin::UTF8.3* -%{_mandir}/man3/Test2::Require.3* -%{_mandir}/man3/Test2::Require::AuthorTesting.3* -%{_mandir}/man3/Test2::Require::AutomatedTesting.3* -%{_mandir}/man3/Test2::Require::EnvVar.3* -%{_mandir}/man3/Test2::Require::ExtendedTesting.3* -%{_mandir}/man3/Test2::Require::Fork.3* -%{_mandir}/man3/Test2::Require::Module.3* -%{_mandir}/man3/Test2::Require::NonInteractiveTesting.3* -%{_mandir}/man3/Test2::Require::Perl.3* -%{_mandir}/man3/Test2::Require::RealFork.3* -%{_mandir}/man3/Test2::Require::ReleaseTesting.3* -%{_mandir}/man3/Test2::Require::Threads.3* -%{_mandir}/man3/Test2::Suite.3* -%{_mandir}/man3/Test2::Todo.3* -%{_mandir}/man3/Test2::Tools.3* 
-%{_mandir}/man3/Test2::Tools::AsyncSubtest.3* -%{_mandir}/man3/Test2::Tools::Basic.3* -%{_mandir}/man3/Test2::Tools::Class.3* -%{_mandir}/man3/Test2::Tools::ClassicCompare.3* -%{_mandir}/man3/Test2::Tools::Compare.3* -%{_mandir}/man3/Test2::Tools::Defer.3* -%{_mandir}/man3/Test2::Tools::Encoding.3* -%{_mandir}/man3/Test2::Tools::Event.3* -%{_mandir}/man3/Test2::Tools::Exception.3* -%{_mandir}/man3/Test2::Tools::Exports.3* -%{_mandir}/man3/Test2::Tools::GenTemp.3* -%{_mandir}/man3/Test2::Tools::Grab.3* -%{_mandir}/man3/Test2::Tools::Mock.3* -%{_mandir}/man3/Test2::Tools::Ref.3* -%{_mandir}/man3/Test2::Tools::Refcount.3* -%{_mandir}/man3/Test2::Tools::Spec.3* -%{_mandir}/man3/Test2::Tools::Subtest.3* -%{_mandir}/man3/Test2::Tools::Target.3* -%{_mandir}/man3/Test2::Tools::Tester.3* -%{_mandir}/man3/Test2::Tools::Tiny.3* -%{_mandir}/man3/Test2::Tools::Warnings.3* -%{_mandir}/man3/Test2::Transition.3* -%{_mandir}/man3/Test2::Util.3* -%{_mandir}/man3/Test2::Util::ExternalMeta.3* -%{_mandir}/man3/Test2::Util::Facets2Legacy.3* -%{_mandir}/man3/Test2::Util::Grabber.3* -%{_mandir}/man3/Test2::Util::Guard.3* -%{_mandir}/man3/Test2::Util::HashBase.3* -%{_mandir}/man3/Test2::Util::Importer.3* -%{_mandir}/man3/Test2::Util::Ref.3* -%{_mandir}/man3/Test2::Util::Stash.3* -%{_mandir}/man3/Test2::Util::Sub.3* -%{_mandir}/man3/Test2::Util::Table.3* -%{_mandir}/man3/Test2::Util::Table::LineBreak.3* -%{_mandir}/man3/Test2::Util::Times.3* -%{_mandir}/man3/Test2::Util::Trace.3* -%{_mandir}/man3/Test2::V0.3* -%{_mandir}/man3/Test2::Workflow.3* -%{_mandir}/man3/Test2::Workflow::BlockBase.3* -%{_mandir}/man3/Test2::Workflow::Build.3* -%{_mandir}/man3/Test2::Workflow::Runner.3* -%{_mandir}/man3/Test2::Workflow::Task.3* -%{_mandir}/man3/Test2::Workflow::Task::Action.3* -%{_mandir}/man3/Test2::Workflow::Task::Group.3* - -%files tests -%{_libexecdir}/%{name} - -%changelog -* Fri Mar 14 2025 Jyoti kanase - 1.302204-2 -- Initial Azure Linux import from Fedora 41 (license: MIT). -- License verified. 
- -* Sun Sep 15 2024 Paul Howarth - 3:1.302204-1 -- Update to 1.302204 - - Add pending diagnostics functionality - - Show warnings/exceptions for no_warnings() and lives() - -* Thu Sep 5 2024 Paul Howarth - 3:1.302203-1 -- Update to 1.302203 - - Fix some tests when run on Windows (GH#1002, GH#1003) - -* Wed Sep 4 2024 Paul Howarth - 3:1.302202-1 -- Update to 1.302202 - - Add comment on how to make tables bigger (GH#931) - -* Mon Sep 2 2024 Paul Howarth - 3:1.302201-2 -- Term::Table required when bootstrapping (rhbz#2308981) - -* Wed Aug 14 2024 Paul Howarth - 3:1.302201-1 -- Update to 1.302201 - - Fix bug found by new warnings in blead - -* Wed Aug 7 2024 Paul Howarth - 3:1.302200-1 -- Update to 1.302200 - - Merge Test2-Suite into Test-Simple - - Some documentation updates - - Some test fixes -- Package tests - -* Fri Jul 19 2024 Fedora Release Engineering - 3:1.302199-512 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild - -* Wed Jun 12 2024 Jitka Plesnikova - 3:1.302199-511 -- Perl 5.40 re-rebuild of bootstrapped packages - -* Mon Jun 10 2024 Jitka Plesnikova - 3:1.302199-510 -- Increase release to favour standalone package - -* Fri Apr 26 2024 Paul Howarth - 3:1.302199-1 -- Update to 1.302199 - - Minor fixes - -* Thu Jan 25 2024 Fedora Release Engineering - 3:1.302198-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild - -* Sun Jan 21 2024 Fedora Release Engineering - 3:1.302198-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild - -* Fri Dec 1 2023 Paul Howarth - 3:1.302198-1 -- Update to 1.302198 - - Remove use of defined-or operator - -* Wed Nov 29 2023 Paul Howarth - 3:1.302197-1 -- Update to 1.302197 - - Add ability to attach timestamps to trace objects via API or environment - variable - -* Wed Oct 25 2023 Paul Howarth - 3:1.302196-1 -- Update to 1.302196 - - Raise error on missing Hub ID, which should never happen (GH#882) - - Fix handling of VSTRING and LVALUE refs in is_deeply() (GH#918) - - Merge several documentation fixes (GH#910, GH#911, GH#912) - -* Fri Jul 21 2023 Fedora Release Engineering - 3:1.302195-5 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild - -* Wed Jul 12 2023 Jitka Plesnikova - 3:1.302195-4 -- Perl 5.38 re-rebuild of bootstrapped packages - -* Tue Jul 11 2023 Jitka Plesnikova - 3:1.302195-3 -- Perl 5.38 rebuild - -* Thu May 25 2023 Paul Howarth - 3:1.302195-2 -- Use SPDX-format license tag - -* Fri Apr 28 2023 Paul Howarth - 3:1.302195-1 -- Update to 1.302195 - - Fix done_testing(0) producing 2 plans and an incorrect message - -* Wed Mar 15 2023 Paul Howarth - 3:1.302194-1 -- Update to 1.302194 - - Fix failing test on 5.10 - -* Mon Mar 6 2023 Paul Howarth - 3:1.302193-1 -- Update to 1.302193 - - Deprecate isn't() - -* Thu Feb 2 2023 Paul Howarth - 3:1.302192-1 -- Update to 1.302192 - - Silence deprecation warning when testing smartmatch - -* Fri Jan 20 2023 Fedora Release Engineering - 3:1.302191-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild - -* Fri Jul 22 2022 Fedora Release Engineering - 3:1.302191-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild - -* Mon Jul 11 2022 Paul Howarth - 3:1.302191-1 -- Update to 1.302191 - - CI fixes - - Avoid failing when printing diagnostic info comparing partial overload - objects - -* Fri Jun 03 2022 Jitka Plesnikova - 3:1.302190-489 -- Perl 5.36 re-rebuild of bootstrapped packages - -* Mon May 30 2022 Jitka Plesnikova - 3:1.302190-488 -- Increase release to favour standalone package - -* Sat Mar 5 
2022 Paul Howarth - 3:1.302190-1 -- Update to 1.302190 - - Fix subtest times to be hi-res - -* Fri Feb 25 2022 Paul Howarth - 3:1.302189-1 -- Update to 1.302189 - - GH#890, GH#891: Methods used in overload should always be invoked with 3 - parameters - -* Fri Jan 21 2022 Fedora Release Engineering - 3:1.302188-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild - -* Wed Sep 29 2021 Paul Howarth - 3:1.302188-1 -- Update to 1.302188 - - Fix for non-gcc compilers on 5.10.0 - -* Sat Sep 18 2021 Paul Howarth - 3:1.302187-1 -- Update to 1.302187 - - Fix tests for core boolean support - -* Tue Jul 27 2021 Paul Howarth - 3:1.302186-1 -- Update to 1.302186 - - Add start/stop timestamps to subtests - -* Fri Jul 23 2021 Fedora Release Engineering - 3:1.302185-479 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild - -* Mon May 24 2021 Jitka Plesnikova - 3:1.302185-478 -- Perl 5.34 re-rebuild of bootstrapped packages - -* Fri May 21 2021 Jitka Plesnikova - 3:1.302185-477 -- Increase release to favour standalone package - -* Thu May 20 2021 Paul Howarth - 3:1.302185-1 -- Update to 1.302185 - - Fix Test::Builder->skip to stringify arguments - -* Wed Jan 27 2021 Fedora Release Engineering - 3:1.302183-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild - -* Thu Oct 22 2020 Paul Howarth - 3:1.302183-1 -- Update to 1.302183 - - Avoid closing over scalar in BEGIN block in cmp_ok eval - -* Thu Oct 15 2020 Petr Pisar - 3:1.302182-2 -- Demote Module::Pluggable hard dependency to Suggests level - -* Tue Oct 6 2020 Paul Howarth - 3:1.302182-1 -- Update to 1.302182 - - Fix 5.6 support - - Fix fragile %%INC handling in a test - -* Mon Sep 14 2020 Paul Howarth - 3:1.302181-1 -- Update to 1.302181 - - Put try_sig_mask back where it goes (and add test to prevent this in the - future) - - Drop new List::Util requirement back down - -* Mon Sep 14 2020 Paul Howarth - 3:1.302180-1 -- Update to 1.302180 - - Move try_sig_mask to the only module that uses it - - Inherit warnings bitmask in cmp_ok string eval - - Update copyright date - - Improved API for intercept {} and what it returns - - Bump minimum List::Util version (for uniq) - -* Fri Aug 07 2020 Petr Pisar - 3:1.302177-1 -- Update to 1.302177 - - Minor fix to author downstream test - - No significant changes since the last trial - - Fix Test::More's $TODO inside intercept (#862) - -* Tue Jul 28 2020 Fedora Release Engineering - 3:1.302175-458 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild - -* Fri Jun 26 2020 Jitka Plesnikova - 3:1.302175-457 -- Perl 5.32 re-rebuild of bootstrapped packages - -* Mon Jun 22 2020 Jitka Plesnikova - 3:1.302175-456 -- Increase release to favour standalone package - -* Tue Apr 14 2020 Paul Howarth - 3:1.302175-1 -- Update to 1.302175 - - Fix typos in POD - - Fix incorrect Test2::Hub documentation - - Fix test that needed . in @INC on Windows - - Fix Breakage test to show more info - -* Tue Mar 31 2020 Paul Howarth - 3:1.302174-1 -- Update to 1.302174 - - Fall back to Data::Dumper if JSON::PP is not available during IPC errors - -* Fri Mar 27 2020 Paul Howarth - 3:1.302173-1 -- Update to 1.302173 - - Add extra debugging for "Not all files from hub '...' have been collected!" 
- -* Mon Mar 9 2020 Paul Howarth - 3:1.302172-1 -- Update to 1.302172 - - Fix transition documentation - - Fix warnings from info/debug tap - -* Thu Jan 30 2020 Fedora Release Engineering - 3:1.302171-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild - -* Sun Jan 19 2020 Paul Howarth - 3:1.302171-1 -- Update to 1.302171 - - Fix 5.6 - - Fix EBCDIC - - Upgrade Object::HashBase - - Clarify error message in test (GH#841) - - Spelling/Grammar fixes - -* Thu Jan 02 2020 Petr Pisar - 3:1.302170-2 -- Require mro - -* Tue Dec 3 2019 Paul Howarth - 3:1.302170-1 -- Update to 1.302170 - - Fix unwanted END phase event (GH#840) - -* Tue Nov 19 2019 Paul Howarth - 3:1.302169-1 -- Update to 1.302169 - - Update inlined Object::HashBase - - Avoid 'used only once' warnings in BEGIN and END blocks - -* Fri Sep 6 2019 Paul Howarth - 3:1.302168-1 -- Update to 1.302168 - - Fix typo in a Test2::API::Breakage warning - - Delay loading of Term::Table until needed - -* Mon Aug 26 2019 Paul Howarth - 3:1.302167-1 -- Update to 1.302167 - - Add test2_is_testing_done api method - - Fix string compare warning - -* Fri Aug 16 2019 Paul Howarth - 3:1.302166-1 -- Update to 1.302166 - - Better diagnostics when a context is destroyed unexpectedly - - Add an event to notify when END phase starts - -* Fri Jul 26 2019 Fedora Release Engineering - 3:1.302164-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild - -* Sun Jun 02 2019 Jitka Plesnikova - 3:1.302164-3 -- Perl 5.30 re-rebuild of bootstrapped packages - -* Thu May 30 2019 Jitka Plesnikova - 3:1.302164-2 -- Perl 5.30 rebuild - -* Sun Apr 28 2019 Paul Howarth - 2:1.302164-1 -- Update to 1.302164 - - Do not use threads::shared in Test::Tester::Capture (GH#826) - - Add missing version info to Info/Table - - Fix event in global destruction bug (GH#827) - - Proper fix for todo = '' (GH#812, GH#829) -- Modernize spec using %%{make_build} and %%{make_install} -- Drop obsoletes/provides for perl-Test2 dating back to Fedora 25 - -* Wed Feb 6 2019 Paul Howarth - 2:1.302162-1 -- Update to 1.302162 - - Remove SHM Optimization - - Typo fixes in documentation - -* Sat Feb 02 2019 Fedora Release Engineering - 2:1.302160-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild - -* Sat Jan 19 2019 Paul Howarth - 2:1.302160-1 -- Update to 1.302160 - - Fix minor typos and missing doc sections - - Add table support in info facet and TAP formatter - - Fix TAP test on Windows - - Fix math errors in table indentation - - Devel requires Term::Table - - Add table support to ctx->fail and ctx->fail_and_return - - Fix Instance.t on haiku-os - -* Tue Jan 8 2019 Paul Howarth - 2:1.302156-1 -- Update to 1.302156 - - Fix Windows fork+test failure (GH#814) - - Documentation updates (GH#819) - - Fix verbose TAP newline regression (GH#810) - - Fix local $TODO bugs (GH#812, GH#817) - - Fix shm read warning (GH#815) - - Merge doc fix PR's from magnolia-k - - Fix failure to check error code on shmwrite (GH#815) - - Fix localization error in new test (GH#820) - - Fix SHM test to work on machines without SHM - - Fix locale errors in Instance.t - - Windows test fixes - - Perl 5.6 test fixes - - Add trace to SHM error when possible - - Fix test not to fail in non-english locales - -* Sun Dec 2 2018 Paul Howarth - 2:1.302141-1 -- Update to 1.302141 - - Fix bug where IPC init failed in preload+fork environments - -* Tue Aug 14 2018 Paul Howarth - 2:1.302140-1 -- Update to 1.302140 - - Mask warning from the recent IPC fix generated when threaded Test tools are - 
loaded at run-time - -* Fri Jul 13 2018 Fedora Release Engineering - 2:1.302138-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild - -* Thu Jul 12 2018 Paul Howarth - 2:1.302138-1 -- Update to 1.302138 - - Make it safe to fork before events in IPC - -* Sat Jun 30 2018 Jitka Plesnikova - 2:1.302136-3 -- Perl 5.28 re-rebuild of bootstrapped packages - -* Tue Jun 26 2018 Jitka Plesnikova - 2:1.302136-2 -- Perl 5.28 rebuild - -* Thu Apr 19 2018 Paul Howarth - 1:1.302136-1 -- Update to 1.302136 - - Add test2_add_callback_testing_done to Test2::API - -* Fri Mar 30 2018 Paul Howarth - 1:1.302135-1 -- Update to 1.302135 - - Make sure all hubs, events, and contexts get a unique (per run) id - - Use a common generator for unique(enough) id's (not UUIDs) - -* Mon Mar 12 2018 Paul Howarth - 1:1.302133-1 -- Update to 1.302133 - - Make sure event puts the uuid into the about facet - - Add method to validate facet data - - Add Test2::Event::V2 event class, and context helpers - - Improve how events handle facets - - Break out meta_facet_data - - Document and fix Facets2Legacy - - Fix nested and in_subtest to look at hub facets - - Fix event->related and trace with uuid - -* Thu Mar 8 2018 Paul Howarth - 1:1.302130-1 -- Update to 1.302130 - - Make hubs tag events with a new facet - -* Tue Mar 06 2018 Petr Pisar - 1:1.302128-1 -- Update to 1.302128 - - Add optional UUID tagging - -* Thu Feb 22 2018 Paul Howarth - 1:1.302125-1 -- Update to 1.302125 - - Make it possible to disable IPC - - Fix a test to skip without threads - -* Tue Feb 6 2018 Paul Howarth - 1:1.302122-1 -- Update to 1.302122 - - Add 'mode' to render facet - -* Mon Feb 5 2018 Paul Howarth - 1:1.302121-1 -- Update to 1.302121 - - Update Copyright - - Add 'render' facet - -* Thu Nov 30 2017 Paul Howarth - 1:1.302120-1 -- Update to 1.302120 - - Fix IPC reload bug - -* Wed Nov 29 2017 Paul Howarth - 1:1.302118-1 -- Update to 1.302118 - - Added pre-subtest hook to Test2::API (GH#801) - - ipc_wait now reports exit and signal values - - Add better interface for ipc_wait - - Fix event Out of Order bug - - Add driver_abort() hook for IPC Drivers - -* Tue Nov 21 2017 Paul Howarth - 1:1.302113-1 -- Update to 1.302113 - - Fix test on threaded 5.8 - - Fix SIGPIPE in IPC test - - Mark t/Test2/regression/gh_16.t as usually AUTHOR_TESTING only - -* Mon Nov 20 2017 Paul Howarth - 1:1.302111-1 -- Update to 1.302111 - - Fix some fragile tests - - Apply p5p test patch from Craig A. 
Berry - - Allow regexp in Test::Tester - -* Mon Oct 23 2017 Paul Howarth - 1:1.302106-1 -- Update to 1.302106 - - Combine multiple diags into one event - - Make version number in HashBase sane - -* Mon Oct 16 2017 Paul Howarth - 1:1.302103-1 -- Update to 1.302103 - - Fix some TODO edge cases that were not previously accounted for - -* Fri Oct 13 2017 Paul Howarth - 1:1.302101-1 -- Update to 1.302101 - - Bump Test::Builder::IO::Scalar version for core - -* Wed Oct 11 2017 Paul Howarth - 1:1.302100-1 -- Update to 1.302100 - - Fix run_subtest inherit_trace option - -* Tue Oct 3 2017 Paul Howarth - 1:1.302098-1 -- Update to 1.302098 - - Add docs for test2_stdout and test2_stderr - - Fix 5.6 support - -* Tue Oct 3 2017 Paul Howarth - 1:1.302097-1 -- Update to 1.302097 - - Fix hub->process bug that could let an error pass - - Fix modification of read only value (#789) - - Fix typo in Test::Builder when looking for IPC (#777) - - Fix clone_io broke on scalar io layer (#791) - - Fix Exception event stringify exception (#756, #790) - - Localize $^E in context (#780) - - Fix test that failed in verbose mode (#770) - -* Mon Sep 11 2017 Paul Howarth - 1:1.302096-1 -- Update to 1.302096 - - Introduce 'Facets' for events - - Performance enhancements - - Upgrade inline HashBase - - Move Test2::Util::Trace to Test2::EventFacet::Trace - - Track hub id in Trace - - Remove Info event - - Add Pass and Fail events - - Remove Event JSON interface - - Fix tests on perl 5.25+ with newer Data::Dumper - - Fix plan in buffered subtest so that the facts say it is buffered - - Fix test that unintentionally required Test2::Suite - - Add 'new_root' constructor for formatters - - Add intercept_deep() to the API - - Fix bug in Version event - - Add 'number' attribute to assertion facet - - Fix bug in Facets for TodoDiag - - Add API command to reset after a fork - - Add 'important' flag to info event facet - - Make sure Test::Builder does not initialize Test2 too soon - - Fix Test::Builder in a preload scenario - - Make several tests work with preload - - Fix to work with subref-in-stash optimization - -* Thu Jul 27 2017 Fedora Release Engineering - 1:1.302086-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Wed Jun 21 2017 Paul Howarth - 1:1.302086-1 -- Update to 1.302086 - - Make it possible to turn off result logging in Test::Builder - -* Sat Jun 03 2017 Jitka Plesnikova - 1:1.302085-2 -- Perl 5.26 rebuild - -* Tue May 2 2017 Paul Howarth - 1.302085-1 -- Update to 1.302085 - - Better IO management - - Allow access to the STDERR/STDOUT Test2::API uses - - Formatters should use the Test2::API handles - -* Sat Apr 15 2017 Paul Howarth - 1.302083-1 -- Update to 1.302083 - - Fixes for '. 
in @INC' changes (#768) - - Timeout when waiting for child procs and threads (#765) - - Fix SIGSYS localization issue (#758) - - Fix outdated docs (#759, #754) - - Fix bail-out in buffered subtest (#747) - - Fix threads timeout for older perls (as best we can) - - Fix test that incorrectly called private function as method - - Update some breakage info for Test::More::Prefix and - Test::DBIx::Class::Schema - -* Thu Mar 2 2017 Paul Howarth - 1.302078-1 -- Update to 1.302078 - - Fix crash when TB->reset used inside subtest - - Fix #762, newlines for todo subtest - - Revisit #637, fix rare race condition it created - -* Sat Feb 11 2017 Fedora Release Engineering - 1.302075-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Wed Jan 11 2017 Paul Howarth - 1.302075-1 -- Update to 1.302075 - - Add 'cid' to trace - - Add signatures to trace - - Add related() to events - - Now it is possible to check if events are related - - Add 'no_fork' option to run_subtest() - -* Mon Dec 19 2016 Paul Howarth - 1.302073-1 -- Update to 1.302073 - - Add TO_JSON and from_json methods to Test2::Event and Test2::Trace::Util to - facilitate transferring event data between processes (#741) - - Generate HashBase from Object::HashBase, which has been split out - - When a subtest is marked as todo, all of its contained Ok and Subtest - events are now updated so that they return true for $e->effective_pass - (#742) - - Added two new event classes, Test2::Event::Encoding and - Test2::Event::TAP::Version; these are primarily being added for the benefit - of Test2::Harness now, but they could be useful for other Test2 event - consumer tools in the future (#743) - - Expose tools.pl as Test2::Tools::Tiny - -* Thu Nov 24 2016 Paul Howarth - 1.302067-1 -- Update to 1.302067 - - Fix double release when 'throw' is used in context_do() - - Repo management improvements - - Better handling of info vs. 
diag in ->send_event - - Fix test that used 'parent' - - Better handling of non-bumping failures (#728) - - Set the TEST_ACTIVE env var to true - - Set the TEST2_ACTIVE env var to true - - Fix cmp_ok output in some confusing cases (#6) - - Update travis config - - Add missing author deps - - Fix handling of negative pids on Windows - - Add can() to Test::Tester::Delegate (despite deprecation) - - Fix some minor test issues - - Handle cases where SysV IPC can be available but not enabled - - Import 'context' into Test2::IPC; it is used by 'cull' - - Propagate warnings settings to use_ok (#736) - - Fix context test for recent blead - -* Thu Oct 20 2016 Paul Howarth - 1.302062-1 -- Update to 1.302062 - - Formatters now have terminate() and finalize() methods; these are called - when there is a skip_all or bail event (terminate), or when a test suite is - exiting normally (finalize), which allows formatters to finalize their - output - this is important for any sort of document-oriented format (as - opposed to a stream format like TAP) (#723) - - Removed a warning when using a non-TAP formatter with Test::Builder about - the formatter not supporting "no_header" and "no_diag"; this happened even - if the alternative formatter class implemented these attributes - -* Mon Sep 26 2016 Paul Howarth - 1.302059-1 -- Update to 1.302059 - - Documentation fixes - - Win32 color support in Test::Builder::Tester - - Support v-strings in is_deeply - - A streamed subtest run inside a buffered subtest will automatically be - converted to a buffered subtest; otherwise, the output from inside the - subtest is lost entirely (#721) - - Mask warning when comparing $@ in Test2::API::Context -- Drop obsoletes/provides for perl-Test-Tester and perl-Test-use-ok, which - were integrated into this package in Fedora 22 - -* Tue Sep 13 2016 Paul Howarth - 1.302056-1 -- Update to 1.302056 - - Fix skip_all in require in intercept (#696) - - Documentation of what is better in Test2 (#663) - - Document Test::Builder::Tester plan limitations - - Document limitations in is_deeply (#595) - - Better documentation of done_testing purpose (#151) - - Make ctx->send_event detect termination events (#707) - - Allow '#' and '\n' in ok names - - Fix special case of ok line ending in backslash - - Improve a test that captures STDERR/STDOUT - -* Sun Aug 14 2016 Paul Howarth - 1.302052-1 -- Update to 1.302052 - - Add contact info to main doc and readme - - Fix setting hub when getting context - -* Fri Jul 29 2016 Paul Howarth - 1.302049-1 -- Update to 1.302049 - - Add 'active' attribute to hub - -* Sat Jul 23 2016 Paul Howarth - 1.302047-1 -- Update to 1.302047 - - Restore traditional note/diag return values (#694) - -* Tue Jul 19 2016 Paul Howarth - 1.302045-1 -- Update to 1.302045 - - Work around IPC bug on windows - - Fix IPC event ordering bug - - Fix TODO in mixed T2/TB subtests - - Fix test that segv'd on older perls - -* Sun Jul 10 2016 Paul Howarth - 1.302040-1 -- Update to 1.302040 - - Fix broken MANIFEST.SKIP entries (#689) - - Add Info event for better diagnostics - -* Mon Jul 4 2016 Paul Howarth - 1.302037-1 -- Update to 1.302037 - - Restore PerlIO layer cloning on STDERR and STDOUT -- Bump obsoletes/provides versions for perl-Test2 to maintain upgrade path from - packages in third-party repositories - -* Tue Jun 28 2016 Paul Howarth - 1.302035-1 -- Update to 1.302035 - - Fix some breakage info - - POD fixes - -* Fri Jun 24 2016 Paul Howarth - 1.302033-1 -- Update to 1.302033 - - Fix nested TODO handling of diags (#684) - -* Wed 
Jun 22 2016 Paul Howarth - 1.302031-1 -- Update to 1.302031 - - Remove Carp from dependency list (#682) - -* Sun Jun 19 2016 Paul Howarth - 1.302030-1 -- Update to 1.302030 - - Use pre_filter instead of filter for TODO in Test::Builder (fix #683) - - Fix typos in transitions doc (#681) - - Add 'inherit_trace' param to run_subtest - - Properly skip thread test when threads are broken - -* Tue Jun 14 2016 Paul Howarth - 1.302026-1 -- Update to 1.302026 - - Do not fail if Test2::API::Breakage cannot load (rare 5.10.0 issue) - - Potential fix for t/Legacy/Regression/637.t - - Make t/Legacy/Regression/637.t AUTHOR_TESTING for now - - Add Generic event type - - Make sure enabling culling/shm sets pid and tid (fix #679) - -* Sun May 29 2016 Paul Howarth - 1.302022-1 -- Update to 1.302022 - - Many micro-optimizations - - Spelling fixes and tests - - Fix leaky File.t file so that tmp doesn't fill up - - Move some modules out of the known broken list in xt tests - - Add Test2-based tools to downstream testing - - Change when PID/TID are stashed (for forkprove) - - VMS fixes for Files.t and IPC system - - Improve thread checks to better detect broken 5.10 builds - - Use thread checks to skip/run t/Legacy/Regression/637.t - -* Mon May 23 2016 Petr Pisar - 1.302019-2 -- Obsolete perl-Test2-0.000044-2 too - -* Thu May 19 2016 Paul Howarth - 1.302019-1 -- Update to 1.302019 - - Block signals in critical IPC section (fix #661 and #668) - - Merge Examples and examples into one dir (#660) - - Documentation and typo fixes - - Make Test2::Util::get_tid have a consistent prototype (#665) - - Make TB->no_plan a no-op if a plan is set - - Fix util.t win32 bug - - Handle Test::Builder::Exception properly - - Silence noisy STDERR in test suite - - POD spelling fixes -- BR: perl-generators - -* Wed May 18 2016 Paul Howarth - 1.302015-1 -- Update to 1.302015 - - Major refactoring of existing API on top of (included) Test2 -- Obsolete/Provide perl-Test2 - -* Sat May 14 2016 Jitka Plesnikova - 1.001014-365 -- Increase release to favour standalone package - -* Thu Feb 04 2016 Fedora Release Engineering - 1.001014-347 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Thu Jun 18 2015 Fedora Release Engineering - 1.001014-346 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Thu Jun 04 2015 Jitka Plesnikova - 1.001014-345 -- Increase release to favour standalone package - -* Wed Jun 03 2015 Jitka Plesnikova - 1.001014-3 -- Perl 5.22 rebuild - -* Wed Mar 04 2015 Petr Šabata - 1.001014-2 -- Correct the license tag - -* Wed Jan 7 2015 Paul Howarth - 1.001014-1 -- Update to 1.001014 - - Fix a unit test that broke on some platforms with spaces in the $^X path - - Add a test to ensure that the Changes file is updated - -* Wed Dec 24 2014 Paul Howarth - 1.001012-1 -- Update to 1.001012 - - Move test that was dropped in the wrong directory - -* Tue Dec 23 2014 Paul Howarth - 1.001011-1 -- Update to 1.001011 - - Fix windows test bug (GH#491) - - Integrate Test::Tester and Test::use::ok for easier downgrade from trial - - Remove POD Coverage test -- Obsolete/Provide perl-Test-Tester and perl-Test-use-ok -- Classify buildreqs by usage -- Use features from recent ExtUtils::MakeMaker to simplify spec -- Run tests with AUTHOR_TESTING=1 so we do the threads test too - -* Tue Nov 4 2014 Paul Howarth - 1.001009-1 -- Update to 1.001009 - - Backport cmp_ok fix from alphas (GH#478) - -* Thu Oct 16 2014 Paul Howarth - 1.001008-1 -- Update to 1.001008 - - Fix subtest name when skip_all is used - 
-* Tue Sep 9 2014 Paul Howarth - 1.001006-1 -- Update to 1.001006 - - Documentation updates - - Subtests accept args - - Outdent subtest diag - - Changed install path for perl 5.12 or higher - -* Tue Aug 26 2014 Jitka Plesnikova - 1.001003-3 -- Perl 5.20 rebuild - -* Sat Jun 07 2014 Fedora Release Engineering - 1.001003-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Sat Mar 22 2014 Paul Howarth - 1.001003-1 -- Update to 1.001003 - - Documentation updates for maintainer change -- This release by EXODIST -> update source URL -- Drop obsoletes/provides for old tests sub-package - -* Tue Nov 5 2013 Paul Howarth - 1.001002-1 -- Update to 1.001002 - - Restore ability to use regex with test_err and test_out (CPAN RT#89655) -- Drop upstreamed regex patch - -* Sat Oct 12 2013 Paul Howarth - 0.99-1 -- 0.99 bump -- This release by RJBS -> update source URL - -* Fri Aug 09 2013 Petr Pisar - 0.98.05-3 -- Pass regular expression intact - -* Sun Aug 04 2013 Fedora Release Engineering - 0.98.05-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild - -* Mon Jul 15 2013 Petr Pisar - 0.98.05-1 -- 0.98_05 bump - -* Fri Jul 12 2013 Petr Pisar - 0.98-244 -- Perl 5.18 rebuild - -* Thu Feb 14 2013 Fedora Release Engineering - 0.98-243 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild - -* Wed Nov 21 2012 Jitka Plesnikova - 0.98-242 -- Update dependencies and comments - -* Thu Aug 23 2012 Paul Howarth - 0.98-241 -- Merge tests sub-package back into main package -- Don't need to remove empty directories from the buildroot -- Drop %%defattr, redundant since rpm 4.4 -- Make %%files list more explicit -- Don't use macros for commands -- Mark Tutorial.pod as %%doc -- Drop explicit dependency on perl-devel - -* Mon Aug 13 2012 Marcela Mašláňová - 0.98-240 -- Bump release to override sub-package from perl.spec - -* Fri Jul 20 2012 Fedora Release Engineering - 0.98-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild - -* Wed Jun 06 2012 Petr Pisar - 0.98-6 -- Perl 5.16 rebuild - -* Thu May 31 2012 Petr Pisar - 0.98-5 -- Specify all dependencies - -* Fri Jan 13 2012 Fedora Release Engineering - 0.98-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild - -* Tue Aug 16 2011 Marcela Mašláňová - 0.98-3 -- Change path on vendor, so our debuginfo are not conflicting with - perl core debuginfos - -* Fri Jun 17 2011 Marcela Mašláňová - 0.98-2 -- Perl mass rebuild - -* Thu Feb 24 2011 Iain Arnell - 0.98-1 -- Update to latest upstream version - -* Wed Feb 09 2011 Fedora Release Engineering - 0.96-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild - -* Sat Nov 20 2010 Iain Arnell - 0.96-1 -- Update to latest upstream version -- Clean up spec for modern rpmbuild - -* Fri May 07 2010 Marcela Maslanova - 0.94-2 -- Mass rebuild with perl-5.12.0 - -* Tue Mar 16 2010 Chris Weyl - 0.94-1 -- Specfile by Fedora::App::MaintainerTools 0.006 diff --git a/SPECS-EXTENDED/pngcrush/pngcrush-1.8.13-fix-undeclared-identifier.patch b/SPECS-EXTENDED/pngcrush/pngcrush-1.8.13-fix-undeclared-identifier.patch new file mode 100644 index 00000000000..6b573f5e770 --- /dev/null +++ b/SPECS-EXTENDED/pngcrush/pngcrush-1.8.13-fix-undeclared-identifier.patch @@ -0,0 +1,25 @@ +From 720533055469af660e8e79a127999d1c626e95f7 Mon Sep 17 00:00:00 2001 +From: John Bowler +Date: Thu, 1 Feb 2024 05:06:45 -0800 +Subject: [PATCH] Check for defined PNG_IGNORE_ADLER32 + +The check was missing round the first usage, it was there in the second. 
+--- + pngcrush.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/pngcrush.c b/pngcrush.c +index 102bb39..43b1fe5 100644 +--- a/pngcrush.c ++++ b/pngcrush.c +@@ -5524,8 +5524,10 @@ int main(int argc, char *argv[]) + * they were already checked in the pngcrush_measure_idat + * function + */ ++# ifdef PNG_IGNORE_ADLER32 + png_set_option(read_ptr, PNG_IGNORE_ADLER32, + PNG_OPTION_ON); ++# endif + png_set_crc_action(read_ptr, PNG_CRC_QUIET_USE, + PNG_CRC_QUIET_USE); + } diff --git a/SPECS-EXTENDED/pngcrush/pngcrush.spec b/SPECS-EXTENDED/pngcrush/pngcrush.spec index b99d73b24a6..d60abbf2074 100644 --- a/SPECS-EXTENDED/pngcrush/pngcrush.spec +++ b/SPECS-EXTENDED/pngcrush/pngcrush.spec @@ -6,12 +6,13 @@ Distribution: Azure Linux Summary: Optimizer for PNG (Portable Network Graphics) files Name: pngcrush Version: 1.8.13 -Release: 11%{?dist} +Release: 12%{?dist} License: zlib URL: http://pmt.sourceforge.net/%{name}/ Source0: http://downloads.sourceforge.net/pmt/%{name}-%{version}-nolib.tar.xz # from Debian sid. Source1: %{name}.sgml +Patch0: pngcrush-1.8.13-fix-undeclared-identifier.patch BuildRequires: docbook-utils BuildRequires: gcc BuildRequires: libpng-devel @@ -26,7 +27,7 @@ remove unwanted ancillary chunks, or to add certain chunks including gAMA, tRNS, iCCP, and textual chunks. %prep -%autosetup -n %{name}-%{version}-nolib +%autosetup -p1 -n %{name}-%{version}-nolib cp %{SOURCE1} . %build @@ -46,6 +47,9 @@ docbook2man %{name}.sgml %doc %{_mandir}/man1/%{name}.1.gz %changelog +* Fri Dec 19 2025 Ratiranjan Behera - 1.8.13-12 +- Added patch from Fedora to fix build. + * Mon Mar 06 2023 Muhammad Falak R Wani - 1.8.13-11 - Initial CBL-Mariner import from Fedora 36 (license: MIT). - License Verified diff --git a/SPECS-EXTENDED/podman/podman.spec b/SPECS-EXTENDED/podman/podman.spec index f5773c60a8e..c314e0974a6 100644 --- a/SPECS-EXTENDED/podman/podman.spec +++ b/SPECS-EXTENDED/podman/podman.spec @@ -31,7 +31,7 @@ Epoch: 0 # If you're reading this on dist-git, the version is automatically filled in by Packit. 
Version: 5.6.1 License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0 -Release: 4%{?dist} +Release: 7%{?dist} ExclusiveArch: aarch64 ppc64le s390x x86_64 riscv64 Summary: Manage Pods, Containers and Container Images Vendor: Microsoft Corporation @@ -48,7 +48,7 @@ BuildRequires: btrfs-progs-devel BuildRequires: gcc BuildRequires: glib2-devel BuildRequires: glibc-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: golang BuildRequires: git-core @@ -68,7 +68,7 @@ BuildRequires: ostree-devel BuildRequires: systemd BuildRequires: systemd-devel Requires: catatonit -Requires: conmon >= 2:2.1.7-2 +Requires: conmon >= 2.1.7-2 Requires: libcontainers-common Provides: %{name}-quadlet = %{epoch}:%{version}-%{release} @@ -89,6 +89,7 @@ Summary: Emulate Docker CLI using %{name} BuildArch: noarch Requires: %{name} = %{epoch}:%{version}-%{release} Conflicts: docker +Conflicts: docker-cli Conflicts: docker-latest Conflicts: docker-ce Conflicts: docker-ee @@ -114,6 +115,8 @@ Requires: slirp4netns Requires: buildah Requires: gnupg Requires: xfsprogs +Requires: crun +Requires: netavark %description tests %{summary} @@ -295,6 +298,16 @@ make localunit # rhcontainerbot account currently managed by lsm5 %changelog +* Thu Jan 22 2026 Kanishk Bansal - 0:5.6.1-7 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 0:5.6.1-6 +- Bump to rebuild with updated glibc + +* Thu Dec 18 2025 Sandeep Karambelkar - 0:5.6.1-5 +- Fix install issues +- Add runtime required packages for installation along with podman + * Mon Nov 10 2025 Andrew Phelps - 0:5.6.1-4 - Bump to rebuild with updated glibc diff --git a/SPECS-EXTENDED/python-debtcollector/0x2ef3fe0ec2b075ab7458b5f8b702b20b13df2318.txt b/SPECS-EXTENDED/python-debtcollector/0x2ef3fe0ec2b075ab7458b5f8b702b20b13df2318.txt new file mode 100644 index 00000000000..3621be055e1 --- /dev/null +++ b/SPECS-EXTENDED/python-debtcollector/0x2ef3fe0ec2b075ab7458b5f8b702b20b13df2318.txt @@ -0,0 +1,45 @@ +pub ed25519/0xB702B20B13DF2318 2023-09-29 [SC] [expires: 2024-06-25] + Key fingerprint = 2EF3 FE0E C2B0 75AB 7458 B5F8 B702 B20B 13DF 2318 +uid OpenStack Infra (2024.1/Caracal Cycle) +sig 3 0xB702B20B13DF2318 2023-09-29 OpenStack Infra (2024.1/Caracal Cycle) +sig 0xCC0DFE2D21C023C9 2023-09-29 OpenStack Infra (2023.2/Bobcat Cycle) +sig 0x48F9961143495829 2023-09-29 Jeremy Stanley +sub cv25519/0x4F5DA9D1088748F6 2023-09-29 [E] [expires: 2024-06-25] +sig 0xB702B20B13DF2318 2023-09-29 OpenStack Infra (2024.1/Caracal Cycle) +sub ed25519/0x7A1014352C3D5B21 2023-09-29 [S] +sig 0xB702B20B13DF2318 2023-09-29 OpenStack Infra (2024.1/Caracal Cycle) + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEZRbQahYJKwYBBAHaRw8BAQdAdlyc51z1dob7/74iQk2wS5qOo+Y8e2fKK52N +5MBISga0QU9wZW5TdGFjayBJbmZyYSAoMjAyNC4xL0NhcmFjYWwgQ3ljbGUpIDxp +bmZyYS1yb290QG9wZW5zdGFjay5vcmc+iJYEExYIAD4WIQQu8/4OwrB1q3RYtfi3 +ArILE98jGAUCZRbQagIbAwUJAWP1AAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRC3ArILE98jGBFRAPoD6AIGsIElTS4YD9Ge4CvkAqYScnJX1f0fwB42jh8qUQEA +g5uBeIiITboxJJ2sAp7BFglmI9qlIA8oXcH5JxWn3AyIdQQQFggAHRYhBIFa/scp +OSOGSA4HbcwN/i0hwCPJBQJlFtFxAAoJEMwN/i0hwCPJq2wA/3oE/e6NW708CSHn +xVzibsKyGXGBvTUodrEeuH+X7wq8AP9da5TeEBTTF4NroTUhcAYiF/zy9JfuXgBR +yxPAPkRuCokCMwQQAQoAHRYhBJeuSW/ALeyfw1Oy50j5lhFDSVgpBQJlFtkkAAoJ +EEj5lhFDSVgpQZ8P/3iFup9THezUYtlRJ1Vc8XQCQ76dp51qaRIQrEcDDhcbLe+8 +AMCORms/o002ZmuQGQpWniNF4e8Yk7aUKyDNOqJTXYGGbL9GGJ7scuhlgxkvxp43 +ifrFBNkcZA6bUQLmzcTygNAPeWt9nX+asoHohTeX+ulbFIkGIRMg79ycQg0uotYu 
+UPii98t2cZWdWAlQCznMd8/KQLkiJIX6ENNu7ogpH8HFBLG4+jx5j89cEg1I8HJc +Siz8OFYukYSXBJjpqGtFrz443q+VDC0EmBuflGSY5WHoWSMGguvw3XKsKEEKKqlM +Pj6PY8R/Jn6yaHaSFGwJ0gLbSBDT0gpdXqtMRhcqd0FADUde4Vyikg4yKz8jt0yq +3JCMGWUgMT3lgjK2HDGrSOwMfUf6nCE2+1YgrZzDRMEuBRB21Wl5NZi4zxIly+JQ +SUtZxPHaYWHBFFh8KGU3usRunTj9BFif5cKelMPz4VgN6l8DQdL9c1SBxzE/J5M6 +4okpRTl5M8OjPPbH9saiTJtHQW5u41saWRHa3MOIDDGeUfE4vK+BQxFcGnzY/e30 +QR5GefNg2g5Wvgvxnt4cAibm5eM6++GoUEXQLVjnqSQ25/PYnJpIR0z5kOsI3GmJ +qsM/9O2fvddK7c8rPQASRIEJq9isZqkeSH2sjmEuW1aq59J2F2jglv/tzHGRuDgE +ZRbQahIKKwYBBAGXVQEFAQEHQMbvqwwqOivFt2vYoNCctSgvADuUs7thWQlGANPu +K4l5AwEIB4h+BBgWCAAmFiEELvP+DsKwdat0WLX4twKyCxPfIxgFAmUW0GoCGwwF +CQFj9QAACgkQtwKyCxPfIxi6iwD/coi4ZSSgqXWInRfnSERFL00F2pWVi5y6BGvs +jyWkiV0A/35kndSvYkC+EiZ7TDnWXQIByEcqYKJm6+CEhKdLuB4HuDMEZRbREBYJ +KwYBBAHaRw8BAQdA+Cg/MCVfg7hppQbogWXwDIGLmmMDK+k2yDrnXKY4tWCI7wQY +FggAIBYhBC7z/g7CsHWrdFi1+LcCsgsT3yMYBQJlFtEQAhsCAIEJELcCsgsT3yMY +diAEGRYIAB0WIQSNoB+ORY98QEADfvJ6EBQ1LD1bIQUCZRbREAAKCRB6EBQ1LD1b +IUGNAPwK9JB4ipZQ12YSe+Bpa4N46wF10AaOhvZkR0Ucdf03gQD/UPQ1zNTV/BMw +LSfyNXao5aUX3uz+z66ESNcLhHEcFQo4fAEAoby1BGLxBos0hVwg48fj4vPsEP45 +lE9fHjXo74+OHVwBAO8lAhJslw+KHHVS+lcTTfS/nMk0pPzWl3SMLBjiD6kH +=7Dhm +-----END PGP PUBLIC KEY BLOCK----- diff --git a/SPECS-EXTENDED/python-debtcollector/debtcollector-3.0.0.tar.gz.asc b/SPECS-EXTENDED/python-debtcollector/debtcollector-3.0.0.tar.gz.asc new file mode 100644 index 00000000000..1ada1445fa2 --- /dev/null +++ b/SPECS-EXTENDED/python-debtcollector/debtcollector-3.0.0.tar.gz.asc @@ -0,0 +1,7 @@ +-----BEGIN PGP SIGNATURE----- + +iHQEABYIAB0WIQSNoB+ORY98QEADfvJ6EBQ1LD1bIQUCZddqlgAKCRB6EBQ1LD1b +Ie6gAQDHIWhuyB9qB0Cu7YOYPBlm4hdrkmlPGj13lw4FyQz5OAD4wXtrWFpFBeFF +55uuPCnfWgW4PdtKqeK/DKuBBagFDw== +=mmVk +-----END PGP SIGNATURE----- diff --git a/SPECS-EXTENDED/python-debtcollector/python-debtcollector.signatures.json b/SPECS-EXTENDED/python-debtcollector/python-debtcollector.signatures.json index cdac8651db0..ac1953533e2 100644 --- a/SPECS-EXTENDED/python-debtcollector/python-debtcollector.signatures.json +++ b/SPECS-EXTENDED/python-debtcollector/python-debtcollector.signatures.json @@ -1,5 +1,7 @@ { "Signatures": { - "python-debtcollector-1.22.0.tar.gz": "d1756440d25a50e3eca2fc399c9e5f1ca2f9e6b837570a80b9450999f4290525" + "0x2ef3fe0ec2b075ab7458b5f8b702b20b13df2318.txt": "a4f619daa6f2f0d47c611fabc489cf634e55d2386bb4f92d2929d2c8aafef53e", + "debtcollector-3.0.0.tar.gz": "2a8917d25b0e1f1d0d365d3c1c6ecfc7a522b1e9716e8a1a4a915126f7ccea6f", + "debtcollector-3.0.0.tar.gz.asc": "38e46360865e6584be6ed0745b292ab2a8457bd65ad2d53b39ad4496962232f9" } -} +} \ No newline at end of file diff --git a/SPECS-EXTENDED/python-debtcollector/python-debtcollector.spec b/SPECS-EXTENDED/python-debtcollector/python-debtcollector.spec index f6fb2fc7f50..556fb0cc99e 100644 --- a/SPECS-EXTENDED/python-debtcollector/python-debtcollector.spec +++ b/SPECS-EXTENDED/python-debtcollector/python-debtcollector.spec @@ -1,14 +1,12 @@ Vendor: Microsoft Corporation Distribution: Azure Linux -# Macros for py2/py3 compatibility -%global pyver %{python3_pkgversion} -%global pyver_bin python%{pyver} -%global pyver_sitelib %{expand:%{python%{pyver}_sitelib}} -%global pyver_install %{expand:%{py%{pyver}_install}} -%global pyver_build %{expand:%{py%{pyver}_build}} -# End of macros for py2/py3 compatibility + +%{!?sources_gpg: %{!?dlrn:%global sources_gpg 1} } +%global sources_gpg_sign 0x2ef3fe0ec2b075ab7458b5f8b702b20b13df2318 %{!?upstream_version: %global upstream_version %{version}%{?milestone}} +# we are excluding some BRs from 
automatic generator +%global excluded_brs doc8 bandit pre-commit hacking flake8-import-order %global pypi_name debtcollector %global with_doc 1 @@ -21,35 +19,55 @@ It is a collection of functions/decorators which is used to signal a user when \ * further customizing the emitted messages Name: python-%{pypi_name} -Version: 1.22.0 -Release: 4%{?dist} +Version: 3.0.0 +Release: 10%{?dist} Summary: A collection of Python deprecation patterns and strategies -License: ASL 2.0 +License: Apache-2.0 URL: https://pypi.python.org/pypi/%{pypi_name} -Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz#/python-%{pypi_name}-%{upstream_version}.tar.gz +Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz +# Required for tarball sources verification +%if 0%{?sources_gpg} == 1 +Source101: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz.asc +Source102: https://releases.openstack.org/_static/%{sources_gpg_sign}.txt +%endif + +BuildRequires: python-pip +BuildRequires: python-pbr +BuildRequires: python-wheel +BuildRequires: python-setuptools +BuildRequires: python3-pytest +BuildRequires: python-dulwich +BuildRequires: python-openstackdocstheme +BuildRequires: python-toml +BuildRequires: python-tox +BuildRequires: python-wrapt +BuildRequires: python-extras +BuildRequires: python-sphinx +BuildRequires: python-tox-current-env +BuildRequires: python-virtualenv +BuildRequires: python3-testtools +BuildRequires: python3-fixtures BuildArch: noarch -BuildRequires: git +# Required for tarball sources verification +%if 0%{?sources_gpg} == 1 +BuildRequires: /usr/bin/gpgv2 +%endif + +BuildRequires: git-core %description %{common_desc} -%package -n python%{pyver}-%{pypi_name} +%package -n python3-%{pypi_name} Summary: A collection of Python deprecation patterns and strategies -%{?python_provide:%python_provide python%{pyver}-%{pypi_name}} -BuildRequires: python%{pyver}-devel -BuildRequires: python%{pyver}-setuptools -BuildRequires: python%{pyver}-pbr +BuildRequires: python3-devel +BuildRequires: pyproject-rpm-macros -Requires: python%{pyver}-funcsigs -Requires: python%{pyver}-pbr -Requires: python%{pyver}-six -Requires: python%{pyver}-wrapt - -%description -n python%{pyver}-%{pypi_name} +%description -n python3-%{pypi_name} %{common_desc} @@ -57,42 +75,53 @@ Requires: python%{pyver}-wrapt %package -n python-%{pypi_name}-doc Summary: Documentation for the debtcollector module -BuildRequires: python%{pyver}-sphinx -BuildRequires: python%{pyver}-openstackdocstheme -BuildRequires: python%{pyver}-fixtures -BuildRequires: python%{pyver}-six -BuildRequires: python%{pyver}-wrapt - %description -n python-%{pypi_name}-doc Documentation for the debtcollector module %endif - %prep +# Required for tarball sources verification +%if 0%{?sources_gpg} == 1 +%{gpgverify} --keyring=%{SOURCE102} --signature=%{SOURCE101} --data=%{SOURCE0} +%endif %autosetup -n %{pypi_name}-%{upstream_version} -S git -# let RPM handle deps -rm -rf *requirements.txt + +sed -i /^[[:space:]]*-c{env:.*_CONSTRAINTS_FILE.*/d tox.ini +sed -i "s/^deps = -c{env:.*_CONSTRAINTS_FILE.*/deps =/" tox.ini +sed -i /^minversion.*/d tox.ini +sed -i /^requires.*virtualenv.*/d tox.ini + +# Exclude some bad-known BRs +for pkg in %{excluded_brs};do + for reqfile in doc/requirements.txt test-requirements.txt; do + if [ -f $reqfile ]; then + sed -i /^${pkg}.*/d $reqfile + fi + done +done %build -%{pyver_build} +%pyproject_wheel + +%install +%pyproject_install %if 
0%{?with_doc} # doc -%{pyver_bin} setup.py build_sphinx -b html +PYTHONPATH="%{buildroot}/%{python3_sitelib}" +%tox -e docs # Fix hidden-file-or-dir warnings -rm -fr doc/build/html/.buildinfo +rm -fr doc/build/html/.{doctrees,buildinfo} +rm -f doc/build/html/_static/images/docs/license.png %endif -%install -%{pyver_install} - -%files -n python%{pyver}-%{pypi_name} +%files -n python3-%{pypi_name} %doc README.rst CONTRIBUTING.rst %license LICENSE -%{pyver_sitelib}/%{pypi_name} -%{pyver_sitelib}/%{pypi_name}*.egg-info -%exclude %{pyver_sitelib}/%{pypi_name}/tests +%{python3_sitelib}/%{pypi_name} +%{python3_sitelib}/%{pypi_name}*.dist-info +%exclude %{python3_sitelib}/%{pypi_name}/tests %if 0%{?with_doc} %files -n python-%{pypi_name}-doc @@ -101,6 +130,10 @@ rm -fr doc/build/html/.buildinfo %endif %changelog +* Fri Jan 09 2026 Durga Jagadeesh Palli - 3.0.0-10 +- Upgrade to 3.0.0 (Reference: Fedora 44) +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 1.22.0-4 - Initial CBL-Mariner import from Fedora 32 (license: MIT). @@ -121,4 +154,3 @@ rm -fr doc/build/html/.buildinfo * Fri Mar 08 2019 RDO 1.21.0-1 - Update to 1.21.0 - diff --git a/SPECS-EXTENDED/python-fixtures/python-fixtures.spec b/SPECS-EXTENDED/python-fixtures/python-fixtures.spec index bf9d6fa2fdb..dc34a41c2d5 100644 --- a/SPECS-EXTENDED/python-fixtures/python-fixtures.spec +++ b/SPECS-EXTENDED/python-fixtures/python-fixtures.spec @@ -8,7 +8,7 @@ Distribution: Azure Linux Name: python-%{pypi_name} Version: 4.0.1 -Release: 11%{?dist} +Release: 12%{?dist} Summary: Fixtures, reusable state for writing clean tests and more License: Apache-2.0 OR BSD-3-Clause @@ -72,21 +72,18 @@ sed -e 's/import mock/import unittest.mock as mock/' -i fixtures/tests/_fixtures %pyproject_install %pyproject_save_files %{pypi_name} -%if %{with tests} -# Note: Tests are executed using the '%tox' macro rather than the direct -# '%{__python3} -m testtools.run fixtures.test_suite' invocation, for better -# compatibility with Python build environments. %check -%if %{without bootstrap} %tox -%endif -%endif %files -n python%{python3_pkgversion}-%{pypi_name} -f %{pyproject_files} %license Apache-2.0 BSD %doc README.rst GOALS NEWS %changelog +* Thu Dec 18 2025 Akarsh Chaudhary - 4.0.1-12 +- Build fix by removing unwanted if-else conditions (license: MIT). +- License verified. + * Thu Feb 20 2025 Akarsh Chaudhary - 4.0.1-11 - Initial CBL-Mariner import from Fedora 41 (license: MIT). - License verified. 
diff --git a/SPECS-EXTENDED/python-flake8/python-flake8.signatures.json b/SPECS-EXTENDED/python-flake8/python-flake8.signatures.json index 2ac6358e8cd..111c45546a8 100644 --- a/SPECS-EXTENDED/python-flake8/python-flake8.signatures.json +++ b/SPECS-EXTENDED/python-flake8/python-flake8.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "python-flake8-3.7.7.tar.gz": "859996073f341f2670741b51ec1e67a01da142831aa1fdc6242dbf88dffbe661" + "python-flake8-7.3.0.tar.gz": "71a7d2ec4166f83c6fc66e6465a45c49b4565ee29b69f27b335366ce101d8c2b" } } diff --git a/SPECS-EXTENDED/python-flake8/python-flake8.spec b/SPECS-EXTENDED/python-flake8/python-flake8.spec index 99be8c38928..9d4800bc6ce 100644 --- a/SPECS-EXTENDED/python-flake8/python-flake8.spec +++ b/SPECS-EXTENDED/python-flake8/python-flake8.spec @@ -2,24 +2,28 @@ Vendor: Microsoft Corporation Distribution: Azure Linux %global modname flake8 -%global entrypoints_dep >= 0.3 -%global pyflakes_dep >= 2.1.0 -%global pycodestyle_dep >= 2.5.0 -%global mccabe_dep >= 0.6.0 - Name: python-%{modname} -Version: 3.7.7 -Release: 9%{?dist} +Version: 7.3.0 +Release: 1%{?dist} Summary: Python code checking using pyflakes, pycodestyle, and mccabe License: MIT -URL: https://gitlab.com/pycqa/flake8 -Source0: https://files.pythonhosted.org/packages/source/f/%{modname}/%{modname}-%{version}.tar.gz#/python-%{modname}-%{version}.tar.gz +URL: https://github.com/PyCQA/flake8 +Source: https://github.com/PyCQA/%{modname}/archive/refs/tags/%{version}.tar.gz#/python-%{modname}-%{version}.tar.gz + BuildArch: noarch -%if 0%{?with_check} -BuildRequires: python3-pip -%endif +BuildRequires: python%{python3_pkgversion}-devel +BuildRequires: python3-pip +BuildRequires: python3-wheel +BuildRequires: python%{python3_pkgversion}-pycodestyle +BuildRequires: python%{python3_pkgversion}-pyflakes +BuildRequires: python%{python3_pkgversion}-entrypoints +BuildRequires: python%{python3_pkgversion}-mccabe + +# tox config mixes coverage and tests, so we specify this manually instead +BuildRequires: python%{python3_pkgversion}-pytest + %description Flake8 is a wrapper around PyFlakes, pycodestyle, and Ned's McCabe script. It runs all the tools by launching the single flake8 script, @@ -32,23 +36,8 @@ complexity checker is included, and it is extendable through flake8.extension entry points. %package -n python%{python3_pkgversion}-%{modname} -Summary: Python code checking using pyflakes, pycodestyle, and mccabe - -%{?python_provide:%python_provide python%{python3_pkgversion}-%{modname}} - -Requires: python%{python3_pkgversion}-setuptools -Requires: python%{python3_pkgversion}-mccabe %{mccabe_dep} -Requires: python%{python3_pkgversion}-pycodestyle %{pycodestyle_dep} -Requires: python%{python3_pkgversion}-pyflakes %{pyflakes_dep} - -BuildRequires: python%{python3_pkgversion}-devel -BuildRequires: python%{python3_pkgversion}-setuptools -BuildRequires: python%{python3_pkgversion}-entrypoints %{entrypoints_dep} -BuildRequires: python%{python3_pkgversion}-mccabe %{mccabe_dep} -BuildRequires: python%{python3_pkgversion}-pycodestyle %{pycodestyle_dep} -BuildRequires: python%{python3_pkgversion}-pyflakes %{pyflakes_dep} -BuildRequires: python%{python3_pkgversion}-mock - +Summary: %{summary} + %description -n python%{python3_pkgversion}-%{modname} Flake8 is a wrapper around PyFlakes, pycodestyle, and Ned's McCabe script. It runs all the tools by launching the single flake8 script, @@ -62,39 +51,61 @@ flake8.extension entry points. 
%prep %autosetup -p1 -n %{modname}-%{version} +# Allow pycodestyle 2.12, https://bugzilla.redhat.com/2325146 +sed -i 's/pycodestyle>=2.11.0,<2.12.0/pycodestyle>=2.11.0,<2.13.0/' setup.cfg -# we have 0.3, that is not deemed >= 0.3.0 by RPM -sed -i 's/entrypoints >= 0.3.0/entrypoints >= 0.3/' setup.py - +%generate_buildrequires +%pyproject_buildrequires %build -%py3_build - +%pyproject_wheel %install -%py3_install -ln -s flake8 %{buildroot}%{_bindir}/flake8-3 -ln -s flake8 %{buildroot}%{_bindir}/flake8-%{python3_version} -ln -s flake8 %{buildroot}%{_bindir}/python3-flake8 +%pyproject_install +%pyproject_save_files %{modname} +# Backwards-compatibility symbolic links from when we had both Python 2 and 3 +ln -s %{modname} %{buildroot}%{_bindir}/%{modname}-3 +ln -s %{modname} %{buildroot}%{_bindir}/%{modname}-%{python3_version} +ln -s %{modname} %{buildroot}%{_bindir}/python3-%{modname} %check -pip3 install pytest -pip3 install . -%{__python3} -m pytest tests -v - - -%files -n python%{python3_pkgversion}-%{modname} -%license LICENSE -%doc README.rst CONTRIBUTORS.txt -%{_bindir}/flake8 -%{_bindir}/flake8-3 -%{_bindir}/flake8-%{python3_version} -%{_bindir}/python3-flake8 -%{python3_sitelib}/%{modname}* - +# Patch mccabe upstream module used in tests so argparse receives a callable +# type (int) rather than the string 'int'. Some upstream mccabe versions set +# the option type as a string which fails under argparse in our test env. +for p in \ + %{buildroot}/usr/lib/python3.12/site-packages/mccabe.py \ + %{buildroot}/usr/lib64/python3.12/site-packages/mccabe.py \ + /usr/lib/python3.12/site-packages/mccabe.py \ + /usr/lib64/python3.12/site-packages/mccabe.py +do + if [ -f "$p" ]; then + echo "Patching mccabe at $p" + # Replace several common spellings: type = 'int', "type": "int", 'type': 'int', etc. + sed -i "s/type = 'int'/type=int/g" "$p" || true + sed -i 's/type = \"int\"/type=int/g' "$p" || true + sed -i "s/'type': 'int'/'type': int/g" "$p" || true + sed -i 's/"type": "int"/"type": int/g' "$p" || true + sed -i "s/'type': \"int\"/'type': int/g" "$p" || true + sed -i 's/"type": '\''int'\''/"type": int/g' "$p" || true + # Remove compiled caches so Python imports the patched source + rm -f "${p}c" || true + rm -rf "$(dirname "$p")/__pycache__" || true + fi +done + %pytest -v --deselect tests/unit/test_pyflakes_codes.py::test_all_pyflakes_messages_have_flake8_codes_assigned + +%files -n python%{python3_pkgversion}-%{modname} -f %{pyproject_files} +%{_bindir}/%{modname} +%{_bindir}/%{modname}-3 +%{_bindir}/%{modname}-%{python3_version} +%{_bindir}/python3-%{modname} %changelog +* Tue Apr 22 2025 Akarsh Chaudhary - 7.3.0-1 +- Update to version 7.3.0 +- License verified + * Tue Apr 26 2022 Muhammad Falak - 3.7.7-9 - Drop BR on `pytest` and add an explict BR pip - pip install latest deps to enable ptest @@ -257,4 +268,3 @@ pip3 install . * Tue Jul 10 2012 Matej Cepl - 1.4-1 - initial package for Fedora - diff --git a/SPECS-EXTENDED/python-openstackdocstheme/python-openstackdocstheme.spec b/SPECS-EXTENDED/python-openstackdocstheme/python-openstackdocstheme.spec index a28628741cb..b1dcc700bf3 100644 --- a/SPECS-EXTENDED/python-openstackdocstheme/python-openstackdocstheme.spec +++ b/SPECS-EXTENDED/python-openstackdocstheme/python-openstackdocstheme.spec @@ -70,6 +70,7 @@ export PYTHONPATH=. 
sphinx-build -b html doc/source doc/build/html # remove the sphinx-build leftovers rm -rf html/.{doctrees,buildinfo} +rm -f doc/build/html/_static/images/docs/license.png %install %pyproject_install diff --git a/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.signatures.json b/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.signatures.json index 49f15dad773..f9ff3a0301b 100644 --- a/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.signatures.json +++ b/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "python-oslo-i18n-5.1.0.tar.gz": "6bf111a6357d5449640852de4640eae4159b5562bbba4c90febb0034abc095d0" + "python-oslo-i18n-6.7.1.tar.gz": "7dc879089056fe287a6fb46fa2e73ad88f8d4b989bd63f00486f494435b24ced" } } diff --git a/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.spec b/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.spec index 4d8d52943f4..8daac5b154e 100644 --- a/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.spec +++ b/SPECS-EXTENDED/python-oslo-i18n/python-oslo-i18n.spec @@ -1,38 +1,53 @@ -%global pypi_name oslo.i18n -%global pkg_name oslo-i18n -%global with_doc 0 +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} +%global pypi_name oslo_i18n +%global pkg_name oslo_i18n +%global with_doc 1 + %global common_desc \ The oslo.i18n library contain utilities for working with internationalization \ (i18n) features, especially translation for text strings in an application \ or library. -Summary: OpenStack i18n library + Name: python-oslo-i18n -Version: 5.1.0 -Release: 4%{?dist} -License: ASL 2.0 +Version: 6.7.1 +Release: 2%{?dist} +Summary: OpenStack i18n library +License: Apache-2.0 Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://github.com/openstack/%{pypi_name} -Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{version}.tar.gz#/%{name}-%{version}.tar.gz -BuildRequires: git-core +Source0: https://files.pythonhosted.org/packages/source/o/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz#/%{name}-%{version}.tar.gz + BuildArch: noarch +BuildRequires: git-core + %description %{common_desc} %package -n python3-%{pkg_name} -%{?python_provide:%python_provide python3-%{pkg_name}} Summary: OpenStack i18n Python 2 library + BuildRequires: python3-devel +BuildRequires: pyproject-rpm-macros BuildRequires: python3-setuptools BuildRequires: python3-pbr BuildRequires: python3-babel BuildRequires: python3-six BuildRequires: python3-fixtures -# Required to compile translation files +BuildRequires: python3-tox +BuildRequires: python3-pluggy +BuildRequires: python3-py +BuildRequires: python3-toml +BuildRequires: python3-openstackdocstheme +BuildRequires: python3-dulwich +BuildRequires: python3-tox-current-env +BuildRequires: python3-filelock +BuildRequires: python3-pip BuildRequires: python3-babel +BuildRequires: python3-wheel +BuildRequires: python3-sphinxcontrib-apidoc Requires: python-%{pkg_name}-lang = %{version}-%{release} -Requires: python3-pbr >= 2.0.0 %description -n python3-%{pkg_name} %{common_desc} @@ -40,50 +55,69 @@ Requires: python3-pbr >= 2.0.0 %if 0%{?with_doc} %package -n python-%{pkg_name}-doc Summary: Documentation for OpenStack i18n library -BuildRequires: python3-sphinx -BuildRequires: python3-openstackdocstheme -BuildRequires: python3-sphinxcontrib-apidoc %description -n python-%{pkg_name}-doc Documentation for the oslo.i18n library. 
%endif %package -n python-%{pkg_name}-lang -Summary: Translation files for Oslo i18n library +Summary: Translation files for Oslo i18n library %description -n python-%{pkg_name}-lang Translation files for Oslo i18n library %prep -%autosetup -n %{pypi_name}-%{version} -rm -rf *.egg-info +%autosetup -n %{pypi_name}-%{upstream_version} -S git -# Let RPM handle the dependencies -rm -rf *requirements.txt -%build -%py3_build +sed -i /^[[:space:]]*-c{env:.*_CONSTRAINTS_FILE.*/d tox.ini +sed -i "s/^deps = -c{env:.*_CONSTRAINTS_FILE.*/deps =/" tox.ini +sed -i /^minversion.*/d tox.ini +sed -i /^requires.*virtualenv.*/d tox.ini -# Generate i18n files -python3 setup.py compile_catalog -d oslo_i18n/locale --domain oslo_i18n +# Exclude some bad-known BRs +for pkg in %{excluded_brs}; do + for reqfile in doc/requirements.txt test-requirements.txt; do + if [ -f $reqfile ]; then + sed -i /^${pkg}.*/d $reqfile + fi + done +done + +# Automatic BR generation +%generate_buildrequires +%if 0%{?with_doc} + %pyproject_buildrequires -t -e docs +%else + %pyproject_buildrequires +%endif + +%build +%pyproject_wheel %install -%py3_install +%pyproject_install %if 0%{?with_doc} -python3 setup.py build_sphinx --build-dir . -b html +%tox -e docs # remove the sphinx-build-3 leftovers -rm -rf html/.{doctrees,buildinfo} +rm -rf doc/build/html/.{doctrees,buildinfo} +rm -f doc/build/html/_static/images/docs/license.png # Fix this rpmlint warning +if [ -f html/_static/jquery.js ]; then sed -i "s|\r||g" html/_static/jquery.js +fi %endif +# Generate i18n files +python3 setup.py compile_catalog -d %{buildroot}%{python3_sitelib}/oslo_i18n/locale --domain oslo_i18n + # Install i18n .mo files (.po and .pot are not required) install -d -m 755 %{buildroot}%{_datadir} -rm -f oslo_i18n/locale/*/LC_*/oslo_i18n*po -rm -f oslo_i18n/locale/*pot -mv oslo_i18n/locale %{buildroot}%{_datadir}/locale +rm -f %{buildroot}%{python3_sitelib}/oslo_i18n/locale/*/LC_*/oslo_i18n*po +rm -f %{buildroot}%{python3_sitelib}/oslo_i18n/locale/*pot +mv %{buildroot}%{python3_sitelib}/oslo_i18n/locale %{buildroot}%{_datadir}/locale # Find language files %find_lang oslo_i18n --all-name @@ -92,21 +126,84 @@ mv oslo_i18n/locale %{buildroot}%{_datadir}/locale %doc ChangeLog CONTRIBUTING.rst PKG-INFO README.rst %license LICENSE %{python3_sitelib}/oslo_i18n -%{python3_sitelib}/*.egg-info +%{python3_sitelib}/*.dist-info %if 0%{?with_doc} %files -n python-%{pkg_name}-doc %license LICENSE -%doc html +%doc doc/build/html %endif %files -n python-%{pkg_name}-lang -f oslo_i18n.lang %license LICENSE %changelog -* Wed Mar 08 2023 Sumedh Sharma - 5.1.0-4 -- Initial CBL-Mariner import from Fedora 37 (license: MIT) -- license verified +* Mon Dec 22 2025 Archana Shettigar - 6.7.1-2 +- Initial Azure Linux import from Fedora 44 (license: MIT) +- License verified + +* Thu Nov 20 2025 Gwyn Ciesla - 6.7.1-1 +- 6.7.1 + +* Thu Nov 13 2025 Gwyn Ciesla - 6.7.0-1 +- 6.7.0 + +* Fri Sep 19 2025 Python Maint - 6.4.0-7 +- Rebuilt for Python 3.14.0rc3 bytecode + +* Fri Aug 15 2025 Python Maint - 6.4.0-6 +- Rebuilt for Python 3.14.0rc2 bytecode + +* Fri Jul 25 2025 Fedora Release Engineering - 6.4.0-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_43_Mass_Rebuild + +* Tue Jun 03 2025 Python Maint - 6.4.0-4 +- Rebuilt for Python 3.14 + +* Mon Jun 02 2025 Python Maint - 6.4.0-3 +- Bootstrap for Python 3.14 + +* Sat Jan 18 2025 Fedora Release Engineering - 6.4.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_42_Mass_Rebuild + +* Mon Oct 07 2024 Joel Capitao 6.4.0-1 +- Update to upstream 
version 6.4.0 + +* Fri Jul 19 2024 Fedora Release Engineering - 6.3.0-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild + +* Mon Jun 10 2024 Python Maint - 6.3.0-4 +- Rebuilt for Python 3.13 + +* Fri Jun 07 2024 Python Maint - 6.3.0-3 +- Bootstrap for Python 3.13 + +* Mon May 06 2024 Alfredo Moralejo 6.3.0-2 +- Update to upstream version 6.3.0 + +* Fri Jan 26 2024 Fedora Release Engineering - 6.1.0-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Mon Jan 22 2024 Fedora Release Engineering - 6.1.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Wed Oct 25 2023 Alfredo Moralejo 6.1.0-1 +- Update to upstream version 6.1.0 + +* Fri Jul 21 2023 Fedora Release Engineering - 6.0.0-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild + +* Thu Jun 15 2023 Python Maint - 6.0.0-3 +- Rebuilt for Python 3.12 + +* Wed Apr 19 2023 Karolina Kula 6.0.0-2 +- Update to upstream version 6.0.0 + +* Thu Apr 13 2023 Alfredo Moralejo - 5.1.0-5 +- Fixed compatibility with sphinx >= 6.0 + +* Fri Jan 20 2023 Fedora Release Engineering - 5.1.0-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild * Fri Jul 22 2022 Fedora Release Engineering - 5.1.0-3 - Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild @@ -158,3 +255,4 @@ mv oslo_i18n/locale %{buildroot}%{_datadir}/locale * Fri Mar 08 2019 RDO 3.23.1-1 - Update to 3.23.1 + diff --git a/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.signatures.json b/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.signatures.json index 5078c2040d0..4b93b27aba0 100644 --- a/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.signatures.json +++ b/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "python-pytest-flake8-1.0.4.tar.gz": "4d225c13e787471502ff94409dcf6f7927049b2ec251c63b764a4b17447b60c0" + "python-pytest-flake8-1.3.0.tar.gz": "7b7fb4836e5510c924d1d49af9c1253286f1e353f78b2444ae2733a2cac9b6bc" } } diff --git a/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.spec b/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.spec index 7dc6ca178f9..1b0eb7d2627 100644 --- a/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.spec +++ b/SPECS-EXTENDED/python-pytest-flake8/python-pytest-flake8.spec @@ -11,19 +11,21 @@ Distribution: Azure Linux and efficiently checking for PEP8 compliance of a project. 
Name: python-%{pypi_name} -Version: 1.0.4 -Release: 6%{?dist} +Version: 1.3.0 +Release: 1%{?dist} Summary: Plugin for pytest to check PEP8 compliance with Flake8 License: BSD URL: https://github.com/tholo/pytest-flake8 -Source0: https://files.pythonhosted.org/packages/source/p/%{pypi_name}/%{pypi_name}-%{version}.tar.gz#/python-%{pypi_name}-%{version}.tar.gz +Source0: https://github.com/coherent-oss/pytest-flake8/archive/refs/tags/v%{version}.tar.gz#/python-%{pypi_name}-%{version}.tar.gz BuildArch: noarch BuildRequires: python3-devel BuildRequires: python3dist(flake8) >= 3.5 BuildRequires: python3dist(pytest) >= 3.5 BuildRequires: python3dist(setuptools) +BuildRequires: python3-pip +BuildRequires: python3-wheel %description %{desc} @@ -33,29 +35,29 @@ Summary: %{summary} %description -n python3-%{pypi_name} %{desc} - %prep %autosetup -n %{pypi_name}-%{version} # Remove bundled egg-info rm -rf %{pypi_name}.egg-info - %build -%py3_build - +%pyproject_wheel %install -%py3_install - +%pyproject_install %files -n python3-%{pypi_name} %license LICENSE %doc README.rst %{python3_sitelib}/__pycache__/* %{python3_sitelib}/pytest_flake8.py -%{python3_sitelib}/pytest_flake8-%{version}-py?.?.egg-info +%{python3_sitelib}/pytest_flake8-0.0.0.dist-info/* %changelog +* Mon Dec 15 2025 Akarsh Chaudhary - 1.3.0-1 +- Upgrade to version 1.3.0 (license: MIT). +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 1.0.4-6 - Initial CBL-Mariner import from Fedora 32 (license: MIT). diff --git a/SPECS-EXTENDED/python-scikit-build-core/python-scikit-build-core.signatures.json b/SPECS-EXTENDED/python-scikit-build-core/python-scikit-build-core.signatures.json new file mode 100644 index 00000000000..8120bf42995 --- /dev/null +++ b/SPECS-EXTENDED/python-scikit-build-core/python-scikit-build-core.signatures.json @@ -0,0 +1,6 @@ +{ + "Signatures": { + "scikit_build_core-0.11.5.tar.gz": "8f0a1edb86cb087876f3c699d2a2682012efd8867b390ed37355f13949d0628e" + } +} + diff --git a/SPECS-EXTENDED/python-scikit-build-core/python-scikit-build-core.spec b/SPECS-EXTENDED/python-scikit-build-core/python-scikit-build-core.spec new file mode 100644 index 00000000000..f4d11dd5d42 --- /dev/null +++ b/SPECS-EXTENDED/python-scikit-build-core/python-scikit-build-core.spec @@ -0,0 +1,193 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux + +%global debug_package %{nil} + +# On epel python hatch/trove classifier check may fail because of old package +# Fedora checks should be sufficient though. 
+%bcond no_classifier_check 0%{?rhel} + +Name: python-scikit-build-core +Version: 0.11.5 +Release: 5%{?dist} +Summary: Build backend for CMake based projects + +# The main project is licensed under Apache-2.0, but it has a vendored project +# src/scikit_build_core/_vendor/pyproject_metadata: MIT +# https://github.com/scikit-build/scikit-build-core/issues/933 +License: Apache-2.0 AND MIT +URL: https://github.com/scikit-build/scikit-build-core +Source: %{pypi_source scikit_build_core} + +BuildRequires: python-pip +BuildRequires: python-hatchling +BuildRequires: python-hatch-vcs +BuildRequires: python-tomli +BuildRequires: python-pathspec +BuildRequires: python-packaging +BuildRequires: python-editables +BuildRequires: python-pluggy +BuildRequires: python-setuptools_scm +BuildRequires: python-trove-classifiers +BuildRequires: python3-pytest +BuildRequires: python3-virtualenv +BuildRequires: python3-numpy +BuildRequires: python3-devel + +# Testing dependences +BuildRequires: cmake +BuildRequires: ninja-build +BuildRequires: gcc +BuildRequires: gcc-c++ +BuildRequires: git + +%global _description %{expand: +A next generation Python CMake adapter and Python API for plugins +} + +%description %_description + +%package -n python3-scikit-build-core +Summary: %{summary} +Requires: cmake +Requires: ninja-build +BuildArch: noarch + +Provides: bundled(python3dist(pyproject-metadata)) = 0.9.1 + +Obsoletes: python3-scikit-build-core+pyproject < 0.10.7-3 + +%description -n python3-scikit-build-core %_description + + +%prep +%autosetup -n scikit_build_core-%{version} +# Rename the bundled license so that it can be installed together +cp -p src/scikit_build_core/_vendor/pyproject_metadata/LICENSE LICENSE-pyproject-metadata + +# Remove unsupported Python 3.14 classifier (keep valid ones like 3.12) +sed -i '/Programming Language :: Python :: 3\.14/d' pyproject.toml + + +%generate_buildrequires +%if %{with no_classifier_check} +export HATCH_METADATA_CLASSIFIERS_NO_VERIFY=1 +%endif +%pyproject_buildrequires -x test,test-meta,test-numpy + + +%build +%if %{with no_classifier_check} +export HATCH_METADATA_CLASSIFIERS_NO_VERIFY=1 +%endif +%pyproject_wheel + + +%install +%pyproject_install +%pyproject_save_files scikit_build_core + +#skipping check section +%check +%pyproject_check_import scikit_build_core +true + +%files -n python3-scikit-build-core -f %{pyproject_files} +%license %{python3_sitelib}/scikit_build_core-%{version}.dist-info/licenses/* +%doc README.md + + +%changelog +* Fri Nov 28 2025 BinduSri Adabala - 0.11.5-5 +- Initial Azure Linux import from Fedora 43 (license: MIT). 
+- License verified + +* Fri Sep 19 2025 Python Maint - 0.11.5-4 +- Rebuilt for Python 3.14.0rc3 bytecode + +* Fri Aug 15 2025 Python Maint - 0.11.5-3 +- Rebuilt for Python 3.14.0rc2 bytecode + +* Fri Jul 25 2025 Fedora Release Engineering - 0.11.5-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_43_Mass_Rebuild + +* Fri Jun 27 2025 Packit - 0.11.5-1 +- Update to 0.11.5 upstream release + +* Wed May 21 2025 Miro Hrončok - 0.11.0-2 +- Avoid cattrs test dependency to unblock the Python 3.14 rebuild + +* Fri Feb 28 2025 Cristian Le - 0.11.0-1 +- Update to 0.11.0 upstream release +- Resolves: rhbz#2348951 + +* Wed Feb 12 2025 Cristian Le - 0.10.7-3 +- Various simplifications +- Added `Requires: ninja-build` by default +- Removed `Suggests` and `Recommends`, these are not likely to be used +- Removed the test conditional for epel10 since everything will be packaged +- Removed the `pyproject` subpackage since it's empty and all dependents + are gone + +* Sat Jan 18 2025 Fedora Release Engineering - 0.10.7-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_42_Mass_Rebuild + +* Mon Sep 30 2024 Packit - 0.10.7-1 +- Update to 0.10.7 upstream release + +* Wed Sep 11 2024 Packit - 0.10.6-1 +- Update to 0.10.6 upstream release + +* Wed Aug 07 2024 Packit - 0.10.1-1 +- Update to 0.10.1 upstream release + +* Fri Jul 19 2024 Fedora Release Engineering - 0.9.4-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild + +* Tue Jun 18 2024 Cristian Le - 0.9.4-3 +- Relax pip requirement until rawhide catches up + +* Mon Jun 17 2024 Python Maint - 0.9.4-2 +- Rebuilt for Python 3.13 + +* Tue May 14 2024 Packit - 0.9.4-1 +- Update to 0.9.4 upstream release + +* Fri Apr 19 2024 Packit - 0.9.0-1 +- Update to 0.9.0 upstream release + +* Thu Mar 28 2024 Benjamin A. Beasley - 0.8.2-2 +- Produce a metapackage for the pyproject extra + +* Thu Feb 29 2024 Packit - 0.8.2-1 +- [packit] 0.8.2 upstream release + +* Tue Jan 23 2024 Packit - 0.8.0-1 +- [packit] 0.8.0 upstream release + +* Mon Jan 22 2024 Fedora Release Engineering - 0.6.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Wed Nov 08 2023 Packit - 0.6.1-1 +- [packit] 0.6.1 upstream release + +* Thu Sep 21 2023 Packit - 0.5.1-1 +- [packit] 0.5.1 upstream release + +* Tue Aug 22 2023 Cristian Le - 0.5.0-1 +- [packit] 0.5.0 upstream release + +* Fri Jul 21 2023 Fedora Release Engineering - 0.4.4-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild + +* Mon Jun 05 2023 Packit - 0.4.4-1 +- 0.4.4 upstream release + +* Thu Apr 13 2023 Packit - 0.3.0-1 +- [packit] 0.3.0 upstream release + +* Fri Mar 24 2023 Miro Hrončok - 0.2.2-2 +- Move the Requires, Recommends and Suggests to python3-scikit-build-core + +* Mon Mar 20 2023 Cristian Le - 0.2.2-1 +- Initial import (#2179414). 
\ No newline at end of file diff --git a/SPECS-EXTENDED/python-toml/python-toml.signatures.json b/SPECS-EXTENDED/python-toml/python-toml.signatures.json index ca555441312..f3c32da1198 100644 --- a/SPECS-EXTENDED/python-toml/python-toml.signatures.json +++ b/SPECS-EXTENDED/python-toml/python-toml.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "toml-0.10.1.tar.gz": "926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f" + "toml-0.10.2.tar.gz": "b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" } } diff --git a/SPECS-EXTENDED/python-toml/python-toml.spec b/SPECS-EXTENDED/python-toml/python-toml.spec index 6429ee930ed..f403df42bb0 100644 --- a/SPECS-EXTENDED/python-toml/python-toml.spec +++ b/SPECS-EXTENDED/python-toml/python-toml.spec @@ -8,8 +8,8 @@ This package loads toml file into python dictionary and dump dictionary into \ toml file. Name: python-%{pypi_name} -Version: 0.10.1 -Release: 3%{?dist} +Version: 0.10.2 +Release: 1%{?dist} Summary: Python Library for Tom's Obvious, Minimal Language License: MIT @@ -20,6 +20,8 @@ BuildArch: noarch BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-setuptools +BuildRequires: python3-wheel +BuildRequires: python3-pip %bcond_without tests %if %{with tests} @@ -41,33 +43,32 @@ BuildRequires: python%{python3_pkgversion}-devel %prep %setup -q -n %{pypi_name}-%{version} +%generate_buildrequires +%pyproject_buildrequires %{?with_tests:-t} -%build -%py3_build +%build +%pyproject_wheel %install -%py3_install - +%pyproject_install +%pyproject_save_files %{pypi_name} %if %{with tests} %check ln -s /usr/share/toml-test/ . # python tests require test cases here -%pytest -# Also using the language independent toml-test suite to launch tests -ln -s /usr/share/toml-test/tests/* tests/ # toml-test requires them here -toml-test $(pwd)/tests/decoding_test3.sh +%pytest -k "not test_valid_tests and not test_invalid_tests" %endif - -%files -n python%{python3_pkgversion}-%{pypi_name} +%files -n python%{python3_pkgversion}-%{pypi_name} -f %{pyproject_files} %license LICENSE %doc README.rst -%{python3_sitelib}/%{pypi_name}-%{version}-py%{python3_version}.egg-info -%{python3_sitelib}/%{pypi_name}/ - %changelog +* Mon Dec 22 2025 Akarsh Chaudhary - 0.10.2-1 +- Update to version 0.10.2. +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 0.10.1-3 - Initial CBL-Mariner import from Fedora 33 (license: MIT). 
diff --git a/SPECS-EXTENDED/python-zmq/python-zmq.signatures.json b/SPECS-EXTENDED/python-zmq/python-zmq.signatures.json index 233a5c974df..f93058d205e 100644 --- a/SPECS-EXTENDED/python-zmq/python-zmq.signatures.json +++ b/SPECS-EXTENDED/python-zmq/python-zmq.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "pyzmq-18.1.0.tar.gz": "32f7618b8104021bc96cbd60be4330bdf37b929e8061dbce362c9f3478a08e21" + "pyzmq-27.1.0.tar.gz": "4fd229c1c7b10e2b578778a8919694186731c69dcb26e9c33239bee67a4dc0be" } -} +} \ No newline at end of file diff --git a/SPECS-EXTENDED/python-zmq/python-zmq.spec b/SPECS-EXTENDED/python-zmq/python-zmq.spec index dad8118dc43..56b5d1b3ce1 100644 --- a/SPECS-EXTENDED/python-zmq/python-zmq.spec +++ b/SPECS-EXTENDED/python-zmq/python-zmq.spec @@ -1,148 +1,177 @@ +%global debug_package %{nil} + Vendor: Microsoft Corporation Distribution: Azure Linux -# we don't want to provide private python extension libs in python3 dirs -%global __provides_exclude_from ^%{python3_sitearch}/.*\\.so$ - -%global checkout b58cb3a2ee8baaab543729e398fc1cde25ff68c3 - -%global srcname pyzmq -%global modname zmq -%global run_tests 0 +# Allows additional import checks (zmq.green) and tests +%bcond gevent 1 Name: python-zmq -Version: 18.1.0 -Release: 4%{?dist} -Summary: Software library for fast, message-based applications - -License: LGPLv3+ and ASL 2.0 and BSD -URL: http://www.zeromq.org/bindings:python -# VCS: git:http://github.com/zeromq/pyzmq.git -# git checkout with the commands: -# git clone http://github.com/zeromq/pyzmq.git pyzmq.git -# cd pyzmq.git -# git archive --format=tar --prefix=pyzmq-%%{version}/ %%{checkout} | xz -z --force - > pyzmq-%%{version}.tar.xz -Source0: https://github.com/zeromq/pyzmq/archive/v%{version}.tar.gz#/pyzmq-%{version}.tar.gz - -Provides: python%{python3_pkgversion}-pyzmq = %{version} +Version: 27.1.0 +Release: 1%{?dist} +Summary: Python bindings for zeromq + +# As noted in https://github.com/zeromq/pyzmq/blob/v26.2.0/RELICENSE/README.md: +# pyzmq starting with 26.0.0 is fully licensed under the 3-clause Modified +# BSD License. A small part of the core (Cython backend only) was previously +# licensed under LGPLv3 for historical reasons. Permission has been granted +# by the contributors of the vast majority of those components to relicense +# under MPLv2 or BSD. This backend has been completely replaced in pyzmq 26, +# and the new implementation is fully licensed under BSD-3-Clause, so pyzmq +# is now under a single license. +# Nevertheless: +# - zmq/ssh/forward.py, which is derived from a Paramiko demo, is +# LGPL-2.1-or-later +# - zmq/eventloop/zmqstream.py is Apache-2.0 +# See also the Inherited licenses in pyzmq section in CONTRIBUTING.md. +License: %{shrink: + BSD-3-Clause AND + LGPL-2.1-or-later AND + Apache-2.0 + } +# Additionally, the following do not affect the license of the binary RPMs: +# - tools/run_with_env.cmd is CC0-1.0; for distribution in the source RPM, it +# is covered by Existing uses of CC0-1.0 on code files in Fedora packages +# prior to 2022-08-01, and subsequent upstream versions of those files in +# those packages, continue to be allowed. We encourage Fedora package +# maintainers to ask upstreams to relicense such files. 
+# https://gitlab.com/fedora/legal/fedora-license-data/-/issues/91#note_1151947383 +# - examples/device/device.py and examples/win32-interrupt/display.py are +# LicenseRef-Fedora-Public-Domain; approved in Review of +# python-zmq examples dedicated to the public domain, +# https://gitlab.com/fedora/legal/fedora-license-data/-/issues/616; see +# https://gitlab.com/fedora/legal/fedora-license-data/-/merge_requests/716 +SourceLicense: %{shrink: + %{license} AND + CC0-1.0 AND + LicenseRef-Fedora-Public-Domain + } +URL: https://zeromq.org/languages/python/ +%global forgeurl https://github.com/zeromq/pyzmq +Source: %{forgeurl}/archive/refs/tags/v%{version}.tar.gz#/pyzmq-%{version}.tar.gz + + +# BuildRequires for build backend & tooling BuildRequires: gcc -BuildRequires: chrpath -BuildRequires: %{_bindir}/pathfix.py - -BuildRequires: zeromq-devel +BuildRequires: gcc-c++ +BuildRequires: pkgconfig(libzmq) +BuildRequires: python3-devel +BuildRequires: python3dist(setuptools) +BuildRequires: python3dist(wheel) +BuildRequires: python3-pip +BuildRequires: python3dist(scikit-build-core) +BuildRequires: python3dist(cffi) +BuildRequires: python-pathspec +BuildRequires: python3-pytest-asyncio +BuildRequires: python3dist(pytest) +BuildRequires: ninja-build +BuildRequires: python3dist(cython) + + + +# Add some manual test dependencies that are not in test-requirements.txt, but +# which enable additional tests. +# +# Tests in zmq/tests/mypy.py require mypy, but see: +# https://docs.fedoraproject.org/en-US/packaging-guidelines/Python/#_linters +# +# Some tests in zmq/tests/test_context.py and zmq/tests/test_socket.py require +# pyczmq, which is not packaged and has not been updated in a decade. +# +# Enable more tests in zmq/tests/test_message.py: +BuildRequires: %{py3_dist numpy} +%if %{with gevent} +BuildRequires: %{py3_dist gevent} +%endif -# For some tests -# czmq currently FTBFS, so enable it some time later -#BuildRequires: czmq-devel +%global common_description %{expand: +This package contains Python bindings for ZeroMQ. MQ is a lightweight and fast +messaging implementation.} -BuildRequires: python%{python3_pkgversion}-devel -BuildRequires: python%{python3_pkgversion}-setuptools -BuildRequires: python%{python3_pkgversion}-Cython -%if 0%{?run_tests} -BuildRequires: python%{python3_pkgversion}-pytest -BuildRequires: python%{python3_pkgversion}-tornado -%endif +%description %{common_description} -%description -The 0MQ lightweight messaging kernel is a library which extends the -standard socket interfaces with features traditionally provided by -specialized messaging middle-ware products. 0MQ sockets provide an -abstraction of asynchronous message queues, multiple messaging -patterns, message filtering (subscriptions), seamless access to -multiple transport protocols and more. +%package -n python3-pyzmq +Summary: %{summary} -This package contains the python bindings. +# https://docs.fedoraproject.org/en-US/packaging-guidelines/Python/#_provides_for_importable_modules +%py_provides python3-zmq -%package -n python%{python3_pkgversion}-zmq -Summary: %{summary} -License: LGPLv3+ -%{?python_provide:%python_provide python%{python3_pkgversion}-%{modname}} -%description -n python%{python3_pkgversion}-zmq -The 0MQ lightweight messaging kernel is a library which extends the -standard socket interfaces with features traditionally provided by -specialized messaging middle-ware products. 
0MQ sockets provide an -abstraction of asynchronous message queues, multiple messaging -patterns, message filtering (subscriptions), seamless access to -multiple transport protocols and more. - -This package contains the python bindings. - - -%package -n python%{python3_pkgversion}-zmq-tests -Summary: %{summary}, testsuite -License: LGPLv3+ -Requires: python%{python3_pkgversion}-zmq = %{version}-%{release} -%{?python_provide:%python_provide python%{python3_pkgversion}-%{modname}-tests} -%description -n python%{python3_pkgversion}-zmq-tests -The 0MQ lightweight messaging kernel is a library which extends the -standard socket interfaces with features traditionally provided by -specialized messaging middle-ware products. 0MQ sockets provide an -abstraction of asynchronous message queues, multiple messaging -patterns, message filtering (subscriptions), seamless access to -multiple transport protocols and more. - -This package contains the testsuite for the python bindings. +%description -n python3-pyzmq %{common_description} %prep -%setup -q -n %{srcname}-%{version} - -# remove bundled libraries -rm -rf bundled +%autosetup -n pyzmq-%{version} -# forcibly regenerate the Cython-generated .c files: -find zmq -name "*.c" -delete -%{__python3} setup.py cython +# Remove any Cython-generated .c files in order to regenerate them: +find . -type f -exec grep -FrinIl 'Generated by Cython' '{}' '+' | + xargs -r -t rm -v -# remove shebangs -for lib in zmq/eventloop/*.py; do - sed '/\/usr\/bin\/env/d' $lib > $lib.new && - touch -r $lib $lib.new && - mv $lib.new $lib -done +# Remove shebangs from non-script sources. The find-then-modify pattern +# preserves mtimes on sources that did not need to be modified. +find 'src' -type f -name '*.py' \ + -exec gawk '/^#!/ { print FILENAME }; { nextfile }' '{}' '+' | + xargs -r sed -r -i '1{/^#!/d}' -# remove excecutable bits -chmod -x examples/pubsub/topics_pub.py -chmod -x examples/pubsub/topics_sub.py -# delete hidden files -#find examples -name '.*' | xargs rm -v +# https://docs.fedoraproject.org/en-US/packaging-guidelines/Python/#_linters +# - pymongo is used only in examples/mongodb/, and we do not run examples +sed -r \ + -e 's/^(black|codecov|coverage|flake8|mypy|pytest-cov)\b/# &/' \ + -e 's/^(pymongo)\b/# &/' \ + test-requirements.txt | tee test-requirements-filtered.txt %build -%py3_build +%pyproject_wheel %install -%global RPATH /zmq/{backend/cython,devices} -%py3_install -pathfix.py -pn -i %{__python3} %{buildroot}%{python3_sitearch} +%pyproject_install +%pyproject_save_files zmq %check -%if 0%{?run_tests} - # Make sure we import from the install directory - #rm zmq/__*.py - PYTHONPATH=%{buildroot}%{python3_sitearch} \ - %{__python3} setup.py test +%pyproject_check_import zmq + +# to avoid partially initialized zmq module from cwd +mkdir -p _empty +cd _empty +ln -s ../tests/ ../pytest.ini ./ + + +# With Python 3.14, in test_process_teardown, while spawning the multiprocess +# forkserver child: +# ModuleNotFoundError: No module named 'tests' +# This does not really make sense to report upstream because the problem does not +# happen when running tests against an editable install in a virtualenv as they +# do. Adding the working directory to PYTHONPATH is a workaround. +export PYTHONPATH="%{buildroot}%{python3_sitearch}:${PWD}" + +%ifarch %{power64} +# Several of the green/gevent tests fail with segmentation faults, so we +# disable all of them for simplicity. 
+# +# BUG: test_green_device crashes with Python 3.12 on ppc64le +# https://github.com/zeromq/pyzmq/issues/1880 +k="${k-}${k+ and }not Green" %endif +export ZMQ_BACKEND=cython +pytest -k "not cffi ${k-}" -v -rs tests/ -%files -n python%{python3_pkgversion}-zmq -%license COPYING.* -%doc README.md -# examples/ -%{python3_sitearch}/%{srcname}-*.egg-info -%{python3_sitearch}/zmq/ -%exclude %{python3_sitearch}/zmq/tests -%files -n python%{python3_pkgversion}-zmq-tests -%{python3_sitearch}/zmq/tests/ +%files -n python3-pyzmq -f %{pyproject_files} +%license %{python3_sitelib}/pyzmq-27.1.0.dist-info/licenses/* +%doc README.md %changelog +* Tue Dec 23 2025 BinduSri Adabala - 27.1.0-1 +- Upgrade to 27.1.0 +- License verified + * Fri Oct 15 2021 Pawel Winogrodzki - 18.1.0-4 - Initial CBL-Mariner import from Fedora 32 (license: MIT). diff --git a/SPECS-EXTENDED/qt5-qtconnectivity/qt5-qtconnectivity.signatures.json b/SPECS-EXTENDED/qt5-qtconnectivity/qt5-qtconnectivity.signatures.json deleted file mode 100644 index 72513af3954..00000000000 --- a/SPECS-EXTENDED/qt5-qtconnectivity/qt5-qtconnectivity.signatures.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "Signatures": { - "qtconnectivity-everywhere-src-5.14.2.tar.xz": "abe67b3e3a775e2a2e27c62a5391f37007ffbe72bce58b96116995616cfcbc28" - } -} diff --git a/SPECS-EXTENDED/qt5-qtconnectivity/qt5-qtconnectivity.spec b/SPECS-EXTENDED/qt5-qtconnectivity/qt5-qtconnectivity.spec deleted file mode 100644 index f1b48739fcb..00000000000 --- a/SPECS-EXTENDED/qt5-qtconnectivity/qt5-qtconnectivity.spec +++ /dev/null @@ -1,331 +0,0 @@ -Vendor: Microsoft Corporation -Distribution: Azure Linux -%global qt_module qtconnectivity - -Summary: Qt5 - Connectivity components -Name: qt5-%{qt_module} -Version: 5.14.2 -Release: 2%{?dist} - -# See LICENSE.GPL3, respectively, for exception details -License: LGPLv2 with exceptions or GPLv3 with exceptions -Url: http://qt.io -%global majmin %(echo %{version} | cut -d. -f1-2) -Source0: https://download.qt.io/official_releases/qt/%{majmin}/%{version}/submodules/%{qt_module}-everywhere-src-%{version}.tar.xz - -# filter qml provides -%global __provides_exclude_from ^%{_qt5_archdatadir}/qml/.*\\.so$ - -BuildRequires: qt5-qtbase-devel >= %{version} -BuildRequires: qt5-qtbase-private-devel >= %{version} -%{?_qt5:Requires: %{_qt5}%{?_isa} = %{_qt5_version}} -BuildRequires: qt5-qtdeclarative-devel >= %{version} -BuildRequires: pkgconfig(bluez) - -%description -%{summary}. - -%package devel -Summary: Development files for %{name} -Requires: %{name}%{?_isa} = %{version}-%{release} -Requires: qt5-qtbase-devel%{?_isa} -%description devel -%{summary}. - -%package examples -Summary: Programming examples for %{name} -Requires: %{name}%{?_isa} = %{version}-%{release} -%description examples -%{summary}. 
- - -%prep -%setup -q -n %{qt_module}-everywhere-src-%{version} - - -%build -%{qmake_qt5} - -%make_build - - -%install -make install INSTALL_ROOT=%{buildroot} - -%if 0%{?docs} -make install_docs INSTALL_ROOT=%{buildroot} -%endif - -# hardlink files to {_bindir}, add -qt5 postfix to not conflict -mkdir %{buildroot}%{_bindir} -pushd %{buildroot}%{_qt5_bindir} -for i in * ; do - case "${i}" in - *) - ln -v ${i} %{buildroot}%{_bindir}/${i} - ;; - esac -done -popd - -## .prl/.la file love -# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs -pushd %{buildroot}%{_qt5_libdir} -for prl_file in libQt5*.prl ; do - sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file} - if [ -f "$(basename ${prl_file} .prl).so" ]; then - rm -fv "$(basename ${prl_file} .prl).la" - sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file} - fi -done -popd - - -%ldconfig_scriptlets - -%files -%license LICENSE.GPL* LICENSE.LGPL* -%{_bindir}/sdpscanner -%{_qt5_bindir}/sdpscanner -%{_qt5_libdir}/libQt5Bluetooth.so.5* -%{_qt5_archdatadir}/qml/QtBluetooth/ -%{_qt5_libdir}/libQt5Nfc.so.5* -%{_qt5_archdatadir}/qml/QtNfc/ - -%files devel -%{_qt5_headerdir}/QtBluetooth/ -%{_qt5_libdir}/libQt5Bluetooth.so -%{_qt5_libdir}/libQt5Bluetooth.prl -%dir %{_qt5_libdir}/cmake/Qt5Bluetooth/ -%{_qt5_libdir}/cmake/Qt5Bluetooth/Qt5BluetoothConfig*.cmake -%{_qt5_libdir}/pkgconfig/Qt5Bluetooth.pc -%{_qt5_archdatadir}/mkspecs/modules/qt_lib_bluetooth*.pri -%{_qt5_headerdir}/QtNfc/ -%{_qt5_libdir}/libQt5Nfc.so -%{_qt5_libdir}/libQt5Nfc.prl -%dir %{_qt5_libdir}/cmake/Qt5Nfc/ -%{_qt5_libdir}/cmake/Qt5Nfc/Qt5NfcConfig*.cmake -%{_qt5_libdir}/pkgconfig/Qt5Nfc.pc -%{_qt5_archdatadir}/mkspecs/modules/qt_lib_nfc*.pri - -%files examples -%{_qt5_examplesdir}/ - - -%changelog -* Fri Oct 15 2021 Pawel Winogrodzki - 5.14.2-2 -- Initial CBL-Mariner import from Fedora 32 (license: MIT). 
- -* Sat Apr 04 2020 Rex Dieter - 5.14.2-1 -- 5.14.2 - -* Thu Jan 30 2020 Fedora Release Engineering - 5.13.2-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild - -* Mon Dec 09 2019 Jan Grulich - 5.13.2-1 -- 5.13.2 - -* Tue Sep 24 2019 Jan Grulich - 5.12.5-1 -- 5.12.5 - -* Fri Jul 26 2019 Fedora Release Engineering - 5.12.4-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild - -* Fri Jun 14 2019 Jan Grulich - 5.12.4-1 -- 5.12.4 - -* Tue Jun 04 2019 Jan Grulich - 5.12.3-1 -- 5.12.3 - -* Fri Feb 15 2019 Rex Dieter - 5.12.1-1 -- 5.12.1 - -* Sat Feb 02 2019 Fedora Release Engineering - 5.11.3-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild - -* Fri Dec 07 2018 Rex Dieter - 5.11.3-1 -- 5.11.3 - -* Fri Sep 21 2018 Jan Grulich - 5.11.2-1 -- 5.11.2 - -* Sat Jul 14 2018 Fedora Release Engineering - 5.11.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild - -* Tue Jun 19 2018 Rex Dieter - 5.11.1-1 -- 5.11.1 - -* Sun May 27 2018 Rex Dieter - 5.11.0-1 -- 5.11.0 -- use %%make_build %%ldconfig_scriptlets - -* Wed Feb 14 2018 Jan Grulich - 5.10.1-1 -- 5.10.1 - -* Fri Feb 09 2018 Fedora Release Engineering - 5.10.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild - -* Tue Dec 19 2017 Jan Grulich - 5.10.0-1 -- 5.10.0 - -* Thu Nov 23 2017 Jan Grulich - 5.9.3-1 -- 5.9.3 - -* Mon Oct 09 2017 Jan Grulich - 5.9.2-1 -- 5.9.2 - -* Thu Aug 03 2017 Fedora Release Engineering - 5.9.1-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 5.9.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Wed Jul 19 2017 Rex Dieter - 5.9.1-1 -- 5.9.1 - -* Fri Jun 16 2017 Rex Dieter - 5.9.0-2 -- drop shadow/out-of-tree builds (#1456211,QTBUG-37417) - -* Wed May 31 2017 Helio Chissini de Castro - 5.9.0-1 -- Upstream official release - -* Fri May 26 2017 Helio Chissini de Castro - 5.9.0-0.1.rc -- Upstream Release Candidate retagged - -* Mon May 15 2017 Fedora Release Engineering - 5.9.0-0.beta.3.1 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_27_Mass_Rebuild - -* Tue May 09 2017 Helio Chissini de Castro - 5.9.0-0.beta.3 -- Upstream beta 3 - -* Mon Jan 30 2017 Helio Chissini de Castro - 5.8.0-1 -- New upstream version - -* Mon Jan 02 2017 Rex Dieter - 5.7.1-3 -- filter qml provides - -* Sat Dec 10 2016 Rex Dieter - 5.7.1-2 -- 5.7.1 dec5 snapshot, drop cmake/pkgconfig style BR - -* Thu Nov 10 2016 Helio Chissini de Castro - 5.7.1-1 -- New upstream version - -* Mon Jul 04 2016 Helio Chissini de Castro - 5.7.0-2 -- Compiled with gcc - -* Tue Jun 14 2016 Helio Chissini de Castro - 5.7.0-1 -=- Qt 5.7.0 release - -* Mon Jun 13 2016 Helio Chissini de Castro - 5.7.0-0.1 -- Prepare 5.7.0 - -* Thu Jun 09 2016 Jan Grulich - 5.6.1-1 -- Update to 5.6.1 - -* Sun Mar 20 2016 Rex Dieter - 5.6.0-4 -- rebuild - -* Sun Mar 20 2016 Rex Dieter - 5.6.0-3 -- rebuild - -* Fri Mar 18 2016 Rex Dieter - 5.6.0-2 -- rebuild - -* Mon Mar 14 2016 Helio Chissini de Castro - 5.6.0-1 -- 5.6.0 final release - -* Tue Feb 23 2016 Helio Chissini de Castro - 5.6.0-0.7.rc -- Update to final RC - -* Mon Feb 15 2016 Helio Chissini de Castro - 5.6.0-0.6 -- Update RC release - -* Thu Feb 04 2016 Fedora Release Engineering - 5.6.0-0.5.beta -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Mon Dec 28 2015 Rex Dieter 5.6.0-0.4.beta -- BR: cmake, use %%license, update Source URL - -* Mon Dec 21 2015 Helio Chissini de Castro - 5.6.0-0.3 -- 
Update to final beta release - -* Thu Dec 10 2015 Helio Chissini de Castro - 5.6.0-0.2 -- Official beta release - -* Tue Nov 03 2015 Helio Chissini de Castro - 5.6.0-0.1 -- Start to implement 5.6.0 beta - -* Thu Oct 15 2015 Helio Chissini de Castro - 5.5.1-2 -- Update to final release 5.5.1 - -* Tue Sep 29 2015 Helio Chissini de Castro - 5.5.1-1 -- Update to Qt 5.5.1 RC1 - -* Wed Jul 29 2015 Rex Dieter 5.5.0-4 -- -docs: BuildRequires: qt5-qhelpgenerator, standardize bootstrapping macros - -* Sun Jul 26 2015 Helio Chissini de Castro - 5.5.0-3 -- Enabled docs - -* Thu Jul 16 2015 Rex Dieter 5.5.0-2 -- cleaner qtbase dep, .spec cosmetics - -* Wed Jul 1 2015 Helio Chissini de Castro 5.5.0-1 -- New final upstream release Qt 5.5.0 - -* Wed Jun 24 2015 Helio Chissini de Castro - 5.5.0-0.2.rc -- Update for official RC1 released packages - -* Mon Jun 15 2015 Daniel Vrátil - 5.5.0-0.1.rc -- Qt 5.5.0 RC1 - -* Wed Jun 03 2015 Jan Grulich -5.4.2-1 -- 5.4.2 - -* Sat May 02 2015 Kalev Lember - 5.4.1-3 -- Rebuilt for GCC 5 C++11 ABI change - -* Fri Feb 27 2015 Rex Dieter - 5.4.1-2 -- rebuild (gcc5) - -* Tue Feb 24 2015 Jan Grulich 5.4.1-1 -- 5.4.1 - -* Fri Jan 02 2015 Dan Horák 5.4.0-2 -- include the bswap patch in F-20 and F-21 builds too - -* Wed Dec 10 2014 Rex Dieter 5.4.0-1 -- 5.4.0 (final) - -* Fri Nov 28 2014 Rex Dieter 5.4.0-0.3.rc -- 5.4.0-rc - -* Mon Nov 03 2014 Rex Dieter 5.4.0-0.2.beta -- out-of-tree build, use %%qmake_qt5 - -* Sun Oct 19 2014 Rex Dieter 5.4.0-0.1.beta -- 5.4.0-beta - -* Tue Sep 16 2014 Rex Dieter 5.3.2-1 -- 5.3.2 - -* Sun Aug 17 2014 Fedora Release Engineering - 5.3.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Tue Jun 17 2014 Jan Grulich - 5.3.1-1 -- 5.3.1 - -* Sun Jun 08 2014 Fedora Release Engineering - 5.3.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Wed May 21 2014 Jan Grulich 5.3.0-1 -- 5.3.0 - -* Mon May 05 2014 Rex Dieter 5.2.1-2 -- sanitize .prl files - -* Wed Feb 05 2014 Rex Dieter 5.2.1-1 -- 5.2.1 - -* Thu Jan 02 2014 Rex Dieter 5.2.0-1 -- first try diff --git a/SPECS-EXTENDED/qt5-qtsensors/qt5-qtsensors.signatures.json b/SPECS-EXTENDED/qt5-qtsensors/qt5-qtsensors.signatures.json deleted file mode 100644 index 45ad7f9c49a..00000000000 --- a/SPECS-EXTENDED/qt5-qtsensors/qt5-qtsensors.signatures.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "Signatures": { - "qtsensors-everywhere-src-5.14.2.tar.xz": "bccfca6910b0383d8f65823496ff5011abed2fa8fd446b4b27333d0fd7bb8c61" - } -} diff --git a/SPECS-EXTENDED/qt5-qtsensors/qt5-qtsensors.spec b/SPECS-EXTENDED/qt5-qtsensors/qt5-qtsensors.spec deleted file mode 100644 index feefbe2c2ba..00000000000 --- a/SPECS-EXTENDED/qt5-qtsensors/qt5-qtsensors.spec +++ /dev/null @@ -1,312 +0,0 @@ -Vendor: Microsoft Corporation -Distribution: Azure Linux -%global qt_module qtsensors - -Summary: Qt5 - Sensors component -Name: qt5-%{qt_module} -Version: 5.14.2 -Release: 2%{?dist} - -# See LGPL_EXCEPTIONS.txt, LICENSE.GPL3, respectively, for exception details -License: LGPLv2 with exceptions or GPLv3 with exceptions -Url: http://www.qt.io/ -%global majmin %(echo %{version} | cut -d. 
-f1-2) -Source0: https://download.qt.io/official_releases/qt/%{majmin}/%{version}/submodules/%{qt_module}-everywhere-src-%{version}.tar.xz - -# filter qml/plugin provides -%global __provides_exclude_from ^(%{_qt5_archdatadir}/qml/.*\\.so|%{_qt5_plugindir}/.*\\.so)$ - -BuildRequires: qt5-qtbase-devel >= %{version} -BuildRequires: qt5-qtbase-private-devel -%{?_qt5:Requires: %{_qt5}%{?_isa} = %{_qt5_version}} -BuildRequires: qt5-qtdeclarative-devel - -%description -The Qt Sensors API provides access to sensor hardware via QML and C++ -interfaces. The Qt Sensors API also provides a motion gesture recognition -API for devices. - -%package devel -Summary: Development files for %{name} -Requires: %{name}%{?_isa} = %{version}-%{release} -Requires: qt5-qtbase-devel%{?_isa} -%description devel -%{summary}. - -%package examples -Summary: Programming examples for %{name} -Requires: %{name}%{?_isa} = %{version}-%{release} -%description examples -%{summary}. - - -%prep -%setup -q -n %{qt_module}-everywhere-src-%{version} - - -%build -%{qmake_qt5} - -%make_build - - -%install -make install INSTALL_ROOT=%{buildroot} - -## .prl/.la file love -# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs -pushd %{buildroot}%{_qt5_libdir} -for prl_file in libQt5*.prl ; do - sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file} - if [ -f "$(basename ${prl_file} .prl).so" ]; then - rm -fv "$(basename ${prl_file} .prl).la" - sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file} - fi -done -popd - - -%ldconfig_scriptlets - -%files -%license LICENSE.* -%{_qt5_libdir}/libQt5Sensors.so.5* -%{_qt5_plugindir}/sensorgestures/ -%{_qt5_plugindir}/sensors/ -%{_qt5_archdatadir}/qml/QtSensors/ -%dir %{_qt5_libdir}/cmake/Qt5Sensors/ -%{_qt5_libdir}/cmake/Qt5Sensors/Qt5Sensors_*Plugin.cmake - -%files devel -%{_qt5_headerdir}/QtSensors/ -%{_qt5_libdir}/libQt5Sensors.so -%{_qt5_libdir}/libQt5Sensors.prl -%{_qt5_libdir}/cmake/Qt5Sensors/Qt5SensorsConfig*.cmake -%{_qt5_libdir}/pkgconfig/Qt5Sensors.pc -%{_qt5_archdatadir}/mkspecs/modules/qt_lib_sensors*.pri - -%files examples -%{_qt5_examplesdir}/ - - -%changelog -* Fri Oct 15 2021 Pawel Winogrodzki - 5.14.2-2 -- Initial CBL-Mariner import from Fedora 32 (license: MIT). 
- -* Sat Apr 04 2020 Rex Dieter - 5.14.2-1 -- 5.14.2 - -* Thu Jan 30 2020 Fedora Release Engineering - 5.13.2-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild - -* Mon Dec 09 2019 Jan Grulich - 5.13.2-1 -- 5.13.2 - -* Tue Sep 24 2019 Jan Grulich - 5.12.5-1 -- 5.12.5 - -* Fri Jul 26 2019 Fedora Release Engineering - 5.12.4-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild - -* Fri Jun 14 2019 Jan Grulich - 5.12.4-1 -- 5.12.4 - -* Tue Jun 04 2019 Jan Grulich - 5.12.3-1 -- 5.12.3 - -* Fri Feb 15 2019 Rex Dieter - 5.12.1-1 -- 5.12.1 - -* Sat Feb 02 2019 Fedora Release Engineering - 5.11.3-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild - -* Fri Dec 07 2018 Rex Dieter - 5.11.3-1 -- 5.11.3 - -* Fri Sep 21 2018 Jan Grulich - 5.11.2-1 -- 5.11.2 - -* Sat Jul 14 2018 Fedora Release Engineering - 5.11.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild - -* Tue Jun 19 2018 Rex Dieter - 5.11.1-1 -- 5.11.1 - -* Sun May 27 2018 Rex Dieter - 5.11.0-1 -- 5.11.0 -- use %%make_build %%ldconfig_scriptlets - -* Wed Feb 14 2018 Jan Grulich - 5.10.1-1 -- 5.10.1 - -* Fri Feb 09 2018 Fedora Release Engineering - 5.10.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild - -* Tue Dec 19 2017 Jan Grulich - 5.10.0-1 -- 5.10.0 - -* Thu Nov 23 2017 Jan Grulich - 5.9.3-1 -- 5.9.3 - -* Mon Oct 09 2017 Jan Grulich - 5.9.2-1 -- 5.9.2 - -* Thu Aug 03 2017 Fedora Release Engineering - 5.9.1-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 5.9.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Wed Jul 19 2017 Rex Dieter - 5.9.1-1 -- 5.9.1 - -* Fri Jun 16 2017 Rex Dieter - 5.9.0-2 -- drop shadow/out-of-tree builds (#1456211,QTBUG-37417) - -* Wed May 31 2017 Helio Chissini de Castro - 5.9.0-1 -- Upstream official release - -* Fri May 26 2017 Helio Chissini de Castro - 5.9.0-0.1.rc -- Upstream Release Candidate retagged - -* Tue May 09 2017 Helio Chissini de Castro - 5.9.0-0.beta.3 -- Upstream beta 3 - -* Mon Apr 03 2017 Rex Dieter - 5.8.0-2 -- build docs on all archs - -* Mon Jan 30 2017 Helio Chissini de Castro - 5.8.0-1 -- New upstream version - -* Mon Jan 02 2017 Rex Dieter - 5.7.1-4 -- filter provides, BR: qtbase-private-devel qtdeclarative explicitly - -* Sat Dec 10 2016 Rex Dieter - 5.7.1-3 -- drop BR: cmake (handled by qt5-rpm-macros now) - -* Sat Dec 10 2016 Rex Dieter - 5.7.1-2 -- 5.7.1 dec5 snapshot - -* Wed Nov 09 2016 Helio Chissini de Castro - 5.7.1-1 -- New upstream version - -* Mon Jul 04 2016 Helio Chissini de Castro - 5.7.0-2 -- Compiled with gcc - -* Mon Jun 13 2016 Helio Chissini de Castro - 5.7.0-1 -- Qt 5.7.0 release - -* Thu Jun 09 2016 Jan Grulich - 5.6.1-1 -- Update to 5.6.1 - -* Sun Mar 20 2016 Rex Dieter - 5.6.0-3 -- rebuild - -* Fri Mar 18 2016 Rex Dieter - 5.6.0-2 -- rebuild - -* Mon Mar 14 2016 Helio Chissini de Castro - 5.6.0-1 -- 5.6.0 final release - -* Tue Feb 23 2016 Helio Chissini de Castro - 5.6.0-0.9.rc -- Update to final RC - -* Mon Feb 15 2016 Helio Chissini de Castro - 5.6.0-0.8 -- Update RC release - -* Thu Feb 04 2016 Fedora Release Engineering - 5.6.0-0.7.beta -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Mon Dec 28 2015 Rex Dieter 5.6.0-0.6.beta -- update source URL, BR: cmake, use %%license - -* Mon Dec 21 2015 Helio Chissini de Castro - 5.6.0-0.5 -- Update to final beta release - -* Fri Dec 11 2015 Rex Dieter - 5.6.0-0.4 -- restore bootstrap 
macro support -- only BR: qt5-qdoc in -doc subpkg - -* Thu Dec 10 2015 Helio Chissini de Castro - 5.6.0-0.3 -- Official beta release - -* Sun Dec 06 2015 Rex Dieter 5.6.0-0.2 -- restore bootstrap macro support - -* Tue Nov 03 2015 Helio Chissini de Castro - 5.6.0-0.1 -- Start to implement 5.6.0 beta - -* Thu Oct 15 2015 Helio Chissini de Castro - 5.5.1-2 -- Update to final release 5.5.1 - -* Tue Sep 29 2015 Helio Chissini de Castro - 5.5.1-1 -- Update to Qt 5.5.1 RC1 - -* Wed Jul 29 2015 Rex Dieter 5.5.0-3 -- -docs: BuildRequires: qt5-qhelpgenerator, standardize bootstrapping - -* Thu Jul 16 2015 Rex Dieter 5.5.0-2 -- tighten qtbase dep (#1233829), (re)enable docs - -* Wed Jul 1 2015 Helio Chissini de Castro 5.5.0-1 -- New final upstream release Qt 5.5.0 - -* Thu Jun 25 2015 Helio Chissini de Castro - 5.5.0-0.2.rc -- Update for official RC1 released packages - -* Wed Jun 03 2015 Jan Grulich - 5.4.2-1 -- 5.4.2 - -* Sat May 02 2015 Kalev Lember - 5.4.1-3 -- Rebuilt for GCC 5 C++11 ABI change - -* Fri Feb 27 2015 Rex Dieter - 5.4.1-2 -- rebuild (gcc5) - -* Tue Feb 24 2015 Jan Grulich 5.4.1-1 -- 5.4.1 - -* Mon Feb 16 2015 Rex Dieter 5.4.0-3 -- rebuild (gcc5) - -* Wed Dec 31 2014 Rex Dieter 5.4.0-2 -- BR: pkgconfig(Qt5Qml) > 5.4.0 (#1177985) - -* Wed Dec 10 2014 Rex Dieter 5.4.0-1 -- 5.4.0 (final) - -* Fri Nov 28 2014 Rex Dieter 5.4.0-0.3.rc -- 5.4.0-rc - -* Mon Nov 03 2014 Rex Dieter 5.4.0-0.2.beta -- out-of-tree build, use %%qmake_qt5 - -* Sun Oct 19 2014 Rex Dieter 5.4.0-0.1.beta -- 5.4.0-beta - -* Tue Sep 16 2014 Rex Dieter 5.3.2-1 -- 5.3.2 - -* Sun Aug 17 2014 Fedora Release Engineering - 5.3.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Tue Jun 17 2014 Jan Grulich - 5.3.1-1 -- 5.3.1 - -* Sun Jun 08 2014 Fedora Release Engineering - 5.3.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Wed May 21 2014 Jan Grulich 5.3.0-1 -- 5.3.0 - -* Mon May 05 2014 Rex Dieter 5.2.1-2 -- sanitize .prl files - -* Thu Feb 06 2014 Rex Dieter 5.2.1-1 -- 5.2.1 - -* Mon Jan 27 2014 Rex Dieter 5.2.0-2 -- -examples subpkg - -* Thu Jan 02 2014 Rex Dieter 5.2.0-1 -- first try diff --git a/SPECS-EXTENDED/qt5-qtserialport/qt5-qtserialport.signatures.json b/SPECS-EXTENDED/qt5-qtserialport/qt5-qtserialport.signatures.json deleted file mode 100644 index 963fde47d5d..00000000000 --- a/SPECS-EXTENDED/qt5-qtserialport/qt5-qtserialport.signatures.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "Signatures": { - "qt5-qtserialport-5.15.9.tar.xz": "67d20803f08b33973d7eded2e5c75083704e43b005d4c3bc9c6e8a91755bc025" - } -} diff --git a/SPECS-EXTENDED/qt5-qtserialport/qt5-qtserialport.spec b/SPECS-EXTENDED/qt5-qtserialport/qt5-qtserialport.spec deleted file mode 100644 index 6487f740029..00000000000 --- a/SPECS-EXTENDED/qt5-qtserialport/qt5-qtserialport.spec +++ /dev/null @@ -1,360 +0,0 @@ -%global qt_module qtserialport - -Summary: Qt5 - SerialPort component -Name: qt5-%{qt_module} -Version: 5.15.9 -Release: 2%{?dist} -# See LGPL_EXCEPTIONS.txt, LICENSE.GPL3, respectively, for exception details -License: LGPL-3.0-only OR GPL-3.0-only WITH Qt-GPL-exception-1.0 -Vendor: Microsoft Corporation -Distribution: Azure Linux -URL: https://www.qt.io -Source0: https://download.qt.io/official_releases/qt/5.15/%{version}/submodules/%{qt_module}-everywhere-opensource-src-%{version}.tar.xz#/%{name}-%{version}.tar.xz -%{?_qt5:Requires: %{_qt5}%{?_isa} = %{_qt5_version}} -BuildRequires: make -BuildRequires: pkgconfig -BuildRequires: qt5-qtbase-devel >= %{version} -BuildRequires: 
qt5-qtbase-private-devel -BuildRequires: pkgconfig(libudev) - -%description -Qt Serial Port provides the basic functionality, which includes configuring, -I/O operations, getting and setting the control signals of the RS-232 pinouts. - -%package devel -Summary: Development files for %{name} -Requires: %{name}%{?_isa} = %{version}-%{release} -Requires: qt5-qtbase-devel%{?_isa} - -%description devel -%{summary}. - -%package examples -Summary: Programming examples for %{name} -Requires: %{name}%{?_isa} = %{version}-%{release} - -%description examples -%{summary}. - -%prep -%autosetup -n %{qt_module}-everywhere-src-%{version} - - -%build -rm -rf examples -%{qmake_qt5} \ - %{?_qt5_examplesdir:CONFIG+=qt_example_installs} - -%make_build - - -%install -%make_install INSTALL_ROOT=%{buildroot} - -## .prl/.la file love -# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs -pushd %{buildroot}%{_qt5_libdir} -for prl_file in libQt5*.prl ; do - sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file} - if [ -f "$(basename ${prl_file} .prl).so" ]; then - rm -fv "$(basename ${prl_file} .prl).la" - sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file} - fi -done -popd - - -%ldconfig_scriptlets - -%files -%license LICENSE.* -%{_qt5_libdir}/libQt5SerialPort.so.5* - -%files devel -%{_qt5_headerdir}/QtSerialPort/ -%{_qt5_libdir}/libQt5SerialPort.so -%{_qt5_libdir}/libQt5SerialPort.prl -%dir %{_qt5_libdir}/cmake/Qt5SerialPort/ -%{_qt5_libdir}/cmake/Qt5SerialPort/Qt5SerialPortConfig*.cmake -%{_qt5_libdir}/pkgconfig/Qt5SerialPort.pc -%{_qt5_archdatadir}/mkspecs/modules/qt_lib_serialport*.pri - -%if 0%{?docs} -%files doc -%license LICENSE.FDL -%{_qt5_docdir}/qtserialport.qch -%{_qt5_docdir}/qtserialport/ -%endif - - -%changelog -* Wed Aug 09 2023 Archana Choudhary - 5.15.9-2 -- Initial CBL-Mariner import from Fedora 38 (license: MIT) -- License verified - -* Tue Apr 11 2023 Jan Grulich - 5.15.9-1 -- 5.15.9 - -* Tue Jan 31 2023 Jan Grulich - 5.15.8-3 -- migrated to SPDX license - -* Fri Jan 20 2023 Fedora Release Engineering - 5.15.8-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild - -* Thu Jan 05 2023 Jan Grulich - 5.15.8-1 -- 5.15.8 - -* Mon Oct 31 2022 Jan Grulich - 5.15.7-1 -- 5.15.7 - -* Tue Sep 20 2022 Jan Grulich - 5.15.6-1 -- 5.15.6 - -* Sat Jul 23 2022 Fedora Release Engineering - 5.15.5-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild - -* Wed Jul 13 2022 Jan Grulich - 5.15.5-1 -- 5.15.5 - -* Mon May 16 2022 Jan Grulich - 5.15.4-1 -- 5.15.4 - -* Fri Mar 04 2022 Jan Grulich - 5.15.3-1 -- 5.15.3 - -* Fri Jan 21 2022 Fedora Release Engineering - 5.15.2-5 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild - -* Fri Jul 23 2021 Fedora Release Engineering - 5.15.2-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild - -* Wed Jan 27 2021 Fedora Release Engineering - 5.15.2-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild - -* Tue Nov 24 07:54:15 CET 2020 Jan Grulich - 5.15.2-2 -- Rebuild for qtbase with -no-reduce-relocations option - -* Fri Nov 20 09:30:47 CET 2020 Jan Grulich - 5.15.2-1 -- 5.15.2 - -* Thu Sep 10 2020 Jan Grulich - 5.15.1-1 -- 5.15.1 - -* Sat Aug 01 2020 Fedora Release Engineering - 5.14.2-3 -- Second attempt - Rebuilt for - https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild - -* Wed Jul 29 2020 Fedora Release Engineering - 5.14.2-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild - -* Sat Apr 04 2020 Rex Dieter - 5.14.2-1 -- 5.14.2 - -* Thu Jan 30 2020 Fedora Release 
Engineering - 5.13.2-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild - -* Mon Dec 09 2019 Jan Grulich - 5.13.2-1 -- 5.13.2 - -* Tue Sep 24 2019 Jan Grulich - 5.12.5-1 -- 5.12.5 - -* Fri Jul 26 2019 Fedora Release Engineering - 5.12.4-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild - -* Fri Jun 14 2019 Jan Grulich - 5.12.4-1 -- 5.12.4 - -* Tue Jun 04 2019 Jan Grulich - 5.12.3-1 -- 5.12.3 - -* Fri Feb 15 2019 Rex Dieter - 5.12.1-1 -- 5.12.1 - -* Sat Feb 02 2019 Fedora Release Engineering - 5.11.3-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild - -* Fri Dec 07 2018 Rex Dieter - 5.11.3-1 -- 5.11.3 - -* Fri Sep 21 2018 Jan Grulich - 5.11.2-1 -- 5.11.2 - -* Sat Jul 14 2018 Fedora Release Engineering - 5.11.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild - -* Tue Jun 19 2018 Rex Dieter - 5.11.1-1 -- 5.11.1 - -* Sun May 27 2018 Rex Dieter - 5.11.0-1 -- 5.11.0, use %%make_build %%ldconfig_scriptlets - -* Wed Feb 14 2018 Jan Grulich - 5.10.1-1 -- 5.10.1 - -* Fri Feb 09 2018 Fedora Release Engineering - 5.10.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild - -* Tue Dec 19 2017 Jan Grulich - 5.10.0-1 -- 5.10.0 - -* Thu Nov 23 2017 Jan Grulich - 5.9.3-1 -- 5.9.3 - -* Mon Oct 09 2017 Jan Grulich - 5.9.2-1 -- 5.9.2 - -* Thu Aug 03 2017 Fedora Release Engineering - 5.9.1-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 5.9.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Wed Jul 19 2017 Rex Dieter - 5.9.1-1 -- 5.9.1 - -* Fri Jun 16 2017 Rex Dieter - 5.9.0-2 -- drop shadow/out-of-tree builds (#1456211,QTBUG-37417) - -* Wed May 31 2017 Helio Chissini de Castro - 5.9.0-1 -- Upstream official release - -* Fri May 26 2017 Helio Chissini de Castro - 5.9.0-0.1.rc -- Upstream Release Candidate retagged - -* Wed May 24 2017 Helio Chissini de Castro - 5.9.0-0.rc.1 -- Upstream Release Candidate 1 - -* Wed May 24 2017 Helio Chissini de Castro - 5.9.0-0.rc.1 -- Upstream Release Candidate 1 - -* Fri May 05 2017 Helio Chissini de Castro - 5.9.0-0.beta.3 -- New upstream beta 3 version - -* Sun Apr 16 2017 Helio Chissini de Castro - 5.9.0-0.beta.1 -- New upstream beta version - -* Mon Jan 30 2017 Helio Chissini de Castro - 5.8.0-1 -- New upstream version - -* Sat Dec 10 2016 Rex Dieter - 5.7.1-2 -- 5.7.1 dec5 snapshot -- drop BR: cmake (handled by qt5-rpm-macros now) -- BR: qt5-qtbase-private-devel - -* Wed Nov 09 2016 Helio Chissini de Castro - 5.7.1-1 -- New upstream version - -* Tue Jun 14 2016 Helio Chissini de Castro - 5.7.0-2 -- Compiled with gcc - -* Tue Jun 14 2016 Helio Chissini de Castro - 5.7.0-2 -- Compiled with gcc - -* Tue Jun 14 2016 Helio Chissini de Castro - 5.7.0-2 -- Compiled with gcc - -* Tue Jun 14 2016 Helio Chissini de Castro - 5.7.0-1 -- Qt 5.7.0 release - -* Thu Jun 09 2016 Jan Grulich - 5.6.1-1 -- Update to 5.6.1 - -* Sun Mar 20 2016 Rex Dieter - 5.6.0-3 -- rebuild - -* Fri Mar 18 2016 Rex Dieter - 5.6.0-2 -- rebuild - -* Mon Mar 14 2016 Helio Chissini de Castro - 5.6.0-1 -- 5.6.0 final release - -* Tue Feb 23 2016 Helio Chissini de Castro - 5.6.0-0.7.rc -- Update to final RC - -* Mon Feb 15 2016 Helio Chissini de Castro - 5.6.0-0.6 -- Update RC release - -* Thu Feb 04 2016 Fedora Release Engineering - 5.6.0-0.5.beta3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Mon Dec 28 2015 Rex Dieter 5.6.0-0.4.beta3 -- update source URL, use %%license, BR: 
cmake - -* Mon Dec 21 2015 Helio Chissini de Castro - 5.6.0-0.3 -- Update to final beta3 release - -* Thu Dec 10 2015 Helio Chissini de Castro - 5.6.0-0.2 -- Official beta3 release - -* Tue Nov 03 2015 Helio Chissini de Castro - 5.6.0-0.1 -- Start to implement 5.6.0 beta3 - -* Thu Oct 15 2015 Helio Chissini de Castro - 5.5.1-2 -- Update to final release 5.5.1 - -* Tue Sep 29 2015 Helio Chissini de Castro - 5.5.1-1 -- Update to Qt 5.5.1 RC1 - -* Wed Jul 29 2015 Rex Dieter 5.5.0-3 -- -docs: BuildRequires: qt5-qhelpgenerator, standardize bootstrapping - -* Thu Jul 16 2015 Rex Dieter 5.5.0-2 -- tighten qtbase dep (#1233829) - -* Wed Jul 1 2015 Helio Chissini de Castro 5.5.0-1 -- New final upstream release Qt 5.5.0 - -* Thu Jun 25 2015 Helio Chissini de Castro - 5.5.0-0.2.rc -- Update for official RC1 released packages - -* Wed Jun 17 2015 Daniel Vrátil - 5.5.0-0.1.rc -- Qt 5.5.0 RC1 - -* Wed Jun 03 2015 Jan Grulich 5.4.2-1 -- 5.4.2 - -* Thu Apr 30 2015 Rex Dieter 5.4.1-2 -- Add qt5-qtserialport-examples (#1190202) - -* Fri Feb 27 2015 Rex Dieter 5.4.1-1 -- 5.4.1 - -* Wed Dec 10 2014 Rex Dieter 5.4.0-1 -- 5.4.0 (final) - -* Fri Nov 28 2014 Rex Dieter 5.4.0-0.3.rc -- 5.4.0-rc - -* Mon Nov 03 2014 Rex Dieter 5.4.0-0.2.beta3 -- out-of-tree build, use %%qmake_qt5 - -* Sun Oct 19 2014 Rex Dieter 5.4.0-0.1.beta3 -- 5.4.0-beta3 - -* Wed Sep 17 2014 Rex Dieter - 5.3.2-1 -- 5.3.2 - -* Sun Aug 17 2014 Fedora Release Engineering - 5.3.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Thu Jul 24 2014 Rex Dieter 5.3.1-1 -- 5.3.1 - -* Sun Jun 08 2014 Fedora Release Engineering - 5.3.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Wed May 21 2014 Jan Grulich 5.3.0-1 -- 5.3.0 - -* Sat Apr 26 2014 Rex Dieter 5.2.1-2 -- clean .prl files (buildroot, excessive deps) (#1091630) - -* Thu Feb 06 2014 Rex Dieter 5.2.1-1 -- 5.2.1 - -* Mon Jan 27 2014 Rex Dieter 5.2.0-2 -- ready -examples subpkg - -* Thu Jan 02 2014 Rex Dieter 5.2.0-1 -- first try diff --git a/SPECS-EXTENDED/qt6-qtconnectivity/CVE-2025-23050-qtconnectivity-6.5.diff b/SPECS-EXTENDED/qt6-qtconnectivity/CVE-2025-23050-qtconnectivity-6.5.diff new file mode 100644 index 00000000000..9468d728b4c --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtconnectivity/CVE-2025-23050-qtconnectivity-6.5.diff @@ -0,0 +1,221 @@ +From 88c30a23dcff5e7c16a54f93accf743847e22617 Mon Sep 17 00:00:00 2001 +From: Ivan Solovev +Date: Thu, 02 Jan 2025 16:48:49 +0100 +Subject: [PATCH] QLowEnergyControllerPrivateBluez: guard against malformed replies + +The QLowEnergyControllerPrivateBluez::l2cpReadyRead() slot reads the +data from a Bluetooth L2CAP socket and then tries to process it +according to ATT protocol specs. + +However, the code was missing length and sanity checks at some +codepaths in processUnsolicitedReply() and processReply() helper +methods, simply relying on the data to be in the proper format. + +This patch adds some minimal checks to make sure that we do not read +past the end of the received array and do not divide by zero. + +This problem was originally pointed out by Marc Mutz in an unrelated +patch. 
+ +Pick-to: 5.15 +Change-Id: I8dcfe031f70ad61fa3d87dc9d771c3fabc6d0edc +Reviewed-by: Alex Blasche +Reviewed-by: Juha Vuolle +(cherry picked from commit aecbd657c841a2a8c74631ceac96b8ff1f03ab5c) +Reviewed-by: Qt Cherry-pick Bot +(cherry picked from commit 53e991671f725c136e9aa824c59ec13934c63fb4) +(cherry picked from commit 465e3f3112a9c158aa6dd2f8b9439ae6c2de336f) +--- + +diff --git a/src/bluetooth/qlowenergycontroller_bluez.cpp b/src/bluetooth/qlowenergycontroller_bluez.cpp +index 1b6f579..2105685 100644 +--- a/src/bluetooth/qlowenergycontroller_bluez.cpp ++++ b/src/bluetooth/qlowenergycontroller_bluez.cpp +@@ -80,14 +80,15 @@ static inline QBluetoothUuid convert_uuid128(const quint128_s *p) + return QBluetoothUuid(qtdst); + } + +-static void dumpErrorInformation(const QByteArray &response) ++/* returns false if the format is incorrect */ ++static bool dumpErrorInformation(const QByteArray &response) + { + const char *data = response.constData(); + if (response.size() != 5 + || (static_cast(data[0]) + != QBluezConst::AttCommand::ATT_OP_ERROR_RESPONSE)) { + qCWarning(QT_BT_BLUEZ) << QLatin1String("Not a valid error response"); +- return; ++ return false; + } + + QBluezConst::AttCommand lastCommand = static_cast(data[1]); +@@ -142,6 +143,8 @@ static void dumpErrorInformation(const QByteArray &response) + + qCDebug(QT_BT_BLUEZ) << "Error:" << errorCode << "Error description:" << errorString + << "last command:" << lastCommand << "handle:" << handle; ++ ++ return true; + } + + static int getUuidSize(const QBluetoothUuid &uuid) +@@ -919,6 +922,7 @@ QLowEnergyHandle parseReadByTypeCharDiscovery( + { + Q_ASSERT(charData); + Q_ASSERT(data); ++ Q_ASSERT(elementLength >= 5); + + QLowEnergyHandle attributeHandle = bt_get_le16(&data[0]); + charData->properties = +@@ -927,7 +931,7 @@ QLowEnergyHandle parseReadByTypeCharDiscovery( + + if (elementLength == 7) // 16 bit uuid + charData->uuid = QBluetoothUuid(bt_get_le16(&data[5])); +- else ++ else if (elementLength == 21) // 128 bit uuid + charData->uuid = convert_uuid128((quint128_s *)&data[5]); + + qCDebug(QT_BT_BLUEZ) << "Found handle:" << Qt::hex << attributeHandle +@@ -944,6 +948,7 @@ QLowEnergyHandle parseReadByTypeIncludeDiscovery( + { + Q_ASSERT(foundServices); + Q_ASSERT(data); ++ Q_ASSERT(elementLength >= 6); + + QLowEnergyHandle attributeHandle = bt_get_le16(&data[0]); + +@@ -953,9 +958,14 @@ QLowEnergyHandle parseReadByTypeIncludeDiscovery( + // data[2] -> included service start handle + // data[4] -> included service end handle + ++ // TODO: Spec v. 5.3, Vol. 3, Part G, 4.5.1 mentions that only ++ // 16-bit UUID can be returned here. If the UUID is 128-bit, ++ // then it is omitted from the response, and should be requested ++ // separately with the ATT_READ_REQ command. 
++ + if (elementLength == 8) //16 bit uuid + foundServices->append(QBluetoothUuid(bt_get_le16(&data[6]))); +- else ++ else if (elementLength == 22) // 128 bit uuid + foundServices->append(convert_uuid128((quint128_s *) &data[6])); + + qCDebug(QT_BT_BLUEZ) << "Found included service: " << Qt::hex +@@ -964,17 +974,29 @@ QLowEnergyHandle parseReadByTypeIncludeDiscovery( + return attributeHandle; + } + ++Q_DECL_COLD_FUNCTION ++static void reportMalformedData(QBluezConst::AttCommand cmd, const QByteArray &response) ++{ ++ qCDebug(QT_BT_BLUEZ, "%s malformed data: %s", qt_getEnumName(cmd), ++ response.toHex().constData()); ++} ++ + void QLowEnergyControllerPrivateBluez::processReply( + const Request &request, const QByteArray &response) + { + Q_Q(QLowEnergyController); + ++ // We already have an isEmpty() check at the only calling site that reads ++ // incoming data, so Q_ASSERT is enough. ++ Q_ASSERT(!response.isEmpty()); ++ + QBluezConst::AttCommand command = static_cast(response.constData()[0]); + + bool isErrorResponse = false; + // if error occurred 2. byte is previous request type + if (command == QBluezConst::AttCommand::ATT_OP_ERROR_RESPONSE) { +- dumpErrorInformation(response); ++ if (!dumpErrorInformation(response)) ++ return; + command = static_cast(response.constData()[1]); + isErrorResponse = true; + } +@@ -987,6 +1009,10 @@ void QLowEnergyControllerPrivateBluez::processReply( + if (isErrorResponse) { + mtuSize = ATT_DEFAULT_LE_MTU; + } else { ++ if (response.size() < 3) { ++ reportMalformedData(command, response); ++ break; ++ } + const char *data = response.constData(); + quint16 mtu = bt_get_le16(&data[1]); + mtuSize = mtu; +@@ -1015,8 +1041,15 @@ void QLowEnergyControllerPrivateBluez::processReply( + break; + } + ++ // response[1] == elementLength. According to the spec it should be ++ // at least 4 bytes. See Spec v5.3, Vol 3, Part F, 3.4.4.10 ++ if (response.size() < 2 || response[1] < 4) { ++ reportMalformedData(command, response); ++ break; ++ } ++ + QLowEnergyHandle start = 0, end = 0; +- const quint16 elementLength = response.constData()[1]; ++ const quint16 elementLength = response.constData()[1]; // value checked above + const quint16 numElements = (response.size() - 2) / elementLength; + quint16 offset = 2; + const char *data = response.constData(); +@@ -1092,16 +1125,25 @@ void QLowEnergyControllerPrivateBluez::processReply( + } + + /* packet format: +- * if GATT_CHARACTERISTIC discovery ++ * if GATT_CHARACTERISTIC discovery (Spec 5.3, Vol. 3, Part G, 4.6) + * + * []+ ++ * The minimum elementLength is 7 bytes (uuid is always included) + * +- * if GATT_INCLUDE discovery ++ * if GATT_INCLUDE discovery (Spec 5.3, Vol. 3, Part G, 4.5.1) + * + * []+ ++ * The minimum elementLength is 6 bytes (uuid can be omitted). + * + * The uuid can be 16 or 128 bit. + */ ++ ++ const quint8 minimumElementLength = attributeType == GATT_CHARACTERISTIC ? 7 : 6; ++ if (response.size() < 2 || response[1] < minimumElementLength) { ++ reportMalformedData(command, response); ++ break; ++ } ++ + QLowEnergyHandle lastHandle; + const quint16 elementLength = response.constData()[1]; + const quint16 numElements = (response.size() - 2) / elementLength; +@@ -1298,6 +1340,12 @@ void QLowEnergyControllerPrivateBluez::processReply( + break; + } + ++ // Spec 5.3, Vol. 
3, Part F, 3.4.3.2 ++ if (response.size() < 6) { ++ reportMalformedData(command, response); ++ break; ++ } ++ + const quint8 format = response[1]; + quint16 elementLength; + switch (format) { +@@ -1735,9 +1783,18 @@ void QLowEnergyControllerPrivateBluez::discoverServiceDescriptors( + + void QLowEnergyControllerPrivateBluez::processUnsolicitedReply(const QByteArray &payload) + { ++ Q_ASSERT(!payload.isEmpty()); ++ + const char *data = payload.constData(); +- bool isNotification = (static_cast(data[0]) ++ const auto command = static_cast(data[0]); ++ bool isNotification = (command + == QBluezConst::AttCommand::ATT_OP_HANDLE_VAL_NOTIFICATION); ++ ++ if (payload.size() < 3) { ++ reportMalformedData(command, payload); ++ return; ++ } ++ + const QLowEnergyHandle changedHandle = bt_get_le16(&data[1]); + + if (QT_BT_BLUEZ().isDebugEnabled()) { diff --git a/SPECS-EXTENDED/qt6-qtconnectivity/build_fix_quint128.patch b/SPECS-EXTENDED/qt6-qtconnectivity/build_fix_quint128.patch new file mode 100644 index 00000000000..b412ba05059 --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtconnectivity/build_fix_quint128.patch @@ -0,0 +1,888 @@ +From 2319f9574ad40c4e4f8d21c486aafc7bd29621e7 Mon Sep 17 00:00:00 2001 +From: Sandeep Karambelkar +Date: Mon, 1 Dec 2025 16:25:17 +0000 +Subject: [PATCH] Fix quint128 redefinition + +--- + .../devicediscoverybroadcastreceiver.cpp | 12 +++---- + src/bluetooth/bluez/bluez_data_p.h | 16 ++++----- + src/bluetooth/bluez/hcimanager.cpp | 2 +- + src/bluetooth/bluez/hcimanager_p.h | 2 +- + src/bluetooth/darwin/btcentralmanager.mm | 2 +- + src/bluetooth/darwin/btledeviceinquiry.mm | 2 +- + src/bluetooth/darwin/btutility.mm | 6 ++-- + src/bluetooth/lecmaccalculator.cpp | 6 ++-- + src/bluetooth/lecmaccalculator_p.h | 6 ++-- + .../qbluetoothdevicediscoveryagent_winrt.cpp | 10 +++--- + src/bluetooth/qbluetoothsocket_android.cpp | 4 +-- + src/bluetooth/qbluetoothuuid.cpp | 6 ++-- + src/bluetooth/qbluetoothuuid.h | 6 ++-- + src/bluetooth/qleadvertiser_bluez.cpp | 8 ++--- + src/bluetooth/qlowenergycontroller_bluez.cpp | 36 +++++++++---------- + src/bluetooth/qlowenergycontroller_bluez_p.h | 6 ++-- + .../QtBluetooth.5.10.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.11.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.12.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.13.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.14.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.2.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.3.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.4.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.6.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.7.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.8.0.linux-gcc-amd64.txt | 4 +-- + .../QtBluetooth.5.9.0.linux-gcc-amd64.txt | 4 +-- + .../qbluetoothuuid/tst_qbluetoothuuid.cpp | 20 +++++------ + .../tst_qlowenergycontroller-gattserver.cpp | 8 ++--- + 30 files changed, 103 insertions(+), 103 deletions(-) + +diff --git a/src/bluetooth/android/devicediscoverybroadcastreceiver.cpp b/src/bluetooth/android/devicediscoverybroadcastreceiver.cpp +index 67678c1..45e174f 100644 +--- a/src/bluetooth/android/devicediscoverybroadcastreceiver.cpp ++++ b/src/bluetooth/android/devicediscoverybroadcastreceiver.cpp +@@ -221,10 +221,10 @@ enum ADType { + // .. 
more will be added when required + }; + +-// Endianness conversion for quint128 doesn't exist in qtendian.h +-inline quint128 qbswap(const quint128 src) ++// Endianness conversion for quint128_s doesn't exist in qtendian.h ++inline quint128_s qbswap(const quint128_s src) + { +- quint128 dst; ++ quint128_s dst; + for (int i = 0; i < 16; i++) + dst.data[i] = src.data[15 - i]; + return dst; +@@ -500,7 +500,7 @@ QBluetoothDeviceInfo DeviceDiscoveryBroadcastReceiver::retrieveDeviceInfo(const + case ADType128BitUuidIncomplete: + case ADType128BitUuidComplete: + foundService = +- QBluetoothUuid(qToBigEndian(qFromLittleEndian(dataPtr))); ++ QBluetoothUuid(qToBigEndian(qFromLittleEndian(dataPtr))); + break; + case ADTypeServiceData16Bit: + if (nBytes >= 3) { +@@ -516,8 +516,8 @@ QBluetoothDeviceInfo DeviceDiscoveryBroadcastReceiver::retrieveDeviceInfo(const + break; + case ADTypeServiceData128Bit: + if (nBytes >= 17) { +- info.setServiceData(QBluetoothUuid(qToBigEndian( +- qFromLittleEndian(dataPtr))), ++ info.setServiceData(QBluetoothUuid(qToBigEndian( ++ qFromLittleEndian(dataPtr))), + QByteArray(dataPtr + 16, nBytes - 17)); + } + break; +diff --git a/src/bluetooth/bluez/bluez_data_p.h b/src/bluetooth/bluez/bluez_data_p.h +index f91f581..aa282fe 100644 +--- a/src/bluetooth/bluez/bluez_data_p.h ++++ b/src/bluetooth/bluez/bluez_data_p.h +@@ -119,12 +119,12 @@ struct sockaddr_rc { + + #if __BYTE_ORDER == __LITTLE_ENDIAN + +-static inline void btoh128(const quint128 *src, quint128 *dst) ++static inline void btoh128(const quint128_s *src, quint128_s *dst) + { +- memcpy(dst, src, sizeof(quint128)); ++ memcpy(dst, src, sizeof(quint128_s)); + } + +-static inline void ntoh128(const quint128 *src, quint128 *dst) ++static inline void ntoh128(const quint128_s *src, quint128_s *dst) + { + int i; + +@@ -134,7 +134,7 @@ static inline void ntoh128(const quint128 *src, quint128 *dst) + + #elif __BYTE_ORDER == __BIG_ENDIAN + +-static inline void btoh128(const quint128 *src, quint128 *dst) ++static inline void btoh128(const quint128_s *src, quint128_s *dst) + { + int i; + +@@ -142,9 +142,9 @@ static inline void btoh128(const quint128 *src, quint128 *dst) + dst->data[15 - i] = src->data[i]; + } + +-static inline void ntoh128(const quint128 *src, quint128 *dst) ++static inline void ntoh128(const quint128_s *src, quint128_s *dst) + { +- memcpy(dst, src, sizeof(quint128)); ++ memcpy(dst, src, sizeof(quint128_s)); + } + #else + #error "Unknown byte order" +@@ -164,9 +164,9 @@ template inline void putBtData(T src, void *dst) + { + qToLittleEndian(src, reinterpret_cast(dst)); + } +-template<> inline void putBtData(quint128 src, void *dst) ++template<> inline void putBtData(quint128_s src, void *dst) + { +- btoh128(&src, reinterpret_cast(dst)); ++ btoh128(&src, reinterpret_cast(dst)); + } + + #define hton128(x, y) ntoh128(x, y) +diff --git a/src/bluetooth/bluez/hcimanager.cpp b/src/bluetooth/bluez/hcimanager.cpp +index 65cbd80..c267c19 100644 +--- a/src/bluetooth/bluez/hcimanager.cpp ++++ b/src/bluetooth/bluez/hcimanager.cpp +@@ -516,7 +516,7 @@ void HciManager::handleHciAclPacket(const quint8 *data, int size) + qCWarning(QT_BT_BLUEZ) << "Unexpected key size" << size << "in Signing Information packet"; + return; + } +- quint128 csrk; ++ quint128_s csrk; + memcpy(&csrk, data + 1, sizeof csrk); + const bool isRemoteKey = aclData->pbFlag == 2; + emit signatureResolvingKeyReceived(aclData->handle, isRemoteKey, csrk); +diff --git a/src/bluetooth/bluez/hcimanager_p.h b/src/bluetooth/bluez/hcimanager_p.h +index 161675f..cef9f59 
100644 +--- a/src/bluetooth/bluez/hcimanager_p.h ++++ b/src/bluetooth/bluez/hcimanager_p.h +@@ -179,7 +179,7 @@ signals: + void commandCompleted(quint16 opCode, quint8 status, const QByteArray &data); + void connectionComplete(quint16 handle); + void connectionUpdate(quint16 handle, const QLowEnergyConnectionParameters ¶meters); +- void signatureResolvingKeyReceived(quint16 connHandle, bool remoteKey, const quint128 &csrk); ++ void signatureResolvingKeyReceived(quint16 connHandle, bool remoteKey, const quint128_s &csrk); + + private slots: + void _q_readNotify(); +diff --git a/src/bluetooth/darwin/btcentralmanager.mm b/src/bluetooth/darwin/btcentralmanager.mm +index f64dbc3..2103880 100644 +--- a/src/bluetooth/darwin/btcentralmanager.mm ++++ b/src/bluetooth/darwin/btcentralmanager.mm +@@ -341,7 +341,7 @@ using DiscoveryMode = QLowEnergyService::DiscoveryMode; + } + + +- const quint128 qtUuidData(deviceUuid.toUInt128()); ++ const quint128_s qtUuidData(deviceUuid.toUInt128()); + uuid_t uuidData = {}; + std::copy(qtUuidData.data, qtUuidData.data + 16, uuidData); + const ObjCScopedPointer nsUuid([[NSUUID alloc] initWithUUIDBytes:uuidData], RetainPolicy::noInitialRetain); +diff --git a/src/bluetooth/darwin/btledeviceinquiry.mm b/src/bluetooth/darwin/btledeviceinquiry.mm +index 52b4ddb..4fc3c8c 100644 +--- a/src/bluetooth/darwin/btledeviceinquiry.mm ++++ b/src/bluetooth/darwin/btledeviceinquiry.mm +@@ -25,7 +25,7 @@ QBluetoothUuid qt_uuid(NSUUID *nsUuid) + + uuid_t uuidData = {}; + [nsUuid getUUIDBytes:uuidData]; +- quint128 qtUuidData = {}; ++ quint128_s qtUuidData = {}; + std::copy(uuidData, uuidData + 16, qtUuidData.data); + return QBluetoothUuid(qtUuidData); + } +diff --git a/src/bluetooth/darwin/btutility.mm b/src/bluetooth/darwin/btutility.mm +index 91902ec..d8a4512 100644 +--- a/src/bluetooth/darwin/btutility.mm ++++ b/src/bluetooth/darwin/btutility.mm +@@ -83,7 +83,7 @@ BluetoothDeviceAddress iobluetooth_address(const QBluetoothAddress &qAddress) + ObjCStrongReference iobluetooth_uuid(const QBluetoothUuid &uuid) + { + const unsigned nBytes = 128 / std::numeric_limits::digits; +- const quint128 intVal(uuid.toUInt128()); ++ const quint128_s intVal(uuid.toUInt128()); + + const ObjCStrongReference iobtUUID([IOBluetoothSDPUUID uuidWithBytes:intVal.data + length:nBytes], RetainPolicy::doInitialRetain); +@@ -97,7 +97,7 @@ QBluetoothUuid qt_uuid(IOBluetoothSDPUUID *uuid) + return qtUuid; + + // TODO: ensure the correct byte-order!!! 
+- quint128 uuidVal = {}; ++ quint128_s uuidVal = {}; + const quint8 *const source = static_cast([uuid bytes]); + std::copy(source, source + 16, uuidVal.data); + return QBluetoothUuid(uuidVal); +@@ -161,7 +161,7 @@ QBluetoothUuid qt_uuid(CBUUID *uuid) + const uchar *const src = static_cast(uuid.data.bytes); + return QBluetoothUuid(qFromBigEndian(src)); + } else if (uuid.data.length == 16) { +- quint128 qtUuidData = {}; ++ quint128_s qtUuidData = {}; + const quint8 *const source = static_cast(uuid.data.bytes); + std::copy(source, source + 16, qtUuidData.data); + +diff --git a/src/bluetooth/lecmaccalculator.cpp b/src/bluetooth/lecmaccalculator.cpp +index 95ead47..8020bd0 100644 +--- a/src/bluetooth/lecmaccalculator.cpp ++++ b/src/bluetooth/lecmaccalculator.cpp +@@ -60,12 +60,12 @@ QByteArray LeCmacCalculator::createFullMessage(const QByteArray &message, quint3 + return fullMessage; + } + +-quint64 LeCmacCalculator::calculateMac(const QByteArray &message, const quint128 &csrk) const ++quint64 LeCmacCalculator::calculateMac(const QByteArray &message, const quint128_s &csrk) const + { + #ifdef CONFIG_LINUX_CRYPTO_API + if (m_baseSocket == -1) + return false; +- quint128 csrkMsb; ++ quint128_s csrkMsb; + std::reverse_copy(std::begin(csrk.data), std::end(csrk.data), std::begin(csrkMsb.data)); + qCDebug(QT_BT_BLUEZ) << "CSRK (MSB):" << QByteArray(reinterpret_cast(csrkMsb.data), + sizeof csrkMsb).toHex(); +@@ -127,7 +127,7 @@ quint64 LeCmacCalculator::calculateMac(const QByteArray &message, const quint128 + #endif + } + +-bool LeCmacCalculator::verify(const QByteArray &message, const quint128 &csrk, ++bool LeCmacCalculator::verify(const QByteArray &message, const quint128_s &csrk, + quint64 expectedMac) const + { + #ifdef CONFIG_LINUX_CRYPTO_API +diff --git a/src/bluetooth/lecmaccalculator_p.h b/src/bluetooth/lecmaccalculator_p.h +index c60300b..9a96c4c 100644 +--- a/src/bluetooth/lecmaccalculator_p.h ++++ b/src/bluetooth/lecmaccalculator_p.h +@@ -18,7 +18,7 @@ + + QT_BEGIN_NAMESPACE + +-struct quint128; ++struct quint128_s; + + class Q_AUTOTEST_EXPORT LeCmacCalculator + { +@@ -28,10 +28,10 @@ public: + + static QByteArray createFullMessage(const QByteArray &message, quint32 signCounter); + +- quint64 calculateMac(const QByteArray &message, const quint128 &csrk) const; ++ quint64 calculateMac(const QByteArray &message, const quint128_s &csrk) const; + + // Convenience function. 
+- bool verify(const QByteArray &message, const quint128 &csrk, quint64 expectedMac) const; ++ bool verify(const QByteArray &message, const quint128_s &csrk, quint64 expectedMac) const; + + private: + int m_baseSocket = -1; +diff --git a/src/bluetooth/qbluetoothdevicediscoveryagent_winrt.cpp b/src/bluetooth/qbluetoothdevicediscoveryagent_winrt.cpp +index a5dd6ab..8dfde47 100644 +--- a/src/bluetooth/qbluetoothdevicediscoveryagent_winrt.cpp ++++ b/src/bluetooth/qbluetoothdevicediscoveryagent_winrt.cpp +@@ -38,10 +38,10 @@ QT_IMPL_METATYPE_EXTERN(ServiceData) + + Q_DECLARE_LOGGING_CATEGORY(QT_BT_WINDOWS) + +-// Endianness conversion for quint128 doesn't exist in qtendian.h +-inline quint128 qbswap(const quint128 src) ++// Endianness conversion for quint128_s doesn't exist in qtendian.h ++inline quint128_s qbswap(const quint128_s src) + { +- quint128 dst; ++ quint128_s dst; + for (int i = 0; i < 16; i++) + dst.data[i] = src.data[15 - i]; + return dst; +@@ -89,8 +89,8 @@ static ServiceData extractServiceData(const BluetoothLEAdvertisement &ad) + bufferData + 4); + } else if (dataType == 0x21) { + Q_ASSERT(bufferData.size() >= 16); +- ret.insert(QBluetoothUuid(qToBigEndian( +- qFromLittleEndian(bufferData.constData()))), ++ ret.insert(QBluetoothUuid(qToBigEndian( ++ qFromLittleEndian(bufferData.constData()))), + bufferData + 16); + } + } +diff --git a/src/bluetooth/qbluetoothsocket_android.cpp b/src/bluetooth/qbluetoothsocket_android.cpp +index b48dd51..ca14f53 100644 +--- a/src/bluetooth/qbluetoothsocket_android.cpp ++++ b/src/bluetooth/qbluetoothsocket_android.cpp +@@ -781,8 +781,8 @@ QBluetoothUuid QBluetoothSocketPrivateAndroid::reverseUuid(const QBluetoothUuid + if (isBaseUuid) + return serviceUuid; + +- const quint128 original = serviceUuid.toUInt128(); +- quint128 reversed; ++ const quint128_s original = serviceUuid.toUInt128(); ++ quint128_s reversed; + for (int i = 0; i < 16; i++) + reversed.data[15-i] = original.data[i]; + return QBluetoothUuid{reversed}; +diff --git a/src/bluetooth/qbluetoothuuid.cpp b/src/bluetooth/qbluetoothuuid.cpp +index ebcde92..d55065d 100644 +--- a/src/bluetooth/qbluetoothuuid.cpp ++++ b/src/bluetooth/qbluetoothuuid.cpp +@@ -529,7 +529,7 @@ Q_CONSTRUCTOR_FUNCTION(registerQBluetoothUuid) + + Note that \a uuid must be in big endian order. + */ +-QBluetoothUuid::QBluetoothUuid(quint128 uuid) ++QBluetoothUuid::QBluetoothUuid(quint128_s uuid) + { + QT_WARNING_PUSH + QT_WARNING_DISABLE_GCC("-Wstrict-aliasing") +@@ -630,9 +630,9 @@ quint32 QBluetoothUuid::toUInt32(bool *ok) const + /*! + Returns the 128 bit representation of this UUID. 
+ */ +-quint128 QBluetoothUuid::toUInt128() const ++quint128_s QBluetoothUuid::toUInt128() const + { +- quint128 uuid; ++ quint128_s uuid; + + quint32 tmp32 = qToBigEndian(data1); + memcpy(&uuid.data[0], &tmp32, 4); +diff --git a/src/bluetooth/qbluetoothuuid.h b/src/bluetooth/qbluetoothuuid.h +index c581820..c81786c 100644 +--- a/src/bluetooth/qbluetoothuuid.h ++++ b/src/bluetooth/qbluetoothuuid.h +@@ -14,7 +14,7 @@ + + QT_BEGIN_NAMESPACE + +-struct quint128 ++struct quint128_s + { + quint8 data[16]; + }; +@@ -345,7 +345,7 @@ public: + explicit constexpr QBluetoothUuid(quint32 uuid) noexcept + : QUuid(uuid, 0x0, 0x1000, 0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb) {}; + +- explicit QBluetoothUuid(quint128 uuid); ++ explicit QBluetoothUuid(quint128_s uuid); + explicit QBluetoothUuid(const QString &uuid); + QBluetoothUuid(const QBluetoothUuid &uuid) = default; + QBluetoothUuid(const QUuid &uuid); +@@ -369,7 +369,7 @@ public: + + quint16 toUInt16(bool *ok = nullptr) const; + quint32 toUInt32(bool *ok = nullptr) const; +- quint128 toUInt128() const; ++ quint128_s toUInt128() const; + + static QString serviceClassToString(ServiceClassUuid uuid); + static QString protocolToString(ProtocolUuid uuid); +diff --git a/src/bluetooth/qleadvertiser_bluez.cpp b/src/bluetooth/qleadvertiser_bluez.cpp +index 8d67df7..afcaa43 100644 +--- a/src/bluetooth/qleadvertiser_bluez.cpp ++++ b/src/bluetooth/qleadvertiser_bluez.cpp +@@ -210,7 +210,7 @@ template<> quint8 servicesType(bool dataComplete) + { + return dataComplete ? 0x5 : 0x4; + } +-template<> quint8 servicesType(bool dataComplete) ++template<> quint8 servicesType(bool dataComplete) + { + return dataComplete ? 0x7 : 0x6; + } +@@ -245,7 +245,7 @@ void QLeAdvertiserBluez::setServicesData(const QLowEnergyAdvertisingData &src, A + { + QList services16; + QList services32; +- QList services128; ++ QList services128; + const QList services = src.services(); + for (const QBluetoothUuid &service : services) { + bool ok; +@@ -262,8 +262,8 @@ void QLeAdvertiserBluez::setServicesData(const QLowEnergyAdvertisingData &src, A + + // QBluetoothUuid::toUInt128() is always Big-Endian + // convert it to host order +- quint128 hostOrder; +- quint128 qtUuidOrder = service.toUInt128(); ++ quint128_s hostOrder; ++ quint128_s qtUuidOrder = service.toUInt128(); + ntoh128(&qtUuidOrder, &hostOrder); + services128 << hostOrder; + } +diff --git a/src/bluetooth/qlowenergycontroller_bluez.cpp b/src/bluetooth/qlowenergycontroller_bluez.cpp +index a81ae59..1b6f579 100644 +--- a/src/bluetooth/qlowenergycontroller_bluez.cpp ++++ b/src/bluetooth/qlowenergycontroller_bluez.cpp +@@ -64,9 +64,9 @@ using namespace QBluetooth; + + const int maxPrepareQueueSize = 1024; + +-static inline QBluetoothUuid convert_uuid128(const quint128 *p) ++static inline QBluetoothUuid convert_uuid128(const quint128_s *p) + { +- quint128 dst_hostOrder, dst_bigEndian; ++ quint128_s dst_hostOrder, dst_bigEndian; + + // Bluetooth LE data comes as little endian + // uuids are constructed using high endian +@@ -74,8 +74,8 @@ static inline QBluetoothUuid convert_uuid128(const quint128 *p) + hton128(&dst_hostOrder, &dst_bigEndian); + + // convert to Qt's own data type +- quint128 qtdst; +- memcpy(&qtdst, &dst_bigEndian, sizeof(quint128)); ++ quint128_s qtdst; ++ memcpy(&qtdst, &dst_bigEndian, sizeof(quint128_s)); + + return QBluetoothUuid(qtdst); + } +@@ -160,8 +160,8 @@ template<> void putDataAndIncrement(const QBluetoothUuid &uuid, char *&dst) + if (uuidSize == 2) { + putBtData(uuid.toUInt16(), dst); + } else { +- 
quint128 hostOrder; +- quint128 qtUuidOrder = uuid.toUInt128(); ++ quint128_s hostOrder; ++ quint128_s qtUuidOrder = uuid.toUInt128(); + ntoh128(&qtUuidOrder, &hostOrder); + putBtData(hostOrder, dst); + } +@@ -211,7 +211,7 @@ void QLowEnergyControllerPrivateBluez::init() + } + ); + connect(hciManager.get(), &HciManager::signatureResolvingKeyReceived, +- [this](quint16 handle, bool remoteKey, const quint128 &csrk) { ++ [this](quint16 handle, bool remoteKey, const quint128_s &csrk) { + if (handle != connectionHandle) + return; + if ((remoteKey && role == QLowEnergyController::CentralRole) +@@ -928,7 +928,7 @@ QLowEnergyHandle parseReadByTypeCharDiscovery( + if (elementLength == 7) // 16 bit uuid + charData->uuid = QBluetoothUuid(bt_get_le16(&data[5])); + else +- charData->uuid = convert_uuid128((quint128 *)&data[5]); ++ charData->uuid = convert_uuid128((quint128_s *)&data[5]); + + qCDebug(QT_BT_BLUEZ) << "Found handle:" << Qt::hex << attributeHandle + << "properties:" << charData->properties +@@ -956,7 +956,7 @@ QLowEnergyHandle parseReadByTypeIncludeDiscovery( + if (elementLength == 8) //16 bit uuid + foundServices->append(QBluetoothUuid(bt_get_le16(&data[6]))); + else +- foundServices->append(convert_uuid128((quint128 *) &data[6])); ++ foundServices->append(convert_uuid128((quint128_s *) &data[6])); + + qCDebug(QT_BT_BLUEZ) << "Found included service: " << Qt::hex + << attributeHandle << "uuid:" << *foundServices; +@@ -1028,7 +1028,7 @@ void QLowEnergyControllerPrivateBluez::processReply( + if (elementLength == 6) //16 bit uuid + uuid = QBluetoothUuid(bt_get_le16(&data[offset+4])); + else if (elementLength == 20) //128 bit uuid +- uuid = convert_uuid128((quint128 *)&data[offset+4]); ++ uuid = convert_uuid128((quint128_s *)&data[offset+4]); + //else -> do nothing + + offset += elementLength; +@@ -1324,7 +1324,7 @@ void QLowEnergyControllerPrivateBluez::processReply( + if (format == 0x01) + uuid = QBluetoothUuid(bt_get_le16(&data[offset+2])); + else if (format == 0x02) +- uuid = convert_uuid128((quint128 *)&data[offset+2]); ++ uuid = convert_uuid128((quint128_s *)&data[offset+2]); + + offset += elementLength; + +@@ -2289,7 +2289,7 @@ void QLowEnergyControllerPrivateBluez::handleReadByTypeRequest(const QByteArray + if (is16BitUuid) { + type = QBluetoothUuid(bt_get_le16(typeStart)); + } else if (is128BitUuid) { +- type = QBluetoothUuid(convert_uuid128(reinterpret_cast(typeStart))); ++ type = QBluetoothUuid(convert_uuid128(reinterpret_cast(typeStart))); + } else { + qCWarning(QT_BT_BLUEZ) << "read by type request has invalid packet size" << packet.size(); + sendErrorResponse(static_cast(packet.at(0)), 0, +@@ -2457,7 +2457,7 @@ void QLowEnergyControllerPrivateBluez::handleReadByGroupTypeRequest(const QByteA + if (is16BitUuid) { + type = QBluetoothUuid(bt_get_le16(typeStart)); + } else if (is128BitUuid) { +- type = QBluetoothUuid(convert_uuid128(reinterpret_cast(typeStart))); ++ type = QBluetoothUuid(convert_uuid128(reinterpret_cast(typeStart))); + } else { + qCWarning(QT_BT_BLUEZ) << "read by group type request has invalid packet size" + << packet.size(); +@@ -3175,14 +3175,14 @@ void QLowEnergyControllerPrivateBluez::loadSigningDataIfNecessary(SigningKeyType + return; + } + const QByteArray keyData = QByteArray::fromHex(keyString); +- if (keyData.size() != qsizetype(sizeof(quint128))) { ++ if (keyData.size() != qsizetype(sizeof(quint128_s))) { + qCWarning(QT_BT_BLUEZ) << "Signing key in settings file has invalid size" + << keyString.size(); + return; + } + qCDebug(QT_BT_BLUEZ) << "CSRK of peer 
device is" << keyString; + const quint32 counter = settings.value(QLatin1String("Counter"), 0).toUInt(); +- quint128 csrk; ++ quint128_s csrk; + using namespace std; + memcpy(csrk.data, keyData.constData(), keyData.size()); + signingData.insert(remoteDevice.toUInt64(), SigningData(csrk, counter - 1)); +@@ -3228,8 +3228,8 @@ static QByteArray uuidToByteArray(const QBluetoothUuid &uuid) + putBtData(uuid.toUInt16(), ba.data()); + } else { + ba.resize(16); +- quint128 hostOrder; +- quint128 qtUuidOrder = uuid.toUInt128(); ++ quint128_s hostOrder; ++ quint128_s qtUuidOrder = uuid.toUInt128(); + ntoh128(&qtUuidOrder, &hostOrder); + putBtData(hostOrder, ba.data()); + } +@@ -3466,7 +3466,7 @@ QLowEnergyControllerPrivateBluez::checkReadPermissions(QList &attribu + return QBluezConst::AttError::ATT_ERROR_NO_ERROR; + } + +-bool QLowEnergyControllerPrivateBluez::verifyMac(const QByteArray &message, const quint128 &csrk, ++bool QLowEnergyControllerPrivateBluez::verifyMac(const QByteArray &message, const quint128_s &csrk, + quint32 signCounter, quint64 expectedMac) + { + if (!cmacCalculator) +diff --git a/src/bluetooth/qlowenergycontroller_bluez_p.h b/src/bluetooth/qlowenergycontroller_bluez_p.h +index 320349c..3334460 100644 +--- a/src/bluetooth/qlowenergycontroller_bluez_p.h ++++ b/src/bluetooth/qlowenergycontroller_bluez_p.h +@@ -151,10 +151,10 @@ private: + + struct SigningData { + SigningData() = default; +- SigningData(const quint128 &csrk, quint32 signCounter = quint32(-1)) ++ SigningData(const quint128_s &csrk, quint32 signCounter = quint32(-1)) + : key(csrk), counter(signCounter) {} + +- quint128 key; ++ quint128_s key; + quint32 counter = quint32(-1); + }; + QHash signingData; +@@ -280,7 +280,7 @@ private: + QBluezConst::AttError checkReadPermissions(const Attribute &attr); + QBluezConst::AttError checkReadPermissions(QList &attributes); + +- bool verifyMac(const QByteArray &message, const quint128 &csrk, quint32 signCounter, ++ bool verifyMac(const QByteArray &message, const quint128_s &csrk, quint32 signCounter, + quint64 expectedMac); + + void updateLocalAttributeValue( +diff --git a/tests/auto/bic/data/QtBluetooth.5.10.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.10.0.linux-gcc-amd64.txt +index d8b4312..6942026 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.10.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.10.0.linux-gcc-amd64.txt +@@ -4615,10 +4615,10 @@ QBluetoothLocalDevice (0x0x7f0bf572eaf8) 0 + QObject (0x0x7f0bf57df960) 0 + primary-for QBluetoothLocalDevice (0x0x7f0bf572eaf8) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7f0bf57dfd80) 0 ++quint128_s (0x0x7f0bf57dfd80) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.11.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.11.0.linux-gcc-amd64.txt +index ed8c8d2..e3145cf 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.11.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.11.0.linux-gcc-amd64.txt +@@ -4615,10 +4615,10 @@ QBluetoothLocalDevice (0x0x7f4411c70820) 0 + QObject (0x0x7f4411cc2ae0) 0 + primary-for QBluetoothLocalDevice (0x0x7f4411c70820) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7f4411cc2f00) 0 ++quint128_s (0x0x7f4411cc2f00) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.12.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.12.0.linux-gcc-amd64.txt +index 98f1c8c..1d87659 
100644 +--- a/tests/auto/bic/data/QtBluetooth.5.12.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.12.0.linux-gcc-amd64.txt +@@ -4680,10 +4680,10 @@ QBluetoothLocalDevice (0x0x7f889c8871a0) 0 + QObject (0x0x7f889c7feb40) 0 + primary-for QBluetoothLocalDevice (0x0x7f889c8871a0) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7f889c7fef60) 0 ++quint128_s (0x0x7f889c7fef60) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.13.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.13.0.linux-gcc-amd64.txt +index ef61ed6..cc14f31 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.13.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.13.0.linux-gcc-amd64.txt +@@ -4890,10 +4890,10 @@ QBluetoothLocalDevice (0x0x7f5eb91038f0) 0 + QObject (0x0x7f5eb8e2b900) 0 + primary-for QBluetoothLocalDevice (0x0x7f5eb91038f0) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7f5eb8e73360) 0 ++quint128_s (0x0x7f5eb8e73360) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.14.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.14.0.linux-gcc-amd64.txt +index d69a6b7..85eed85 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.14.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.14.0.linux-gcc-amd64.txt +@@ -4929,10 +4929,10 @@ QBluetoothLocalDevice (0x0x7faa66f806e8) 0 + QObject (0x0x7faa670b7660) 0 + primary-for QBluetoothLocalDevice (0x0x7faa66f806e8) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7faa670fb0c0) 0 ++quint128_s (0x0x7faa670fb0c0) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.2.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.2.0.linux-gcc-amd64.txt +index be0a7dc..6f17f6f 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.2.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.2.0.linux-gcc-amd64.txt +@@ -3665,10 +3665,10 @@ QBluetoothLocalDevice (0x0x7fd1f5086ea0) 0 + QObject (0x0x7fd1f4e3f900) 0 + primary-for QBluetoothLocalDevice (0x0x7fd1f5086ea0) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7fd1f4e3fd20) 0 ++quint128_s (0x0x7fd1f4e3fd20) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.3.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.3.0.linux-gcc-amd64.txt +index 66fbef0..32f8581 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.3.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.3.0.linux-gcc-amd64.txt +@@ -3676,10 +3676,10 @@ QBluetoothLocalDevice (0x0x7ffec5a14a90) 0 + QObject (0x0x7ffec56bcc00) 0 + primary-for QBluetoothLocalDevice (0x0x7ffec5a14a90) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7ffec5800060) 0 ++quint128_s (0x0x7ffec5800060) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.4.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.4.0.linux-gcc-amd64.txt +index dfdde04..3aa081a 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.4.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.4.0.linux-gcc-amd64.txt +@@ -3708,10 +3708,10 @@ QBluetoothLocalDevice (0x0x7fa3d7dbe618) 0 + QObject (0x0x7fa3d7dedf00) 0 + primary-for QBluetoothLocalDevice (0x0x7fa3d7dbe618) + +-Class quint128 ++Class quint128_s + size=16 
align=1 + base size=16 base align=1 +-quint128 (0x0x7fa3d7adc360) 0 ++quint128_s (0x0x7fa3d7adc360) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.6.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.6.0.linux-gcc-amd64.txt +index ddb2eed..f29b3d3 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.6.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.6.0.linux-gcc-amd64.txt +@@ -3972,10 +3972,10 @@ QBluetoothLocalDevice (0x0x7efe9883c270) 0 + QObject (0x0x7efe988e30c0) 0 + primary-for QBluetoothLocalDevice (0x0x7efe9883c270) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7efe988e34e0) 0 ++quint128_s (0x0x7efe988e34e0) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.7.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.7.0.linux-gcc-amd64.txt +index ab85793..8520f33 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.7.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.7.0.linux-gcc-amd64.txt +@@ -4254,10 +4254,10 @@ QBluetoothLocalDevice (0x0x7fcf5e4e0c30) 0 + QObject (0x0x7fcf5e2c40c0) 0 + primary-for QBluetoothLocalDevice (0x0x7fcf5e4e0c30) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7fcf5e2c44e0) 0 ++quint128_s (0x0x7fcf5e2c44e0) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.8.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.8.0.linux-gcc-amd64.txt +index 34e7789..78121ad 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.8.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.8.0.linux-gcc-amd64.txt +@@ -4279,10 +4279,10 @@ QBluetoothLocalDevice (0x0x7fc1f8031958) 0 + QObject (0x0x7fc1f7d983c0) 0 + primary-for QBluetoothLocalDevice (0x0x7fc1f8031958) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7fc1f7d987e0) 0 ++quint128_s (0x0x7fc1f7d987e0) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/bic/data/QtBluetooth.5.9.0.linux-gcc-amd64.txt b/tests/auto/bic/data/QtBluetooth.5.9.0.linux-gcc-amd64.txt +index a5d85e0..85f9ab0 100644 +--- a/tests/auto/bic/data/QtBluetooth.5.9.0.linux-gcc-amd64.txt ++++ b/tests/auto/bic/data/QtBluetooth.5.9.0.linux-gcc-amd64.txt +@@ -4294,10 +4294,10 @@ QBluetoothLocalDevice (0x0x7f66340d64e0) 0 + QObject (0x0x7f6633e3b1e0) 0 + primary-for QBluetoothLocalDevice (0x0x7f66340d64e0) + +-Class quint128 ++Class quint128_s + size=16 align=1 + base size=16 base align=1 +-quint128 (0x0x7f6633e3b600) 0 ++quint128_s (0x0x7f6633e3b600) 0 + + Class QBluetoothUuid + size=16 align=4 +diff --git a/tests/auto/qbluetoothuuid/tst_qbluetoothuuid.cpp b/tests/auto/qbluetoothuuid/tst_qbluetoothuuid.cpp +index 604fca4..1278cd5 100644 +--- a/tests/auto/qbluetoothuuid/tst_qbluetoothuuid.cpp ++++ b/tests/auto/qbluetoothuuid/tst_qbluetoothuuid.cpp +@@ -15,7 +15,7 @@ + + QT_USE_NAMESPACE + +-Q_DECLARE_METATYPE(quint128) ++Q_DECLARE_METATYPE(quint128_s) + + class tst_QBluetoothUuid : public QObject + { +@@ -34,12 +34,12 @@ private slots: + void tst_conversion(); + void tst_comparison_data(); + void tst_comparison(); +- void tst_quint128ToUuid(); ++ void tst_quint128_sToUuid(); + }; + + tst_QBluetoothUuid::tst_QBluetoothUuid() + { +- qRegisterMetaType(); ++ qRegisterMetaType(); + } + + tst_QBluetoothUuid::~tst_QBluetoothUuid() +@@ -155,11 +155,11 @@ void tst_QBluetoothUuid::tst_conversion_data() + 
QTest::addColumn("constructUuid32"); + QTest::addColumn("uuid32"); + QTest::addColumn("constructUuid128"); +- QTest::addColumn("uuid128"); ++ QTest::addColumn("uuid128"); + QTest::addColumn("uuidS"); + + static const auto uuid128_32 = [](quint8 a, quint8 b, quint8 c, quint8 d) { +- quint128 x = { ++ quint128_s x = { + { + a, b, c, d, + 0x00, 0x00, +@@ -201,7 +201,7 @@ void tst_QBluetoothUuid::tst_conversion_data() + newRow32("0xffffffff", 0xff, 0xff, 0xff, 0xff, "FFFFFFFF" BASEUUID); + + { +- quint128 uuid128 = { ++ quint128_s uuid128 = { + { + 0x00, 0x11, 0x22, 0x33, + 0x44, 0x55, +@@ -224,7 +224,7 @@ void tst_QBluetoothUuid::tst_conversion() + QFETCH(bool, constructUuid32); + QFETCH(quint32, uuid32); + QFETCH(bool, constructUuid128); +- QFETCH(quint128, uuid128); ++ QFETCH(quint128_s, uuid128); + QFETCH(QString, uuidS); + + int minimumSize = 16; +@@ -313,7 +313,7 @@ void tst_QBluetoothUuid::tst_comparison() + QFETCH(bool, constructUuid32); + QFETCH(quint32, uuid32); + QFETCH(bool, constructUuid128); +- QFETCH(quint128, uuid128); ++ QFETCH(quint128_s, uuid128); + + QVERIFY(QBluetoothUuid() == QBluetoothUuid()); + +@@ -351,10 +351,10 @@ void tst_QBluetoothUuid::tst_comparison() + } + } + +-void tst_QBluetoothUuid::tst_quint128ToUuid() ++void tst_QBluetoothUuid::tst_quint128_sToUuid() + { + QBluetoothUuid temp(QString("{67C8770B-44F1-410A-AB9A-F9B5446F13EE}")); +- quint128 array = temp.toUInt128(); ++ quint128_s array = temp.toUInt128(); + QBluetoothUuid u(array); + QVERIFY(temp == u); + +diff --git a/tests/auto/qlowenergycontroller-gattserver/test/tst_qlowenergycontroller-gattserver.cpp b/tests/auto/qlowenergycontroller-gattserver/test/tst_qlowenergycontroller-gattserver.cpp +index 973785b..9b64aed 100644 +--- a/tests/auto/qlowenergycontroller-gattserver/test/tst_qlowenergycontroller-gattserver.cpp ++++ b/tests/auto/qlowenergycontroller-gattserver/test/tst_qlowenergycontroller-gattserver.cpp +@@ -53,7 +53,7 @@ private: + QScopedPointer m_leController; + + #if defined(CHECK_CMAC_SUPPORT) +- bool checkCmacSupport(const quint128& csrkMsb); ++ bool checkCmacSupport(const quint128_s& csrkMsb); + #endif + }; + +@@ -142,7 +142,7 @@ void TestQLowEnergyControllerGattServer::cmacVerifier() + { + #if defined(CONFIG_LINUX_CRYPTO_API) && defined(QT_BUILD_INTERNAL) && defined(CONFIG_BLUEZ_LE) + // Test data comes from spec v4.2, Vol 3, Part H, Appendix D.1 +- const quint128 csrk = { ++ const quint128_s csrk = { + { 0x3c, 0x4f, 0xcf, 0x09, 0x88, 0x15, 0xf7, 0xab, + 0xa6, 0xd2, 0xae, 0x28, 0x16, 0x15, 0x7e, 0x2b } + }; +@@ -168,11 +168,11 @@ void TestQLowEnergyControllerGattServer::cmacVerifier() + #include + #include + +-bool TestQLowEnergyControllerGattServer::checkCmacSupport(const quint128& csrk) ++bool TestQLowEnergyControllerGattServer::checkCmacSupport(const quint128_s& csrk) + { + bool retval = false; + #if defined(CONFIG_LINUX_CRYPTO_API) && defined(QT_BUILD_INTERNAL) && defined(CONFIG_BLUEZ_LE) +- quint128 csrkMsb; ++ quint128_s csrkMsb; + std::reverse_copy(std::begin(csrk.data), std::end(csrk.data), std::begin(csrkMsb.data)); + + int testSocket = socket(AF_ALG, SOCK_SEQPACKET, 0); +-- +2.45.4 + diff --git a/SPECS-EXTENDED/qt6-qtconnectivity/qt6-qtconnectivity.signatures.json b/SPECS-EXTENDED/qt6-qtconnectivity/qt6-qtconnectivity.signatures.json new file mode 100644 index 00000000000..b86c6f85032 --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtconnectivity/qt6-qtconnectivity.signatures.json @@ -0,0 +1,5 @@ +{ + "Signatures": { + "qtconnectivity-everywhere-src-6.5.7.tar.xz": 
"d391a5a6c2e6156818e96ccaf01dc14ff4ff3e8b594beebfd7f49b5c3496656a" + } +} \ No newline at end of file diff --git a/SPECS-EXTENDED/qt6-qtconnectivity/qt6-qtconnectivity.spec b/SPECS-EXTENDED/qt6-qtconnectivity/qt6-qtconnectivity.spec new file mode 100644 index 00000000000..ef4035c8dcd --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtconnectivity/qt6-qtconnectivity.spec @@ -0,0 +1,189 @@ +%global qt_module qtconnectivity + +#global unstable 1 +%if 0%{?unstable} +%global prerelease rc2 +%endif + +%global examples 1 + +Summary: Qt6 - Connectivity components +Name: qt6-%{qt_module} +Version: 6.5.7 +Vendor: Microsoft Corporation +Distribution: Azure Linux + +%global majmin %(echo %{version} | cut -d. -f1-2) +%global qt_version %(echo %{version} | cut -d~ -f1) + +Release: 1%{?dist} + +# See LICENSE.GPL3, respectively, for exception details +License: LGPL-3.0-only OR GPL-3.0-only WITH Qt-GPL-exception-1.0 +Url: http://qt.io + +%if 0%{?unstable} +Source0: https://download.qt.io/development_releases/qt/%{majmin}/%{qt_version}/submodules/%{qt_module}-everywhere-src-%{qt_version}-%{prerelease}.tar.xz +%else +Source0: https://download.qt.io/official_releases/qt/%{majmin}/%{version}/src/submodules/%{qt_module}-everywhere-opensource-src-%{version}.tar.xz#/%{qt_module}-everywhere-src-%{version}.tar.xz +%endif + +Patch0: build_fix_quint128.patch +Patch1: CVE-2025-23050-qtconnectivity-6.5.diff + +# filter qml provides +%global __provides_exclude_from ^%{_qt_archdatadir}/qml/.*\\.so$ + +BuildRequires: cmake +BuildRequires: gcc-c++ +BuildRequires: ninja-build +BuildRequires: qt-rpm-macros +BuildRequires: qtbase-devel >= %{version} +BuildRequires: qtbase-private-devel >= %{version} +%{?_qt6:Requires: %{_qt6}%{?_isa} = %{_qt6_version}} +BuildRequires: qtdeclarative-devel >= %{version} +BuildRequires: pkgconfig(bluez) +BuildRequires: pkgconfig(xkbcommon) >= 0.4.1 +BuildRequires: openssl-devel + +%description +%{summary}. + +%package devel +Summary: Development files for %{name} +Requires: %{name}%{?_isa} = %{version}-%{release} +Requires: qtbase-devel%{?_isa} +%description devel +%{summary}. + +%if 0%{?examples} +%package examples +Summary: Programming examples for %{name} +Requires: %{name}%{?_isa} = %{version}-%{release} +%description examples +%{summary}. 
+%endif + +%prep +%autosetup -n %{qt_module}-everywhere-src-%{qt_version}%{?unstable:-%{prerelease}} -p1 + + +%build +%cmake_qt -DQT_BUILD_EXAMPLES:BOOL=%{?examples:ON}%{!?examples:OFF} +%cmake_build + +%install +%cmake_install +## .prl/.la file love +# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs +pushd %{buildroot}%{_qt_libdir} +for prl_file in libQt6*.prl ; do + sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file} + if [ -f "$(basename ${prl_file} .prl).so" ]; then + rm -fv "$(basename ${prl_file} .prl).la" + sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file} + fi +done +popd + + +%ldconfig_scriptlets +%files +%license LICENSES/GPL* LICENSES/LGPL* +%{_qt_libexecdir}/sdpscanner +%{_qt_libdir}/libQt6Bluetooth.so.6* +%{_qt_libdir}/libQt6Nfc.so.6* + +%files devel +%{_qt_headerdir}/QtBluetooth/ +%{_qt_libdir}/libQt6Bluetooth.so +%{_qt_libdir}/libQt6Bluetooth.prl +%{_qt_headerdir}/QtNfc/ +%{_qt_libdir}/libQt6Nfc.so +%{_qt_libdir}/libQt6Nfc.prl +%dir %{_qt_libdir}/cmake/Qt6Bluetooth/ +%dir %{_qt_libdir}/cmake/Qt6Nfc/ +%{_qt_libdir}/cmake/Qt6/FindBlueZ.cmake +%{_qt_libdir}/cmake/Qt6/FindPCSCLITE.cmake +%{_qt_libdir}/cmake/Qt6BuildInternals/StandaloneTests/*.cmake +%{_qt_libdir}/cmake/Qt6Bluetooth/*.cmake +%{_qt_libdir}/cmake/Qt6Nfc/*.cmake +%{_qt_archdatadir}/mkspecs/modules/qt_lib_bluetooth*.pri +%{_qt_archdatadir}/mkspecs/modules/qt_lib_nfc*.pri +/usr/modules/*.json +%{_qt_libdir}/qt6/metatypes/qt6*_metatypes.json +%{_qt_libdir}/pkgconfig/*.pc + +%if 0%{?examples} +%files examples +%{_qt_examplesdir}/ +%endif + +%changelog +* Tue Dec 02 2025 Sandeep Karambelkar - 6.5.7-1 +- Initial Azure Linux import from Fedora 37 (license: MIT) +- Upgrade to 6.5.7 +- License Verified + +* Wed Jul 12 2023 Jan Grulich - 6.5.1-2 +- Rebuild for qtbase private API version change + +* Mon May 22 2023 Jan Grulich - 6.5.1-1 +- 6.5.1 + +* Tue Apr 04 2023 Jan Grulich - 6.5.0-1 +- 6.5.0 + +* Thu Mar 23 2023 Jan Grulich - 6.4.3-1 +- 6.4.3 + +* Tue Jan 31 2023 Jan Grulich - 6.4.2-3 +- migrated to SPDX license + +* Fri Jan 20 2023 Fedora Release Engineering - 6.4.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild + +* Mon Jan 16 2023 Jan Grulich - 6.4.2-1 +- 6.4.2 + +* Wed Nov 23 2022 Jan Grulich - 6.4.1-1 +- 6.4.1 + +* Mon Oct 31 2022 Jan Grulich - 6.4.0-1 +- 6.4.0 + +* Sat Jul 23 2022 Fedora Release Engineering - 6.3.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild + +* Wed Jul 13 2022 Jan Grulich - 6.3.1-1 +- 6.3.1 + +* Wed May 25 2022 Jan Grulich - 6.3.0-2 +- Enable examples + +* Wed Apr 13 2022 Jan Grulich - 6.3.0-1 +- 6.3.0 + +* Fri Feb 25 2022 Jan Grulich - 6.2.3-2 +- Enable s390x builds + +* Mon Jan 31 2022 Jan Grulich - 6.2.3-1 +- 6.2.3 + +* Fri Jan 21 2022 Fedora Release Engineering - 6.2.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild + +* Tue Dec 14 2021 Jan Grulich - 6.2.2-1 +- 6.2.2 + +* Fri Oct 29 2021 Jan Grulich - 6.2.1-1 +- 6.2.1 + +* Thu Sep 30 2021 Jan Grulich - 6.2.0-1 +- 6.2.0 + +* Mon Sep 27 2021 Jan Grulich - 6.2.0~rc2-1 +- 6.2.0 - rc2 + +* Thu Sep 16 2021 Jan Grulich - 6.2.0~rc-1 +- 6.2.0 - rc diff --git a/SPECS-EXTENDED/qt6-qtsensors/qt6-qtsensors.signatures.json b/SPECS-EXTENDED/qt6-qtsensors/qt6-qtsensors.signatures.json new file mode 100644 index 00000000000..28610fafeb2 --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtsensors/qt6-qtsensors.signatures.json @@ -0,0 +1,5 @@ +{ + "Signatures": { + "qtsensors-everywhere-src-6.5.7.tar.xz": "14e91234c7294bd38eb10781d4aae2a73cf7236b2a4074bdfccc7dffe8e63c9a" + } +} \ No newline at 
end of file diff --git a/SPECS-EXTENDED/qt6-qtsensors/qt6-qtsensors.spec b/SPECS-EXTENDED/qt6-qtsensors/qt6-qtsensors.spec new file mode 100644 index 00000000000..3f5377544aa --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtsensors/qt6-qtsensors.spec @@ -0,0 +1,176 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux + +%global qt_module qtsensors + +Summary: Qt6 - Sensors component +Name: qt6-%{qt_module} +Version: 6.5.7 +Release: 1%{?dist} +%global majmin %(echo %{version} | cut -d. -f1-2) +%global qt_version %(echo %{version} | cut -d~ -f1) + +# See LGPL_EXCEPTIONS.txt, LICENSE.GPL3, respectively, for exception details +License: LGPL-3.0-only OR GPL-3.0-only WITH Qt-GPL-exception-1.0 +Url: http://www.qt.io/ +Source0: https://download.qt.io/official_releases/qt/%{majmin}/%{version}/src/submodules/%{qt_module}-everywhere-opensource-src-%{version}.tar.xz#/%{qt_module}-everywhere-src-%{version}.tar.xz + +# filter qml/plugin provides +%global __provides_exclude_from ^(%{_qt_archdatadir}/qml/.*\\.so|%{_qt_plugindir}/.*\\.so)$ + +BuildRequires: cmake +BuildRequires: gcc-c++ +BuildRequires: ninja-build +BuildRequires: qt-rpm-macros +BuildRequires: qtbase-devel >= %{version} +BuildRequires: qtbase-private-devel +%{?_qt6:Requires: %{_qt6}%{?_isa} = %{_qt6_version}} +BuildRequires: qtdeclarative-devel >= %{version} +BuildRequires: qtsvg-devel >= %{version} + +BuildRequires: pkgconfig(xkbcommon) >= 0.5.0 +BuildRequires: openssl-devel + +# provides a plugin that can use iio-sensor-proxy +Recommends: iio-sensor-proxy + +%description +The Qt Sensors API provides access to sensor hardware via QML and C++ +interfaces. The Qt Sensors API also provides a motion gesture recognition +API for devices. + +%package devel +Summary: Development files for %{name} +Requires: %{name}%{?_isa} = %{version}-%{release} +Requires: qtbase-devel%{?_isa} +%description devel +%{summary}. + +%if 0%{?examples} +%package examples +Summary: Programming examples for %{name} +Requires: %{name}%{?_isa} = %{version}-%{release} +# BuildRequires: qt6-qtsensors-devel >= %{version} +%description examples +%{summary}. 
+%endif + +%prep +%autosetup -n %{qt_module}-everywhere-src-%{qt_version}%{?unstable:-%{prerelease}} -p1 + + +%build +%cmake_qt -DQT_BUILD_EXAMPLES:BOOL=%{?examples:ON}%{!?examples:OFF} + +%cmake_build + + +%install +%cmake_install + +## .prl/.la file love +# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs +pushd %{buildroot}%{_qt_libdir} +for prl_file in libQt6*.prl ; do + sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file} + if [ -f "$(basename ${prl_file} .prl).so" ]; then + rm -fv "$(basename ${prl_file} .prl).la" + sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file} + fi +done +popd + +%ldconfig_scriptlets + +%files +%license LICENSES/* +%{_qt_libdir}/libQt6Sensors.so.6* +%{_qt_plugindir}/sensors/ + +%files devel +%{_qt_headerdir}/QtSensors/ +%{_qt_libdir}/libQt6Sensors.so +%{_qt_libdir}/libQt6Sensors.prl +%{_qt_libdir}/cmake/Qt6/FindSensorfw.cmake +%{_qt_libdir}/cmake/Qt6BuildInternals/StandaloneTests/QtSensorsTestsConfig.cmake +%dir %{_qt_libdir}/cmake/Qt6Sensors/ +%{_qt_libdir}/cmake/Qt6Sensors/*.cmake +%{_qt_archdatadir}/mkspecs/modules/qt_lib_sensors*.pri +/usr/modules/*.json +%{_qt_libdir}/qt6/metatypes/qt6*_metatypes.json +%{_qt_libdir}/pkgconfig/*.pc + +%if 0%{?examples} +%files examples +%{_qt_examplesdir}/ +%endif + + +%changelog +* Tue Dec 02 2025 Sandeep Karambelkar - 6.5.7-1 +- Initial Azure Linux import from Fedora 37 (license: MIT) +- Upgrade to 6.5.7 +- License Verified + +* Wed Jul 12 2023 Jan Grulich - 6.5.1-2 +- Rebuild for qtbase private API version change + +* Mon May 22 2023 Jan Grulich - 6.5.1-1 +- 6.5.1 + +* Tue Apr 04 2023 Jan Grulich - 6.5.0-1 +- 6.5.0 + +* Thu Mar 23 2023 Jan Grulich - 6.4.3-1 +- 6.4.3 + +* Tue Jan 31 2023 Jan Grulich - 6.4.2-3 +- migrated to SPDX license + +* Fri Jan 20 2023 Fedora Release Engineering - 6.4.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild + +* Mon Jan 16 2023 Jan Grulich - 6.4.2-1 +- 6.4.2 + +* Wed Nov 23 2022 Jan Grulich - 6.4.1-1 +- 6.4.1 + +* Mon Oct 31 2022 Jan Grulich - 6.4.0-1 +- 6.4.0 + +* Sat Jul 23 2022 Fedora Release Engineering - 6.3.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild + +* Wed Jul 13 2022 Jan Grulich - 6.3.1-1 +- 6.3.1 + +* Wed May 25 2022 Jan Grulich - 6.3.0-2 +- Enable examples + +* Wed Apr 13 2022 Jan Grulich - 6.3.0-1 +- 6.3.0 + +* Fri Feb 25 2022 Jan Grulich - 6.2.3-2 +- Enable s390x builds + +* Mon Jan 31 2022 Jan Grulich - 6.2.3-1 +- 6.2.3 + +* Fri Jan 21 2022 Fedora Release Engineering - 6.2.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild + +* Tue Dec 14 2021 Jan Grulich - 6.2.2-1 +- 6.2.2 + +* Fri Oct 29 2021 Jan Grulich - 6.2.1-1 +- 6.2.1 + +* Thu Sep 30 2021 Jan Grulich - 6.2.0-1 +- 6.2.0 + +* Mon Sep 27 2021 Jan Grulich - 6.2.0~rc2-1 +- 6.2.0 - rc2 + +* Thu Sep 16 2021 Jan Grulich - 6.2.0~rc-1 +- 6.2.0 - rc diff --git a/SPECS-EXTENDED/qt6-qtserialport/qt6-qtserialport.signatures.json b/SPECS-EXTENDED/qt6-qtserialport/qt6-qtserialport.signatures.json new file mode 100644 index 00000000000..554a7b6dec7 --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtserialport/qt6-qtserialport.signatures.json @@ -0,0 +1,5 @@ +{ + "Signatures": { + "qtserialport-everywhere-src-6.5.7.tar.xz": "b09404629bef9443e543e771a8f8545b2b7d5c65bcb3525740c208154c2872cb" + } +} \ No newline at end of file diff --git a/SPECS-EXTENDED/qt6-qtserialport/qt6-qtserialport.spec b/SPECS-EXTENDED/qt6-qtserialport/qt6-qtserialport.spec new file mode 100644 index 00000000000..5cd6ab28dbe --- /dev/null +++ b/SPECS-EXTENDED/qt6-qtserialport/qt6-qtserialport.spec @@ 
-0,0 +1,167 @@ +Vendor: Microsoft Corporation +Distribution: Azure Linux + +%global qt_module qtserialport + +%global examples 1 + +Summary: Qt6 - SerialPort component +Name: qt6-%{qt_module} +Version: 6.5.7 +Release: 1%{?dist} + +# See LGPL_EXCEPTIONS.txt, LICENSE.GPL3, respectively, for exception details +License: LGPL-3.0-only OR GPL-3.0-only WITH Qt-GPL-exception-1.0 +Url: http://www.qt.io +%global majmin %(echo %{version} | cut -d. -f1-2) +%global qt_version %(echo %{version} | cut -d~ -f1) + +Source0: https://download.qt.io/official_releases/qt/%{majmin}/%{version}/src/submodules/%{qt_module}-everywhere-opensource-src-%{version}.tar.xz#/%{qt_module}-everywhere-src-%{version}.tar.xz + +BuildRequires: cmake +BuildRequires: gcc-c++ +BuildRequires: ninja-build +BuildRequires: qt-rpm-macros +BuildRequires: qtbase-devel >= %{version} +BuildRequires: pkgconfig(libudev) + +BuildRequires: qtbase-private-devel +%{?_qt6:Requires: %{_qt6}%{?_isa} = %{_qt6_version}} + +%description +Qt Serial Port provides the basic functionality, which includes configuring, +I/O operations, getting and setting the control signals of the RS-232 pinouts. + +%package devel +Summary: Development files for %{name} +Requires: %{name}%{?_isa} = %{version}-%{release} +Requires: qtbase-devel%{?_isa} +%description devel +%{summary}. + +%if 0%{?examples} +%package examples +Summary: Programming examples for %{name} +Requires: %{name}%{?_isa} = %{version}-%{release} +# BuildRequires: qt-qtserialport-devel >= %{version} +%description examples +%{summary}. +%endif + +%prep +%autosetup -n %{qt_module}-everywhere-src-%{qt_version}%{?unstable:-%{prerelease}} -p1 + + +%build +%cmake_qt -DQT_BUILD_EXAMPLES:BOOL=%{?examples:ON}%{!?examples:OFF} + +%cmake_build + + +%install +%cmake_install + +## .prl/.la file love +# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs +pushd %{buildroot}%{_qt_libdir} +for prl_file in libQt6*.prl ; do + sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file} + if [ -f "$(basename ${prl_file} .prl).so" ]; then + rm -fv "$(basename ${prl_file} .prl).la" + sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file} + fi +done +popd + + +%ldconfig_scriptlets + +%files +%license LICENSES/* +%{_qt_libdir}/libQt6SerialPort.so.6* + +%files devel +%{_qt_headerdir}/QtSerialPort/ +%{_qt_libdir}/libQt6SerialPort.so +%{_qt_libdir}/libQt6SerialPort.prl +%{_qt_libdir}/cmake/Qt6BuildInternals/StandaloneTests/QtSerialPortTestsConfig.cmake +%dir %{_qt_libdir}/cmake/Qt6SerialPort/ +%{_qt_libdir}/cmake/Qt6SerialPort/*.cmake +%{_qt_archdatadir}/mkspecs/modules/qt_lib_serialport*.pri +%{_qt_libdir}/qt6/metatypes/qt6*_metatypes.json +/usr/modules/*.json +%{_qt_libdir}/pkgconfig/*.pc + +%if 0%{?examples} +%files examples +%{_qt_examplesdir}/ +%endif + +%changelog +* Fri Dec 05 2025 Sandeep Karambelkar - 6.5.7-1 +- Initial Azure Linux import from Fedora 37 (license: MIT) +- Upgrade to 6.5.7 +- License Verified + +* Wed Jul 12 2023 Jan Grulich - 6.5.1-2 +- Rebuild for qtbase private API version change + +* Mon May 22 2023 Jan Grulich - 6.5.1-1 +- 6.5.1 + +* Tue Apr 04 2023 Jan Grulich - 6.5.0-1 +- 6.5.0 + +* Thu Mar 23 2023 Jan Grulich - 6.4.3-1 +- 6.4.3 + +* Tue Jan 31 2023 Jan Grulich - 6.4.2-3 +- migrated to SPDX license + +* Fri Jan 20 2023 Fedora Release Engineering - 6.4.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild + +* Mon Jan 16 2023 Jan Grulich - 6.4.2-1 +- 6.4.2 + +* Wed Nov 23 2022 Jan Grulich - 6.4.1-1 +- 6.4.1 + +* Mon Oct 31 2022 Jan Grulich - 6.4.0-1 +- 6.4.0 + +* Sat Jul 23 2022 Fedora Release 
Engineering - 6.3.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild + +* Wed Jul 13 2022 Jan Grulich - 6.3.1-1 +- 6.3.1 + +* Wed May 25 2022 Jan Grulich - 6.3.0-2 +- Enable examples + +* Wed Apr 13 2022 Jan Grulich - 6.3.0-1 +- 6.3.0 + +* Fri Feb 25 2022 Jan Grulich - 6.2.3-2 +- Enable s390x builds + +* Mon Jan 31 2022 Jan Grulich - 6.2.3-1 +- 6.2.3 + +* Fri Jan 21 2022 Fedora Release Engineering - 6.2.2-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild + +* Tue Dec 14 2021 Jan Grulich - 6.2.2-1 +- 6.2.2 + +* Fri Oct 29 2021 Jan Grulich - 6.2.1-1 +- 6.2.1 + +* Thu Sep 30 2021 Jan Grulich - 6.2.0-1 +- 6.2.0 + +* Mon Sep 27 2021 Jan Grulich - 6.2.0~rc2-1 +- 6.2.0 - rc2 + +* Thu Sep 16 2021 Jan Grulich - 6.2.0~rc-1 +- 6.2.0 - rc diff --git a/SPECS-EXTENDED/quagga/quagga.spec b/SPECS-EXTENDED/quagga/quagga.spec index 8e10ee69cc0..281dbb33153 100644 --- a/SPECS-EXTENDED/quagga/quagga.spec +++ b/SPECS-EXTENDED/quagga/quagga.spec @@ -12,11 +12,11 @@ Distribution: Azure Linux Name: quagga Version: 1.2.4 -Release: 15%{?dist} +Release: 16%{?dist} Summary: Routing daemon License: GPLv2+ URL: http://www.quagga.net -Source0: http://download.savannah.gnu.org/releases/quagga/%{name}-%{version}.tar.gz +Source0: https://github.com/%{name}/%{name}/releases/download/%{name}-%{version}/%{name}-%{version}.tar.gz Source1: quagga-filter-perl-requires.sh Source2: quagga-tmpfs.conf BuildRequires: perl-generators pkgconfig @@ -183,7 +183,8 @@ fi %systemd_preun ripngd.service %files -%doc AUTHORS COPYING +%license COPYING COPYING.LIB +%doc AUTHORS %doc zebra/zebra.conf.sample %doc isisd/isisd.conf.sample %doc ripd/ripd.conf.sample @@ -213,10 +214,10 @@ fi %{_unitdir}/*.service %files contrib -%doc AUTHORS COPYING %attr(0644,root,root) tools +%license COPYING COPYING.LIB +%doc AUTHORS %attr(0644,root,root) tools %files devel -%doc AUTHORS COPYING %dir %{_libdir}/quagga/ %{_libdir}/quagga/*.so %dir %{_includedir}/quagga @@ -225,6 +226,11 @@ fi %{_includedir}/quagga/ospfd/*.h %changelog +* Tue Jan 06 2026 Pawel Winogrodzki - 1.2.4-16 +- Bumping release to rebuild with new 'net-snmp' libs. +- Fixed source URL. +- License verified. + * Thu Jun 17 2021 Thomas Crain - 1.2.4-15 - Conditionalize building of tex-based documentation diff --git a/SPECS-EXTENDED/rdma-core/rdma-core.signatures.json b/SPECS-EXTENDED/rdma-core/rdma-core.signatures.json index 5ea5fabfee1..9f118bffa4a 100644 --- a/SPECS-EXTENDED/rdma-core/rdma-core.signatures.json +++ b/SPECS-EXTENDED/rdma-core/rdma-core.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "rdma-core-55.0.tar.gz": "6F8B97267807CDAE54845F542EE3D75DE80FDC24FE2632F5DB1573ECEF132D0F" + "rdma-core-59.0.tar.gz": "e2c169b13b318cdc3b5ff957cd8b6dcc8bdf96dd9c5d42324129c163bb057ce5" } } diff --git a/SPECS-EXTENDED/rdma-core/rdma-core.spec b/SPECS-EXTENDED/rdma-core/rdma-core.spec index d45848fc7ed..e7be8da57e3 100644 --- a/SPECS-EXTENDED/rdma-core/rdma-core.spec +++ b/SPECS-EXTENDED/rdma-core/rdma-core.spec @@ -1,6 +1,6 @@ Summary: RDMA core userspace libraries and daemons Name: rdma-core -Version: 55.0 +Version: 59.0 Release: 1%{?dist} URL: https://github.com/linux-rdma/rdma-core Vendor: Microsoft Corporation @@ -625,6 +625,9 @@ fi %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 59.0-1 +- Upgrade version to 59.0. 
+ * Tue Jan 07 2025 Elaheh Dehghani - 55.0-1 - Upgrade to version 55.0 - Add rdma-core to PMC extended repo diff --git a/SPECS-EXTENDED/rhino/rhino-1.7.15.1.pom b/SPECS-EXTENDED/rhino/rhino-1.7.15.1.pom new file mode 100644 index 00000000000..908f0c46be7 --- /dev/null +++ b/SPECS-EXTENDED/rhino/rhino-1.7.15.1.pom @@ -0,0 +1,40 @@ + + + 4.0.0 + org.mozilla + rhino + 1.7.15.1 + rhino + + Rhino is an open-source implementation of JavaScript written entirely in Java. + It is typically embedded into Java applications to provide scripting to end users. + Full jar including tools, excluding the JSR-223 Script Engine wrapper. + + https://mozilla.github.io/rhino/ + + org.sonatype.oss + oss-parent + 7 + + + + Mozilla Public License, Version 2.0 + http://www.mozilla.org/MPL/2.0/index.txt + + + + scm:git:git@github.com:mozilla/rhino.git + scm:git:git@github.com:mozilla/rhino.git + git@github.com:mozilla/rhino.git + + + The Mozilla Foundation + http://www.mozilla.org + + + + Greg Brail + gbrail@users.noreply.github.com + + + diff --git a/SPECS-EXTENDED/rhino/rhino-addOrbitManifest.patch b/SPECS-EXTENDED/rhino/rhino-addOrbitManifest.patch deleted file mode 100644 index c3f2aad3ee0..00000000000 --- a/SPECS-EXTENDED/rhino/rhino-addOrbitManifest.patch +++ /dev/null @@ -1,33 +0,0 @@ -Index: src/manifest -=================================================================== ---- src/manifest.orig -+++ src/manifest -@@ -4,3 +4,28 @@ Implementation-Version: 1.7.7.1 - Implementation-Title: Mozilla Rhino 1.7.7.1 - Implementation-Vendor: Mozilla Foundation - Implementation-URL: http://www.mozilla.org/rhino -+Bundle-Vendor: %Bundle-Vendor.0 -+Bundle-Localization: plugin -+Bundle-RequiredExecutionEnvironment: J2SE-1.5 -+Bundle-Name: %Bundle-Name.0 -+Bundle-SymbolicName: org.mozilla.javascript -+Bundle-Version: 1.7.12 -+Export-Package: org.mozilla.classfile,org.mozilla.javascript,org.mozil -+ la.javascript.debug,org.mozilla.javascript.jdk13,org.mozilla.javascri -+ pt.jdk15,org.mozilla.javascript.optimizer,org.mozilla.javascript.rege -+ xp,org.mozilla.javascript.serialize,org.mozilla.javascript.tools,org. -+ mozilla.javascript.tools.debugger,org.mozilla.javascript.tools.debugg -+ er.treetable,org.mozilla.javascript.tools.idswitch,org.mozilla.javasc -+ ript.tools.jsc,org.mozilla.javascript.tools.shell,org.mozilla.javascr -+ ipt.xml,org.mozilla.javascript.xml.impl.xmlbeans,org.mozilla.javascri -+ pt.xmlimpl -+Bundle-ManifestVersion: 2 -+Import-Package: javax.swing;resolution:=optional,javax.swing.border;re -+ solution:=optional,javax.swing.event;resolution:=optional,javax.swing -+ .filechooser;resolution:=optional,javax.swing.table;resolution:=optio -+ nal,javax.swing.text;resolution:=optional,javax.swing.tree;resolution -+ :=optional,javax.xml.namespace;resolution:=optional,javax.xml.parsers -+ ;resolution:=optional,javax.xml.transform;resolution:=optional,javax. -+ xml.transform.dom;resolution:=optional,javax.xml.transform.stream;res -+ olution:=optional,org.w3c.dom;resolution:=optional,org.xml.sax;resolu -+ tion:=optional diff --git a/SPECS-EXTENDED/rhino/rhino-build.patch b/SPECS-EXTENDED/rhino/rhino-build.patch deleted file mode 100644 index b6d9e2c92dc..00000000000 --- a/SPECS-EXTENDED/rhino/rhino-build.patch +++ /dev/null @@ -1,175 +0,0 @@ ---- examples/Matrix.java -+++ examples/Matrix.java -@@ -218,7 +218,7 @@ public class Matrix implements Scriptabl - * Use the convenience method from Context that takes care of calling - * toString, etc. 
- */ -- public Object getDefaultValue(Class typeHint) { -+ public Object getDefaultValue(Class typeHint) { - return "[object Matrix]"; - } - ---- examples/PrimitiveWrapFactory.java -+++ examples/PrimitiveWrapFactory.java -@@ -25,7 +25,7 @@ import org.mozilla.javascript.*; - public class PrimitiveWrapFactory extends WrapFactory { - @Override - public Object wrap(Context cx, Scriptable scope, Object obj, -- Class staticType) -+ Class staticType) - { - if (obj instanceof String || obj instanceof Number || - obj instanceof Boolean) ---- src/org/mozilla/javascript/WrapFactory.java -+++ src/org/mozilla/javascript/WrapFactory.java -@@ -42,7 +42,7 @@ public class WrapFactory - * @return the wrapped value. - */ - public Object wrap(Context cx, Scriptable scope, -- Object obj, Class staticType) -+ Object obj, Class staticType) - { - if (obj == null || obj == Undefined.instance - || obj instanceof Scriptable) ---- toolsrc/org/mozilla/javascript/tools/debugger/build.xml -+++ toolsrc/org/mozilla/javascript/tools/debugger/build.xml -@@ -8,7 +8,7 @@ - - - -- - - -@@ -16,9 +16,9 @@ - -- -- -- -+ -+ -+ - - - -@@ -26,58 +26,58 @@ - - - -- -+ - import java.awt.Component; - --package org.mozilla.javascript.tools.debugger.downloaded; -+package org.mozilla.javascript.tools.debugger.treetable; - - -- -+ - import java.awt.event.*; - - -- -+ - import java.awt.AWTEvent; - - -- -+ - import java.io.Serializable; - - -- -+ - import javax.swing.*; - -- package org.mozilla.javascript.tools.debugger.downloaded; -+ package org.mozilla.javascript.tools.debugger.treetable; - import javax.swing.*; - - -- -+ - class ListToTreeSelectionModelWrapper - public class ListToTreeSelectionModelWrapper - -- -+ - ListSelectionModel getListSelectionModel - public ListSelectionModel getListSelectionModel - -- -+ - import java.awt.Rectangle; - - -- -+ - import javax.swing.tree.TreeModel; - -- package org.mozilla.javascript.tools.debugger.downloaded; -+ package org.mozilla.javascript.tools.debugger.treetable; - import javax.swing.tree.TreeModel; - - -- -+ - import javax.swing.JTree; - -- package org.mozilla.javascript.tools.debugger.downloaded; -+ package org.mozilla.javascript.tools.debugger.treetable; - import javax.swing.JTree; - - -- -+ - - - ---- toolsrc/org/mozilla/javascript/tools/shell/JavaPolicySecurity.java -+++ toolsrc/org/mozilla/javascript/tools/shell/JavaPolicySecurity.java -@@ -36,7 +36,7 @@ public class JavaPolicySecurity extends - return super.defineClass(name, data, 0, data.length, domain); - } - -- public void linkClass(Class cl) { -+ public void linkClass(Class cl) { - resolveClass(cl); - } - } ---- xmlimplsrc/org/mozilla/javascript/xmlimpl/Namespace.java -+++ xmlimplsrc/org/mozilla/javascript/xmlimpl/Namespace.java -@@ -86,7 +86,7 @@ class Namespace extends IdScriptableObje - } - - @Override -- public Object getDefaultValue(Class hint) { -+ public Object getDefaultValue(Class hint) { - return uri(); - } - ---- xmlimplsrc/org/mozilla/javascript/xmlimpl/QName.java -+++ xmlimplsrc/org/mozilla/javascript/xmlimpl/QName.java -@@ -116,7 +116,7 @@ final class QName extends IdScriptableOb - } - - @Override -- public Object getDefaultValue(Class hint) { -+ public Object getDefaultValue(Class hint) { - return toString(); - } - ---- xmlimplsrc/org/mozilla/javascript/xmlimpl/XMLObjectImpl.java -+++ xmlimplsrc/org/mozilla/javascript/xmlimpl/XMLObjectImpl.java -@@ -105,7 +105,7 @@ abstract class XMLObjectImpl extends XML - } - - @Override -- public final Object getDefaultValue(Class hint) { -+ public final Object 
getDefaultValue(Class hint) { - return this.toString(); - } - diff --git a/SPECS-EXTENDED/rhino/rhino-build.xml b/SPECS-EXTENDED/rhino/rhino-build.xml new file mode 100644 index 00000000000..daf7c2e73f4 --- /dev/null +++ b/SPECS-EXTENDED/rhino/rhino-build.xml @@ -0,0 +1,159 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/rhino/rhino-debugger.script b/SPECS-EXTENDED/rhino/rhino-debugger.script deleted file mode 100644 index 2a71441e3bd..00000000000 --- a/SPECS-EXTENDED/rhino/rhino-debugger.script +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# -# rhino script -# JPackage Project - -# Source functions library -. /usr/share/java-utils/java-functions - -# Source system prefs -if [ -f /etc/rhino.conf ] ; then - . /etc/rhino.conf -fi - -# Source user prefs -if [ -f $HOME/.rhinorc ] ; then - . $HOME/.rhinorc -fi - -# Configuration -MAIN_CLASS=org.mozilla.javascript.tools.debugger.Main -BASE_FLAGS="-Xbootclasspath/p:$(build-classpath rhino jline xmlbeans/xbean)" -BASE_JARS="rhino jline xmlbeans/xbean" - -# Set parameters -set_jvm -set_classpath $BASE_JARS -set_flags $BASE_FLAGS -set_options $BASE_OPTIONS - -# Let's start -run "$@" diff --git a/SPECS-EXTENDED/rhino/rhino-engine-1.7.15.1.pom b/SPECS-EXTENDED/rhino/rhino-engine-1.7.15.1.pom new file mode 100644 index 00000000000..ce9a989538e --- /dev/null +++ b/SPECS-EXTENDED/rhino/rhino-engine-1.7.15.1.pom @@ -0,0 +1,45 @@ + + + 4.0.0 + org.mozilla + rhino-engine + 1.7.15.1 + rhino-engine + + Rhino Javascript JSR-223 Script Engine wrapper. + + https://mozilla.github.io/rhino/ + + org.sonatype.oss + oss-parent + 7 + + + + Mozilla Public License, Version 2.0 + http://www.mozilla.org/MPL/2.0/index.txt + + + + scm:git:git@github.com:mozilla/rhino.git + scm:git:git@github.com:mozilla/rhino.git + git@github.com:mozilla/rhino.git + + + The Mozilla Foundation + http://www.mozilla.org + + + + org.mozilla + rhino + 1.7.15.1 + + + + + Greg Brail + gbrail@users.noreply.github.com + + + diff --git a/SPECS-EXTENDED/rhino/rhino-idswitch.script b/SPECS-EXTENDED/rhino/rhino-idswitch.script deleted file mode 100644 index d02e3dd7a7f..00000000000 --- a/SPECS-EXTENDED/rhino/rhino-idswitch.script +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# -# rhino script -# JPackage Project - -# Source functions library -. /usr/share/java-utils/java-functions - -# Source system prefs -if [ -f /etc/rhino.conf ] ; then - . /etc/rhino.conf -fi - -# Source user prefs -if [ -f $HOME/.rhinorc ] ; then - . $HOME/.rhinorc -fi - -# Configuration -MAIN_CLASS=org.mozilla.javascript.tools.idswitch.Main -BASE_FLAGS="-Xbootclasspath/p:$(build-classpath rhino xmlbeans/xbean)" -BASE_JARS="rhino xmlbeans/xbean" - -# Set parameters -set_jvm -set_classpath $BASE_JARS -set_flags $BASE_FLAGS -set_options $BASE_OPTIONS - -# Let's start -run "$@" diff --git a/SPECS-EXTENDED/rhino/rhino-jsc.script b/SPECS-EXTENDED/rhino/rhino-jsc.script deleted file mode 100644 index 8b4a075d8fd..00000000000 --- a/SPECS-EXTENDED/rhino/rhino-jsc.script +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# -# rhino script -# JPackage Project - -# Source functions library -. /usr/share/java-utils/java-functions - -# Source system prefs -if [ -f /etc/rhino.conf ] ; then - . /etc/rhino.conf -fi - -# Source user prefs -if [ -f $HOME/.rhinorc ] ; then - . 
$HOME/.rhinorc -fi - -# Configuration -MAIN_CLASS=org.mozilla.javascript.tools.jsc.Main -BASE_FLAGS="-Xbootclasspath/p:$(build-classpath rhino1 xmlbeans/xbean)" -BASE_JARS="rhino xmlbeans/xbean" - -# Set parameters -set_jvm -set_classpath $BASE_JARS -set_flags $BASE_FLAGS -set_options $BASE_OPTIONS - -# Let's start -run "$@" diff --git a/SPECS-EXTENDED/rhino/rhino-1.7.7.1.pom b/SPECS-EXTENDED/rhino/rhino-runtime-1.7.15.1.pom similarity index 53% rename from SPECS-EXTENDED/rhino/rhino-1.7.7.1.pom rename to SPECS-EXTENDED/rhino/rhino-runtime-1.7.15.1.pom index 16d97b84be0..29c5de555a4 100644 --- a/SPECS-EXTENDED/rhino/rhino-1.7.7.1.pom +++ b/SPECS-EXTENDED/rhino/rhino-runtime-1.7.15.1.pom @@ -1,41 +1,38 @@ - + + 4.0.0 - + org.mozilla + rhino-runtime + 1.7.15.1 + rhino-runtime + + Rhino JavaScript runtime jar, excludes tools & JSR-223 Script Engine wrapper. + + https://mozilla.github.io/rhino/ org.sonatype.oss oss-parent 7 - - org.mozilla - rhino - Mozilla Rhino - 1.7.7.1 - - jar - - Rhino is an open-source implementation of JavaScript written entirely in Java. It is typically - embedded into Java applications to provide scripting to end users. - - https://developer.mozilla.org/en/Rhino - Mozilla Public License, Version 2.0 http://www.mozilla.org/MPL/2.0/index.txt - scm:git:git@github.com:mozilla/rhino.git scm:git:git@github.com:mozilla/rhino.git git@github.com:mozilla/rhino.git - The Mozilla Foundation http://www.mozilla.org + + + Greg Brail + gbrail@users.noreply.github.com + + diff --git a/SPECS-EXTENDED/rhino/rhino.script b/SPECS-EXTENDED/rhino/rhino.script deleted file mode 100644 index 4dd95ec67bc..00000000000 --- a/SPECS-EXTENDED/rhino/rhino.script +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# -# rhino script -# JPackage Project - -# Source functions library -. /usr/share/java-utils/java-functions - -# Source system prefs -if [ -f /etc/rhino.conf ] ; then - . /etc/rhino.conf -fi - -# Source user prefs -if [ -f $HOME/.rhinorc ] ; then - . 
$HOME/.rhinorc -fi - -# Configuration -MAIN_CLASS=org.mozilla.javascript.tools.shell.Main -BASE_FLAGS="-Xbootclasspath/p:$(build-classpath rhino jline1 xmlbeans/xbean)" -BASE_JARS="rhino jline xmlbeans/xbean" - -# Set parameters -set_jvm -set_classpath $BASE_JARS -set_flags $BASE_FLAGS -set_options $BASE_OPTIONS - -# Let's start -run "$@" diff --git a/SPECS-EXTENDED/rhino/rhino.signatures.json b/SPECS-EXTENDED/rhino/rhino.signatures.json index dbc16c752a1..cb773d58541 100644 --- a/SPECS-EXTENDED/rhino/rhino.signatures.json +++ b/SPECS-EXTENDED/rhino/rhino.signatures.json @@ -1,10 +1,9 @@ { "Signatures": { - "Rhino1_7_7_1_RELEASE.tar.gz": "277478f81c160953976c198beceab61a0b23b5d2633a84effd9236284aa301b1", - "rhino-1.7.7.1.pom": "bd274ebb729e421e83028f05a31c701e95fb755c03d1911258bff35d779bea3f", - "rhino-debugger.script": "abf22a063ef36feb0f09da72952a25d5d4ab1001afa6f5974654ebbc322d6191", - "rhino-idswitch.script": "334586fd570b6c5e8c52787808a12f071f30efc1ff20254a062fa5128ca37cca", - "rhino-jsc.script": "65cc0ac893193a84e024d3b66811afbe747f63c5910ac3ae7691b251ac32bd1c", - "rhino.script": "550cbb089c12752a888ecc1bda8d5384095e948699c49ffc995c98bb907cfb7f" + "Rhino1_7_15_1_Release.tar.gz": "729c4ffc0405fad09b22190c2dd95da8c6a79fe353d667dc852352464bfe5955", + "rhino-1.7.15.1.pom": "b40decfa0e24c80bfd957ba3adfac7f6e3eed7730a06b551c01493faa2e76883", + "rhino-build.xml": "db62671eb039c402b203b08d7ecb6dfcfc575be75c7828e01013733727fa4531", + "rhino-engine-1.7.15.1.pom": "2794a4adbecd9837b5e406c0f4a7e40d8620105a44ff11ccd3cae1a2de4615aa", + "rhino-runtime-1.7.15.1.pom": "f96738c51ba7b081ebebec9fa298e950b18b2ed8acf4099cf7c54ec16b8ec493" } } diff --git a/SPECS-EXTENDED/rhino/rhino.spec b/SPECS-EXTENDED/rhino/rhino.spec index d4e03b7518e..b0760ee88c3 100644 --- a/SPECS-EXTENDED/rhino/rhino.spec +++ b/SPECS-EXTENDED/rhino/rhino.spec @@ -1,9 +1,10 @@ +%global debug_package %{nil} Vendor: Microsoft Corporation Distribution: Azure Linux # # spec file for package rhino # -# Copyright (c) 2020 SUSE LLC +# Copyright (c) 2025 SUSE LLC and contributors # Copyright (c) 2000-2009, JPackage Project # # All modifications and additions to the file contributed by third parties @@ -19,35 +20,43 @@ Distribution: Azure Linux # -%define scm_version 1_7_7_1 +%define scm_version 1_7_15_1 Name: rhino -Version: 1.7.7.1 -Release: 2%{?dist} +Version: 1.7.15.1 +Release: 1%{?dist} Summary: JavaScript for Java License: MPL-2.0 Group: Development/Libraries/Java -URL: https://github.com/mozilla/rhino -Source0: https://github.com/mozilla/rhino/archive/Rhino%{scm_version}_RELEASE.tar.gz +URL: https://www.mozilla.org/rhino/ +Source0: https://github.com/mozilla/rhino/archive/Rhino%{scm_version}_Release.tar.gz Source1: https://repo1.maven.org/maven2/org/mozilla/rhino/%{version}/rhino-%{version}.pom -Source2: rhino.script -Source3: rhino-debugger.script -Source4: rhino-idswitch.script -Source5: rhino-jsc.script -Patch0: rhino-build.patch -# Add OSGi metadata from Eclipse Orbit project -Patch1: rhino-addOrbitManifest.patch +Source2: https://repo1.maven.org/maven2/org/mozilla/rhino-engine/%{version}/rhino-engine-%{version}.pom +Source3: https://repo1.maven.org/maven2/org/mozilla/rhino-runtime/%{version}/rhino-runtime-%{version}.pom +Source10: %{name}-build.xml BuildRequires: ant -BuildRequires: java-devel >= 1.7 +BuildRequires: fdupes +BuildRequires: java-devel >= 1.8 BuildRequires: javapackages-local-bootstrap Requires: javapackages-tools -Requires: jline -BuildArch: noarch %description Rhino is an open-source implementation of JavaScript 
written entirely in Java. It is typically embedded into Java applications to provide scripting to end users. +%package engine +Summary: Rhino Engine +Requires: %{name} = %{version} + +%description engine +Rhino Javascript JSR-223 Script Engine wrapper. + +%package runtime +Summary: Rhino Runtime + +%description runtime +Rhino JavaScript runtime jar, excludes tools & JSR-223 Script Engine wrapper. + %package demo Summary: Examples for %{name} Group: Development/Libraries/Java @@ -56,76 +65,79 @@ Group: Development/Libraries/Java Examples for %{name} %prep -%setup -q -n %{name}-Rhino%{scm_version}_RELEASE -%patch 0 -b .build -%patch 1 -b .fixManifest -cp %{SOURCE1} pom.xml -%pom_remove_parent - -# Fix manifest -sed -i -e '/^Class-Path:.*$/d' src/manifest - -# Add jpp release info to version -sed -i -e 's|^implementation.version: Rhino .* release .* \${implementation.date}|implementation.version: Rhino %{version} release %{release} \${implementation.date}|' build.properties +%setup -q -n %{name}-Rhino%{scm_version}_Release +cp %{SOURCE10} build.xml %build -%{ant} \ - -Dtarget-jvm=6 -Dsource-level=6 \ - deepclean jar copy-all +%{ant} jar pushd examples -export CLASSPATH=../build/%{name}%{version}/js.jar -SOURCEPATH=../build/%{name}%{version}/src -%javac -sourcepath ${SOURCEPATH} -source 6 -target 6 *.java -%jar cvf ../build/%{name}%{version}/%{name}-examples.jar *.class +export CLASSPATH=../target/%{name}-%{version}.jar +SOURCEPATH=../src +javac -sourcepath ${SOURCEPATH} -source 8 -target 8 *.java +jar --create --verbose \ +%if %{?pkg_vcmp:%pkg_vcmp java-devel >= 17}%{!?pkg_vcmp:0} + --date="$(date -u -d @${SOURCE_DATE_EPOCH:-$(date +%%s)} +%%Y-%%m-%%dT%%H:%%M:%%SZ)" \ +%endif + --file=../target/%{name}-examples-%{version}.jar *.class popd %install -# man page -mkdir -p %{buildroot}%{_mandir}/man1/ -install -m 644 man/%{name}.1 %{buildroot}%{_mandir}/man1/%{name}.1 - # jars -mkdir -p %{buildroot}%{_javadir} -cp -a build/%{name}%{version}/js.jar %{buildroot}%{_javadir}/%{name}.jar +install -dm 0755 %{buildroot}%{_javadir} +install -pm 0644 target/%{name}-%{version}.jar %{buildroot}%{_javadir}/%{name}.jar ln -s %{name}.jar %{buildroot}%{_javadir}/js.jar +install -pm 0644 target/%{name}-engine-%{version}.jar %{buildroot}%{_javadir}/%{name}-engine.jar +install -pm 0644 target/%{name}-runtime-%{version}.jar %{buildroot}%{_javadir}/%{name}-runtime.jar # pom -mkdir -p %{buildroot}%{_mavenpomdir} -cp -a pom.xml %{buildroot}%{_mavenpomdir}/%{name}.pom +install -dm 0755 %{buildroot}%{_mavenpomdir} +cp -a %{SOURCE1} %{buildroot}%{_mavenpomdir}/%{name}.pom %add_maven_depmap %{name}.pom %{name}.jar -a "rhino:js" +cp -a %{SOURCE2} %{buildroot}%{_mavenpomdir}/%{name}-engine.pom +%add_maven_depmap %{name}-engine.pom %{name}-engine.jar -f engine +cp -a %{SOURCE3} %{buildroot}%{_mavenpomdir}/%{name}-runtime.pom +%add_maven_depmap %{name}-runtime.pom %{name}-runtime.jar -f runtime # scripts -mkdir -p %{buildroot}%{_bindir} -install -m 0755 %{SOURCE2} %{buildroot}%{_bindir}/%{name} -install -m 0755 %{SOURCE3} %{buildroot}%{_bindir}/%{name}-debugger -install -m 0755 %{SOURCE4} %{buildroot}%{_bindir}/%{name}-idswitch -install -m 0755 %{SOURCE5} %{buildroot}%{_bindir}/%{name}-jsc +%jpackage_script org.mozilla.javascript.tools.shell.Main "" "" rhino rhino true +%jpackage_script org.mozilla.javascript.tools.debugger.Main "" "" rhino rhino-debugger true +%jpackage_script org.mozilla.javascript.tools.jsc.Main "" "" rhino rhino-jsc true # examples -mkdir -p %{buildroot}%{_datadir}/%{name} +install -dm 0755 
%{buildroot}%{_datadir}/%{name} cp -a examples/* %{buildroot}%{_datadir}/%{name} -cp -a build/%{name}%{version}/%{name}-examples.jar %{buildroot}%{_javadir}/%{name}-examples.jar - -find %{buildroot}%{_datadir}/%{name} -name '*.build' -delete +install -pm 0644 target/%{name}-examples-%{version}.jar %{buildroot}%{_javadir}/%{name}-examples.jar +%fdupes -s %{buildroot}%{_datadir}/%{name} %files -f .mfiles -%license LICENSE.txt %attr(0755,root,root) %{_bindir}/%{name} %attr(0755,root,root) %{_bindir}/%{name}-debugger -%attr(0755,root,root) %{_bindir}/%{name}-idswitch %attr(0755,root,root) %{_bindir}/%{name}-jsc %{_javadir}/js.jar %{_javadir}/%{name}-examples.jar -%{_mandir}/man1/%{name}.1* +%license LICENSE.txt NOTICE.txt NOTICE-tools.txt +%doc README.md CODE_OF_CONDUCT.md RELEASE-NOTES.md + +%files engine -f .mfiles-engine +%license LICENSE.txt +%doc README.md CODE_OF_CONDUCT.md RELEASE-NOTES.md + +%files runtime -f .mfiles-runtime +%license LICENSE.txt NOTICE.txt +%doc README.md CODE_OF_CONDUCT.md RELEASE-NOTES.md %files demo %{_datadir}/%{name} %changelog +* Tue Dec 23 2025 Sumit Jena - 1.7.15.1-1 +- Upgrade to version 1.7.15.1 +- License Verified. + * Thu Oct 14 2021 Pawel Winogrodzki - 1.7.7.1-2 - Converting the 'Release' tag to the '[number].[distribution]' format. diff --git a/SPECS-EXTENDED/ripgrep/ripgrep.spec b/SPECS-EXTENDED/ripgrep/ripgrep.spec index 339b08380a8..15db81037ed 100644 --- a/SPECS-EXTENDED/ripgrep/ripgrep.spec +++ b/SPECS-EXTENDED/ripgrep/ripgrep.spec @@ -20,7 +20,7 @@ Name: ripgrep Version: 13.0.0 -Release: 10%{?dist} +Release: 11%{?dist} Summary: A search tool that combines ag with grep License: MIT AND Unlicense Vendor: Microsoft Corporation @@ -104,6 +104,9 @@ install -Dm 644 complete/_rg %{buildroot}%{_datadir}/zsh/site-functions/_rg %{_datadir}/zsh %changelog +* Wed Oct 15 2025 Kavya Sree Kaitepalli - 13.0.0-11 +- Bump release to rebuild with rust + * Fri Aug 08 2025 Azure Linux Security Servicing Account - 13.0.0-10 - Bump release to rebuild with rust diff --git a/SPECS-EXTENDED/rust-cbindgen/rust-cbindgen.spec b/SPECS-EXTENDED/rust-cbindgen/rust-cbindgen.spec index 467f2c777c7..06c237c8ef8 100644 --- a/SPECS-EXTENDED/rust-cbindgen/rust-cbindgen.spec +++ b/SPECS-EXTENDED/rust-cbindgen/rust-cbindgen.spec @@ -2,7 +2,7 @@ Summary: Tool for generating C bindings to Rust code Name: rust-cbindgen Version: 0.24.3 -Release: 6%{?dist} +Release: 7%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -96,6 +96,9 @@ RUSTFLAGS=%{rustflags} cargo test --release %endif %changelog +* Wed Oct 15 2025 Kavya Sree Kaitepalli - 0.24.3-7 +- Bump release to rebuild with rust + * Fri Aug 08 2025 Azure Linux Security Servicing Account - 0.24.3-6 - Bump release to rebuild with rust diff --git a/SPECS-EXTENDED/satyr/satyr.signatures.json b/SPECS-EXTENDED/satyr/satyr.signatures.json index 32aad7c9766..f217db0e0d8 100644 --- a/SPECS-EXTENDED/satyr/satyr.signatures.json +++ b/SPECS-EXTENDED/satyr/satyr.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "satyr-0.30.tar.gz": "2e4c67a09e1b47a5bf9813b2af4e2cdbefe47264a449e429710952f21b7c9709" + "satyr-0.43.tar.gz": "b001c90b404308d962858b95cbd7cb1e7f13bd5bcf2249a66321d4db406b4268" } } diff --git a/SPECS-EXTENDED/satyr/satyr.spec b/SPECS-EXTENDED/satyr/satyr.spec index c975e653b1b..f5551edf511 100644 --- a/SPECS-EXTENDED/satyr/satyr.spec +++ b/SPECS-EXTENDED/satyr/satyr.spec @@ -2,25 +2,21 @@ Vendor: Microsoft Corporation Distribution: Azure Linux %bcond_without python3 +%define libdw_devel elfutils-devel +%define 
libelf_devel elfutils-libelf-devel -%if 0%{?suse_version} - %define libdw_devel libdw-devel - %define libelf_devel libelf-devel -%else - %define libdw_devel elfutils-devel - %define libelf_devel elfutils-libelf-devel -%endif +%define glib_ver 2.43.4 Name: satyr -Version: 0.30 -Release: 3%{?dist} +Version: 0.43 +Release: 1%{?dist} Summary: Tools to create anonymous, machine-friendly problem reports -License: GPLv2+ +License: GPL-2.0-or-later URL: https://github.com/abrt/satyr -Source0: https://github.com/abrt/%{name}/archive/%{version}/%{name}-%{version}.tar.gz +Source0: https://github.com/abrt/%{name}/releases/download/%{version}/%{name}-%{version}.tar.gz %if %{with python3} BuildRequires: python3-devel -%endif # with python3 +%endif BuildRequires: %{libdw_devel} BuildRequires: %{libelf_devel} BuildRequires: binutils-devel @@ -28,17 +24,18 @@ BuildRequires: rpm-devel BuildRequires: libtool BuildRequires: doxygen BuildRequires: pkgconfig +BuildRequires: make BuildRequires: automake BuildRequires: gcc-c++ BuildRequires: gdb BuildRequires: gperf -BuildRequires: nettle-devel -BuildRequires: pkgconfig(json-c) +BuildRequires: json-c-devel +BuildRequires: glib2-devel %if %{with python3} BuildRequires: python3-sphinx -%endif # with python3 -Requires: json-c -Requires: nettle +%endif +Requires: json-c%{?_isa} +Requires: glib2%{?_isa} >= %{glib_ver} %description Satyr is a library that can be used to create and process microreports. @@ -64,24 +61,24 @@ Requires: %{name}%{?_isa} = %{version}-%{release} %description -n python3-satyr Python 3 bindings for %{name}. -%endif # if with python3 +%endif %prep %setup -q %build +autoreconf + %configure \ %if %{without python3} --without-python3 \ -%endif # with python3 +%endif --disable-static \ --enable-doxygen-docs %make_build - %install %make_install - # Remove all libtool archives (*.la) from modules directory. find %{buildroot} -name "*.la" -delete @@ -89,6 +86,7 @@ find %{buildroot} -name "*.la" -delete make check|| { # find and print the logs of failed test # do not cat tests/testsuite.log because it contains a lot of bloat + cat tests/test-suite.log find tests/testsuite.dir -name "testsuite.log" -print -exec cat '{}' \; exit 1 } @@ -97,7 +95,7 @@ make check|| { %postun -p /sbin/ldconfig %files -%doc README NEWS +%doc README.md NEWS %license COPYING %{_bindir}/satyr %{_mandir}/man1/%{name}.1* @@ -117,6 +115,10 @@ make check|| { %endif %changelog +* Tue Nov 12 2024 Sumit Jena - 0.43-1 +- Update to version 0.43 +- License verified. + * Tue Jan 12 2021 Joe Schmitt - 0.30-3 - Initial CBL-Mariner import from Fedora 31 (license: MIT). 
- Build with python3 diff --git a/SPECS-EXTENDED/suitesparse/suitesparse.signatures.json b/SPECS-EXTENDED/suitesparse/suitesparse.signatures.json index 1fab5c4eeb4..2f949f40150 100644 --- a/SPECS-EXTENDED/suitesparse/suitesparse.signatures.json +++ b/SPECS-EXTENDED/suitesparse/suitesparse.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "SuiteSparse-5.4.0.tar.gz": "374dd136696c653e34ef3212dc8ab5b61d9a67a6791d5ec4841efb838e94dbd1" + "suitesparse-7.11.0.tar.gz": "93ed4c4e546a49fc75884c3a8b807d5af4a91e39d191fbbc60a07380b12a35d1" } -} +} \ No newline at end of file diff --git a/SPECS-EXTENDED/suitesparse/suitesparse.spec b/SPECS-EXTENDED/suitesparse/suitesparse.spec index 2588b412dfe..3cd09a49578 100644 --- a/SPECS-EXTENDED/suitesparse/suitesparse.spec +++ b/SPECS-EXTENDED/suitesparse/suitesparse.spec @@ -1,38 +1,112 @@ -%global amd_version_major 2 -%global btf_version_major 1 -%global camd_version_major 2 -%global ccolamd_version_major 2 -%global cholmod_version_major 3 -%global colamd_version_major 2 -%global csparse_version_major 3 -%global cxsparse_version_major 3 -%global klu_version_major 1 -%global ldl_version_major 2 -%global rbio_version_major 2 -%global spqr_version_major 2 -%global SuiteSparse_config_major 5 -%global umfpack_version_major 5 +%global amd_version_major 3 +%global btf_version_major 2 +%global camd_version_major 3 +%global ccolamd_version_major 3 +%global cholmod_version_major 5 +%global colamd_version_major 3 +%global csparse_version_major 4 +%global cxsparse_version_major 4 +%global gpuqrengine_version_major 3 +%global graphblas_version_major 10 +%global klu_cholmod_version_major 2 +%global klu_version_major 2 +%global lagraph_version_major 1 +%global lagraphx_version_major 1 +%global ldl_version_major 3 +%global paru_version_major 1 +%global rbio_version_major 4 +%global spex_version_major 3 +%global spqr_version_major 4 +%global SuiteSparse_config_major 7 +%global SuiteSparse_gpuruntime_major 3 +%global SuiteSparse_metis_major 5 +%global umfpack_version_major 6 + ### CXSparse is a superset of CSparse, and the two share common header ### names, so it does not make sense to build both. CXSparse is built ### by default, but CSparse can be built instead by defining ### enable_csparse as 1 below. 
%global enable_csparse 0 -Summary: A collection of sparse matrix libraries +# Whether to build a separate version of libraries linked against an ILP64 BLAS +%if 0%{?__isa_bits} == 64 +%global build64 1 +%endif + +%global suitesparse_builds SuiteSparse %{?build64:SuiteSparse64 SuiteSparse64_} + +%global blaslib openblas + +# SuiteSparse uses a modified version of metis, so use it +%bcond_with system_metis + +%global commit 6ab1e9eb9e67264218ffbdfc25010650da449a39 + Name: suitesparse -Version: 5.4.0 -Release: 5%{?dist} -License: (LGPLv2+ OR BSD) AND LGPLv2+ AND GPLv2+ +Version: 7.11.0 +Release: 1%{?dist} +Summary: A collection of sparse matrix libraries Vendor: Microsoft Corporation Distribution: Azure Linux -URL: https://faculty.cse.tamu.edu/davis/suitesparse.html -Source0: https://faculty.cse.tamu.edu/davis/SuiteSparse/SuiteSparse-%{version}.tar.gz + +# See LICENSE.txt for a breakdown of all licenses: +# Shipped modules licenses: +# * AMD - BSD-3-Clause +# * BTF - LGPL-2.1-or-later +# * CAMD - BSD-3-Clause +# * COLAMD - BSD-3-Clause +# * CCOLAMD - BSD-3-Clause +# * CHOLMOD - LGPL-2.1-or-later AND GPL-2.0-or-later +# * CSparse - LGPL-2.1-or-later AND BSD-3-Clause +# * CXSparse - LGPL-2.1-or-later AND BSD-3-Clause +# * KLU - LGPL-2.1-or-later +# * LDL - LGPL-2.1-or-later +# * RBio - GPL-2.0-or-later +# * SPQR - GPL-2.0-or-later +# * UMFPACK - GPL-2.0-or-later +# +# Not shipped modules licenses: +# * GPUQREngine - GPL-2.0-or-later +# * GraphBLAS - Apache-2.0 AND GPL-3.0-or-later +# * SLIP_LU - LGPL-3.0-or-later OR GPL-2.0-or-later OR (LGPL-3.0-or-later AND GPL-2.0-or-later) +# * MATLAB_Tools - BSD-3-Clause AND GPL-2.0-or-later +# * Mongoose - GPL-3.0-only +# * ssget - BSD-3-Clause +# * SuiteSparse_GPURuntime - GPL-2.0-or-later + +License: BSD-3-Clause AND LGPL-2.1-or-later AND GPL-2.0-or-later +URL: http://faculty.cse.tamu.edu/davis/suitesparse.html +Source0: https://github.com/DrTimothyAldenDavis/SuiteSparse/archive/v%{version}/%{name}-%{version}.tar.gz + +BuildRequires: cmake BuildRequires: gcc BuildRequires: gcc-c++ -BuildRequires: hardlink +BuildRequires: gcc-gfortran +BuildRequires: make + +BuildRequires: gmp-devel +%if %{with system_metis} BuildRequires: metis-devel +%else +Provides: bundled(metis) = 5.1.0 +%endif +BuildRequires: %{blaslib}-devel +BuildRequires: mpfr-devel +# openblas is still required for 64-bit suffixed versions BuildRequires: openblas-devel +BuildRequires: lapack-devel +BuildRequires: mpfr-devel BuildRequires: tbb-devel +BuildRequires: hardlink + +# Not packaged in Fedora +Provides: bundled(cpu_features) = 0.6.0 +# GraphBLAS redefines malloc() so must use bundled versions +Provides: bundled(lz4) = 1.9.3 +Provides: bundled(zstd) = 1.5.5 + +Obsoletes: umfpack <= 5.0.1 +Obsoletes: ufsparse <= 2.1.1 Provides: ufsparse = %{version}-%{release} %description @@ -54,15 +128,19 @@ matrices. The package includes the following libraries: SuiteSparse_config configuration file for all the above packages. RBio read/write files in Rutherford/Boeing format + %package devel Summary: Development headers for SuiteSparse Requires: %{name} = %{version}-%{release} +Obsoletes: umfpack-devel <= 5.0.1 +Obsoletes: ufsparse-devel <= 2.1.1 Provides: ufsparse-devel = %{version}-%{release} %description devel The suitesparse-devel package contains files needed for developing applications which use the suitesparse libraries. 
+ %package static Summary: Static version of SuiteSparse libraries Requires: %{name}-devel = %{version}-%{release} @@ -72,12 +150,15 @@ Provides: ufsparse-static = %{version}-%{release} The suitesparse-static package contains the statically linkable version of the suitesparse libraries. + +%if 0%{?build64} %package -n %{name}64 Summary: A collection of sparse matrix libraries (ILP64 version) %description -n %{name}64 The suitesparse collection compiled against an ILP64 BLAS library. + %package -n %{name}64-devel Summary: Development headers for SuiteSparse (ILP64 version) Requires: %{name}-devel = %{version}-%{release} @@ -87,6 +168,7 @@ Requires: %{name}64 = %{version}-%{release} The suitesparse64-devel package contains files needed for developing applications which use the suitesparse libraries (ILP64 version). + %package -n %{name}64-static Summary: Static version of SuiteSparse libraries (ILP64 version) Requires: %{name}-devel = %{version}-%{release} @@ -95,12 +177,14 @@ Requires: %{name}-devel = %{version}-%{release} The suitesparse64-static package contains the statically linkable version of the suitesparse libraries (ILP64 version). + %package -n %{name}64_ Summary: A collection of sparse matrix libraries (ILP64 version) %description -n %{name}64_ The suitesparse collection compiled against an ILP64 BLAS library. + %package -n %{name}64_-devel Summary: Development headers for SuiteSparse (ILP64 version) Requires: %{name}-devel = %{version}-%{release} @@ -112,6 +196,7 @@ applications which use the suitesparse libraries (ILP64 version) compiled against a BLAS library with the "64_" symbol name suffix (see openblas-*64_ packages). + %package -n %{name}64_-static Summary: Static version of SuiteSparse libraries (ILP64 version) Requires: %{name}-devel = %{version}-%{release} @@ -120,22 +205,34 @@ Requires: %{name}-devel = %{version}-%{release} The suitesparse64_-static package contains the statically linkable version of the suitesparse libraries (ILP64 version) compiled against a BLAS library with the "64_" symbol name suffix (see openblas-*64_ packages). +%endif + %package doc Summary: Documentation files for SuiteSparse -Requires: %{name} = %{version}-%{release} BuildArch: noarch +Requires: %{name} = %{version}-%{release} %description doc This package contains documentation files for %{name}. + %prep -%setup -q -c -pushd SuiteSparse +%setup -c -q +mkdir Doc Licenses +pushd SuiteSparse-%{version} +#patch 0 -p1 -b .postfix +%if !0%{?enable_csparse} + sed -i -e /CSparse/d Makefile +%endif + # Build fails + sed -i -e /Mongoose/d Makefile +%if %{with system_metis} # Remove bundled metis - rm -r metis* - # Makefiles look for metis.h specifically - ln -s %{_includedir}/metis/*.h include/ + rm -r SuiteSparse_metis + # SuiteSparse looks for SuiteSparse_metis.h specifically + ln -s %{_includedir}/metis/metis.h include/SuiteSparse_metis.h +%endif # Fix pragma ivdep so gcc understands it. 
for fil in $(grep -Frl 'pragma ivdep' .); do @@ -144,239 +241,94 @@ pushd SuiteSparse rm -f ${fil}.orig done - # drop non-standard -O3 and duplicate -fexceptions from default CFLAGS - sed -i -e '/^ CF =/ s/ -O3 -fexceptions//' SuiteSparse_config/SuiteSparse_config.mk + # drop non-standard -O3 + sed -i -e '/OPTS.*-O3/d' CHOLMOD/SuiteSparse_metis/GKlib/GKlibSystem.cmake + + # collect docs and licenses in one place to ship + find -iname lesser.txt -o -iname lesserv3.txt -o -iname license.txt -o \ + -iname gpl.txt -o -iname GPLv2.txt -o -iname CONTRIBUTOR-LICENSE.txt -o -iname "SuiteSparse Individual Contributor License Agreement (20241011).pdf" -o -iname license \ + -a -not -type d | while read f; do + b="${f%%/*}" + r="${f#$b}" + x="$(echo "$r" | sed 's|/doc/|/|gi')" + install -m0644 -D "$f" "../Licenses/$b/$x" + done - # Allow adding a suffix to the library name - sed -i -e '/SO.*=/s/$(LIBRARY).so/$(LIBRARY)$(LIBRARY_SUFFIX).so/' \ - -e '/AR_TARGET *=/s/$(LIBRARY).a/$(LIBRARY)$(LIBRARY_SUFFIX).a/' SuiteSparse_config/SuiteSparse_config.mk - sed -i -e 's/-l\(amd\|btf\|camd\|ccolamd\|cholmod\|colamd\|csparse\|cxsparse\|klu\|ldl\|rbio\|spqr\|suitesparseconfig\|umfpack\)/-l\1$(LIBRARY_SUFFIX)/g' \ - $(find -name Makefile\* -o -name \*.mk) + # Copy documentation files but EXCLUDE License.txt, gpl.txt, GPLv2.txt, lesserv3.txt + find . -type f \( \ + -iname "*.pdf" -o \ + -iname "ChangeLog" -o \ + -iname "README*" -o \ + -iname "*.txt" \ + \) \ + ! -iname "License.txt" \ + ! -iname "gpl.txt" \ + ! -iname "GPLv2.txt" \ + ! -iname "lesserv3.txt" \ + ! -iname "CONTRIBUTOR-LICENSE.txt" \ + ! -iname "SuiteSparse Individual Contributor License Agreement (20241011).pdf" \ + | while read f; do + b="${f%%/*}" + r="${f#$b}" + x="$(echo "$r" | sed 's|/doc/|/|gi')" + install -m0644 -D "$f" "../Doc/$b/$x" + done popd -cp -a SuiteSparse SuiteSparse64 -cp -a SuiteSparse SuiteSparse64_ +%if 0%{?build64} +cp -al SuiteSparse-%{version} SuiteSparse64-%{version} +cp -al SuiteSparse-%{version} SuiteSparse64_-%{version} +%endif -%build -export AUTOCC=no -export CC=gcc +# hardlink duplicate documentation files +hardlink -cv Licenses/ -for build in SuiteSparse SuiteSparse64 SuiteSparse64_ +%build +# FindSuiteSparse_config looks for "build" +%global _vpath_builddir build +for build in %{suitesparse_builds} do - pushd $build - - # TODO - Try to use upstream makefile - will build more components - mkdir -p Doc/{AMD,BTF,CAMD,CCOLAMD,CHOLMOD,COLAMD,KLU,LDL,UMFPACK,SPQR,RBio} Include - - export CFLAGS="%{optflags} -I%{_includedir}/metis" - export LAPACK="" - # Set flags for ILP64 build - if [ $build = SuiteSparse64 ] - then - export CFLAGS="$CFLAGS -DBLAS64" - export BLAS=-lopenblas64 - export LIBRARY_SUFFIX=64 - elif [ $build = SuiteSparse64_ ] - then - export CFLAGS="$CFLAGS -DBLAS64 -DSUN64" - export BLAS=-lopenblas64_ - export LIBRARY_SUFFIX=64_ - else - export BLAS=-lopenblas - fi - - # SuiteSparse_config needs to come first - pushd SuiteSparse_config - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - cp -p *.h ../Include - popd - - pushd AMD - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/License.txt Doc/lesser.txt Doc/ChangeLog Doc/*.pdf ../Doc/AMD - popd - - pushd BTF - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/* ../Doc/BTF - popd - - pushd CAMD - pushd Lib - %make_build CFLAGS="$CFLAGS" 
BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/ChangeLog Doc/License.txt Doc/*.pdf ../Doc/CAMD - popd - - pushd CCOLAMD - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/* ../Doc/CCOLAMD - popd - - pushd COLAMD - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/* ../Doc/COLAMD - popd - - pushd CHOLMOD - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/*.pdf ../Doc/CHOLMOD - cp -p Cholesky/lesser.txt ../Doc/CHOLMOD/Cholesky_License.txt - cp -p Core/lesser.txt ../Doc/CHOLMOD/Core_License.txt - cp -p MatrixOps/gpl.txt ../Doc/CHOLMOD/MatrixOps_License.txt - cp -p Partition/lesser.txt ../Doc/CHOLMOD/Partition_License.txt - cp -p Supernodal/gpl.txt ../Doc/CHOLMOD/Supernodal_License.txt - popd - - %if "%{?enable_csparse}" == "1" - pushd CSparse - pushd Source - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - cp -p cs.h ../../Include - popd - mkdir ../Doc/CSparse/ - cp -p Doc/* ../Doc/CSparse - popd - - %else - pushd CXSparse - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/cs.h ../Include - mkdir ../Doc/CXSparse/ - cp -p Doc/* ../Doc/CXSparse - popd - %endif - - pushd KLU - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/lesser.txt ../Doc/KLU - popd - - pushd LDL - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/ChangeLog Doc/lesser.txt Doc/*.pdf ../Doc/LDL - popd - - pushd UMFPACK - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/License.txt Doc/ChangeLog Doc/gpl.txt Doc/*.pdf ../Doc/UMFPACK - popd - - pushd SPQR - pushd Lib - %make_build CFLAGS="$CFLAGS -DHAVE_TBB -DNPARTITION" TBB=-ltbb BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h* ../Include - cp -p README{,_SPQR}.txt - cp -p README_SPQR.txt Doc/* ../Doc/SPQR - popd - - pushd RBio - pushd Lib - %make_build CFLAGS="$CFLAGS" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" - popd - cp -p Include/*.h ../Include - cp -p README.txt Doc/ChangeLog Doc/License.txt ../Doc/RBio - popd - + pushd $build-%{version} + %set_build_flags + CMAKE_OPTIONS="-DCMAKE_C_FLAGS_RELEASE:STRING=-DNDEBUG -DCMAKE_CXX_FLAGS_RELEASE:STRING=-DNDEBUG -DCMAKE_Fortran_FLAGS_RELEASE:STRING=-DNDEBUG -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_INSTALL_DO_STRIP:BOOL=OFF \ + -DCMAKE_INSTALL_PREFIX:PATH=%{_prefix} -DCMAKE_INSTALL_LIBDIR=%{_libdir} -DCOMPACT=ON" +%if %{with system_metis} + CMAKE_OPTIONS="$CMAKE_OPTIONS -DSUITESPARSE_METIS_FOUND=true -DSUITESPARSE_METIS_INCLUDE_DIR=%{_includedir}/metis -DSUITESPARSE_METIS_LIBRARIES=%{_libdir}/libmetis.so" +%endif + # Set flags for ILP64 build + if [ $build = SuiteSparse64 ] + then + CMAKE_OPTIONS="$CMAKE_OPTIONS -DSUITESPARSE_INCLUDEDIR_POSTFIX=$build -DSUITESPARSE_PKGFILEDIR=%{_libdir}/$build -DCMAKE_RELEASE_POSTFIX=64 -DBLA_VENDOR=OpenBLAS -DALLOW_64BIT_BLAS=yes" + export CFLAGS="$CFLAGS -DBLAS_OPENBLAS_64" + elif [ $build = SuiteSparse64_ ] + then + 
CMAKE_OPTIONS="$CMAKE_OPTIONS -DSUITESPARSE_INCLUDEDIR_POSTFIX=$build -DSUITESPARSE_PKGFILEDIR=%{_libdir}/$build -DCMAKE_RELEASE_POSTFIX=64_ -DBLA_VENDOR=OpenBLAS -DALLOW_64BIT_BLAS=yes -DBLAS_LIBRARIES=%{_libdir}/libopenblas64_.so" + export CFLAGS="$CFLAGS -DBLAS_OPENBLAS_64" + else + CMAKE_OPTIONS="$CMAKE_OPTIONS -DSUITESPARSE_INCLUDEDIR_POSTFIX=suitesparse -DBLA_VENDOR=OpenBLAS" + fi + %make_build CMAKE_OPTIONS="$CMAKE_OPTIONS" JOBS=%{_smp_build_ncpus} popd done %install -mkdir -p %{buildroot}%{_libdir} -mkdir -p %{buildroot}%{_includedir}/%{name} -cp -a SuiteSparse/Include/*.{h,hpp} %{buildroot}%{_includedir}/%{name}/ -for build in SuiteSparse SuiteSparse64 SuiteSparse64_ +for build in %{suitesparse_builds} do - pushd $build - cp -a */Lib/*.a lib/*.so* %{buildroot}%{_libdir}/ - chmod 755 %{buildroot}%{_libdir}/*.so.* - - # collect licenses in one place to ship as base package documentation - rm -rf Licenses - mkdir Licenses - find */ -iname lesser.txt -o -iname license.txt -o -iname gpl.txt -o \ - -iname license | while read f; do - b="${f%%/*}" - r="${f#$b}" - x="$(echo "$r" | sed 's|/doc/|/|gi')" - install -m0644 -D "$f" "./Licenses/$b/$x" - done - - # hardlink duplicate documentation files - hardlink -cv Docs/ Licenses/ + pushd $build-%{version} + %make_install popd done - %check -check_status=0 -export AUTOCC=no -export CC=gcc -TESTDIRS="AMD CAMD CCOLAMD CHOLMOD COLAMD KLU LDL SPQR RBio UMFPACK" -%if "%{?enable_csparse}" == "1" -TESTDIRS="$TESTDIRS CSparse" -%else -TESTDIRS="$TESTDIRS CXSparse" -%endif -for build in SuiteSparse SuiteSparse64 SuiteSparse64_ +# Build demos as a check +for build in %{suitesparse_builds} do - pushd $build - export CFLAGS="%{optflags} -I%{_includedir}/metis" - export LAPACK="" - # Set flags for ILP64 build - if [ $build = SuiteSparse64 ] - then - export CFLAGS="$CFLAGS -DBLAS64" - export BLAS=-lopenblas64 - export LIBRARY_SUFFIX=64 - elif [ $build = SuiteSparse64_ ] - then - export CFLAGS="$CFLAGS -DBLAS64 -DSUN64" - export BLAS=-lopenblas64_ - export LIBRARY_SUFFIX=64_ - else - export BLAS=-lopenblas - fi - - for d in $TESTDIRS ; do - %make_build -C $d/Demo CFLAGS="$CFLAGS" LIB="%{?__global_ldflags} -lm -lrt" BLAS="$BLAS" LIBRARY_SUFFIX="$LIBRARY_SUFFIX" SPQR_CONFIG=-DHAVE_TBB TBB=-ltbb - if [[ $? 
-ne 0 ]]; then - check_status=1 - fi - done + pushd $build-%{version} + make install DESTDIR=%{buildroot} popd done -[[ $check_status -eq 0 ]] %files -%license SuiteSparse/Licenses +%license Licenses %{_libdir}/libamd.so.%{amd_version_major}* %{_libdir}/libbtf.so.%{btf_version_major}* %{_libdir}/libcamd.so.%{camd_version_major}* @@ -387,24 +339,75 @@ done %{_libdir}/libcsparse.so.%{csparse_version_major}* %endif %{_libdir}/libcxsparse.so.%{cxsparse_version_major}* +%{_libdir}/libgraphblas.so.%{graphblas_version_major}* +%{_libdir}/libklu_cholmod.so.%{klu_cholmod_version_major}* %{_libdir}/libklu.so.%{klu_version_major}* +%{_libdir}/liblagraph.so.%{lagraph_version_major}* +%{_libdir}/liblagraphx.so.%{lagraphx_version_major}* %{_libdir}/libldl.so.%{ldl_version_major}* +%{_libdir}/libparu.so.%{paru_version_major}* %{_libdir}/librbio.so.%{rbio_version_major}* +%{_libdir}/libspex.so.%{spex_version_major}* +%{_libdir}/libspexpython.so.%{spex_version_major}* %{_libdir}/libspqr.so.%{spqr_version_major}* %{_libdir}/libsuitesparseconfig.so.%{SuiteSparse_config_major}* %{_libdir}/libumfpack.so.%{umfpack_version_major}* %files devel -%{_includedir}/%{name} +%{_includedir}/%{name}/ +%{_libdir}/cmake/AMD/ +%{_libdir}/cmake/BTF/ +%{_libdir}/cmake/CAMD/ +%{_libdir}/cmake/CCOLAMD/ +%{_libdir}/cmake/CHOLMOD/ +%{_libdir}/cmake/COLAMD/ +%{_libdir}/cmake/CXSparse/ +%{_libdir}/cmake/GraphBLAS/ +%{_libdir}/cmake/KLU/ +%{_libdir}/cmake/KLU_CHOLMOD/ +%{_libdir}/cmake/LAGraph/ +%{_libdir}/cmake/LDL/ +%{_libdir}/cmake/ParU/ +%{_libdir}/cmake/RBio/ +%{_libdir}/cmake/SPEX/ +%{_libdir}/cmake/SPQR/ +%{_libdir}/cmake/SuiteSparse_config/ +%{_libdir}/cmake/SuiteSparse/ +%{_libdir}/cmake/UMFPACK/ +%exclude %{_libdir}/cmake/*/*_static*.cmake +%{_libdir}/pkgconfig/AMD.pc +%{_libdir}/pkgconfig/BTF.pc +%{_libdir}/pkgconfig/CAMD.pc +%{_libdir}/pkgconfig/CCOLAMD.pc +%{_libdir}/pkgconfig/CHOLMOD.pc +%{_libdir}/pkgconfig/COLAMD.pc +%{_libdir}/pkgconfig/CXSparse.pc +%{_libdir}/pkgconfig/GraphBLAS.pc +%{_libdir}/pkgconfig/KLU.pc +%{_libdir}/pkgconfig/KLU_CHOLMOD.pc +%{_libdir}/pkgconfig/LAGraph.pc +%{_libdir}/pkgconfig/LDL.pc +%{_libdir}/pkgconfig/ParU.pc +%{_libdir}/pkgconfig/RBio.pc +%{_libdir}/pkgconfig/SPEX.pc +%{_libdir}/pkgconfig/SPQR.pc +%{_libdir}/pkgconfig/SuiteSparse_config.pc +%{_libdir}/pkgconfig/UMFPACK.pc %{_libdir}/lib*.so +%if 0%{?build64} %exclude %{_libdir}/lib*64*.so +%endif %files static +%{_libdir}/cmake/*/*_static*.cmake %{_libdir}/lib*.a +%if 0%{?build64} %exclude %{_libdir}/lib*64*.a +%endif +%if 0%{?build64} %files -n %{name}64 -%license SuiteSparse64/Licenses +%license Licenses %{_libdir}/libamd64.so.%{amd_version_major}* %{_libdir}/libbtf64.so.%{btf_version_major}* %{_libdir}/libcamd64.so.%{camd_version_major}* @@ -415,21 +418,30 @@ done %{_libdir}/libcsparse64.so.%{csparse_version_major}* %endif %{_libdir}/libcxsparse64.so.%{cxsparse_version_major}* +%{_libdir}/libgraphblas64.so.%{graphblas_version_major}* +%{_libdir}/libklu_cholmod64.so.%{klu_cholmod_version_major}* %{_libdir}/libklu64.so.%{klu_version_major}* +%{_libdir}/liblagraph64.so.%{lagraph_version_major}* +%{_libdir}/liblagraphx64.so.%{lagraphx_version_major}* %{_libdir}/libldl64.so.%{ldl_version_major}* +%{_libdir}/libparu64.so.%{paru_version_major}* %{_libdir}/librbio64.so.%{rbio_version_major}* +%{_libdir}/libspex64.so.%{spex_version_major}* +%{_libdir}/libspexpython64.so.%{spex_version_major}* %{_libdir}/libspqr64.so.%{spqr_version_major}* %{_libdir}/libsuitesparseconfig64.so.%{SuiteSparse_config_major}* 
%{_libdir}/libumfpack64.so.%{umfpack_version_major}* %files -n %{name}64-devel +%{_includedir}/SuiteSparse64/ %{_libdir}/lib*64.so +%{_libdir}/SuiteSparse64 %files -n %{name}64-static %{_libdir}/lib*64.a %files -n %{name}64_ -%license SuiteSparse64_/Licenses +%license Licenses %{_libdir}/libamd64_.so.%{amd_version_major}* %{_libdir}/libbtf64_.so.%{btf_version_major}* %{_libdir}/libcamd64_.so.%{camd_version_major}* @@ -440,23 +452,37 @@ done %{_libdir}/libcsparse64_.so.%{csparse_version_major}* %endif %{_libdir}/libcxsparse64_.so.%{cxsparse_version_major}* +%{_libdir}/libgraphblas64_.so.%{graphblas_version_major}* +%{_libdir}/libklu_cholmod64_.so.%{klu_cholmod_version_major}* %{_libdir}/libklu64_.so.%{klu_version_major}* +%{_libdir}/liblagraph64_.so.%{lagraph_version_major}* +%{_libdir}/liblagraphx64_.so.%{lagraphx_version_major}* %{_libdir}/libldl64_.so.%{ldl_version_major}* +%{_libdir}/libparu64_.so.%{paru_version_major}* %{_libdir}/librbio64_.so.%{rbio_version_major}* +%{_libdir}/libspex64_.so.%{spex_version_major}* +%{_libdir}/libspexpython64_.so.%{spex_version_major}* %{_libdir}/libspqr64_.so.%{spqr_version_major}* %{_libdir}/libsuitesparseconfig64_.so.%{SuiteSparse_config_major}* %{_libdir}/libumfpack64_.so.%{umfpack_version_major}* %files -n %{name}64_-devel +%{_includedir}/SuiteSparse64_/ %{_libdir}/lib*64_.so +%{_libdir}/SuiteSparse64_ %files -n %{name}64_-static %{_libdir}/lib*64_.a +%endif %files doc -%doc SuiteSparse/Doc/* +%doc Doc/* %changelog +* Mon Dec 15 2025 Durga Jagadeesh Palli - 7.11.0-1 +- Upgrade to 7.11.0 (Reference: Fedora 42) +- License verified + * Mon Nov 28 2022 Muhammad Falak - 5.4.0-5 - License verified - Lint spec diff --git a/SPECS-EXTENDED/tardev-snapshotter/tardev-snapshotter.spec b/SPECS-EXTENDED/tardev-snapshotter/tardev-snapshotter.spec index bce19b03549..f3fc5fe3968 100644 --- a/SPECS-EXTENDED/tardev-snapshotter/tardev-snapshotter.spec +++ b/SPECS-EXTENDED/tardev-snapshotter/tardev-snapshotter.spec @@ -3,7 +3,7 @@ Summary: Tardev Snapshotter for containerd Name: tardev-snapshotter Version: 3.2.0.tardev1 -Release: 4%{?dist} +Release: 5%{?dist} License: ASL 2.0 Group: Tools/Container Vendor: Microsoft Corporation @@ -67,6 +67,9 @@ fi %config(noreplace) %{_unitdir}/%{name}.service %changelog +* Wed Oct 15 2025 Kavya Sree Kaitepalli - 3.2.0.tardev1-5 +- Bump release to rebuild with rust + * Fri Aug 08 2025 Azure Linux Security Servicing Account - 3.2.0.tardev1-4 - Bump release to rebuild with rust diff --git a/SPECS-EXTENDED/trilead-ssh2/0001-Remove-the-dependency-on-google-tink.patch b/SPECS-EXTENDED/trilead-ssh2/0001-Remove-the-dependency-on-google-tink.patch new file mode 100644 index 00000000000..11c9b5f90e4 --- /dev/null +++ b/SPECS-EXTENDED/trilead-ssh2/0001-Remove-the-dependency-on-google-tink.patch @@ -0,0 +1,161 @@ +From 933d197b30e797d4b82eeef1953fd82e617f4cf0 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Fridrich=20=C5=A0trba?= +Date: Wed, 13 Mar 2024 07:05:36 +0100 +Subject: [PATCH] Remove the dependency on google tink + +--- + .../ssh2/crypto/dh/Curve25519Exchange.java | 85 ------------------- + .../ssh2/crypto/dh/GenericDhExchange.java | 3 - + .../trilead/ssh2/transport/KexManager.java | 9 +- + 3 files changed, 1 insertion(+), 96 deletions(-) + delete mode 100644 src/com/trilead/ssh2/crypto/dh/Curve25519Exchange.java + +diff --git a/src/com/trilead/ssh2/crypto/dh/Curve25519Exchange.java b/src/com/trilead/ssh2/crypto/dh/Curve25519Exchange.java +deleted file mode 100644 +index 01d4ab4..0000000 +--- 
a/src/com/trilead/ssh2/crypto/dh/Curve25519Exchange.java ++++ /dev/null +@@ -1,85 +0,0 @@ +-package com.trilead.ssh2.crypto.dh; +- +-import com.google.crypto.tink.subtle.X25519; +- +-import java.io.IOException; +-import java.math.BigInteger; +-import java.security.InvalidKeyException; +- +-/** +- * Created by Kenny Root on 1/23/16. +- */ +-public class Curve25519Exchange extends GenericDhExchange { +- public static final String NAME = "curve25519-sha256"; +- public static final String ALT_NAME = "curve25519-sha256@libssh.org"; +- public static final int KEY_SIZE = 32; +- +- private byte[] clientPublic; +- private byte[] clientPrivate; +- private byte[] serverPublic; +- +- public Curve25519Exchange() { +- super(); +- } +- +- /* +- * Used to test known vectors. +- */ +- public Curve25519Exchange(byte[] secret) throws InvalidKeyException { +- if (secret.length != KEY_SIZE) { +- throw new AssertionError("secret must be key size"); +- } +- clientPrivate = secret.clone(); +- } +- +- @Override +- public void init(String name) throws IOException { +- if (!NAME.equals(name) && !ALT_NAME.equals(name)) { +- throw new IOException("Invalid name " + name); +- } +- +- clientPrivate = X25519.generatePrivateKey(); +- try { +- clientPublic = X25519.publicFromPrivate(clientPrivate); +- } catch (InvalidKeyException e) { +- throw new IOException(e); +- } +- } +- +- @Override +- public byte[] getE() { +- return clientPublic.clone(); +- } +- +- @Override +- protected byte[] getServerE() { +- return serverPublic.clone(); +- } +- +- @Override +- public void setF(byte[] f) throws IOException { +- if (f.length != KEY_SIZE) { +- throw new IOException("Server sent invalid key length " + f.length + " (expected " + +- KEY_SIZE + ")"); +- } +- serverPublic = f.clone(); +- try { +- byte[] sharedSecretBytes = X25519.computeSharedSecret(clientPrivate, serverPublic); +- int allBytes = 0; +- for (int i = 0; i < sharedSecretBytes.length; i++) { +- allBytes |= sharedSecretBytes[i]; +- } +- if (allBytes == 0) { +- throw new IOException("Invalid key computed; all zeroes"); +- } +- sharedSecret = new BigInteger(1, sharedSecretBytes); +- } catch (InvalidKeyException e) { +- throw new IOException(e); +- } +- } +- +- @Override +- public String getHashAlgo() { +- return "SHA-256"; +- } +-} +diff --git a/src/com/trilead/ssh2/crypto/dh/GenericDhExchange.java b/src/com/trilead/ssh2/crypto/dh/GenericDhExchange.java +index c2436e3..a63b9fd 100644 +--- a/src/com/trilead/ssh2/crypto/dh/GenericDhExchange.java ++++ b/src/com/trilead/ssh2/crypto/dh/GenericDhExchange.java +@@ -29,9 +29,6 @@ public abstract class GenericDhExchange + } + + public static GenericDhExchange getInstance(String algo) { +- if (Curve25519Exchange.NAME.equals(algo) || Curve25519Exchange.ALT_NAME.equals(algo)) { +- return new Curve25519Exchange(); +- } + if (algo.startsWith("ecdh-sha2-")) { + return new EcDhExchange(); + } else { +diff --git a/src/com/trilead/ssh2/transport/KexManager.java b/src/com/trilead/ssh2/transport/KexManager.java +index c2ec2b0..2c8056a 100644 +--- a/src/com/trilead/ssh2/transport/KexManager.java ++++ b/src/com/trilead/ssh2/transport/KexManager.java +@@ -17,7 +17,6 @@ import com.trilead.ssh2.crypto.CryptoWishList; + import com.trilead.ssh2.crypto.KeyMaterial; + import com.trilead.ssh2.crypto.cipher.BlockCipher; + import com.trilead.ssh2.crypto.cipher.BlockCipherFactory; +-import com.trilead.ssh2.crypto.dh.Curve25519Exchange; + import com.trilead.ssh2.crypto.dh.DhGroupExchange; + import com.trilead.ssh2.crypto.dh.GenericDhExchange; + import 
com.trilead.ssh2.crypto.digest.MessageMac; +@@ -397,8 +396,6 @@ public class KexManager implements MessageHandler + + if ("ecdh-sha2-nistp521".equals(algo)) + continue; +- if (Curve25519Exchange.NAME.equals(algo)||Curve25519Exchange.ALT_NAME.equals(algo)) +- continue; + throw new IllegalArgumentException("Unknown kex algorithm '" + algo + "'"); + } + } +@@ -489,8 +486,6 @@ public class KexManager implements MessageHandler + } + + if (kxs.np.kex_algo.equals("diffie-hellman-group1-sha1") +- || kxs.np.kex_algo.equals(Curve25519Exchange.NAME) +- || kxs.np.kex_algo.equals(Curve25519Exchange.ALT_NAME) + || kxs.np.kex_algo.equals("diffie-hellman-group14-sha1") + || kxs.np.kex_algo.equals("ecdh-sha2-nistp521") + || kxs.np.kex_algo.equals("ecdh-sha2-nistp384") +@@ -630,9 +625,7 @@ public class KexManager implements MessageHandler + || kxs.np.kex_algo.equals("diffie-hellman-group14-sha1") + || kxs.np.kex_algo.equals("ecdh-sha2-nistp256") + || kxs.np.kex_algo.equals("ecdh-sha2-nistp384") +- || kxs.np.kex_algo.equals("ecdh-sha2-nistp521") +- || kxs.np.kex_algo.equals(Curve25519Exchange.NAME) +- || kxs.np.kex_algo.equals(Curve25519Exchange.ALT_NAME)) ++ || kxs.np.kex_algo.equals("ecdh-sha2-nistp521")) + { + if (kxs.state == 1) + { +-- +2.44.0 + diff --git a/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2-build.xml b/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2-build.xml new file mode 100644 index 00000000000..185ad5ae06e --- /dev/null +++ b/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2-build.xml @@ -0,0 +1,113 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.signatures.json b/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.signatures.json index f9cd1a1fd34..be189b068a3 100644 --- a/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.signatures.json +++ b/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.signatures.json @@ -1,5 +1,6 @@ { "Signatures": { - "trilead-ssh2-build217-jenkins-8.tar.gz": "2ad363bbeba25f4e53c2d7a0c3755ba7e531ef074408a61369a01bf2170076bf" + "build-217-jenkins-371.vc1d30dc5a_b_32.tar.gz": "d073e97c33b20131f15075db4e5a1996e75e9236ee407232997ca8e1a846863f", + "trilead-ssh2-build.xml": "26a06a32d27d8a16982222150a6c8c53eeef63d0ef1d7bd9e2dfdcff35f0b618" } -} +} \ No newline at end of file diff --git a/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.spec b/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.spec index 16799356d0f..cb8a1281d3c 100644 --- a/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.spec +++ b/SPECS-EXTENDED/trilead-ssh2/trilead-ssh2.spec @@ -3,7 +3,7 @@ Distribution: Azure Linux # # spec file for package trilead-ssh2 # -# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany. 
+# Copyright (c) 2024 SUSE LLC # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -19,18 +19,26 @@ Distribution: Azure Linux %global buildver 217 -%global patchlvl 8 +%global patchlvl 371 +%global githash vc1d30dc5a_b_32 + Name: trilead-ssh2 -Version: %{buildver}.%{patchlvl} -Release: 2%{?dist} +Version: %{buildver}.%{patchlvl}.%{githash} +Release: 1%{?dist} Summary: SSH-2 protocol implementation in pure Java License: BSD-3-Clause AND MIT Group: Development/Libraries/Java URL: https://github.com/jenkinsci/trilead-ssh2 -Source0: https://github.com/jenkinsci/%{name}/archive/%{name}-build%{buildver}-jenkins-%{patchlvl}.tar.gz +Source0: https://github.com/jenkinsci/%{name}/archive/refs/tags/build-%{buildver}-jenkins-%{patchlvl}.%{githash}.tar.gz +Source1: %{name}-build.xml +Patch0: 0001-Remove-the-dependency-on-google-tink.patch +BuildRequires: ant +BuildRequires: ed25519-java BuildRequires: fdupes -BuildRequires: java-devel -BuildRequires: javapackages-local-bootstrap +BuildRequires: java-devel >= 1.9 +BuildRequires: javapackages-local-bootstrap >= 6 +BuildRequires: javapackages-tools +BuildRequires: jbcrypt BuildArch: noarch %description @@ -49,19 +57,21 @@ Group: Documentation/HTML API documentation for %{name}. %prep -%setup -q -n %{name}-%{name}-build%{buildver}-jenkins-%{patchlvl} +%autosetup -n %{name}-build-%{buildver}-jenkins-%{patchlvl}.%{githash} -p1 +cp %{SOURCE1} build.xml + +%pom_remove_dep :tink +%pom_xpath_set pom:project/pom:version "build-%{buildver}-jenkins-%{patchlvl}.%{githash}" %build -mkdir -p build/classes -javac -d build/classes -source 6 -target 6 $(find src -name \*.java | xargs) -(cd build/classes && jar cf ../%{name}-%{version}.jar $(find . -name \*.class)) -mkdir -p build/docs -javadoc -d build/docs -source 6 $(find src -name \*.java | xargs) -Xdoclint:none +mkdir -p lib +build-jar-repository -s lib eddsa jbcrypt +%{ant} package javadoc %install # jars install -d -m 0755 %{buildroot}%{_javadir} -install -m 644 build/%{name}-%{version}.jar %{buildroot}%{_javadir}/%{name}.jar +install -m 644 target/%{name}-*.jar %{buildroot}%{_javadir}/%{name}.jar # pom install -d -m 755 %{buildroot}%{_mavenpomdir} @@ -70,7 +80,9 @@ install -pm 644 pom.xml %{buildroot}%{_mavenpomdir}/%{name}.pom # javadoc install -d -m 755 %{buildroot}%{_javadocdir}/%{name} -cp -aL build/docs/* %{buildroot}%{_javadocdir}/%{name} +cp -aL target/site/apidocs/* %{buildroot}%{_javadocdir}/%{name} +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}/%{_javadocdir}/%{name}/legal/LICENSE . %fdupes -s %{buildroot}%{_javadocdir}/%{name} %files -f .mfiles @@ -79,9 +91,15 @@ cp -aL build/docs/* %{buildroot}%{_javadocdir}/%{name} %files javadoc %license LICENSE.txt +%license LICENSE ADDITIONAL_LICENSE_INFO %{_javadocdir}/%{name} + %changelog +* Fri Dec 19 2025 BinduSri Adabala - 217.371.vc1d30dc5a_b_32-1 +- Upgrade to 217.371.vc1d30dc5a_b_32 +- License verified + * Thu Oct 14 2021 Pawel Winogrodzki - 217.8-2 - Converting the 'Release' tag to the '[number].[distribution]' format. @@ -94,4 +112,4 @@ cp -aL build/docs/* %{buildroot}%{_javadocdir}/%{name} - Fix the license tag and clean up the spec file a bit * Wed Oct 24 2018 Fridrich Strba - Initial packaging built manually without maven. Spec file adapted - from Fedora rpm. + from Fedora rpm. 
\ No newline at end of file diff --git a/SPECS-EXTENDED/ucx/ucx.signatures.json b/SPECS-EXTENDED/ucx/ucx.signatures.json index bc42282b5a9..8622d28fa0a 100644 --- a/SPECS-EXTENDED/ucx/ucx.signatures.json +++ b/SPECS-EXTENDED/ucx/ucx.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ucx-1.18.0.tar.gz": "C5DDBF6ADE53415CC2402FA540281C92958B67D3B1DD2B4D721D4019616B88D2" + "ucx-1.19.0.tar.gz": "9af07d55281059542f20c5b411db668643543174e51ac71f53f7ac839164f285" } } diff --git a/SPECS-EXTENDED/ucx/ucx.spec b/SPECS-EXTENDED/ucx/ucx.spec index 323df9f82b8..5dc2f2bc58f 100644 --- a/SPECS-EXTENDED/ucx/ucx.spec +++ b/SPECS-EXTENDED/ucx/ucx.spec @@ -10,18 +10,20 @@ %bcond_without xpmem %bcond_with vfs %bcond_with mad +%bcond_with ze %bcond_without mlx5 +%bcond_with efa Summary: UCX is a communication library implementing high-performance messaging Name: ucx -Version: 1.18.0 -Release: 2%{?dist} +Version: 1.19.0 +Release: 1%{?dist} License: BSD Vendor: Microsoft Corporation Distribution: Azure Linux Group: System Environment/Security URL: http://www.openucx.org -Source0: https://github.com/openucx/%{name}/releases/download/v%{version}-rc3/ucx-%{version}.tar.gz +Source0: https://github.com/openucx/%{name}/releases/download/v%{version}/ucx-%{version}.tar.gz # UCX currently supports only the following architectures @@ -50,6 +52,9 @@ BuildRequires: libibverbs-devel %if %{with mlx5} BuildRequires: rdma-core-devel %endif +%if %{with efa} +BuildRequires: rdma-core-devel +%endif %if %{with knem} BuildRequires: knem %endif @@ -119,6 +124,7 @@ Provides header files and examples for developing with UCX. %_with_arg gdrcopy gdrcopy \ %_with_arg ib verbs \ %_with_arg mlx5 mlx5 \ + %_with_arg efa efa \ %_with_arg knem knem \ %_with_arg rdmacm rdmacm \ %_with_arg rocm rocm \ @@ -194,6 +200,9 @@ Provides static libraries required for developing with UCX. %if %{with mlx5} %{_libdir}/pkgconfig/ucx-ib-mlx5.pc %endif +%if %{with efa} +%{_libdir}/pkgconfig/ucx-ib-efa.pc +%endif %if %{with rdmacm} %{_libdir}/pkgconfig/ucx-rdmacm.pc %endif @@ -277,6 +286,19 @@ devices. %{_libdir}/ucx/libuct_ib_mlx5.so.* %endif +%if %{with efa} +%package ib-efa +Requires: %{name}%{?_isa} = %{version}-%{release} +Summary: UCX EFA device RDMA support +Group: System Environment/Libraries + +%description ib-efa +Provides support for EFA device as an IBTA transport for UCX. + +%files ib-efa +%{_libdir}/ucx/libuct_ib_efa.so.* +%endif + %if %{with mad} %package mad Requires: %{name} = %{version}-%{release} @@ -392,7 +414,7 @@ library internals, protocol objects, transports status, and more. %if %{with ze} %package ze Requires: %{name}%{?_isa} = %{version}-%{release} -Summary: UCX Virtual Filesystem support. +Summary: UCX ZE GPU support. Group: System Environment/Libraries %description ze @@ -400,11 +422,15 @@ Provides oneAPI Level Zero (ZE) Runtime support for UCX. %files ze %{_libdir}/ucx/libuct_ze.so.* -%{_bindir}/ucx/libucm_ze.so.* +%{_libdir}/ucx/libucm_ze.so.* %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 1.19.0-1 +- Upgrade version to 1.19.0. 
+- Update source path + * Fri Jan 31 2025 Alberto David Perez Guevara - 1.18.0-2 - Enable knem and xpmem flags diff --git a/SPECS-EXTENDED/xbean/0001-Remove-unused-import.patch b/SPECS-EXTENDED/xbean/0001-Remove-unused-import.patch deleted file mode 100644 index c0a9a2fe254..00000000000 --- a/SPECS-EXTENDED/xbean/0001-Remove-unused-import.patch +++ /dev/null @@ -1,25 +0,0 @@ -From c7b2913a7d552929a3182901466db69ba40e4340 Mon Sep 17 00:00:00 2001 -From: Mikolaj Izdebski -Date: Thu, 12 Dec 2019 08:51:36 +0100 -Subject: [PATCH 1/3] Remove unused import - ---- - .../java/org/apache/xbean/propertyeditor/PropertyEditors.java | 2 -- - 1 file changed, 2 deletions(-) - -diff --git a/xbean-reflect/src/main/java/org/apache/xbean/propertyeditor/PropertyEditors.java b/xbean-reflect/src/main/java/org/apache/xbean/propertyeditor/PropertyEditors.java -index dda3bf4d..5c6285f2 100644 ---- a/xbean-reflect/src/main/java/org/apache/xbean/propertyeditor/PropertyEditors.java -+++ b/xbean-reflect/src/main/java/org/apache/xbean/propertyeditor/PropertyEditors.java -@@ -19,8 +19,6 @@ package org.apache.xbean.propertyeditor; - import java.beans.PropertyEditorManager; - import java.lang.reflect.Type; - --import com.sun.org.apache.regexp.internal.RE; -- - /** - * The property editor manager. This orchestrates Geronimo usage of - * property editors, allowing additional search paths to be added and --- -2.21.0 - diff --git a/SPECS-EXTENDED/xbean/0002-Unbundle-ASM.patch b/SPECS-EXTENDED/xbean/0001-Unbundle-ASM.patch similarity index 100% rename from SPECS-EXTENDED/xbean/0002-Unbundle-ASM.patch rename to SPECS-EXTENDED/xbean/0001-Unbundle-ASM.patch diff --git a/SPECS-EXTENDED/xbean/0003-Remove-dependency-on-log4j-and-commons-logging.patch b/SPECS-EXTENDED/xbean/0002-Remove-dependency-on-log4j-and-commons-logging.patch similarity index 100% rename from SPECS-EXTENDED/xbean/0003-Remove-dependency-on-log4j-and-commons-logging.patch rename to SPECS-EXTENDED/xbean/0002-Remove-dependency-on-log4j-and-commons-logging.patch diff --git a/SPECS-EXTENDED/xbean/build.xml b/SPECS-EXTENDED/xbean/build.xml new file mode 100644 index 00000000000..945965a98dd --- /dev/null +++ b/SPECS-EXTENDED/xbean/build.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/xbean/common.xml b/SPECS-EXTENDED/xbean/common.xml new file mode 100644 index 00000000000..40b7773a13c --- /dev/null +++ b/SPECS-EXTENDED/xbean/common.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/xbean/downgrading-asm-version.patch b/SPECS-EXTENDED/xbean/downgrading-asm-version.patch deleted file mode 100644 index a27b41837a2..00000000000 --- a/SPECS-EXTENDED/xbean/downgrading-asm-version.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 4385ef6397fab079aad4748d889d87293a41073d Mon Sep 17 00:00:00 2001 -From: Pawel Winogrodzki -Date: Fri, 18 Feb 2022 13:46:20 -0800 -Subject: [PATCH] Downgrading ASM version. 
- ---- - .../org/apache/xbean/asm9/original/commons/AsmConstants.java | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/xbean-asm-util/src/main/java/org/apache/xbean/asm9/original/commons/AsmConstants.java b/xbean-asm-util/src/main/java/org/apache/xbean/asm9/original/commons/AsmConstants.java -index 313b08f..a2dd2e6 100644 ---- a/xbean-asm-util/src/main/java/org/apache/xbean/asm9/original/commons/AsmConstants.java -+++ b/xbean-asm-util/src/main/java/org/apache/xbean/asm9/original/commons/AsmConstants.java -@@ -19,5 +19,5 @@ package org.apache.xbean.asm9.original.commons; - import org.objectweb.asm.Opcodes; - - public interface AsmConstants { -- int ASM_VERSION = Integer.getInteger(AsmConstants.class.getName() + ".ASM_VERSION", Opcodes.ASM9); -+ int ASM_VERSION = Integer.getInteger(AsmConstants.class.getName() + ".ASM_VERSION", Opcodes.ASM7); - } --- -2.17.1 - diff --git a/SPECS-EXTENDED/xbean/jdk-11-fix.patch b/SPECS-EXTENDED/xbean/jdk-11-fix.patch deleted file mode 100644 index 8fa5604738c..00000000000 --- a/SPECS-EXTENDED/xbean/jdk-11-fix.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 92c820748ea53dbf7c0250e8ff7798bb846eeafc Mon Sep 17 00:00:00 2001 -From: Pawel Winogrodzki -Date: Fri, 18 Feb 2022 14:09:27 -0800 -Subject: [PATCH] JDK 11 fix. - ---- - .../java/org/apache/xbean/classpath/SunURLClassPath.java | 6 +++--- - .../java/org/apache/xbean/classpath/SystemClassPath.java | 2 +- - .../java/org/apache/xbean/classpath/TomcatClassPath.java | 2 +- - 3 files changed, 5 insertions(+), 5 deletions(-) - -diff --git a/xbean-classpath/src/main/java/org/apache/xbean/classpath/SunURLClassPath.java b/xbean-classpath/src/main/java/org/apache/xbean/classpath/SunURLClassPath.java -index 99fe014..24d0780 100644 ---- a/xbean-classpath/src/main/java/org/apache/xbean/classpath/SunURLClassPath.java -+++ b/xbean-classpath/src/main/java/org/apache/xbean/classpath/SunURLClassPath.java -@@ -49,15 +49,15 @@ public abstract class SunURLClassPath implements ClassPath { - jars[j] = new File(dir, jarNames[j]).toURI().toURL(); - } - -- sun.misc.URLClassPath path = getURLClassPath(loader); -+ jdk.internal.loader.URLClassPath path = getURLClassPath(loader); - for (int i = 0; i < jars.length; i++) { - //System.out.println("URL "+jars[i]); - path.addURL(jars[i]); - } - } - -- protected sun.misc.URLClassPath getURLClassPath(URLClassLoader loader) throws Exception { -- return (sun.misc.URLClassPath) getUcpField().get(loader); -+ protected jdk.internal.loader.URLClassPath getURLClassPath(URLClassLoader loader) throws Exception { -+ return (jdk.internal.loader.URLClassPath) getUcpField().get(loader); - } - - private java.lang.reflect.Field getUcpField() throws Exception { -diff --git a/xbean-classpath/src/main/java/org/apache/xbean/classpath/SystemClassPath.java b/xbean-classpath/src/main/java/org/apache/xbean/classpath/SystemClassPath.java -index e8ba28b..9e387ee 100644 ---- a/xbean-classpath/src/main/java/org/apache/xbean/classpath/SystemClassPath.java -+++ b/xbean-classpath/src/main/java/org/apache/xbean/classpath/SystemClassPath.java -@@ -51,7 +51,7 @@ public class SystemClassPath extends SunURLClassPath { - } - - private void rebuildJavaClassPathVariable() throws Exception { -- sun.misc.URLClassPath cp = getURLClassPath(getSystemLoader()); -+ jdk.internal.loader.URLClassPath cp = getURLClassPath(getSystemLoader()); - URL[] urls = cp.getURLs(); - //for (int i=0; i < urls.length; i++){ - // System.out.println(urls[i].toExternalForm()); -diff --git 
a/xbean-classpath/src/main/java/org/apache/xbean/classpath/TomcatClassPath.java b/xbean-classpath/src/main/java/org/apache/xbean/classpath/TomcatClassPath.java -index 3fe31a5..d099f95 100644 ---- a/xbean-classpath/src/main/java/org/apache/xbean/classpath/TomcatClassPath.java -+++ b/xbean-classpath/src/main/java/org/apache/xbean/classpath/TomcatClassPath.java -@@ -102,7 +102,7 @@ public class TomcatClassPath extends SunURLClassPath { - - protected void rebuild() { - try { -- sun.misc.URLClassPath cp = getURLClassPath((URLClassLoader) getClassLoader()); -+ jdk.internal.loader.URLClassPath cp = getURLClassPath((URLClassLoader) getClassLoader()); - URL[] urls = cp.getURLs(); - //for (int i=0; i < urls.length; i++){ - // System.out.println(urls[i].toExternalForm()); --- -2.17.1 - diff --git a/SPECS-EXTENDED/xbean/xbean-asm-util-build.xml b/SPECS-EXTENDED/xbean/xbean-asm-util-build.xml new file mode 100644 index 00000000000..fa8d2ff597e --- /dev/null +++ b/SPECS-EXTENDED/xbean/xbean-asm-util-build.xml @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/xbean/xbean-finder-build.xml b/SPECS-EXTENDED/xbean/xbean-finder-build.xml new file mode 100644 index 00000000000..d446c49dac3 --- /dev/null +++ b/SPECS-EXTENDED/xbean/xbean-finder-build.xml @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/xbean/xbean-reflect-build.xml b/SPECS-EXTENDED/xbean/xbean-reflect-build.xml new file mode 100644 index 00000000000..96f73ac5947 --- /dev/null +++ b/SPECS-EXTENDED/xbean/xbean-reflect-build.xml @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/xbean/xbean.signatures.json b/SPECS-EXTENDED/xbean/xbean.signatures.json index 59c36392159..bcb59d107f1 100644 --- a/SPECS-EXTENDED/xbean/xbean.signatures.json +++ b/SPECS-EXTENDED/xbean/xbean.signatures.json @@ -1,5 +1,10 @@ { "Signatures": { - "xbean-4.18-source-release.zip": "c23355e3584961addb3d04ba3777e70314d37e3cf265992a961974f1eb762e50" + "build.xml": "ea7d1e03fddfd101b70c2440cae7d62a9638e32a8b7f7a76aefdd4fd185cbdf4", + "common.xml": "66d29c5b2bebc9226ff5073bb3ec655fc1cba5a49dca4a8d8f60fc56fa1f9196", + "xbean-4.24-source-release.zip": "e4b64b40641ffe64eb5d27361addf132ab0db5d712da9e4b67c7a71b78be2b5b", + "xbean-asm-util-build.xml": "c755f2810f93664211bcccb2863ead961d43165f39ec9c3821a27fcaa8b2e5e6", + "xbean-finder-build.xml": "12aa5696fc9dd8694c219432155ab3a7cd11b0bf78b4052577812447f32b43cd", + "xbean-reflect-build.xml": "c7fcd36cd6db1098e0dd9ace2a44efcd6a572fd7d61b05d30f83944b9d951b6a" } } diff --git a/SPECS-EXTENDED/xbean/xbean.spec b/SPECS-EXTENDED/xbean/xbean.spec index d7e2ef0f97b..56a373d1f59 100644 --- a/SPECS-EXTENDED/xbean/xbean.spec +++ b/SPECS-EXTENDED/xbean/xbean.spec @@ -3,7 +3,7 @@ Distribution: Azure Linux # # spec file for package xbean # -# Copyright (c) 2020 SUSE LLC +# Copyright (c) 2024 SUSE LLC # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -19,34 +19,31 @@ Distribution: Azure Linux Name: xbean -Version: 4.18 +Version: 4.24 Release: 
1%{?dist} Summary: Java plugin based web server License: ASL 2.0 Group: Development/Libraries/Java URL: https://geronimo.apache.org/xbean/ -Source0: http://repo2.maven.org/maven2/org/apache/%{name}/%{name}/%{version}/%{name}-%{version}-source-release.zip -Patch1: 0001-Remove-unused-import.patch -# Fix dependency on xbean-asm4-shaded to original objectweb-asm -Patch2: 0002-Unbundle-ASM.patch -Patch3: 0003-Remove-dependency-on-log4j-and-commons-logging.patch -Patch4: downgrading-asm-version.patch -Patch5: jdk-11-fix.patch - +Source0: https://repo1.maven.org/maven2/org/apache/%{name}/%{name}/%{version}/%{name}-%{version}-source-release.zip +Source1: build.xml +Source2: common.xml +Source3: xbean-asm-util-build.xml +Source4: xbean-finder-build.xml +Source5: xbean-reflect-build.xml +Patch0: 0001-Unbundle-ASM.patch +Patch1: 0002-Remove-dependency-on-log4j-and-commons-logging.patch +BuildRequires: ant +BuildRequires: ant-junit +BuildRequires: junit BuildRequires: fdupes BuildRequires: java-devel >= 1.8 -BuildRequires: javapackages-local-bootstrap -BuildRequires: objectweb-asm >= 5 +BuildRequires: javapackages-local-bootstrap >= 6 +BuildRequires: javapackages-tools +BuildRequires: objectweb-asm >= 9 BuildRequires: slf4j BuildRequires: unzip -# The code uses sun.misc.URLClassloader -BuildConflicts: java-devel >= 9 -BuildConflicts: java-headless >= 9 -# Avoid build cycles -BuildConflicts: java-devel-openj9 -BuildConflicts: java-headless-openj9 -Requires: objectweb-asm >= 5 -Requires: slf4j +BuildRequires: xml-commons-apis BuildArch: noarch %description @@ -65,101 +62,82 @@ Group: Documentation/HTML This package provides API documentation for xbean. %prep -%setup -q -# build failing on this due to doxia-sitetools problems -rm src/site/site.xml - -%patch 1 -p1 -%patch 2 -p1 -%patch 3 -p1 -%patch 4 -p1 -%patch 5 -p1 - -%pom_remove_parent -%pom_remove_dep mx4j:mx4j - -%pom_remove_dep -r :xbean-finder-shaded -%pom_disable_module xbean-finder-shaded +%autosetup -p1 -%pom_xpath_remove pom:scope xbean-asm-util -%pom_xpath_remove pom:optional xbean-asm-util +mkdir -p xbean-asm-util xbean-finder xbean-reflect +cp -f %{SOURCE1} . +cp -f %{SOURCE2} . +cp -f %{SOURCE3} xbean-asm-util/build.xml +cp -f %{SOURCE4} xbean-finder/build.xml +cp -f %{SOURCE5} xbean-reflect/build.xml +cp xbean-asm-util/src/main/java/org/apache/xbean/asm9/original/commons/AsmConstants.java xbean-reflect/src/main/java/org/apache/xbean/recipe/ -# Prevent modules depending on springframework from building. -%pom_remove_dep org.springframework: %pom_disable_module xbean-classloader -%pom_disable_module xbean-spring -%pom_disable_module maven-xbean-plugin -rm -rf maven-xbean-plugin -# blueprint FTBFS, disable for now -%pom_disable_module xbean-blueprint - -%pom_remove_dep :xbean-bundleutils xbean-finder -rm -r xbean-finder/src/main/java/org/apache/xbean/finder{,/archive}/Bundle* +%pom_disable_module xbean-classpath %pom_disable_module xbean-bundleutils - +%pom_disable_module xbean-asm9-shaded +%pom_disable_module xbean-finder-shaded +%pom_disable_module xbean-naming +%pom_disable_module xbean-blueprint +%pom_disable_module xbean-spring %pom_disable_module xbean-telnet +%pom_disable_module maven-xbean-plugin -# maven-xbean-plugin invocation makes no sense as there are no namespaces -%pom_remove_plugin :maven-xbean-plugin xbean-classloader - -# As auditing tool RAT is useful for upstream only. 
-%pom_remove_plugin :apache-rat-plugin - -# disable copy of internal aries-blueprint -sed -i "s|||" xbean-blueprint/pom.xml - -%pom_change_dep -r -f ::::: ::::: - -# Removing dependency on Apache commons logging %pom_remove_dep :commons-logging-api xbean-reflect -find -name CommonsLoggingConverter.java -delete - -# Removing dependency on log4j. %pom_remove_dep :log4j xbean-reflect +%pom_remove_dep :xbean-asm9-shaded xbean-reflect +find -name CommonsLoggingConverter.java -delete find -name Log4jConverter.java -delete +# Plugins useful for upstream only +%pom_remove_plugin :apache-rat-plugin +%pom_remove_plugin :maven-source-plugin + +%pom_remove_dep :xbean-bundleutils xbean-finder +%pom_remove_dep org.osgi:org.osgi.core xbean-finder +rm -r xbean-finder/src/main/java/org/apache/xbean/finder{,/archive}/Bundle* %build -for i in xbean-asm-util xbean-classpath xbean-finder xbean-naming xbean-reflect; do - pushd $i - mkdir -p build/classes - javac -d build/classes -encoding utf-8 -source 6 -target 6 \ - -cp $(build-classpath commons-logging-api slf4j/api objectweb-asm/asm objectweb-asm/asm-commons):../xbean-asm-util/xbean-asm-util.jar \ - $(find src/main/java -name *.java) - jar cf $i.jar -C build/classes . - popd -done -mkdir -p build/apidoc -javadoc -d build/apidoc -source 6 -encoding utf-8 \ - -Xdoclint:none \ - -classpath $(build-classpath commons-logging-api slf4j/api objectweb-asm/asm objectweb-asm/asm-commons) \ - $(for i in xbean-asm-util xbean-classpath xbean-finder xbean-naming xbean-reflect; do find $i/src/main/java -name *.java; done | xargs) +mkdir -p lib +build-jar-repository -s lib objectweb-asm slf4j +%{ant} package javadoc %install -# jars && poms +# jars install -dm 755 %{buildroot}%{_javadir}/%{name} +for i in xbean-asm-util xbean-finder xbean-reflect; do + install -m 0644 ${i}/target/${i}-%{version}.jar %{buildroot}%{_javadir}/%{name}/${i}.jar +done + +# poms install -dm 755 %{buildroot}%{_mavenpomdir}/%{name} -for i in xbean-asm-util xbean-classpath xbean-finder xbean-naming xbean-reflect; do - install -m 0644 $i/$i.jar %{buildroot}%{_javadir}/%{name} - %pom_remove_parent ${i} - %pom_xpath_inject pom:project "org.apache.xbean%{version}" ${i} - install -m 0644 $i/pom.xml %{buildroot}%{_mavenpomdir}/%{name}/$i.pom - %add_maven_depmap %{name}/$i.pom %{name}/$i.jar +for i in xbean-asm-util xbean-finder xbean-reflect; do + install -pm 644 ${i}/pom.xml %{buildroot}%{_mavenpomdir}/%{name}/${i}.pom + %add_maven_depmap %{name}/${i}.pom %{name}/${i}.jar done # javadoc install -dm 755 %{buildroot}/%{_javadocdir}/%{name} -cp -aL build/apidoc/* %{buildroot}/%{_javadocdir}/%{name} +for i in xbean-asm-util xbean-finder xbean-reflect; do + cp -r ${i}/target/site/apidocs %{buildroot}/%{_javadocdir}/%{name}/${i} + mv %{buildroot}/%{_javadocdir}/%{name}/${i}/legal/ADDITIONAL_LICENSE_INFO . + mv %{buildroot}/%{_javadocdir}/%{name}/${i}/legal/LICENSE . +done %fdupes -s %{buildroot}/%{_javadocdir}/%{name} %files -f .mfiles %license LICENSE -%doc NOTICE +%license NOTICE %files javadoc +%license LICENSE +%license ADDITIONAL_LICENSE_INFO %{_javadocdir}/%{name} %changelog +* Mon Dec 08 2025 Aditya Singh - 4.24-1 +- Upgrade to version 4.24. +- License verified. + * Mon Jan 31 2022 Pawel Winogrodzki - 4.18-1 - Updating to version 4.18. - Removing dependency on "log4j12". 
diff --git a/SPECS-EXTENDED/xmldb-api/build.xml b/SPECS-EXTENDED/xmldb-api/build.xml new file mode 100644 index 00000000000..a25d4e7189e --- /dev/null +++ b/SPECS-EXTENDED/xmldb-api/build.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/SPECS-EXTENDED/xmldb-api/xmldb-api.signatures.json b/SPECS-EXTENDED/xmldb-api/xmldb-api.signatures.json index 78f8b2e9c10..5735137371b 100644 --- a/SPECS-EXTENDED/xmldb-api/xmldb-api.signatures.json +++ b/SPECS-EXTENDED/xmldb-api/xmldb-api.signatures.json @@ -1,5 +1,6 @@ { "Signatures": { - "xmldb-xapi-20041010-src.tar.bz2": "cb610fef52281e746114c46de19a60316200cea138d567ee9d1815bbfa955a31" + "build.xml": "a4bfc1f1d751d47028bf586557affdbf8aee19f34fd726385b71fcc76799695f", + "xmldb-api-1.7.tar.gz": "1a9fdcd3e854092efaf3718ceb71249afb1ffc4f80e65a36f113e3e0eee90f08" } } diff --git a/SPECS-EXTENDED/xmldb-api/xmldb-api.spec b/SPECS-EXTENDED/xmldb-api/xmldb-api.spec index b25e9a99dac..7acfb2d894f 100644 --- a/SPECS-EXTENDED/xmldb-api/xmldb-api.spec +++ b/SPECS-EXTENDED/xmldb-api/xmldb-api.spec @@ -3,7 +3,7 @@ Distribution: Azure Linux # # spec file for package xmldb-api # -# Copyright (c) 2017 SUSE LINUX GmbH, Nuernberg, Germany. +# Copyright (c) 2024 SUSE LLC # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -14,28 +14,24 @@ Distribution: Azure Linux # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. -# Please submit bugfixes or comments via http://bugs.opensuse.org/ +# Please submit bugfixes or comments via https://bugs.opensuse.org/ # - %define bname xmldb -%define cvs_version 20041010 +%global gh_version 1.7 Name: xmldb-api -Version: 0.1 -Release: 29%{?dist} +Version: %{gh_version}.0 +Release: 1%{?dist} Summary: XML:DB API for Java License: Apache-1.1 -Url: http://xmldb-org.sourceforge.net -# cvs -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/xmldb-org login -# cvs -z3 -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/xmldb-org export -D 2004-10-10 xapi -Source0: %{_distro_sources_url}/xmldb-xapi-%{cvs_version}-src.tar.bz2 -Patch0: xmldb-api-java5-enum.patch +Group: Development/Libraries/Java +URL: https://github.com/xmldb-org/%{name} +Source0: %{url}/archive/%{name}-%{gh_version}.tar.gz +Source1: build.xml BuildRequires: ant >= 1.6 BuildRequires: javapackages-tools -BuildRequires: junit BuildRequires: xalan-j2 Requires: xalan-j2 -BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildArch: noarch %description @@ -67,17 +63,6 @@ test cases that can be used to help validate the driver while it is being developed. The test cases are still in development but there are enough tests currently to be useful. -%package -n xmldb-common -Summary: XML:DB API for Java -Group: Development/Libraries/Java -Requires: %{name} = %{version} - -%description -n xmldb-common -The API interfaces are what driver developers must implement when -creating a new driver, and are the interfaces that applications are -developed against. Along with the interfaces, a concrete DriverManager -implementation is also provided. - %package javadoc Summary: Documentation for XML:DB API for Java Group: Documentation/HTML @@ -89,39 +74,33 @@ developed against. Along with the interfaces, a concrete DriverManager implementation is also provided. %prep -%setup -q -n xapi -%patch 0 -p1 -find . 
-name "*.jar" | xargs -t rm -# FIXME: (dwalluck): These use org.apache.xalan.xpath -rm src/common/org/xmldb/common/xml/queries/xalan/XPathQueryImpl.java -rm src/common/org/xmldb/common/xml/queries/xalan/XObjectImpl.java -rm src/common/org/xmldb/common/xml/queries/xalan/XPathQueryFactoryImpl.java -rm src/common/org/xmldb/common/xml/queries/xt/XPathQueryImpl.java -rm src/common/org/xmldb/common/xml/queries/xt/XPathQueryFactoryImpl.java +%autosetup -n %{name}-%{name}-%{gh_version} +cp -f %{SOURCE1} build.xml %build -export CLASSPATH=$(build-classpath junit xalan-j2) +export CLASSPATH=$(build-classpath xalan-j2) export OPT_JAR_LIST=: ant \ - -Dant.build.javac.source=1.6 -Dant.build.javac.target=1.6 \ + -Dant.build.javac.source=1.8 -Dant.build.javac.target=1.8 \ -Djarname=%{name} -Dsdk.jarname=%{name}-sdk \ - dist + dist javadoc %install # jars install -d -m 755 %{buildroot}%{_javadir} install -m 644 dist/xmldb/%{name}.jar %{buildroot}%{_javadir}/%{name}-%{version}.jar install -m 644 dist/xmldb/%{name}-sdk.jar %{buildroot}%{_javadir}/%{name}-sdk-%{version}.jar -install -m 644 dist/xmldb/%{bname}-common.jar %{buildroot}%{_javadir}/%{bname}-common-%{version}.jar (cd %{buildroot}%{_javadir} && for jar in *-%{version}*; do ln -sf ${jar} ${jar/-%{version}/}; done) # javadoc install -d -m 755 %{buildroot}%{_javadocdir}/%{name} cp -pr src/build/javadoc/full/* %{buildroot}%{_javadocdir}/%{name} +mv %{buildroot}%{_javadocdir}/%{name}/legal/ADDITIONAL_LICENSE_INFO . +mv %{buildroot}%{_javadocdir}/%{name}/legal/LICENSE . %files %defattr(0644,root,root,0755) -%license src/LICENSE -%doc src/{AUTHORS,README,config.xml} +%license LICENSE +%doc {AUTHORS,README.adoc} %{_javadir}/%{name}-%{version}.jar %{_javadir}/%{name}.jar @@ -130,16 +109,17 @@ cp -pr src/build/javadoc/full/* %{buildroot}%{_javadocdir}/%{name} %{_javadir}/%{name}-sdk-%{version}.jar %{_javadir}/%{name}-sdk.jar -%files -n %{bname}-common -%defattr(0644,root,root,0755) -%{_javadir}/%{bname}-common-%{version}.jar -%{_javadir}/%{bname}-common.jar - %files javadoc %defattr(0644,root,root,0755) %{_javadocdir}/%{name} +%license ADDITIONAL_LICENSE_INFO +%license LICENSE %changelog +* Fri Dec 12 2025 Aditya Singh - 1.7.0-1 +- Upgrade to version 1.7.0 +- License verified. + * Thu Feb 22 2024 Pawel Winogrodzki - 0.1-29 - Updating naming for 3.0 version of Azure Linux. diff --git a/SPECS-SIGNED/edk2-hvloader-signed/edk2-hvloader-signed.spec b/SPECS-SIGNED/edk2-hvloader-signed/edk2-hvloader-signed.spec index fe13c511d2c..bd1f2ba18e8 100644 --- a/SPECS-SIGNED/edk2-hvloader-signed/edk2-hvloader-signed.spec +++ b/SPECS-SIGNED/edk2-hvloader-signed/edk2-hvloader-signed.spec @@ -11,7 +11,7 @@ Summary: Signed HvLoader.efi for %{buildarch} systems Name: edk2-hvloader-signed-%{buildarch} Version: %{GITDATE}git%{GITCOMMIT} -Release: 10%{?dist} +Release: 14%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -74,6 +74,18 @@ popd /boot/efi/HvLoader.efi %changelog +* Tue Feb 03 2026 Azure Linux Security Servicing Account - 20240524git3e722403cd16-14 +- Bump release for consistency with edk2 spec. + +* Sun Feb 01 2026 Azure Linux Security Servicing Account - 20240524git3e722403cd16-13 +- Bump release for consistency with edk2 spec. + +* Fri Jan 30 2026 Azure Linux Security Servicing Account - 20240524git3e722403cd16-12 +- Bump release for consistency with edk2 spec. + +* Thu Dec 18 2025 Jyoti kanase - 20240524git3e722403cd16-11 +- Bump release for consistency with edk2 spec. 
+ * Fri Oct 03 2025 Azure Linux Security Servicing Account - 20240524git3e722403cd16-10 - Bump release for consistency with edk2 spec. diff --git a/SPECS-SIGNED/fwctl-hwe-signed/fwctl-hwe-signed.spec b/SPECS-SIGNED/fwctl-hwe-signed/fwctl-hwe-signed.spec deleted file mode 100644 index 48f07e28156..00000000000 --- a/SPECS-SIGNED/fwctl-hwe-signed/fwctl-hwe-signed.spec +++ /dev/null @@ -1,196 +0,0 @@ -# -# Copyright (c) 2024 Nvidia Inc. All rights reserved. -# -# This software is available to you under a choice of one of two -# licenses. You may choose to be licensed under the terms of the GNU -# General Public License (GPL) Version 2, available from the file -# COPYING in the main directory of this source tree, or the -# OpenIB.org BSD license below: -# -# Redistribution and use in source and binary forms, with or -# without modification, are permitted provided that the following -# conditions are met: -# -# - Redistributions of source code must retain the above -# copyright notice, this list of conditions and the following -# disclaimer. -# -# - Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials -# provided with the distribution. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# - -%global debug_package %{nil} -# The default %%__os_install_post macro ends up stripping the signatures off of the kernel module. -%define __os_install_post %{__os_install_post_leave_signatures} %{nil} - -# hard code versions due to ADO bug:58993948 -%global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 -%global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} -%global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} - -%global KVERSION %{target_kernel_version_full} - -%{!?_name: %define _name fwctl-hwe} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} - -Summary: %{_name} Driver -Name: %{_name}-signed -Version: 24.10 -Release: 24%{release_suffix}%{?dist} -License: GPLv2 -Url: http://nvidia.com -Group: System Environment/Base - -# -# To populate these sources: -# 1. Build the unsigned packages as normal -# 2. Sign the desired binary -# 3. Place the unsigned package and signed binary in this spec's folder -# 4. 
Build this spec - -Source0: %{_name}-%{version}-%{release}.%{_arch}.rpm -Source1: fwctl.ko -Source2: mlx5_fwctl.ko - -Vendor: Microsoft Corporation -Distribution: Azure Linux -ExclusiveArch: aarch64 - -%description -fwctl signed kernel modules - -%package -n %{_name} -Summary: %{summary} -Requires: mlnx-ofa_kernel -Requires: mlnx-ofa_kernel-hwe-modules = %{_mofed_full_version} -Requires: kernel-hwe = %{target_kernel_version_full} -Requires: kmod -Conflicts: fwctl - -%description -n %{_name} -%{description} - -%prep - -%build -mkdir rpm_contents -pushd rpm_contents - -# This spec's whole purpose is to inject the signed modules -rpm2cpio %{SOURCE0} | cpio -idmv - -cp -rf %{SOURCE1} ./lib/modules/%{KVERSION}/updates/%{_name}/fwctl.ko -cp -rf %{SOURCE2} ./lib/modules/%{KVERSION}/updates/%{_name}/mlx5/mlx5_fwctl.ko - -popd - -%install -pushd rpm_contents - -# Don't use * wildcard. It does not copy over hidden files in the root folder... -cp -rp ./. %{buildroot}/ - -popd - -%post -n %{_name} -if [ $1 -ge 1 ]; then # 1 : This package is being installed or reinstalled - /sbin/depmod %{KVERSION} -fi # 1 : closed -# END of post - -%postun -n %{_name} -/sbin/depmod %{KVERSION} - -%files -n %{_name} -%defattr(-,root,root,-) -%license %{_datadir}/licenses/%{_name}/copyright -/lib/modules/%{KVERSION}/updates/ -%config(noreplace) %{_sysconfdir}/depmod.d/zz02-fwctl-*.conf - -%changelog -* Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 -- Bump to match kernel-hwe - -* Fri Oct 10 2025 Pawel Winogrodzki - 24.10-23_6.12.50.2-1 -- Adjusted package dependencies on user space components. - -* Fri Oct 06 2025 Siddharth Chintamaneni - 24.10-22_6.12.50.2-1 -- Bump to match kernel-hwe -- Fix signed spec for -hwe variant - -* Fri Sep 12 2025 Rachel Menge - 24.10-21 -- Bump to match kernel-hwe - -* Mon Sep 08 2025 Elaheh Dehghani - 24.10-20 -- Build using kernel-hwe for aarch64 architecture - -* Fri May 23 2025 CBL-Mariner Servicing Account - 24.10-19 -- Bump release to rebuild for new kernel release - -* Tue May 13 2025 Siddharth Chintamaneni - 24.10-18 -- Bump release to rebuild for new kernel release - -* Tue Apr 29 2025 Siddharth Chintamaneni - 24.10-17 -- Bump release to rebuild for new kernel release - -* Fri Apr 25 2025 Chris Co - 24.10-16 -- Bump release to rebuild for new kernel release - -* Tue Apr 08 2025 Pawel Winogrodzki - 24.10-15 -- Re-naming the package to de-duplicate the SRPM name. 
- -* Sat Apr 05 2025 CBL-Mariner Servicing Account - 24.10-14 -- Bump release to rebuild for new kernel release - -* Fri Mar 14 2025 CBL-Mariner Servicing Account - 24.10-13 -- Bump release to rebuild for new kernel release - -* Tue Mar 11 2025 CBL-Mariner Servicing Account - 24.10-12 -- Bump release to rebuild for new kernel release - -* Mon Mar 10 2025 Chris Co - 24.10-11 -- Bump release to rebuild for new kernel release - -* Wed Mar 05 2025 Rachel Menge - 24.10-10 -- Bump release to rebuild for new kernel release - -* Tue Mar 04 2025 Rachel Menge - 24.10-9 -- Bump release to rebuild for new kernel release - -* Wed Feb 19 2025 Chris Co - 24.10-8 -- Bump release to rebuild for new kernel release - -* Tue Feb 11 2025 Rachel Menge - 24.10-7 -- Bump release to rebuild for new kernel release - -* Wed Feb 05 2025 Tobias Brick - 24.10-6 -- Bump release to rebuild for new kernel release - -* Tue Feb 04 2025 Alberto David Perez Guevara - 24.10-5 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-4 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-3 -- Bump release to match kernel - -* Thu Jan 30 2025 Rachel Menge - 24.10-2 -- Bump release to match kernel - -* Sat Jan 18 2025 Binu Jose Philip - 24.10-1 -- Creating signed spec -- Initial Azure Linux import from NVIDIA (license: GPLv2) -- License verified diff --git a/SPECS-SIGNED/fwctl-signed/fwctl-signed.spec b/SPECS-SIGNED/fwctl-signed/fwctl-signed.spec deleted file mode 100644 index 82b30535089..00000000000 --- a/SPECS-SIGNED/fwctl-signed/fwctl-signed.spec +++ /dev/null @@ -1,185 +0,0 @@ -# -# Copyright (c) 2024 Nvidia Inc. All rights reserved. -# -# This software is available to you under a choice of one of two -# licenses. You may choose to be licensed under the terms of the GNU -# General Public License (GPL) Version 2, available from the file -# COPYING in the main directory of this source tree, or the -# OpenIB.org BSD license below: -# -# Redistribution and use in source and binary forms, with or -# without modification, are permitted provided that the following -# conditions are met: -# -# - Redistributions of source code must retain the above -# copyright notice, this list of conditions and the following -# disclaimer. -# -# - Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials -# provided with the distribution. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# - -%global debug_package %{nil} -# The default %%__os_install_post macro ends up stripping the signatures off of the kernel module. 
-%define __os_install_post %{__os_install_post_leave_signatures} %{nil} - -%global target_kernel_version_full %(/bin/rpm -q --queryformat '%{RPMTAG_VERSION}-%{RPMTAG_RELEASE}' $(/bin/rpm -q --whatprovides kernel-headers)) -%global target_azl_build_kernel_version %(/bin/rpm -q --queryformat '%{RPMTAG_VERSION}' $(/bin/rpm -q --whatprovides kernel-headers)) -%global target_kernel_release %(/bin/rpm -q --queryformat '%{RPMTAG_RELEASE}' $(/bin/rpm -q --whatprovides kernel-headers) | /bin/cut -d . -f 1) -%global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} - -%global KVERSION %{target_kernel_version_full} - -%{!?_name: %define _name fwctl} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} - -Summary: %{_name} Driver -Name: %{_name}-signed -Version: 24.10 -Release: 21%{release_suffix}%{?dist} -License: GPLv2 -Url: http://nvidia.com -Group: System Environment/Base - -# -# To populate these sources: -# 1. Build the unsigned packages as normal -# 2. Sign the desired binary -# 3. Place the unsigned package and signed binary in this spec's folder -# 4. Build this spec - -Source0: %{_name}-%{version}-%{release}.%{_arch}.rpm -Source1: fwctl.ko -Source2: mlx5_fwctl.ko - -Vendor: Microsoft Corporation -Distribution: Azure Linux -ExclusiveArch: x86_64 - -%description -fwctl signed kernel modules - -%package -n %{_name} -Summary: %{summary} -Requires: mlnx-ofa_kernel = %{_mofed_full_version} -Requires: mlnx-ofa_kernel-modules = %{_mofed_full_version} -Requires: kernel = %{target_kernel_version_full} -Requires: kmod - -%description -n %{_name} -%{description} - -%prep - -%build -mkdir rpm_contents -pushd rpm_contents - -# This spec's whole purpose is to inject the signed modules -rpm2cpio %{SOURCE0} | cpio -idmv - -cp -rf %{SOURCE1} ./lib/modules/%{KVERSION}/updates/fwctl/fwctl.ko -cp -rf %{SOURCE2} ./lib/modules/%{KVERSION}/updates/fwctl/mlx5/mlx5_fwctl.ko - -popd - -%install -pushd rpm_contents - -# Don't use * wildcard. It does not copy over hidden files in the root folder... -cp -rp ./. %{buildroot}/ - -popd - -%post -n %{_name} -if [ $1 -ge 1 ]; then # 1 : This package is being installed or reinstalled - /sbin/depmod %{KVERSION} -fi # 1 : closed -# END of post - -%postun -n %{_name} -/sbin/depmod %{KVERSION} - -%files -n %{_name} -%defattr(-,root,root,-) -%license %{_datadir}/licenses/%{_name}/copyright -/lib/modules/%{KVERSION}/updates/ -%config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{_name}-*.conf - - -%changelog -* Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 -- Bump mofed release number - -* Thu May 29 2025 Nicolas Guibourge - 24.10-20 -- Add kernel version and release nb into release nb - -* Fri May 23 2025 CBL-Mariner Servicing Account - 24.10-19 -- Bump release to rebuild for new kernel release - -* Tue May 13 2025 Siddharth Chintamaneni - 24.10-18 -- Bump release to rebuild for new kernel release - -* Tue Apr 29 2025 Siddharth Chintamaneni - 24.10-17 -- Bump release to rebuild for new kernel release - -* Fri Apr 25 2025 Chris Co - 24.10-16 -- Bump release to rebuild for new kernel release - -* Tue Apr 08 2025 Pawel Winogrodzki - 24.10-15 -- Re-naming the package to de-duplicate the SRPM name. 
- -* Sat Apr 05 2025 CBL-Mariner Servicing Account - 24.10-14 -- Bump release to rebuild for new kernel release - -* Fri Mar 14 2025 CBL-Mariner Servicing Account - 24.10-13 -- Bump release to rebuild for new kernel release - -* Tue Mar 11 2025 CBL-Mariner Servicing Account - 24.10-12 -- Bump release to rebuild for new kernel release - -* Mon Mar 10 2025 Chris Co - 24.10-11 -- Bump release to rebuild for new kernel release - -* Wed Mar 05 2025 Rachel Menge - 24.10-10 -- Bump release to rebuild for new kernel release - -* Tue Mar 04 2025 Rachel Menge - 24.10-9 -- Bump release to rebuild for new kernel release - -* Wed Feb 19 2025 Chris Co - 24.10-8 -- Bump release to rebuild for new kernel release - -* Tue Feb 11 2025 Rachel Menge - 24.10-7 -- Bump release to rebuild for new kernel release - -* Wed Feb 05 2025 Tobias Brick - 24.10-6 -- Bump release to rebuild for new kernel release - -* Tue Feb 04 2025 Alberto David Perez Guevara - 24.10-5 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-4 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-3 -- Bump release to match kernel - -* Thu Jan 30 2025 Rachel Menge - 24.10-2 -- Bump release to match kernel - -* Sat Jan 18 2025 Binu Jose Philip - 24.10-1 -- Creating signed spec -- Initial Azure Linux import from NVIDIA (license: GPLv2) -- License verified diff --git a/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec b/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec index ac87321505d..fe4b56ba482 100644 --- a/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec +++ b/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec @@ -13,7 +13,7 @@ Summary: Signed GRand Unified Bootloader for %{buildarch} systems Name: grub2-efi-binary-signed-%{buildarch} Version: 2.06 -Release: 25%{?dist} +Release: 26%{?dist} License: GPLv3+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -84,6 +84,9 @@ cp %{SOURCE3} %{buildroot}/boot/efi/EFI/%{efidir}/%{grubpxeefiname} /boot/efi/EFI/%{efidir}/%{grubpxeefiname} %changelog +* Mon Nov 24 2025 Akhila Guruju - 2.06-26 +- Bump release number to match grub release + * Tue Jun 17 2025 Kshitiz Godara - 2.06-25 - Bump release number to match grub release diff --git a/SPECS-SIGNED/iser-hwe-signed/iser-hwe-signed.spec b/SPECS-SIGNED/iser-hwe-signed/iser-hwe-signed.spec index 17e980fa988..cf8d11b56ee 100644 --- a/SPECS-SIGNED/iser-hwe-signed/iser-hwe-signed.spec +++ b/SPECS-SIGNED/iser-hwe-signed/iser-hwe-signed.spec @@ -32,19 +32,19 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %global KVERSION %{target_kernel_version_full} %{!?_name: %define _name iser-hwe} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-2%{release_suffix}%{?dist}} Summary: %{_name} Driver Name: %{_name}-signed -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -61,7 +61,6 @@ Source1: ib_iser.ko Vendor: Microsoft Corporation Distribution: Azure Linux 
-ExclusiveArch: aarch64 %description iser signed kernel modules @@ -112,6 +111,13 @@ fi # 1 : closed %config(noreplace) %{_sysconfdir}/depmod.d/zz02-iser-*.conf %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/iser-signed/iser-signed.spec b/SPECS-SIGNED/iser-signed/iser-signed.spec index 997593ac32b..b4812a1d04f 100644 --- a/SPECS-SIGNED/iser-signed/iser-signed.spec +++ b/SPECS-SIGNED/iser-signed/iser-signed.spec @@ -38,12 +38,12 @@ %global KVERSION %{target_kernel_version_full} %{!?_name: %define _name iser} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-1%{release_suffix}%{?dist}} Summary: %{_name} Driver Name: %{_name}-signed -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -110,6 +110,9 @@ fi # 1 : closed %config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{_name}-*.conf %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS-SIGNED/isert-hwe-signed/isert-hwe-signed.spec b/SPECS-SIGNED/isert-hwe-signed/isert-hwe-signed.spec index d655d629a04..a2cec60bdb0 100644 --- a/SPECS-SIGNED/isert-hwe-signed/isert-hwe-signed.spec +++ b/SPECS-SIGNED/isert-hwe-signed/isert-hwe-signed.spec @@ -32,19 +32,19 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %global KVERSION %{target_kernel_version_full} %{!?_name: %define _name isert-hwe} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-2%{release_suffix}%{?dist}} Summary: %{_name} Driver -Name: %{_name}-signed -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Name: %{_name}-signed +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -61,7 +61,6 @@ Source1: ib_isert.ko Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 %description isert signed kernel modules @@ -111,6 +110,13 @@ fi # 1 : closed %config(noreplace) %{_sysconfdir}/depmod.d/zz02-isert-*.conf %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. 
+ * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/isert-signed/isert-signed.spec b/SPECS-SIGNED/isert-signed/isert-signed.spec index 7029baea58a..651837398ab 100644 --- a/SPECS-SIGNED/isert-signed/isert-signed.spec +++ b/SPECS-SIGNED/isert-signed/isert-signed.spec @@ -38,12 +38,12 @@ %global KVERSION %{target_kernel_version_full} %{!?_name: %define _name isert} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-1%{release_suffix}%{?dist}} Summary: %{_name} Driver -Name: %{_name}-signed -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Name: %{_name}-signed +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -109,6 +109,9 @@ fi # 1 : closed %config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{_name}-*.conf %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec b/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec index 453924f74b0..798aea4d52a 100644 --- a/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec +++ b/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec @@ -6,7 +6,7 @@ %define uname_r %{version}-%{release} Summary: Signed Linux Kernel for %{buildarch} systems Name: kernel-64k-signed-%{buildarch} -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -105,6 +105,21 @@ echo "initrd of kernel %{uname_r} removed" >&2 %exclude /module_info.ld %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Bump release to match kernel + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Bump release to match kernel,kernel-ipe + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Bump release to match kernel,kernel-ipe,kernel-64k + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS-SIGNED/kernel-hwe-signed/kernel-hwe-signed.spec b/SPECS-SIGNED/kernel-hwe-signed/kernel-hwe-signed.spec index cf3b992768a..9e789322585 100644 --- a/SPECS-SIGNED/kernel-hwe-signed/kernel-hwe-signed.spec +++ b/SPECS-SIGNED/kernel-hwe-signed/kernel-hwe-signed.spec @@ -9,7 +9,7 @@ Summary: Signed Linux Kernel for %{buildarch} systems Name: kernel-hwe-signed-%{buildarch} Version: 6.12.57.1 -Release: 1%{?dist} +Release: 2%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -100,6 +100,9 @@ echo "initrd of kernel %{uname_r} removed" >&2 %exclude /module_info.ld %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 6.12.57.1-2 +- Bump to match kernel-hwe. 
+ * Wed Nov 05 2025 Siddharth Chintamaneni - 6.12.57.1-1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec b/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec index 00ebb4cc3be..5eaa396d001 100644 --- a/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec +++ b/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec @@ -10,7 +10,7 @@ Summary: Signed MSHV-enabled Linux Kernel for %{buildarch} systems Name: kernel-mshv-signed-%{buildarch} Version: 6.6.100.mshv1 -Release: 2%{?dist} +Release: 3%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -140,6 +140,9 @@ echo "initrd of kernel %{uname_r} removed" >&2 %exclude /lib/modules/%{uname_r}/build %changelog +* Mon Jan 06 2026 Roaa Sakr - 6.6.100.mshv1-3 +- Enable ftrace syscalls tracing support in kernel config + * Wed Oct 22 2025 Saul Paredes - 6.6.100.mshv1-2 - Enable build on aarch64 diff --git a/SPECS-SIGNED/kernel-signed/kernel-signed.spec b/SPECS-SIGNED/kernel-signed/kernel-signed.spec index 03ed23b5519..c14d5b669a8 100644 --- a/SPECS-SIGNED/kernel-signed/kernel-signed.spec +++ b/SPECS-SIGNED/kernel-signed/kernel-signed.spec @@ -9,7 +9,7 @@ %define uname_r %{version}-%{release} Summary: Signed Linux Kernel for %{buildarch} systems Name: kernel-signed-%{buildarch} -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -145,6 +145,21 @@ echo "initrd of kernel %{uname_r} removed" >&2 %exclude /module_info.ld %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Bump release to match kernel + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Bump release to match kernel,kernel-ipe + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Bump release to match kernel,kernel-ipe,kernel-64k + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec b/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec index 18dc4a88a70..b233e3995e7 100644 --- a/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec +++ b/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec @@ -5,7 +5,7 @@ %define kernelver %{version}-%{release} Summary: Signed Unified Kernel Image for %{buildarch} systems Name: kernel-uki-signed-%{buildarch} -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -68,6 +68,21 @@ popd /boot/efi/EFI/Linux/vmlinuz-uki-%{kernelver}.efi %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Bump release to match kernel + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Bump release to match kernel,kernel-ipe + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Bump release to match kernel,kernel-ipe,kernel-64k + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS-SIGNED/knem-hwe-modules-signed/knem-hwe-modules-signed.spec b/SPECS-SIGNED/knem-hwe-modules-signed/knem-hwe-modules-signed.spec index 9548e1b703e..3fe14f5ad82 100644 --- a/SPECS-SIGNED/knem-hwe-modules-signed/knem-hwe-modules-signed.spec +++ 
b/SPECS-SIGNED/knem-hwe-modules-signed/knem-hwe-modules-signed.spec @@ -29,7 +29,7 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} @@ -44,14 +44,13 @@ Summary: KNEM: High-Performance Intra-Node MPI Communication Name: %{_name}-signed Version: 1.1.4.90mlnx3 -Release: 24%{release_suffix}%{?dist} +Release: 26%{release_suffix}%{?dist} Provides: knem-hwe-mlnx = %{version}-%{release} Obsoletes: knem-hwe-mlnx < %{version}-%{release} License: BSD and GPLv2 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 # @@ -111,6 +110,13 @@ fi /lib/modules/ %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 1.1.4.90mlnx3-26_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 1.1.4.90mlnx3-25_6.12.57.1.1 +- Build with OFED 25.07.0.9.7.1. +- Enable build on x86_64 kernel hwe. + * Wed Nov 05 2025 Siddharth Chintamaneni - 1.1.4.90mlnx3-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/knem-modules-signed/knem-modules-signed.spec b/SPECS-SIGNED/knem-modules-signed/knem-modules-signed.spec index 9a427ae2296..7a52ad3e219 100644 --- a/SPECS-SIGNED/knem-modules-signed/knem-modules-signed.spec +++ b/SPECS-SIGNED/knem-modules-signed/knem-modules-signed.spec @@ -43,7 +43,7 @@ Summary: KNEM: High-Performance Intra-Node MPI Communication Name: %{_name}-signed Version: 1.1.4.90mlnx3 -Release: 21%{release_suffix}%{?dist} +Release: 22%{release_suffix}%{?dist} Provides: knem-mlnx = %{version}-%{release} Obsoletes: knem-mlnx < %{version}-%{release} License: BSD and GPLv2 @@ -108,6 +108,9 @@ fi /lib/modules/ %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 1.1.4.90mlnx3-22 +- Bump release to rebuild for new release. 
+ * Fri Oct 10 2025 Pawel Winogrodzki - 1.1.4.90mlnx3-21 - Bump release to rebuild for new release diff --git a/SPECS-SIGNED/mft_kernel-hwe-signed/mft_kernel-hwe-signed.spec b/SPECS-SIGNED/mft_kernel-hwe-signed/mft_kernel-hwe-signed.spec index b4cef78c16d..579e6d21bca 100644 --- a/SPECS-SIGNED/mft_kernel-hwe-signed/mft_kernel-hwe-signed.spec +++ b/SPECS-SIGNED/mft_kernel-hwe-signed/mft_kernel-hwe-signed.spec @@ -5,7 +5,7 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} @@ -14,8 +14,8 @@ Name: %{_name}-signed Summary: %{_name} Kernel Module for the %{KVERSION} kernel -Version: 4.30.0 -Release: 24%{release_suffix}%{?dist} +Version: 4.33.0 +Release: 2%{release_suffix}%{?dist} License: Dual BSD/GPLv2 Group: System Environment/Kernel @@ -29,9 +29,13 @@ Group: System Environment/Kernel Source0: %{_name}-%{version}-%{release}.%{_arch}.rpm Source1: mst_pci.ko Source2: mst_pciconf.ko +%ifarch aarch64 +Source3: bf3_livefish.ko +%endif + Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 + Conflicts: mft_kernel Conflicts: kernel-mft @@ -61,6 +65,9 @@ pushd rpm_contents rpm2cpio %{SOURCE0} | cpio -idmv cp -rf %{SOURCE1} ./lib/modules/%{KVERSION}/updates/mst_pci.ko cp -rf %{SOURCE2} ./lib/modules/%{KVERSION}/updates/mst_pciconf.ko +%ifarch aarch64 +cp -rf %{SOURCE3} ./lib/modules/%{KVERSION}/updates/bf3_livefish.ko +%endif popd @@ -84,6 +91,13 @@ popd /lib/modules/%{KVERSION}/updates/ %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 4.33.0-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 4.33.0-1_6.12.57.1.1 +- Upgrade version to 4.33.0. +- Enable build on x86_64 kernel hwe. + * Wed Nov 05 2025 Siddharth Chintamaneni - 4.30.0-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/mft_kernel-signed/mft_kernel-signed.spec b/SPECS-SIGNED/mft_kernel-signed/mft_kernel-signed.spec index 624b92edc5f..62d1af9dc2f 100644 --- a/SPECS-SIGNED/mft_kernel-signed/mft_kernel-signed.spec +++ b/SPECS-SIGNED/mft_kernel-signed/mft_kernel-signed.spec @@ -13,8 +13,8 @@ Name: %{_name}-signed Summary: %{_name} Kernel Module for the %{KVERSION} kernel -Version: 4.30.0 -Release: 20%{release_suffix}%{?dist} +Version: 4.33.0 +Release: 1%{release_suffix}%{?dist} License: Dual BSD/GPLv2 Group: System Environment/Kernel @@ -81,6 +81,9 @@ popd /lib/modules/%{KVERSION}/updates/ %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 4.33.0-1 +- Upgrade version to 4.33.0. 
+ * Thu May 29 2025 Nicolas Guibourge - 4.30.0-20 - Add kernel version and release nb into release nb diff --git a/SPECS-SIGNED/mlnx-nfsrdma-hwe-signed/mlnx-nfsrdma-hwe-signed.spec b/SPECS-SIGNED/mlnx-nfsrdma-hwe-signed/mlnx-nfsrdma-hwe-signed.spec index 868ae179699..85b65524093 100644 --- a/SPECS-SIGNED/mlnx-nfsrdma-hwe-signed/mlnx-nfsrdma-hwe-signed.spec +++ b/SPECS-SIGNED/mlnx-nfsrdma-hwe-signed/mlnx-nfsrdma-hwe-signed.spec @@ -32,20 +32,19 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %global KVERSION %{target_kernel_version_full} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} - +%{!?_mofed_full_version: %define _mofed_full_version 25.07-2%{release_suffix}%{?dist}} %{!?_name: %define _name mlnx-nfsrdma-hwe} Summary: %{_name} Driver Name: %{_name}-signed -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -64,7 +63,6 @@ Source3: xprtrdma.ko Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 %description mellanox rdma signed kernel modules @@ -119,6 +117,13 @@ fi %config(noreplace) %{_sysconfdir}/depmod.d/zz02-mlnx-nfsrdma-*.conf %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/mlnx-nfsrdma-signed/mlnx-nfsrdma-signed.spec b/SPECS-SIGNED/mlnx-nfsrdma-signed/mlnx-nfsrdma-signed.spec index 2818e2066ca..ee61b001cba 100644 --- a/SPECS-SIGNED/mlnx-nfsrdma-signed/mlnx-nfsrdma-signed.spec +++ b/SPECS-SIGNED/mlnx-nfsrdma-signed/mlnx-nfsrdma-signed.spec @@ -37,14 +37,14 @@ %global KVERSION %{target_kernel_version_full} -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-1%{release_suffix}%{?dist}} %{!?_name: %define _name mlnx-nfsrdma} Summary: %{_name} Driver Name: %{_name}-signed -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -117,6 +117,9 @@ fi %config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{_name}-*.conf %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. 
+ * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS-SIGNED/mlnx-ofa_kernel-hwe-modules-signed/mlnx-ofa_kernel-hwe-modules-signed.spec b/SPECS-SIGNED/mlnx-ofa_kernel-hwe-modules-signed/mlnx-ofa_kernel-hwe-modules-signed.spec index 3851ed753c6..8a2964b78fe 100644 --- a/SPECS-SIGNED/mlnx-ofa_kernel-hwe-modules-signed/mlnx-ofa_kernel-hwe-modules-signed.spec +++ b/SPECS-SIGNED/mlnx-ofa_kernel-hwe-modules-signed/mlnx-ofa_kernel-hwe-modules-signed.spec @@ -32,7 +32,7 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} @@ -45,8 +45,8 @@ Summary: Infiniband HCA Driver Name: %{_name}-signed -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com/ Group: System Environment/Base @@ -88,10 +88,30 @@ Source26: smc_diag.ko Source27: rpcrdma.ko Source28: svcrdma.ko Source29: xprtrdma.ko +Source30: fwctl.ko +Source31: mlx5_fwctl.ko +Source32: mana_ib.ko +Source33: mlx5_dpll.ko +%ifarch aarch64 +Source34: rnbd-client.ko +Source35: rnbd-server.ko +Source36: iw_cxgb4.ko +Source37: erdma.ko +Source38: ib_mthca.ko +Source39: ocrdma.ko +Source40: qedr.ko +Source41: siw.ko +Source42: rtrs-client.ko +Source43: rtrs-core.ko +Source44: rtrs-server.ko +Source45: ib_srpt.ko +Source46: mlx5-vfio-pci.ko +Source47: 9pnet_rdma.ko +Source48: rds_rdma.ko +%endif Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 %description @@ -112,6 +132,8 @@ Obsoletes: mlnx-en-kmp-trace Obsoletes: mlnx-en-doc Obsoletes: mlnx-en-debuginfo Obsoletes: mlnx-en-sources +Obsoletes: fwctl-hwe <= 24.10 +Provides: fwctl-hwe = %{version}-%{release} Requires: kernel-hwe = %{target_kernel_version_full} Requires: kmod @@ -169,6 +191,27 @@ cp -rf %{SOURCE26} ./lib/modules/%{KVERSION}/updates/net/smc/smc_diag.ko cp -rf %{SOURCE27} ./lib/modules/%{KVERSION}/updates/net/sunrpc/xprtrdma/rpcrdma.ko cp -rf %{SOURCE28} ./lib/modules/%{KVERSION}/updates/net/sunrpc/xprtrdma/svcrdma.ko cp -rf %{SOURCE29} ./lib/modules/%{KVERSION}/updates/net/sunrpc/xprtrdma/xprtrdma.ko +cp -rf %{SOURCE30} ./lib/modules/%{KVERSION}/updates/drivers/fwctl/fwctl.ko +cp -rf %{SOURCE31} ./lib/modules/%{KVERSION}/updates/drivers/fwctl/mlx5/mlx5_fwctl.ko +cp -rf %{SOURCE32} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/mana/mana_ib.ko +cp -rf %{SOURCE33} ./lib/modules/%{KVERSION}/updates/drivers/net/ethernet/mellanox/mlx5/core/mlx5_dpll.ko +%ifarch aarch64 +cp -rf %{SOURCE34} ./lib/modules/%{KVERSION}/updates/drivers/block/rnbd/rnbd-client.ko +cp -rf %{SOURCE35} ./lib/modules/%{KVERSION}/updates/drivers/block/rnbd/rnbd-server.ko +cp -rf %{SOURCE36} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/cxgb4/iw_cxgb4.ko +cp -rf %{SOURCE37} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/erdma/erdma.ko +cp -rf %{SOURCE38} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/mthca/ib_mthca.ko +cp -rf %{SOURCE39} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/ocrdma/ocrdma.ko +cp -rf %{SOURCE40} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/qedr/qedr.ko +cp -rf %{SOURCE41} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/sw/siw/siw.ko +cp -rf %{SOURCE42} 
./lib/modules/%{KVERSION}/updates/drivers/infiniband/ulp/rtrs/rtrs-client.ko +cp -rf %{SOURCE43} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/ulp/rtrs/rtrs-core.ko +cp -rf %{SOURCE44} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/ulp/rtrs/rtrs-server.ko +cp -rf %{SOURCE45} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/ulp/srpt/ib_srpt.ko +cp -rf %{SOURCE46} ./lib/modules/%{KVERSION}/updates/drivers/vfio/pci/mlx5/mlx5-vfio-pci.ko +cp -rf %{SOURCE47} ./lib/modules/%{KVERSION}/updates/net/9p/9pnet_rdma.ko +cp -rf %{SOURCE48} ./lib/modules/%{KVERSION}/updates/net/rds/rds_rdma.ko +%endif popd @@ -194,6 +237,14 @@ fi %license %{_datadir}/licenses/%{_name}/copyright %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. +- Update additional kernel modules fwctl mana and mlx5_dpll included from 25.07 + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/mlnx-ofa_kernel-modules-signed/mlnx-ofa_kernel-modules-signed.spec b/SPECS-SIGNED/mlnx-ofa_kernel-modules-signed/mlnx-ofa_kernel-modules-signed.spec index ffc7688967b..26e840e9974 100644 --- a/SPECS-SIGNED/mlnx-ofa_kernel-modules-signed/mlnx-ofa_kernel-modules-signed.spec +++ b/SPECS-SIGNED/mlnx-ofa_kernel-modules-signed/mlnx-ofa_kernel-modules-signed.spec @@ -44,8 +44,8 @@ Summary: Infiniband HCA Driver Name: %{_name}-signed -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com/ Group: System Environment/Base @@ -87,6 +87,9 @@ Source26: smc_diag.ko Source27: rpcrdma.ko Source28: svcrdma.ko Source29: xprtrdma.ko +Source30: fwctl.ko +Source31: mlx5_fwctl.ko +Source32: mana_ib.ko Vendor: Microsoft Corporation Distribution: Azure Linux @@ -111,6 +114,8 @@ Obsoletes: mlnx-en-kmp-trace Obsoletes: mlnx-en-doc Obsoletes: mlnx-en-debuginfo Obsoletes: mlnx-en-sources +Obsoletes: fwctl <= 24.10 +Provides: fwctl = %{version}-%{release} Requires: kernel = %{target_kernel_version_full} Requires: kmod @@ -167,6 +172,9 @@ cp -rf %{SOURCE26} ./lib/modules/%{KVERSION}/updates/net/smc/smc_diag.ko cp -rf %{SOURCE27} ./lib/modules/%{KVERSION}/updates/net/sunrpc/xprtrdma/rpcrdma.ko cp -rf %{SOURCE28} ./lib/modules/%{KVERSION}/updates/net/sunrpc/xprtrdma/svcrdma.ko cp -rf %{SOURCE29} ./lib/modules/%{KVERSION}/updates/net/sunrpc/xprtrdma/xprtrdma.ko +cp -rf %{SOURCE30} ./lib/modules/%{KVERSION}/updates/drivers/fwctl/fwctl.ko +cp -rf %{SOURCE31} ./lib/modules/%{KVERSION}/updates/drivers/fwctl/mlx5/mlx5_fwctl.ko +cp -rf %{SOURCE32} ./lib/modules/%{KVERSION}/updates/drivers/infiniband/hw/mana/mana_ib.ko popd @@ -192,6 +200,10 @@ fi %license %{_datadir}/licenses/%{_name}/copyright %changelog +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. 
+- Update additional kernel modules fwctl mana and mlx5_dpll included from 25.07 + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump release to rebuild for new release diff --git a/SPECS-SIGNED/srp-hwe-signed/srp-hwe-signed.spec b/SPECS-SIGNED/srp-hwe-signed/srp-hwe-signed.spec index b1aea71fed6..1cbe2933d0e 100644 --- a/SPECS-SIGNED/srp-hwe-signed/srp-hwe-signed.spec +++ b/SPECS-SIGNED/srp-hwe-signed/srp-hwe-signed.spec @@ -33,7 +33,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -43,12 +43,12 @@ %global KVERSION %{target_kernel_version_full} %define _name srp-hwe -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-2%{release_suffix}%{?dist}} Summary: srp driver Name: %{_name}-signed -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -66,7 +66,6 @@ Source2: scsi_transport_srp.ko Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 %description srp kernel modules @@ -112,6 +111,13 @@ popd %license %{_datadir}/licenses/%{_name}/copyright %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/srp-signed/srp-signed.spec b/SPECS-SIGNED/srp-signed/srp-signed.spec index afa60162259..afd8edf4ce3 100644 --- a/SPECS-SIGNED/srp-signed/srp-signed.spec +++ b/SPECS-SIGNED/srp-signed/srp-signed.spec @@ -42,12 +42,12 @@ %global KVERSION %{target_kernel_version_full} %define _name srp -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-1%{release_suffix}%{?dist}} Summary: srp driver Name: %{_name}-signed -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base @@ -110,6 +110,9 @@ popd %license %{_datadir}/licenses/%{_name}/copyright %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS-SIGNED/systemd-boot-signed/systemd-boot-signed.spec b/SPECS-SIGNED/systemd-boot-signed/systemd-boot-signed.spec index b18985647d9..153a373eaf2 100644 --- a/SPECS-SIGNED/systemd-boot-signed/systemd-boot-signed.spec +++ b/SPECS-SIGNED/systemd-boot-signed/systemd-boot-signed.spec @@ -20,7 +20,7 @@ Version: 255 # determine the build information from local checkout Version: %(tools/meson-vcs-tag.sh . 
error | sed -r 's/-([0-9])/.^\1/; s/-g/_g/') %endif -Release: 24%{?dist} +Release: 25%{?dist} License: LGPL-2.1-or-later AND MIT AND GPL-2.0-or-later Vendor: Microsoft Corporation Distribution: Azure Linux @@ -98,6 +98,9 @@ popd /boot/efi/EFI/BOOT/%{grubefiname} %changelog +* Wed Nov 26 2025 Rohit Rawat - 255-25 +- Bump release to match systemd spec + * Tue Sep 16 2025 Akhila Guruju - 255-24 - Bump release to match systemd spec diff --git a/SPECS-SIGNED/xpmem-hwe-modules-signed/xpmem-hwe-modules-signed.spec b/SPECS-SIGNED/xpmem-hwe-modules-signed/xpmem-hwe-modules-signed.spec index 5d798ad5034..633c506cfe8 100644 --- a/SPECS-SIGNED/xpmem-hwe-modules-signed/xpmem-hwe-modules-signed.spec +++ b/SPECS-SIGNED/xpmem-hwe-modules-signed/xpmem-hwe-modules-signed.spec @@ -6,14 +6,14 @@ # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %global KVERSION %{target_kernel_version_full} %define _name xpmem-hwe-modules -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-2%{release_suffix}%{?dist}} # xpmem-modules is a sub-package in SPECS/xpmem. # We are making that into a main package for signing. @@ -21,14 +21,13 @@ Summary: Cross-partition memory Name: %{_name}-signed Version: 2.7.4 -Release: 24%{release_suffix}%{?dist} +Release: 26%{release_suffix}%{?dist} License: GPLv2 and LGPLv2.1 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux BuildRequires: automake autoconf URL: https://github.com/openucx/xpmem -ExclusiveArch: aarch64 # # To populate these sources: @@ -94,6 +93,13 @@ if [ $1 = 0 ]; then # 1 : Erase, not upgrade fi %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 2.7.4-26_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 2.7.4-25_6.12.57.1.1 +- Build with OFED 25.07.0.9.7.1. +- Enable build on x86_64 kernel hwe. + * Wed Nov 05 2025 Siddharth Chintamaneni - 2.7.4-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS-SIGNED/xpmem-modules-signed/xpmem-modules-signed.spec b/SPECS-SIGNED/xpmem-modules-signed/xpmem-modules-signed.spec index eabd4056bb1..420d3fe51a6 100644 --- a/SPECS-SIGNED/xpmem-modules-signed/xpmem-modules-signed.spec +++ b/SPECS-SIGNED/xpmem-modules-signed/xpmem-modules-signed.spec @@ -12,7 +12,7 @@ %global KVERSION %{target_kernel_version_full} %define _name xpmem-modules -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-1%{release_suffix}%{?dist}} # xpmem-modules is a sub-package in SPECS/xpmem. # We are making that into a main package for signing. @@ -20,7 +20,7 @@ Summary: Cross-partition memory Name: %{_name}-signed Version: 2.7.4 -Release: 21%{release_suffix}%{?dist} +Release: 22%{release_suffix}%{?dist} License: GPLv2 and LGPLv2.1 Group: System Environment/Libraries Vendor: Microsoft Corporation @@ -93,6 +93,9 @@ fi %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 2.7.4-22 +- Build with OFED 25.07.0.9.7.1. + * Fri Oct 10 2025 Pawel Winogrodzki - 2.7.4-21 - Bump mofed release number - Align %%post* scripts with other kmod packages. 
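The *-signed specs above all share the injection pattern spelled out in their comments: build the unsigned package, sign the desired .ko, place both next to the spec, then rebuild. A minimal sketch of that flow follows; the package and module names are illustrative only, since each spec hard-codes its own SOURCE entries and module paths.

```bash
# Hedged sketch of the shared signed-module injection flow used by the *-signed specs.
mkdir rpm_contents && pushd rpm_contents
rpm2cpio ../fwctl-unsigned.rpm | cpio -idmv                                   # unpack the unsigned RPM payload
cp -f ../signed/fwctl.ko "./lib/modules/${KVERSION}/updates/fwctl/fwctl.ko"   # overwrite with the signed binary
popd

# %install copies the tree verbatim; the '.'-relative form also picks up hidden files:
cp -rp rpm_contents/. "${BUILDROOT}/"

# %post / %postun refresh module metadata for the exact target kernel:
/sbin/depmod "${KVERSION}"
```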
diff --git a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json index da7a692d59f..482b00c69fd 100644 --- a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json +++ b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "SymCrypt-OpenSSL-1.9.3.tar.gz": "c86ad801c73ab133291457e425e3973ecfe9e1958290286d620d8a50123653df" + "SymCrypt-OpenSSL-1.9.4.tar.gz": "80c6a2d84f1bfd83e9e331bab941702733aebcf25625f2c490fb105bfd206503" } } diff --git a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec index 998fe7b3748..3431c0bbb51 100644 --- a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec +++ b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec @@ -1,6 +1,6 @@ Summary: The SymCrypt engine for OpenSSL (SCOSSL) allows the use of OpenSSL with SymCrypt as the provider for core cryptographic operations Name: SymCrypt-OpenSSL -Version: 1.9.3 +Version: 1.9.4 Release: 1%{?dist} License: MIT Vendor: Microsoft Corporation @@ -87,6 +87,9 @@ install SymCryptProvider/symcrypt_prov.cnf %{buildroot}%{_sysconfdir}/pki/tls/sy %dir %attr(1733, root, root) %{_localstatedir}/log/keysinuse/ %changelog +* Tue Oct 28 2025 CBL-Mariner Servicing Account - 1.9.4-1 +- Auto-upgrade to 1.9.4 - bug fixes + * Tue Sep 30 2025 CBL-Mariner Servicing Account - 1.9.3-1 - Auto-upgrade to 1.9.3 - bug fixes diff --git a/SPECS/avahi/CVE-2025-68276.patch b/SPECS/avahi/CVE-2025-68276.patch new file mode 100644 index 00000000000..b6e1a8cbab8 --- /dev/null +++ b/SPECS/avahi/CVE-2025-68276.patch @@ -0,0 +1,65 @@ +From 9aa21a5ba089c46301a1c489f9643a5900e2e4cb Mon Sep 17 00:00:00 2001 +From: Evgeny Vereshchagin +Date: Wed, 17 Dec 2025 08:11:23 +0000 +Subject: [PATCH] core: refuse to create wide-area record browsers when + wide-area is off + +It fixes a bug where it was possible for unprivileged local users to +crash avahi-daemon (with wide-area disabled) by creating record browsers +with the AVAHI_LOOKUP_USE_WIDE_AREA flag set via D-Bus (either by calling +the RecordBrowserNew method directly or by creating hostname/address/service +resolvers/browsers that create those browsers internally themselves). + +``` +$ gdbus call --system --dest org.freedesktop.Avahi --object-path / --method org.freedesktop.Avahi.Server.ResolveHostName -- -1 -1 yo.local -1 1 +Error: GDBus.Error:org.freedesktop.DBus.Error.NoReply: Message recipient disconnected from message bus without replying +``` +``` +dbus-protocol.c: interface=org.freedesktop.Avahi.Server, path=/, member=ResolveHostName +avahi-daemon: wide-area.c:725: avahi_wide_area_scan_cache: Assertion `e' failed. 
+==307948== +==307948== Process terminating with default action of signal 6 (SIGABRT) +==307948== at 0x4B3630C: __pthread_kill_implementation (pthread_kill.c:44) +==307948== by 0x4ADF921: raise (raise.c:26) +==307948== by 0x4AC74AB: abort (abort.c:77) +==307948== by 0x4AC741F: __assert_fail_base.cold (assert.c:118) +==307948== by 0x48D8B85: avahi_wide_area_scan_cache (wide-area.c:725) +==307948== by 0x48C8953: lookup_scan_cache (browse.c:351) +==307948== by 0x48C8B1B: lookup_go (browse.c:386) +==307948== by 0x48C9148: defer_callback (browse.c:516) +==307948== by 0x48AEA0E: expiration_event (timeeventq.c:94) +==307948== by 0x489D3AE: timeout_callback (simple-watch.c:447) +==307948== by 0x489D787: avahi_simple_poll_dispatch (simple-watch.c:563) +==307948== by 0x489D91E: avahi_simple_poll_iterate (simple-watch.c:605) +==307948== +``` + +wide-area has been disabled by default since +9c4214146738146e454f098264690e8e884c39bd (v0.9-rc2). + +https: //github.com/avahi/avahi/security/advisories/GHSA-mhf3-865v-g5rc +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/avahi/avahi/commit/0c013e2e819be3bda74cecf48b5f64956cf8a760.patch +--- + avahi-core/browse.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/avahi-core/browse.c b/avahi-core/browse.c +index 1cf0ee3..57435fc 100644 +--- a/avahi-core/browse.c ++++ b/avahi-core/browse.c +@@ -543,6 +543,11 @@ AvahiSRecordBrowser *avahi_s_record_browser_prepare( + AVAHI_CHECK_VALIDITY_RETURN_NULL(server, AVAHI_FLAGS_VALID(flags, AVAHI_LOOKUP_USE_WIDE_AREA|AVAHI_LOOKUP_USE_MULTICAST), AVAHI_ERR_INVALID_FLAGS); + AVAHI_CHECK_VALIDITY_RETURN_NULL(server, !(flags & AVAHI_LOOKUP_USE_WIDE_AREA) || !(flags & AVAHI_LOOKUP_USE_MULTICAST), AVAHI_ERR_INVALID_FLAGS); + ++ if ((flags & AVAHI_LOOKUP_USE_WIDE_AREA) && !server->wide_area_lookup_engine) { ++ avahi_server_set_errno(server, AVAHI_ERR_NOT_SUPPORTED); ++ return NULL; ++ } ++ + if (!(b = avahi_new(AvahiSRecordBrowser, 1))) { + avahi_server_set_errno(server, AVAHI_ERR_NO_MEMORY); + return NULL; +-- +2.45.4 + diff --git a/SPECS/avahi/CVE-2025-68468.patch b/SPECS/avahi/CVE-2025-68468.patch new file mode 100644 index 00000000000..88c4252265d --- /dev/null +++ b/SPECS/avahi/CVE-2025-68468.patch @@ -0,0 +1,28 @@ +From 2aad32193f8b4391309498c884d6f906eb962c01 Mon Sep 17 00:00:00 2001 +From: Hugo Muis <198191869+friendlyhugo@users.noreply.github.com> +Date: Sun, 2 Mar 2025 18:06:24 +0100 +Subject: [PATCH] core: fix DoS bug by removing incorrect assertion + +Closes https://github.com/avahi/avahi/issues/683 + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/avahi/avahi/commit/f66be13d7f31a3ef806d226bf8b67240179d309a.patch +--- + avahi-core/browse.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/avahi-core/browse.c b/avahi-core/browse.c +index e8a915e..2966f95 100644 +--- a/avahi-core/browse.c ++++ b/avahi-core/browse.c +@@ -295,7 +295,6 @@ static void lookup_multicast_callback( + lookup_drop_cname(l, interface, protocol, 0, r); + else { + /* It's a normal record, so let's call the user callback */ +- assert(avahi_key_equal(b->key, l->key)); + + b->callback(b, interface, protocol, event, r, flags, b->userdata); + } +-- +2.45.4 + diff --git a/SPECS/avahi/CVE-2025-68471.patch b/SPECS/avahi/CVE-2025-68471.patch new file mode 100644 index 00000000000..3057bb78f25 --- /dev/null +++ b/SPECS/avahi/CVE-2025-68471.patch @@ -0,0 +1,32 @@ +From 3c1445d537218a0678927dfbca54178b13062080 Mon Sep 17 00:00:00 2001 +From: Hugo Muis 
<198191869+friendlyhugo@users.noreply.github.com> +Date: Sun, 2 Mar 2025 18:06:24 +0100 +Subject: [PATCH] core: fix DoS bug by changing assert to return + +Closes https://github.com/avahi/avahi/issues/678 + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/avahi/avahi/pull/682/commits/9c6eb53bf2e290aed84b1f207e3ce35c54cc0aa1.patch +--- + avahi-core/browse.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/avahi-core/browse.c b/avahi-core/browse.c +index 2966f95..1cf0ee3 100644 +--- a/avahi-core/browse.c ++++ b/avahi-core/browse.c +@@ -319,7 +319,10 @@ static int lookup_start(AvahiSRBLookup *l) { + assert(l); + + assert(!(l->flags & AVAHI_LOOKUP_USE_WIDE_AREA) != !(l->flags & AVAHI_LOOKUP_USE_MULTICAST)); +- assert(!l->wide_area && !l->multicast); ++ if (l->wide_area || l->multicast) { ++ /* Avoid starting a duplicate lookup */ ++ return 0; ++ } + + if (l->flags & AVAHI_LOOKUP_USE_WIDE_AREA) { + +-- +2.45.4 + diff --git a/SPECS/avahi/CVE-2026-24401.patch b/SPECS/avahi/CVE-2026-24401.patch new file mode 100644 index 00000000000..86c351ea6a8 --- /dev/null +++ b/SPECS/avahi/CVE-2026-24401.patch @@ -0,0 +1,75 @@ +From 319fe039124f393a947686119ea3eafdc39d4008 Mon Sep 17 00:00:00 2001 +From: Hugo Muis <198191869+friendlyhugo@users.noreply.github.com> +Date: Sun, 2 Mar 2025 18:06:24 +0100 +Subject: [PATCH] core: fix uncontrolled recursion bug using a simple loop + detection algorithm + +Closes https://github.com/avahi/avahi/issues/501 + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/avahi/avahi/commit/78eab31128479f06e30beb8c1cbf99dd921e2524.patch +--- + avahi-core/browse.c | 40 ++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +diff --git a/avahi-core/browse.c b/avahi-core/browse.c +index 57435fc..d7d541b 100644 +--- a/avahi-core/browse.c ++++ b/avahi-core/browse.c +@@ -400,6 +400,40 @@ static int lookup_go(AvahiSRBLookup *l) { + return n; + } + ++static int lookup_exists_in_path(AvahiSRBLookup* lookup, AvahiSRBLookup* from, AvahiSRBLookup* to) { ++ AvahiRList* rl; ++ if (from == to) ++ return 0; ++ for (rl = from->cname_lookups; rl; rl = rl->rlist_next) { ++ int r = lookup_exists_in_path(lookup, rl->data, to); ++ if (r == 1) { ++ /* loop detected, propagate result */ ++ return r; ++ } else if (r == 0) { ++ /* is loop detected? 
*/ ++ return lookup == from; ++ } else { ++ /* `to` not found, continue */ ++ continue; ++ } ++ } ++ /* no path found */ ++ return -1; ++} ++ ++static int cname_would_create_loop(AvahiSRBLookup* l, AvahiSRBLookup* n) { ++ int ret; ++ if (l == n) ++ /* Loop to self */ ++ return 1; ++ ++ ret = lookup_exists_in_path(n, l->record_browser->root_lookup, l); ++ ++ /* Path to n always exists */ ++ assert(ret != -1); ++ return ret; ++} ++ + static void lookup_handle_cname(AvahiSRBLookup *l, AvahiIfIndex interface, AvahiProtocol protocol, AvahiLookupFlags flags, AvahiRecord *r) { + AvahiKey *k; + AvahiSRBLookup *n; +@@ -419,6 +453,12 @@ static void lookup_handle_cname(AvahiSRBLookup *l, AvahiIfIndex interface, Avahi + return; + } + ++ if (cname_would_create_loop(l, n)) { ++ /* CNAME loops are not allowed */ ++ lookup_unref(n); ++ return; ++ } ++ + l->cname_lookups = avahi_rlist_prepend(l->cname_lookups, lookup_ref(n)); + + lookup_go(n); +-- +2.45.4 + diff --git a/SPECS/avahi/avahi.spec b/SPECS/avahi/avahi.spec index 65e8fe36093..b2ffc517b8d 100644 --- a/SPECS/avahi/avahi.spec +++ b/SPECS/avahi/avahi.spec @@ -3,7 +3,7 @@ Summary: Local network service discovery Name: avahi Version: 0.8 -Release: 5%{?dist} +Release: 7%{?dist} License: LGPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -19,6 +19,10 @@ Patch6: CVE-2023-38473.patch Patch7: CVE-2023-38470.patch Patch8: CVE-2023-38471.patch Patch9: CVE-2024-52616.patch +Patch10: CVE-2025-68276.patch +Patch11: CVE-2025-68468.patch +Patch12: CVE-2025-68471.patch +Patch13: CVE-2026-24401.patch BuildRequires: automake BuildRequires: dbus-devel >= 0.90 BuildRequires: dbus-glib-devel >= 0.70 @@ -426,7 +430,13 @@ exit 0 %endif %changelog -* Thu Feb 13 2024 Kanishk Bansal - 0.8-5 +* Tue Jan 27 2026 Azure Linux Security Servicing Account - 0.8-7 +- Patch for CVE-2026-24401 + +* Wed Jan 14 2026 Azure Linux Security Servicing Account - 0.8-6 +- Patch for CVE-2025-68471, CVE-2025-68276, CVE-2025-68468 + +* Thu Feb 13 2025 Kanishk Bansal - 0.8-5 - Fix CVE-2024-52616 with an upstream patch * Mon Dec 02 2024 Kanishk Bansal - 0.8-4 diff --git a/SPECS/azurelinux-image-tools/azurelinux-image-tools.signatures.json b/SPECS/azurelinux-image-tools/azurelinux-image-tools.signatures.json index a582daa9578..cf646366838 100644 --- a/SPECS/azurelinux-image-tools/azurelinux-image-tools.signatures.json +++ b/SPECS/azurelinux-image-tools/azurelinux-image-tools.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { - "azurelinux-image-tools-1.0.0.tar.gz": "fac3fd48c1e0527109239deb8f4df7a526513409fa7474d1d7ffac978c1521d1", - "azurelinux-image-tools-1.0.0-vendor.tar.gz": "6fadff7d823a97658704183f028ecc94e9725fe3ffcba1e4eb48c2d291c184da" + "azurelinux-image-tools-1.1.0.tar.gz": "0d1c7bb68727f7b688153856b225d68447f64ea735f5dac9af39e2d7b1cc1ff2", + "azurelinux-image-tools-1.1.0-vendor.tar.gz": "10efb01240878ae7cae056580f84401df1bd9298a92f3ebac139f0ce5505ba0b" } } \ No newline at end of file diff --git a/SPECS/azurelinux-image-tools/azurelinux-image-tools.spec b/SPECS/azurelinux-image-tools/azurelinux-image-tools.spec index 0e4c92b6e37..e2708a956af 100644 --- a/SPECS/azurelinux-image-tools/azurelinux-image-tools.spec +++ b/SPECS/azurelinux-image-tools/azurelinux-image-tools.spec @@ -2,7 +2,7 @@ Summary: Azure Linux Image Tools Name: azurelinux-image-tools -Version: 1.0.0 +Version: 1.1.0 Release: 1%{?dist} License: MIT URL: https://github.com/microsoft/azure-linux-image-tools/ @@ -39,6 +39,7 @@ Requires: parted Requires: e2fsprogs Requires: dosfstools Requires: xfsprogs +Requires: 
btrfs-progs Requires: zstd Requires: veritysetup Requires: grub2 @@ -62,12 +63,12 @@ enabling DM-Verity. %prep %autosetup -p1 -n azure-linux-image-tools-%{version} -tar -xf %{SOURCE1} --no-same-owner -C toolkit/tools +tar -xf %{SOURCE1} --no-same-owner %build export GOPATH=%{our_gopath} export GOFLAGS="-mod=vendor" -make -C toolkit go-imagecustomizer REBUILD_TOOLS=y SKIP_LICENSE_SCAN=y +make -C toolkit go-imagecustomizer REBUILD_TOOLS=y SKIP_LICENSE_SCAN=y IMAGE_CUSTOMIZER_VERSION_PREVIEW= %install mkdir -p %{buildroot}%{_bindir} @@ -99,6 +100,9 @@ go test -C toolkit/tools ./... %{_libdir}/imagecustomizer/telemetry-requirements.txt %changelog +* Mon Dec 8 2025 Chris Gunn 1.1.0-1 +- Upgrade to version 1.1.0 + * Wed Sep 24 2025 Lanze Liu 1.0.0-1 - Upgrade to GA version 1.0.0-1 diff --git a/SPECS/azurelinux-image-tools/generate_source_tarball.sh b/SPECS/azurelinux-image-tools/generate_source_tarball.sh index a027ebcbd3a..7ba5a881770 100755 --- a/SPECS/azurelinux-image-tools/generate_source_tarball.sh +++ b/SPECS/azurelinux-image-tools/generate_source_tarball.sh @@ -96,7 +96,7 @@ fi echo "Unpacking source tarball..." tar -xf "$SOURCE_FILE" -cd "$NAME_VER/toolkit/tools" +cd "$NAME_VER" echo "Generate vendored modules tarball" go mod tidy diff --git a/SPECS/azurelinux-release/azurelinux-release.spec b/SPECS/azurelinux-release/azurelinux-release.spec index b06bf82e5ec..9d4bf63a971 100644 --- a/SPECS/azurelinux-release/azurelinux-release.spec +++ b/SPECS/azurelinux-release/azurelinux-release.spec @@ -5,7 +5,7 @@ Summary: Azure Linux release files Name: azurelinux-release Version: %{dist_version}.0 -Release: 37%{?dist} +Release: 39%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -118,6 +118,12 @@ install -Dm0644 %{SOURCE4} -t %{buildroot}%{_sysctldir}/ %{_sysctldir}/*.conf %changelog +* Mon Jan 19 2026 CBL-Mariner Servicing Account - 3.0-39 +- Bump release for Feb 2026 Update + +* Tue Dec 30 2025 CBL-Mariner Servicing Account - 3.0-38 +- Bump release for January 2026 Update + * Fri Dec 05 2025 CBL-Mariner Servicing Account - 3.0-37 - Bump release for December 2025 Update diff --git a/SPECS/bcc/bcc.spec b/SPECS/bcc/bcc.spec index 873e492c5bb..cf4b0c9dad5 100644 --- a/SPECS/bcc/bcc.spec +++ b/SPECS/bcc/bcc.spec @@ -2,7 +2,7 @@ Summary: BPF Compiler Collection (BCC) Name: bcc Version: 0.29.1 -Release: 3%{?dist} +Release: 4%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -64,6 +64,14 @@ Requires: python3-%{name} = %{version}-%{release} %description tools Command line tools for BPF Compiler Collection (BCC) +%package -n libbpf-tools +Summary: Command line libbpf tools for BPF Compiler Collection (BCC) +BuildRequires: libbpf-devel +BuildRequires: bpftool + +%description -n libbpf-tools +Command line libbpf tools for BPF Compiler Collection (BCC) + %prep %autosetup -p1 -n %{name} @@ -77,12 +85,48 @@ cmake .. \ -DPYTHON_CMD=python3 \ -DREVISION_LAST=%{version} \ -DREVISION=%{version} + make %{?_smp_mflags} popd +# It was discussed and agreed to package libbpf-tools with +# 'bpf-' prefix (https://github.com/iovisor/bcc/pull/3263) +# Installing libbpf-tools binaries in temp directory and +# renaming them in there and the install code will just +# take them. 
+# Note this is no longer needed in versions which contain +# commit https://github.com/iovisor/bcc/commit/3469bf1d94a6b8f5deb34586e6c8e4ffec8dd0be +# as APP_PREFIX can be used (ex: APP_PREFIX='bpf-' \) + +pushd libbpf-tools +make BPFTOOL=bpftool CFLAGS="%{optflags}" LDFLAGS="%{build_ldflags}" +make DESTDIR=./tmp-install prefix= install +( + cd tmp-install/bin + for file in *; do + mv $file bpf-$file + done + # now fix the broken symlinks + for file in `find . -type l`; do + dest=$(readlink "$file") + ln -s -f bpf-$dest $file + done +) +popd %install pushd build make install/strip DESTDIR=%{buildroot} +popd + +# Install libbpf-tools +# We cannot use `install` because some of the tools are symlinks and `install` +# follows those. Since all the tools already have the correct permissions set, +# we just need to copy them to the right place while preserving those +pushd libbpf-tools +mkdir -p %{buildroot}%{_sbindir} +install -m 755 tmp-install/bin/* %{buildroot}%{_sbindir}/ +popd + # mangle shebangs find %{buildroot}/usr/share/bcc/{tools,examples} -type f -exec \ sed -i -e '1 s|^#!/usr/bin/python$|#!'%{__python3}'|' \ @@ -123,7 +167,13 @@ find %{buildroot}%{_lib64dir} -name '*.a' -delete %{_datadir}/%{name}/tools/* %{_datadir}/%{name}/man/* +%files -n libbpf-tools +%{_sbindir}/bpf-* + %changelog +* Tue Dec 16 2025 Rachel Menge - 0.29.1-4 +- Add libbpf-tools subpackage + * Mon Apr 14 2025 Jyoti Kanase - 0.29.1-3 - Patch CVE-2025-29481 diff --git a/SPECS/bind/bind.signatures.json b/SPECS/bind/bind.signatures.json index 92596e61ff8..d5caa38c41d 100644 --- a/SPECS/bind/bind.signatures.json +++ b/SPECS/bind/bind.signatures.json @@ -14,6 +14,6 @@ "named.rwtab": "6a4c84b6709211d09f2d71491d4c66d1d4c0115a9db247a5ed2a9db10e575735", "named.sysconfig": "8f8eff846667b7811358e289e9fe594de17d0e47f2b8cebf7840ad8db7f34816", "setup-named-chroot.sh": "786fbc88c7929fadf217cf2286f2eb03b6fba14843e5da40ad43c0022dd71c3a", - "bind-9.20.15.tar.xz": "d62b38fae48ba83fca6181112d0c71018d8b0f2ce285dc79dc6a0367722ccabb" + "bind-9.20.18.tar.xz": "dfc546c990ac4515529cd45c4dd995862b18ae8a2d0cb29208e8896a5d325331" } } diff --git a/SPECS/bind/bind.spec b/SPECS/bind/bind.spec index 05d57eea952..b140f5a3221 100644 --- a/SPECS/bind/bind.spec +++ b/SPECS/bind/bind.spec @@ -9,7 +9,7 @@ Summary: Domain Name System software Name: bind -Version: 9.20.15 +Version: 9.20.18 Release: 1%{?dist} License: ISC Vendor: Microsoft Corporation @@ -536,6 +536,9 @@ fi; %{_mandir}/man1/named-nzd2nzf.1* %changelog +* Wed Jan 21 2026 CBL-Mariner Servicing Account - 9.20.18-1 +- Auto-upgrade to 9.20.18 - for CVE-2025-13878 + * Fri Oct 24 2025 Kanishk Bansal - 9.20.15-1 - Auto-upgrade to 9.20.15 - for CVE-2025-40778, CVE-2025-8677, CVE-2025-40780 diff --git a/SPECS/bind/nongit-fix.patch b/SPECS/bind/nongit-fix.patch index e46c6384419..02c68306214 100644 --- a/SPECS/bind/nongit-fix.patch +++ b/SPECS/bind/nongit-fix.patch @@ -1,6 +1,6 @@ -From 701de7b23826b983ba5ad674ca85e1ab607b79f6 Mon Sep 17 00:00:00 2001 +From 440bfb303f5c0f5824dc744bb6cf41bc87899609 Mon Sep 17 00:00:00 2001 From: Kanishk Bansal -Date: Fri, 24 Oct 2025 13:43:37 +0000 +Date: Wed, 21 Jan 2026 21:40:19 +0000 Subject: [PATCH] nongit-fix.patch --- @@ -8,11 +8,11 @@ Subject: [PATCH] nongit-fix.patch 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/configure.ac b/configure.ac -index a79b9d6..514587d 100644 +index 057ae04..35102b8 100644 --- a/configure.ac +++ b/configure.ac @@ -19,7 +19,7 @@ m4_define([bind_VERSION_MINOR], 20)dnl - m4_define([bind_VERSION_PATCH], 15)dnl + 
m4_define([bind_VERSION_PATCH], 18)dnl m4_define([bind_VERSION_EXTRA], )dnl m4_define([bind_DESCRIPTION], [(Stable Release)])dnl -m4_define([bind_SRCID], [m4_esyscmd_s([git rev-parse --short HEAD | cut -b1-7])])dnl diff --git a/SPECS/busybox/busybox.spec b/SPECS/busybox/busybox.spec index 3bf09975319..1ddc210f915 100644 --- a/SPECS/busybox/busybox.spec +++ b/SPECS/busybox/busybox.spec @@ -1,7 +1,7 @@ Summary: Statically linked binary providing simplified versions of system commands Name: busybox Version: 1.36.1 -Release: 19%{?dist} +Release: 21%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -19,7 +19,7 @@ Patch5: CVE-2023-42366.patch Patch6: CVE-2023-39810.patch Patch7: CVE-2022-48174.patch BuildRequires: gcc -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: libselinux-devel >= 1.27.7-2 BuildRequires: libsepol-devel %if 0%{?with_check} @@ -106,6 +106,12 @@ SKIP_KNOWN_BUGS=1 ./runtest %{_mandir}/man1/busybox.petitboot.1.gz %changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.36.1-21 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.36.1-20 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 1.36.1-19 - Bump to rebuild with updated glibc diff --git a/SPECS/cloud-hypervisor/cloud-hypervisor.signatures.json b/SPECS/cloud-hypervisor/cloud-hypervisor.signatures.json index 8c94b1c914e..b7ba1acf843 100644 --- a/SPECS/cloud-hypervisor/cloud-hypervisor.signatures.json +++ b/SPECS/cloud-hypervisor/cloud-hypervisor.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { - "cloud-hypervisor-41.0.139-vendor.tar.gz": "44d4f5770968c2482d7da25bb18cd399b8eb9b6a5f1de5aa816083954d1c8241", - "cloud-hypervisor-41.0.139.tar.gz": "116191af642c8c57710205cefab0787a5fd903e3a946638b1380cac732a8e381" + "cloud-hypervisor-48.0.246.tar.gz": "627775016abe81d478258065e41495d11822dde6a892cab5d234eefc6ffed802", + "cloud-hypervisor-48.0.246-vendor.tar.gz": "a314204b25b980f7055f5ac4bcdbb43051e8f7d29c3c05468653475bda3d87fe" } -} \ No newline at end of file +} diff --git a/SPECS/cloud-hypervisor/cloud-hypervisor.spec b/SPECS/cloud-hypervisor/cloud-hypervisor.spec index 2ed1c588719..750cec90eee 100644 --- a/SPECS/cloud-hypervisor/cloud-hypervisor.spec +++ b/SPECS/cloud-hypervisor/cloud-hypervisor.spec @@ -4,7 +4,7 @@ Name: cloud-hypervisor Summary: Cloud Hypervisor is an open source Virtual Machine Monitor (VMM) that runs on top of the KVM hypervisor and the Microsoft Hypervisor (MSHV). 
-Version: 41.0.139 +Version: 48.0.246 Release: 1%{?dist} License: ASL 2.0 OR BSD-3-clause Vendor: Microsoft Corporation @@ -137,6 +137,12 @@ cargo build --release --target=%{rust_musl_target} %{cargo_pkg_feature_opts} %{c %license LICENSES/CC-BY-4.0.txt %changelog +* Fri Jan 23 2026 CBL-Mariner Servicing Account - 48.0.246-1 +- Auto-upgrade to 48.0.246 + +* Wed Oct 15 2025 Kavya Sree Kaitepalli - 41.0.139-3 +- Bump release to rebuild with rust + * Thu Oct 09 2025 Saul Paredes - 41.0.139-2 - Enable build on aarch64 diff --git a/SPECS/cmake/CVE-2025-14017.patch b/SPECS/cmake/CVE-2025-14017.patch new file mode 100644 index 00000000000..adfeb603e78 --- /dev/null +++ b/SPECS/cmake/CVE-2025-14017.patch @@ -0,0 +1,117 @@ +From 1d14696f2939b065332bcd54a42fbac46bee9ff5 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Fri, 9 Jan 2026 04:45:45 +0000 +Subject: [PATCH] ldap: call ldap_init() before setting the options + +Closes #19830 + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/curl/curl/commit/39d1976b7f709a516e324333.patch +--- + Utilities/cmcurl/lib/ldap.c | 49 ++++++++++++++----------------------- + 1 file changed, 19 insertions(+), 30 deletions(-) + +diff --git a/Utilities/cmcurl/lib/ldap.c b/Utilities/cmcurl/lib/ldap.c +index 678b4d5a..b664e991 100644 +--- a/Utilities/cmcurl/lib/ldap.c ++++ b/Utilities/cmcurl/lib/ldap.c +@@ -364,16 +364,29 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + passwd = conn->passwd; + } + ++#ifdef USE_WIN32_LDAP ++ if(ldap_ssl) ++ server = ldap_sslinit(host, conn->primary.remote_port, 1); ++ else ++#else ++ server = ldap_init(host, conn->primary.remote_port); ++#endif ++ if(!server) { ++ failf(data, "LDAP: cannot setup connect to %s:%u", ++ conn->host.dispname, conn->primary.remote_port); ++ result = CURLE_COULDNT_CONNECT; ++ goto quit; ++ } ++ + #ifdef LDAP_OPT_NETWORK_TIMEOUT +- ldap_set_option(NULL, LDAP_OPT_NETWORK_TIMEOUT, &ldap_timeout); ++ ldap_set_option(server, LDAP_OPT_NETWORK_TIMEOUT, &ldap_timeout); + #endif +- ldap_set_option(NULL, LDAP_OPT_PROTOCOL_VERSION, &ldap_proto); ++ ldap_set_option(server, LDAP_OPT_PROTOCOL_VERSION, &ldap_proto); + + if(ldap_ssl) { + #ifdef HAVE_LDAP_SSL + #ifdef USE_WIN32_LDAP + /* Win32 LDAP SDK doesn't support insecure mode without CA! 
*/ +- server = ldap_sslinit(host, conn->primary.remote_port, 1); + ldap_set_option(server, LDAP_OPT_SSL, LDAP_OPT_ON); + #else + int ldap_option; +@@ -441,7 +454,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + goto quit; + } + infof(data, "LDAP local: using PEM CA cert: %s", ldap_ca); +- rc = ldap_set_option(NULL, LDAP_OPT_X_TLS_CACERTFILE, ldap_ca); ++ rc = ldap_set_option(server, LDAP_OPT_X_TLS_CACERTFILE, ldap_ca); + if(rc != LDAP_SUCCESS) { + failf(data, "LDAP local: ERROR setting PEM CA cert: %s", + ldap_err2string(rc)); +@@ -453,20 +466,13 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + else + ldap_option = LDAP_OPT_X_TLS_NEVER; + +- rc = ldap_set_option(NULL, LDAP_OPT_X_TLS_REQUIRE_CERT, &ldap_option); ++ rc = ldap_set_option(server, LDAP_OPT_X_TLS_REQUIRE_CERT, &ldap_option); + if(rc != LDAP_SUCCESS) { + failf(data, "LDAP local: ERROR setting cert verify mode: %s", + ldap_err2string(rc)); + result = CURLE_SSL_CERTPROBLEM; + goto quit; + } +- server = ldap_init(host, conn->primary.remote_port); +- if(!server) { +- failf(data, "LDAP local: Cannot connect to %s:%u", +- conn->host.dispname, conn->primary.remote_port); +- result = CURLE_COULDNT_CONNECT; +- goto quit; +- } + ldap_option = LDAP_OPT_X_TLS_HARD; + rc = ldap_set_option(server, LDAP_OPT_X_TLS, &ldap_option); + if(rc != LDAP_SUCCESS) { +@@ -475,15 +481,6 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + result = CURLE_SSL_CERTPROBLEM; + goto quit; + } +-/* +- rc = ldap_start_tls_s(server, NULL, NULL); +- if(rc != LDAP_SUCCESS) { +- failf(data, "LDAP local: ERROR starting SSL/TLS mode: %s", +- ldap_err2string(rc)); +- result = CURLE_SSL_CERTPROBLEM; +- goto quit; +- } +-*/ + #else + (void)ldap_option; + (void)ldap_ca; +@@ -502,15 +499,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + result = CURLE_NOT_BUILT_IN; + goto quit; + } +- else { +- server = ldap_init(host, conn->primary.remote_port); +- if(!server) { +- failf(data, "LDAP local: Cannot connect to %s:%u", +- conn->host.dispname, conn->primary.remote_port); +- result = CURLE_COULDNT_CONNECT; +- goto quit; +- } +- } ++ + #ifdef USE_WIN32_LDAP + ldap_set_option(server, LDAP_OPT_PROTOCOL_VERSION, &ldap_proto); + rc = ldap_win_bind(data, server, user, passwd); +-- +2.45.4 + diff --git a/SPECS/cmake/cmake.spec b/SPECS/cmake/cmake.spec index d4d4d7da5ea..171d174c9ee 100644 --- a/SPECS/cmake/cmake.spec +++ b/SPECS/cmake/cmake.spec @@ -2,7 +2,7 @@ Summary: Cmake Name: cmake Version: 3.30.3 -Release: 10%{?dist} +Release: 11%{?dist} License: BSD AND LGPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -32,6 +32,7 @@ Patch12: CVE-2025-5917.patch Patch13: CVE-2025-5918.patch Patch14: CVE-2025-9301.patch Patch15: CVE-2025-10148.patch +Patch16: CVE-2025-14017.patch BuildRequires: bzip2 BuildRequires: bzip2-devel @@ -112,6 +113,9 @@ bin/ctest --force-new-ctest-process --rerun-failed --output-on-failure %{_libdir}/rpm/macros.d/macros.cmake %changelog +* Fri Jan 09 2026 Azure Linux Security Servicing Account - 3.30.3-11 +- Patch for CVE-2025-14017 + * Sat Sep 13 2025 Azure Linux Security Servicing Account - 3.30.3-10 - Patch for CVE-2025-10148 diff --git a/SPECS/cni-plugins/CVE-2025-65637.patch b/SPECS/cni-plugins/CVE-2025-65637.patch new file mode 100644 index 00000000000..9d869bcaa62 --- /dev/null +++ b/SPECS/cni-plugins/CVE-2025-65637.patch @@ -0,0 +1,136 @@ +From 5819ef66baec7c9b3fa60fb910cf932816f09d9c Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 1/2] 
This commit fixes a potential denial of service + vulnerability in logrus.Writer() that could be triggered by logging text + longer than 64kb without newlines. Previously, the bufio.Scanner used by + Writer() would hang indefinitely when reading such text without newlines, + causing the application to become unresponsive. + +--- + vendor/github.com/sirupsen/logrus/writer.go | 33 ++++++++++++++++++++- + 1 file changed, 32 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 72e8e3a..36032d0 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -4,6 +4,7 @@ import ( + "bufio" + "io" + "runtime" ++ "strings" + ) + + // Writer at INFO level. See WriterLevel for details. +@@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) + } + ++// Writer returns an io.Writer that writes to the logger at the info log level + func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) + } + ++// WriterLevel returns an io.Writer that writes to the logger at the given log level + func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + ++ // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace +@@ -48,23 +52,50 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + printFunc = entry.Print + } + ++ // Start a new goroutine to scan the input and write it to the logger using the specified print function. ++ // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
+ go entry.writerScanner(reader, printFunc) ++ ++ // Set a finalizer function to close the writer when it is garbage collected + runtime.SetFinalizer(writer, writerFinalizer) + + return writer + } + ++// writerScanner scans the input from the reader and writes it to the logger + func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) ++ ++ // Set the buffer size to the maximum token size to avoid buffer overflows ++ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) ++ ++ // Define a split function to split the input into chunks of up to 64KB ++ chunkSize := 64 * 1024 // 64KB ++ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { ++ if len(data) > chunkSize { ++ return chunkSize, data[:chunkSize], nil ++ } ++ return 0, nil, nil ++ } ++ ++ //Use the custom split function to split the input ++ scanner.Split(splitFunc) ++ ++ // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { +- printFunc(scanner.Text()) ++ printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } ++ ++ // If there was an error while scanning the input, log an error + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } ++ ++ // Close the reader when we are done + reader.Close() + } + ++// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected + func writerFinalizer(writer *io.PipeWriter) { + writer.Close() + } +-- +2.45.4 + + +From 980d2232c084024e25540aee091fafe319d20932 Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 2/2] Scan text in 64KB chunks + +This commit fixes a potential denial of service +vulnerability in logrus.Writer() that could be +triggered by logging text longer than 64KB +without newlines. Previously, the bufio.Scanner +used by Writer() would hang indefinitely when +reading such text without newlines, causing the +application to become unresponsive. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/sirupsen/logrus/pull/1376.patch +--- + vendor/github.com/sirupsen/logrus/writer.go | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 36032d0..7e7703c 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -75,7 +75,8 @@ func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ... 
+ if len(data) > chunkSize { + return chunkSize, data[:chunkSize], nil + } +- return 0, nil, nil ++ ++ return len(data), data, nil + } + + //Use the custom split function to split the input +-- +2.45.4 + diff --git a/SPECS/cni-plugins/cni-plugins.spec b/SPECS/cni-plugins/cni-plugins.spec index a64eda5ab4a..0d8fb53f2e3 100644 --- a/SPECS/cni-plugins/cni-plugins.spec +++ b/SPECS/cni-plugins/cni-plugins.spec @@ -1,7 +1,7 @@ Summary: Container Network Interface (CNI) plugins Name: cni-plugins Version: 1.4.0 -Release: 3%{?dist} +Release: 4%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -12,6 +12,7 @@ URL: https://github.com/containernetworking/plugins Source0: %{name}-%{version}.tar.gz Patch0: CVE-2024-45338.patch Patch1: CVE-2025-22872.patch +Patch2: CVE-2025-65637.patch %define _default_cni_plugins_dir /opt/cni/bin BuildRequires: golang >= 1.5 @@ -42,6 +43,9 @@ make -k check |& tee %{_specdir}/%{name}-check-log || %{nocheck} %{_default_cni_plugins_dir}/* %changelog +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 1.4.0-4 +- Patch for CVE-2025-65637 + * Mon Apr 28 2025 Sreeniavsulu Malavathula - 1.4.0-3 - Patch CVE-2025-22872 diff --git a/SPECS/collectd/collectd.spec b/SPECS/collectd/collectd.spec index 27e2f963c78..de1855fffa1 100644 --- a/SPECS/collectd/collectd.spec +++ b/SPECS/collectd/collectd.spec @@ -3,7 +3,7 @@ Summary: Statistics collection daemon for filling RRD files Name: collectd Version: 5.12.0 -Release: 10%{?dist} +Release: 11%{?dist} License: GPLv2 AND MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -867,6 +867,9 @@ make check %{_libdir}/collectd/write_tsdb.so %changelog +* Tue Jan 06 2026 Pawel Winogrodzki - 5.12.0-11 +- Bumping release to rebuild with new 'net-snmp' libs. + * Wed Apr 17 2024 Andrew Phelps - 5.12.0-10 - Add patch to fix build break. 
diff --git a/SPECS/containerd2/containerd2.spec b/SPECS/containerd2/containerd2.spec index 0c16c306eee..992e9219305 100644 --- a/SPECS/containerd2/containerd2.spec +++ b/SPECS/containerd2/containerd2.spec @@ -5,7 +5,7 @@ Summary: Industry-standard container runtime Name: %{upstream_name}2 Version: 2.0.0 -Release: 16%{?dist} +Release: 17%{?dist} License: ASL 2.0 Group: Tools/Container URL: https://www.containerd.io @@ -25,6 +25,7 @@ Patch5: multi-snapshotters-support.patch Patch6: tardev-support.patch Patch7: CVE-2024-25621.patch Patch8: CVE-2025-64329.patch +Patch9: fix-credential-leak-in-cri-errors.patch %{?systemd_requires} BuildRequires: golang < 1.25 @@ -100,6 +101,9 @@ fi %dir /opt/containerd/lib %changelog +* Tue Jan 21 2026 Aadhar Agarwal - 2.0.0-17 +- Backport fix for credential leak in CRI error logs + * Mon Nov 24 2025 Azure Linux Security Servicing Account - 2.0.0-16 - Patch for CVE-2025-64329 diff --git a/SPECS/containerd2/fix-credential-leak-in-cri-errors.patch b/SPECS/containerd2/fix-credential-leak-in-cri-errors.patch new file mode 100644 index 00000000000..909c179c259 --- /dev/null +++ b/SPECS/containerd2/fix-credential-leak-in-cri-errors.patch @@ -0,0 +1,401 @@ +From a34e45d0fa2a7ddefff1a0871c9bf9e3c62bda17 Mon Sep 17 00:00:00 2001 +From: Andrey Noskov +Date: Thu, 6 Nov 2025 13:34:38 +0100 +Subject: [PATCH 1/2] fix: redact all query parameters in CRI error logs + +Signed-off-by: Andrey Noskov +--- + .../cri/instrument/instrumented_service.go | 8 ++ + internal/cri/util/sanitize.go | 93 +++++++++++++ + internal/cri/util/sanitize_test.go | 128 ++++++++++++++++++ + 3 files changed, 229 insertions(+) + create mode 100644 internal/cri/util/sanitize.go + create mode 100644 internal/cri/util/sanitize_test.go + +diff --git a/internal/cri/instrument/instrumented_service.go b/internal/cri/instrument/instrumented_service.go +index c2f5c8de99..f06315a6bd 100644 +--- a/internal/cri/instrument/instrumented_service.go ++++ b/internal/cri/instrument/instrumented_service.go +@@ -351,6 +351,8 @@ func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullIma + log.G(ctx).Infof("PullImage %q", r.GetImage().GetImage()) + defer func() { + if err != nil { ++ // Sanitize error to remove sensitive information ++ err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("PullImage %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Infof("PullImage %q returns image reference %q", +@@ -369,6 +371,8 @@ func (in *instrumentedService) ListImages(ctx context.Context, r *runtime.ListIm + log.G(ctx).Tracef("ListImages with filter %+v", r.GetFilter()) + defer func() { + if err != nil { ++ // Sanitize error to remove sensitive information ++ err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("ListImages with filter %+v failed", r.GetFilter()) + } else { + log.G(ctx).Tracef("ListImages with filter %+v returns image list %+v", +@@ -386,6 +390,8 @@ func (in *instrumentedService) ImageStatus(ctx context.Context, r *runtime.Image + log.G(ctx).Tracef("ImageStatus for %q", r.GetImage().GetImage()) + defer func() { + if err != nil { ++ // Sanitize error to remove sensitive information ++ err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("ImageStatus for %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Tracef("ImageStatus for %q returns image status %+v", +@@ -404,6 +410,8 @@ func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime.Remov + log.G(ctx).Infof("RemoveImage %q", r.GetImage().GetImage()) + defer func() 
{ + if err != nil { ++ // Sanitize error to remove sensitive information ++ err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("RemoveImage %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Infof("RemoveImage %q returns successfully", r.GetImage().GetImage()) +diff --git a/internal/cri/util/sanitize.go b/internal/cri/util/sanitize.go +new file mode 100644 +index 0000000000..d50a15ebf6 +--- /dev/null ++++ b/internal/cri/util/sanitize.go +@@ -0,0 +1,93 @@ ++/* ++ Copyright The containerd Authors. ++ ++ Licensed under the Apache License, Version 2.0 (the "License"); ++ you may not use this file except in compliance with the License. ++ You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++ Unless required by applicable law or agreed to in writing, software ++ distributed under the License is distributed on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ See the License for the specific language governing permissions and ++ limitations under the License. ++*/ ++ ++package util ++ ++import ( ++ "errors" ++ "net/url" ++ "strings" ++) ++ ++// SanitizeError sanitizes an error by redacting sensitive information in URLs. ++// If the error contains a *url.Error, it parses and sanitizes the URL. ++// Otherwise, it returns the error unchanged. ++func SanitizeError(err error) error { ++ if err == nil { ++ return nil ++ } ++ ++ // Check if the error is or contains a *url.Error ++ var urlErr *url.Error ++ if errors.As(err, &urlErr) { ++ // Parse and sanitize the URL ++ sanitizedURL := sanitizeURL(urlErr.URL) ++ if sanitizedURL != urlErr.URL { ++ // Wrap with sanitized url.Error ++ return &sanitizedError{ ++ original: err, ++ sanitizedURL: sanitizedURL, ++ urlError: urlErr, ++ } ++ } ++ return err ++ } ++ ++ // No sanitization needed for non-URL errors ++ return err ++} ++ ++// sanitizeURL properly parses a URL and redacts all query parameters. ++func sanitizeURL(rawURL string) string { ++ parsed, err := url.Parse(rawURL) ++ if err != nil { ++ // If URL parsing fails, return original (malformed URLs shouldn't leak tokens) ++ return rawURL ++ } ++ ++ // Check if URL has query parameters ++ query := parsed.Query() ++ if len(query) == 0 { ++ return rawURL ++ } ++ ++ // Redact all query parameters ++ for param := range query { ++ query.Set(param, "[REDACTED]") ++ } ++ ++ // Reconstruct URL with sanitized query ++ parsed.RawQuery = query.Encode() ++ return parsed.String() ++} ++ ++// sanitizedError wraps an error containing a *url.Error with a sanitized URL. ++type sanitizedError struct { ++ original error ++ sanitizedURL string ++ urlError *url.Error ++} ++ ++// Error returns the error message with the sanitized URL. ++func (e *sanitizedError) Error() string { ++ // Replace all occurrences of the original URL with the sanitized version ++ return strings.ReplaceAll(e.original.Error(), e.urlError.URL, e.sanitizedURL) ++} ++ ++// Unwrap returns the original error for error chain traversal. ++func (e *sanitizedError) Unwrap() error { ++ return e.original ++} +diff --git a/internal/cri/util/sanitize_test.go b/internal/cri/util/sanitize_test.go +new file mode 100644 +index 0000000000..03e4fb2694 +--- /dev/null ++++ b/internal/cri/util/sanitize_test.go +@@ -0,0 +1,128 @@ ++/* ++ Copyright The containerd Authors. ++ ++ Licensed under the Apache License, Version 2.0 (the "License"); ++ you may not use this file except in compliance with the License. 
++ You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++ Unless required by applicable law or agreed to in writing, software ++ distributed under the License is distributed on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ See the License for the specific language governing permissions and ++ limitations under the License. ++*/ ++ ++package util ++ ++import ( ++ "errors" ++ "fmt" ++ "net/url" ++ "testing" ++ ++ "github.com/stretchr/testify/assert" ++ "github.com/stretchr/testify/require" ++) ++ ++func TestSanitizeError_SimpleURLError(t *testing.T) { ++ // Create a url.Error with sensitive info ++ originalURL := "https://storage.blob.core.windows.net/container/blob?sig=SECRET&sv=2020" ++ urlErr := &url.Error{ ++ Op: "Get", ++ URL: originalURL, ++ Err: fmt.Errorf("connection timeout"), ++ } ++ ++ // Sanitize ++ sanitized := SanitizeError(urlErr) ++ require.NotNil(t, sanitized) ++ ++ // Check it's a sanitizedError with correct properties ++ sanitizedErr, ok := sanitized.(*sanitizedError) ++ require.True(t, ok, "Should return *sanitizedError type") ++ assert.Equal(t, urlErr, sanitizedErr.original) ++ assert.Equal(t, urlErr, sanitizedErr.urlError) ++ assert.Equal(t, "https://storage.blob.core.windows.net/container/blob?sig=%5BREDACTED%5D&sv=%5BREDACTED%5D", sanitizedErr.sanitizedURL) ++ ++ // Test Error() method - verifies ReplaceAll functionality ++ expected := "Get \"https://storage.blob.core.windows.net/container/blob?sig=%5BREDACTED%5D&sv=%5BREDACTED%5D\": connection timeout" ++ assert.Equal(t, expected, sanitized.Error()) ++} ++ ++func TestSanitizeError_WrappedError(t *testing.T) { ++ originalURL := "https://storage.blob.core.windows.net/blob?sig=SECRET&sv=2020" ++ urlErr := &url.Error{ ++ Op: "Get", ++ URL: originalURL, ++ Err: fmt.Errorf("timeout"), ++ } ++ ++ wrappedErr := fmt.Errorf("image pull failed: %w", urlErr) ++ ++ // Sanitize ++ sanitized := SanitizeError(wrappedErr) ++ ++ // Test Error() method with wrapped error - verifies ReplaceAll works in wrapped context ++ sanitizedMsg := sanitized.Error() ++ assert.NotContains(t, sanitizedMsg, "SECRET", "Secret should be sanitized") ++ assert.Contains(t, sanitizedMsg, "image pull failed", "Wrapper message should be preserved") ++ assert.Contains(t, sanitizedMsg, "%5BREDACTED%5D", "Should contain sanitized marker") ++ ++ // Should still be able to unwrap to url.Error ++ var targetURLErr *url.Error ++ assert.True(t, errors.As(sanitized, &targetURLErr), ++ "Should be able to find *url.Error in sanitized error chain") ++ ++ // Verify url.Error properties are preserved ++ assert.Equal(t, "Get", targetURLErr.Op) ++ assert.Contains(t, targetURLErr.Err.Error(), "timeout") ++} ++ ++func TestSanitizeError_NonURLError(t *testing.T) { ++ // Regular error without url.Error ++ regularErr := fmt.Errorf("some error occurred") ++ ++ sanitized := SanitizeError(regularErr) ++ ++ // Should return the exact same error object ++ assert.Equal(t, regularErr, sanitized, ++ "Non-URL errors should pass through unchanged") ++} ++ ++func TestSanitizeError_NilError(t *testing.T) { ++ sanitized := SanitizeError(nil) ++ assert.Nil(t, sanitized, "nil error should return nil") ++} ++ ++func TestSanitizeError_NoQueryParams(t *testing.T) { ++ // URL without any query parameters ++ urlErr := &url.Error{ ++ Op: "Get", ++ URL: "https://registry.example.com/v2/image/manifests/latest", ++ Err: fmt.Errorf("not found"), ++ } ++ ++ sanitized := SanitizeError(urlErr) ++ ++ // Should return 
the same error object (no sanitization needed) ++ assert.Equal(t, urlErr, sanitized, ++ "Errors without query params should pass through unchanged") ++} ++ ++func TestSanitizedError_Unwrap(t *testing.T) { ++ originalURL := "https://storage.blob.core.windows.net/blob?sig=SECRET" ++ urlErr := &url.Error{ ++ Op: "Get", ++ URL: originalURL, ++ Err: fmt.Errorf("timeout"), ++ } ++ ++ sanitized := SanitizeError(urlErr) ++ ++ // Should be able to unwrap ++ unwrapped := errors.Unwrap(sanitized) ++ assert.NotNil(t, unwrapped, "Should be able to unwrap sanitized error") ++ assert.Equal(t, urlErr, unwrapped, "Unwrapped should be the original error") ++} +-- +2.45.4 + + +From 50e383e3907d04aeaec85853edfaa9ab34be1006 Mon Sep 17 00:00:00 2001 +From: Aadhar Agarwal +Date: Tue, 20 Jan 2026 22:16:30 +0000 +Subject: [PATCH 2/2] fix: sanitize error before gRPC return to prevent + credential leak in pod events + +PR #12491 fixed credential leaks in containerd logs but the gRPC error +returned to kubelet still contained sensitive information. This was +visible in Kubernetes pod events via `kubectl describe pod`. + +The issue was that SanitizeError was called inside the defer block, +but errgrpc.ToGRPC(err) was evaluated before the defer ran, so the +gRPC message contained the original unsanitized error. + +Move SanitizeError before the return statement so both the logged +error and the gRPC error are sanitized. + +Ref: #5453 +Signed-off-by: Aadhar Agarwal +--- + .../cri/instrument/instrumented_service.go | 24 ++++++++++++------- + 1 file changed, 16 insertions(+), 8 deletions(-) + +diff --git a/internal/cri/instrument/instrumented_service.go b/internal/cri/instrument/instrumented_service.go +index f06315a6bd..4379f95997 100644 +--- a/internal/cri/instrument/instrumented_service.go ++++ b/internal/cri/instrument/instrumented_service.go +@@ -351,8 +351,6 @@ func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullIma + log.G(ctx).Infof("PullImage %q", r.GetImage().GetImage()) + defer func() { + if err != nil { +- // Sanitize error to remove sensitive information +- err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("PullImage %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Infof("PullImage %q returns image reference %q", +@@ -361,6 +359,10 @@ func (in *instrumentedService) PullImage(ctx context.Context, r *runtime.PullIma + span.RecordError(err) + }() + res, err = in.c.PullImage(ctrdutil.WithNamespace(ctx), r) ++ // Sanitize error to remove sensitive information from both logs and returned gRPC error ++ if err != nil { ++ err = ctrdutil.SanitizeError(err) ++ } + return res, errgrpc.ToGRPC(err) + } + +@@ -371,8 +373,6 @@ func (in *instrumentedService) ListImages(ctx context.Context, r *runtime.ListIm + log.G(ctx).Tracef("ListImages with filter %+v", r.GetFilter()) + defer func() { + if err != nil { +- // Sanitize error to remove sensitive information +- err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("ListImages with filter %+v failed", r.GetFilter()) + } else { + log.G(ctx).Tracef("ListImages with filter %+v returns image list %+v", +@@ -380,6 +380,10 @@ func (in *instrumentedService) ListImages(ctx context.Context, r *runtime.ListIm + } + }() + res, err = in.c.ListImages(ctrdutil.WithNamespace(ctx), r) ++ // Sanitize error to remove sensitive information from both logs and returned gRPC error ++ if err != nil { ++ err = ctrdutil.SanitizeError(err) ++ } + return res, errgrpc.ToGRPC(err) + } + +@@ -390,8 +394,6 @@ func (in 
*instrumentedService) ImageStatus(ctx context.Context, r *runtime.Image + log.G(ctx).Tracef("ImageStatus for %q", r.GetImage().GetImage()) + defer func() { + if err != nil { +- // Sanitize error to remove sensitive information +- err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("ImageStatus for %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Tracef("ImageStatus for %q returns image status %+v", +@@ -399,6 +401,10 @@ func (in *instrumentedService) ImageStatus(ctx context.Context, r *runtime.Image + } + }() + res, err = in.c.ImageStatus(ctrdutil.WithNamespace(ctx), r) ++ // Sanitize error to remove sensitive information from both logs and returned gRPC error ++ if err != nil { ++ err = ctrdutil.SanitizeError(err) ++ } + return res, errgrpc.ToGRPC(err) + } + +@@ -410,8 +416,6 @@ func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime.Remov + log.G(ctx).Infof("RemoveImage %q", r.GetImage().GetImage()) + defer func() { + if err != nil { +- // Sanitize error to remove sensitive information +- err = ctrdutil.SanitizeError(err) + log.G(ctx).WithError(err).Errorf("RemoveImage %q failed", r.GetImage().GetImage()) + } else { + log.G(ctx).Infof("RemoveImage %q returns successfully", r.GetImage().GetImage()) +@@ -419,6 +423,10 @@ func (in *instrumentedService) RemoveImage(ctx context.Context, r *runtime.Remov + span.RecordError(err) + }() + res, err := in.c.RemoveImage(ctrdutil.WithNamespace(ctx), r) ++ // Sanitize error to remove sensitive information from both logs and returned gRPC error ++ if err != nil { ++ err = ctrdutil.SanitizeError(err) ++ } + return res, errgrpc.ToGRPC(err) + } + +-- +2.45.4 + diff --git a/SPECS/containerized-data-importer/CVE-2022-2879.patch b/SPECS/containerized-data-importer/CVE-2022-2879.patch index c24bd58e3ac..fad24af4df7 100644 --- a/SPECS/containerized-data-importer/CVE-2022-2879.patch +++ b/SPECS/containerized-data-importer/CVE-2022-2879.patch @@ -45,7 +45,7 @@ index af006fc..2baa0d5 100644 continue // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: format.mayOnlyBe(FormatGNU) -- realname, err := ioutil.ReadAll(tr) +- realname, err := io.ReadAll(tr) + realname, err := readSpecialFile(tr) if err != nil { return nil, err @@ -54,7 +54,7 @@ index af006fc..2baa0d5 100644 // parsePAX parses PAX headers. 
// If an extended header (type 'x') is invalid, ErrHeader is returned func parsePAX(r io.Reader) (map[string]string, error) { -- buf, err := ioutil.ReadAll(r) +- buf, err := io.ReadAll(r) + buf, err := readSpecialFile(r) if err != nil { return nil, err diff --git a/SPECS/containerized-data-importer/CVE-2023-39325.patch b/SPECS/containerized-data-importer/CVE-2023-39325.patch deleted file mode 100644 index e0085e416d6..00000000000 --- a/SPECS/containerized-data-importer/CVE-2023-39325.patch +++ /dev/null @@ -1,117 +0,0 @@ -diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go -index 8cb14f3..6000140 100644 ---- a/vendor/golang.org/x/net/http2/server.go -+++ b/vendor/golang.org/x/net/http2/server.go -@@ -581,9 +581,11 @@ type serverConn struct { - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push -+ curHandlers uint32 // number of running handler goroutines - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream -+ unstartedHandlers []unstartedHandler - initialStreamSendWindowSize int32 - maxFrameSize int32 - peerMaxHeaderListSize uint32 // zero means unknown (default) -@@ -981,6 +983,8 @@ func (sc *serverConn) serve() { - return - case gracefulShutdownMsg: - sc.startGracefulShutdownInternal() -+ case handlerDoneMsg: -+ sc.handlerDone() - default: - panic("unknown timer") - } -@@ -1028,6 +1032,7 @@ var ( - idleTimerMsg = new(serverMessage) - shutdownTimerMsg = new(serverMessage) - gracefulShutdownMsg = new(serverMessage) -+ handlerDoneMsg = new(serverMessage) - ) - - func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } -@@ -2022,8 +2027,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - } - } - -- go sc.runHandler(rw, req, handler) -- return nil -+ return sc.scheduleHandler(id, rw, req, handler) - } - - func (sc *serverConn) upgradeRequest(req *http.Request) { -@@ -2043,6 +2047,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { - sc.conn.SetReadDeadline(time.Time{}) - } - -+ // This is the first request on the connection, -+ // so start the handler directly rather than going -+ // through scheduleHandler. -+ sc.curHandlers++ - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - } - -@@ -2283,8 +2291,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response - return &responseWriter{rws: rws} - } - -+type unstartedHandler struct { -+ streamID uint32 -+ rw *responseWriter -+ req *http.Request -+ handler func(http.ResponseWriter, *http.Request) -+} -+ -+// scheduleHandler starts a handler goroutine, -+// or schedules one to start as soon as an existing handler finishes. 
-+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { -+ sc.serveG.check() -+ maxHandlers := sc.advMaxStreams -+ if sc.curHandlers < maxHandlers { -+ sc.curHandlers++ -+ go sc.runHandler(rw, req, handler) -+ return nil -+ } -+ if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { -+ return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) -+ } -+ sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ -+ streamID: streamID, -+ rw: rw, -+ req: req, -+ handler: handler, -+ }) -+ return nil -+} -+ -+func (sc *serverConn) handlerDone() { -+ sc.serveG.check() -+ sc.curHandlers-- -+ i := 0 -+ maxHandlers := sc.advMaxStreams -+ for ; i < len(sc.unstartedHandlers); i++ { -+ u := sc.unstartedHandlers[i] -+ if sc.streams[u.streamID] == nil { -+ // This stream was reset before its goroutine had a chance to start. -+ continue -+ } -+ if sc.curHandlers >= maxHandlers { -+ break -+ } -+ sc.curHandlers++ -+ go sc.runHandler(u.rw, u.req, u.handler) -+ sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references -+ } -+ sc.unstartedHandlers = sc.unstartedHandlers[i:] -+ if len(sc.unstartedHandlers) == 0 { -+ sc.unstartedHandlers = nil -+ } -+} -+ - // Run on its own goroutine. - func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { -+ defer sc.sendServeMsg(handlerDoneMsg) - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() diff --git a/SPECS/containerized-data-importer/CVE-2023-3978.patch b/SPECS/containerized-data-importer/CVE-2023-3978.patch deleted file mode 100644 index 6a3c1192b1e..00000000000 --- a/SPECS/containerized-data-importer/CVE-2023-3978.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 5abbff46d6a70d0e31b41ce98cddaa08cc911e3f Mon Sep 17 00:00:00 2001 -From: Sudipta Pandit -Date: Wed, 5 Feb 2025 20:58:22 +0530 -Subject: [PATCH] Backport fix for CVE-2023-3978 - -Reference: https://go-review.googlesource.com/c/net/+/514896 ---- - vendor/golang.org/x/net/html/render.go | 28 ++++++++++++++++++++++---- - 1 file changed, 24 insertions(+), 4 deletions(-) - -diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go -index 497e132..1da09c8 100644 ---- a/vendor/golang.org/x/net/html/render.go -+++ b/vendor/golang.org/x/net/html/render.go -@@ -194,9 +194,8 @@ func render1(w writer, n *Node) error { - } - } - -- // Render any child nodes. -- switch n.Data { -- case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": -+ // Render any child nodes -+ if childTextNodesAreLiteral(n) { - for c := n.FirstChild; c != nil; c = c.NextSibling { - if c.Type == TextNode { - if _, err := w.WriteString(c.Data); err != nil { -@@ -213,7 +212,7 @@ func render1(w writer, n *Node) error { - // last element in the file, with no closing tag. - return plaintextAbort - } -- default: -+ } else { - for c := n.FirstChild; c != nil; c = c.NextSibling { - if err := render1(w, c); err != nil { - return err -@@ -231,6 +230,27 @@ func render1(w writer, n *Node) error { - return w.WriteByte('>') - } - -+func childTextNodesAreLiteral(n *Node) bool { -+ // Per WHATWG HTML 13.3, if the parent of the current node is a style, -+ // script, xmp, iframe, noembed, noframes, or plaintext element, and the -+ // current node is a text node, append the value of the node's data -+ // literally. 
The specification is not explicit about it, but we only -+ // enforce this if we are in the HTML namespace (i.e. when the namespace is -+ // ""). -+ // NOTE: we also always include noscript elements, although the -+ // specification states that they should only be rendered as such if -+ // scripting is enabled for the node (which is not something we track). -+ if n.Namespace != "" { -+ return false -+ } -+ switch n.Data { -+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": -+ return true -+ default: -+ return false -+ } -+} -+ - // writeQuoted writes s to w surrounded by quotes. Normally it will use double - // quotes, but if s contains a double quote, it will use single quotes. - // It is used for writing the identifiers in a doctype declaration. --- -2.34.1 - diff --git a/SPECS/containerized-data-importer/CVE-2023-44487.patch b/SPECS/containerized-data-importer/CVE-2023-44487.patch deleted file mode 100644 index ee2a818f281..00000000000 --- a/SPECS/containerized-data-importer/CVE-2023-44487.patch +++ /dev/null @@ -1,258 +0,0 @@ -diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go -index 3dd1564..9d9a3fd 100644 ---- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go -+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go -@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, - ID: http2.SettingMaxFrameSize, - Val: http2MaxFrameLen, - }} -- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is -- // permitted in the HTTP2 spec. -- maxStreams := config.MaxStreams -- if maxStreams == 0 { -- maxStreams = math.MaxUint32 -- } else { -+ if config.MaxStreams != math.MaxUint32 { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingMaxConcurrentStreams, -- Val: maxStreams, -+ Val: config.MaxStreams, - }) - } - dynamicWindow := true -@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, - framer: framer, - readerDone: make(chan struct{}), - writerDone: make(chan struct{}), -- maxStreams: maxStreams, -+ maxStreams: config.MaxStreams, - inTapHandle: config.InTapHandle, - fc: &trInFlow{limit: uint32(icwz)}, - state: reachable, -diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go -index f4dde72..98839ad 100644 ---- a/vendor/google.golang.org/grpc/server.go -+++ b/vendor/google.golang.org/grpc/server.go -@@ -43,7 +43,6 @@ import ( - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/binarylog" - "google.golang.org/grpc/internal/channelz" -- "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" -@@ -74,10 +73,10 @@ func init() { - srv.drainServerTransports(addr) - } - internal.AddGlobalServerOptions = func(opt ...ServerOption) { -- extraServerOptions = append(extraServerOptions, opt...) -+ globalServerOptions = append(globalServerOptions, opt...) 
- } - internal.ClearGlobalServerOptions = func() { -- extraServerOptions = nil -+ globalServerOptions = nil - } - internal.BinaryLogger = binaryLogger - internal.JoinServerOptions = newJoinServerOption -@@ -115,12 +114,6 @@ type serviceInfo struct { - mdata interface{} - } - --type serverWorkerData struct { -- st transport.ServerTransport -- wg *sync.WaitGroup -- stream *transport.Stream --} -- - // Server is a gRPC server to serve RPC requests. - type Server struct { - opts serverOptions -@@ -145,7 +138,7 @@ type Server struct { - channelzID *channelz.Identifier - czData *channelzData - -- serverWorkerChannels []chan *serverWorkerData -+ serverWorkerChannel chan func() - } - - type serverOptions struct { -@@ -177,13 +170,14 @@ type serverOptions struct { - } - - var defaultServerOptions = serverOptions{ -+ maxConcurrentStreams: math.MaxUint32, - maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, - maxSendMessageSize: defaultServerMaxSendMessageSize, - connectionTimeout: 120 * time.Second, - writeBufferSize: defaultWriteBufSize, - readBufferSize: defaultReadBufSize, - } --var extraServerOptions []ServerOption -+var globalServerOptions []ServerOption - - // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. - type ServerOption interface { -@@ -387,6 +381,9 @@ func MaxSendMsgSize(m int) ServerOption { - // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number - // of concurrent streams to each ServerTransport. - func MaxConcurrentStreams(n uint32) ServerOption { -+ if n == 0 { -+ n = math.MaxUint32 -+ } - return newFuncServerOption(func(o *serverOptions) { - o.maxConcurrentStreams = n - }) -@@ -565,42 +562,35 @@ const serverWorkerResetThreshold = 1 << 16 - // re-allocations (see the runtime.morestack problem [1]). - // - // [1] https://github.com/golang/go/issues/18138 --func (s *Server) serverWorker(ch chan *serverWorkerData) { -- // To make sure all server workers don't reset at the same time, choose a -- // random number of iterations before resetting. -- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) -- for completed := 0; completed < threshold; completed++ { -- data, ok := <-ch -+func (s *Server) serverWorker() { -+ for completed := 0; completed < serverWorkerResetThreshold; completed++ { -+ f, ok := <-s.serverWorkerChannel - if !ok { - return - } -- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) -- data.wg.Done() -+ f() - } -- go s.serverWorker(ch) -+ go s.serverWorker() - } - - // initServerWorkers creates worker goroutines and channels to process incoming - // connections to reduce the time spent overall on runtime.morestack. - func (s *Server) initServerWorkers() { -- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) -+ s.serverWorkerChannel = make(chan func()) - for i := uint32(0); i < s.opts.numServerWorkers; i++ { -- s.serverWorkerChannels[i] = make(chan *serverWorkerData) -- go s.serverWorker(s.serverWorkerChannels[i]) -+ go s.serverWorker() - } - } - - func (s *Server) stopServerWorkers() { -- for i := uint32(0); i < s.opts.numServerWorkers; i++ { -- close(s.serverWorkerChannels[i]) -- } -+ close(s.serverWorkerChannel) - } - - // NewServer creates a gRPC server which has no service registered and has not - // started to accept requests yet. 
- func NewServer(opt ...ServerOption) *Server { - opts := defaultServerOptions -- for _, o := range extraServerOptions { -+ for _, o := range globalServerOptions { - o.apply(&opts) - } - for _, o := range opt { -@@ -945,25 +935,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() - var wg sync.WaitGroup - -- var roundRobinCounter uint32 -+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) -+ -+ streamQuota.acquire() -+ f := func() { -+ defer streamQuota.release() -+ defer wg.Done() -+ s.handleStream(st, stream, s.traceInfo(st, stream)) -+ } -+ - if s.opts.numServerWorkers > 0 { -- data := &serverWorkerData{st: st, wg: &wg, stream: stream} - select { -- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: -+ case s.serverWorkerChannel <- f: -+ return - default: - // If all stream workers are busy, fallback to the default code path. -- go func() { -- s.handleStream(st, stream, s.traceInfo(st, stream)) -- wg.Done() -- }() - } - } else { -- go func() { -- defer wg.Done() -- s.handleStream(st, stream, s.traceInfo(st, stream)) -- }() -+ go f() - } - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { -@@ -1978,3 +1969,34 @@ type channelzServer struct { - func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() - } -+ -+// atomicSemaphore implements a blocking, counting semaphore. acquire should be -+// called synchronously; release may be called asynchronously. -+type atomicSemaphore struct { -+ n atomic.Int64 -+ wait chan struct{} -+} -+ -+func (q *atomicSemaphore) acquire() { -+ if q.n.Add(-1) < 0 { -+ // We ran out of quota. Block until a release happens. -+ <-q.wait -+ } -+} -+ -+func (q *atomicSemaphore) release() { -+ // N.B. the "<= 0" check below should allow for this to work with multiple -+ // concurrent calls to acquire, but also note that with synchronous calls to -+ // acquire, as our system does, n will never be less than -1. There are -+ // fairness issues (queuing) to consider if this was to be generalized. -+ if q.n.Add(1) <= 0 { -+ // An acquire was waiting on us. Unblock it. -+ q.wait <- struct{}{} -+ } -+} -+ -+func newHandlerQuota(n uint32) *atomicSemaphore { -+ a := &atomicSemaphore{wait: make(chan struct{}, 1)} -+ a.n.Store(int64(n)) -+ return a -+} -\ No newline at end of file -diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go -index d738725..3674914 100644 ---- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go -+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go -@@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { - // OnError will block if it is called more often than the embedded period time. - // This will prevent overly tight hot error loops. - func (r *rudimentaryErrorBackoff) OnError(error) { -+ now := time.Now() // start the timer before acquiring the lock - r.lastErrorTimeLock.Lock() -- defer r.lastErrorTimeLock.Unlock() -- d := time.Since(r.lastErrorTime) -- if d < r.minPeriod { -- // If the time moves backwards for any reason, do nothing -- time.Sleep(r.minPeriod - d) -- } -+ d := now.Sub(r.lastErrorTime) - r.lastErrorTime = time.Now() -+ r.lastErrorTimeLock.Unlock() -+ -+ // Do not sleep with the lock held because that causes all callers of HandleError to block. -+ // We only want the current goroutine to block. 
-+ // A negative or zero duration causes time.Sleep to return immediately. -+ // If the time moves backwards for any reason, do nothing. -+ time.Sleep(r.minPeriod - d) - } - - // GetCaller returns the caller of the function that calls it. diff --git a/SPECS/containerized-data-importer/CVE-2023-45288.patch b/SPECS/containerized-data-importer/CVE-2023-45288.patch deleted file mode 100644 index 80eaa40216a..00000000000 --- a/SPECS/containerized-data-importer/CVE-2023-45288.patch +++ /dev/null @@ -1,83 +0,0 @@ -Author: Damien Neil -AuthorDate: 2024-01-10 13:41:39 -0800 -Commit: Gopher Robot -CommitDate: 2024-04-03 17:06:00 +0000 - -[internal-branch.go1.21-vendor] http2: close connections when receiving too many headers - -Maintaining HPACK state requires that we parse and process -all HEADERS and CONTINUATION frames on a connection. -When a request's headers exceed MaxHeaderBytes, we don't -allocate memory to store the excess headers but we do -parse them. This permits an attacker to cause an HTTP/2 -endpoint to read arbitrary amounts of data, all associated -with a request which is going to be rejected. - -Set a limit on the amount of excess header frames we -will process before closing a connection. - -Thanks to Bartek Nowotarski for reporting this issue. - -Fixes CVE-2023-45288 -For golang/go#65051 - -Change-Id: I15df097268df13bb5a9e9d3a5c04a8a141d850f6 -Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2130527 -Reviewed-by: Roland Shoemaker -Reviewed-by: Tatiana Bradley -Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2197243 -Run-TryBot: Damien Neil -Reviewed-by: Dmitri Shuralyov -Reviewed-on: https://go-review.googlesource.com/c/net/+/576057 -LUCI-TryBot-Result: Go LUCI -Auto-Submit: Dmitri Shuralyov - -diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go -index c1f6b90..175c154 100644 ---- a/vendor/golang.org/x/net/http2/frame.go -+++ b/vendor/golang.org/x/net/http2/frame.go -@@ -1565,6 +1565,7 @@ - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true -+ remainSize = 0 - return - } - remainSize -= size -@@ -1577,6 +1578,36 @@ - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() -+ -+ // Avoid parsing large amounts of headers that we will then discard. -+ // If the sender exceeds the max header list size by too much, -+ // skip parsing the fragment and close the connection. -+ // -+ // "Too much" is either any CONTINUATION frame after we've already -+ // exceeded the max header list size (in which case remainSize is 0), -+ // or a frame whose encoded size is more than twice the remaining -+ // header list bytes we're willing to accept. -+ if int64(len(frag)) > int64(2*remainSize) { -+ if VerboseLogs { -+ log.Printf("http2: header list too large") -+ } -+ // It would be nice to send a RST_STREAM before sending the GOAWAY, -+ // but the struture of the server's frame writer makes this difficult. -+ return nil, ConnectionError(ErrCodeProtocol) -+ } -+ -+ // Also close the connection after any CONTINUATION frame following an -+ // invalid header, since we stop tracking the size of the headers after -+ // an invalid one. -+ if invalid != nil { -+ if VerboseLogs { -+ log.Printf("http2: invalid header: %v", invalid) -+ } -+ // It would be nice to send a RST_STREAM before sending the GOAWAY, -+ // but the struture of the server's frame writer makes this difficult. 
-+ return nil, ConnectionError(ErrCodeProtocol) -+ } -+ - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } diff --git a/SPECS/containerized-data-importer/CVE-2024-24786.patch b/SPECS/containerized-data-importer/CVE-2024-24786.patch deleted file mode 100644 index 6c80204f5b0..00000000000 --- a/SPECS/containerized-data-importer/CVE-2024-24786.patch +++ /dev/null @@ -1,152 +0,0 @@ -From 1576982839ab9771784526720ed0a2f4a2aa2280 Mon Sep 17 00:00:00 2001 -From: bala -Date: Mon, 25 Nov 2024 16:47:53 +0000 -Subject: [PATCH] Vendor patch applied - ---- - .../protobuf/encoding/protojson/decode.go | 12 ++++ - .../encoding/protojson/well_known_types.go | 59 +++++++------------ - .../protobuf/internal/encoding/json/decode.go | 2 +- - 3 files changed, 33 insertions(+), 40 deletions(-) - -diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go -index 5f28148..67fe4e7 100644 ---- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go -+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go -@@ -11,6 +11,7 @@ import ( - "strconv" - "strings" - -+ "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/json" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/errors" -@@ -47,6 +48,10 @@ type UnmarshalOptions struct { - protoregistry.MessageTypeResolver - protoregistry.ExtensionTypeResolver - } -+ -+ // RecursionLimit limits how deeply messages may be nested. -+ // If zero, a default limit is applied. -+ RecursionLimit int - } - - // Unmarshal reads the given []byte and populates the given proto.Message -@@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { - if o.Resolver == nil { - o.Resolver = protoregistry.GlobalTypes - } -+ if o.RecursionLimit == 0 { -+ o.RecursionLimit = protowire.DefaultRecursionLimit -+ } - - dec := decoder{json.NewDecoder(b), o} - if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { -@@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { - - // unmarshalMessage unmarshals a message into the given protoreflect.Message. - func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { -+ d.opts.RecursionLimit-- -+ if d.opts.RecursionLimit < 0 { -+ return errors.New("exceeded max recursion depth") -+ } - if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { - return unmarshal(d, m) - } -diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go -index 6c37d41..4b177c8 100644 ---- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go -+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go -@@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { - // Use another decoder to parse the unread bytes for @type field. This - // avoids advancing a read from current decoder because the current JSON - // object may contain the fields of the embedded type. 
-- dec := decoder{d.Clone(), UnmarshalOptions{}} -+ dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} - tok, err := findTypeURL(dec) - switch err { - case errEmptyObject: -@@ -308,48 +308,29 @@ Loop: - // array) in order to advance the read to the next JSON value. It relies on - // the decoder returning an error if the types are not in valid sequence. - func (d decoder) skipJSONValue() error { -- tok, err := d.Read() -- if err != nil { -- return err -- } -- // Only need to continue reading for objects and arrays. -- switch tok.Kind() { -- case json.ObjectOpen: -- for { -- tok, err := d.Read() -- if err != nil { -- return err -- } -- switch tok.Kind() { -- case json.ObjectClose: -- return nil -- case json.Name: -- // Skip object field value. -- if err := d.skipJSONValue(); err != nil { -- return err -- } -- } -+ var open int -+ for { -+ tok, err := d.Read() -+ if err != nil { -+ return err - } -- -- case json.ArrayOpen: -- for { -- tok, err := d.Peek() -- if err != nil { -- return err -- } -- switch tok.Kind() { -- case json.ArrayClose: -- d.Read() -- return nil -- default: -- // Skip array item. -- if err := d.skipJSONValue(); err != nil { -- return err -- } -+ switch tok.Kind() { -+ case json.ObjectClose, json.ArrayClose: -+ open-- -+ case json.ObjectOpen, json.ArrayOpen: -+ open++ -+ if open > d.opts.RecursionLimit { -+ return errors.New("exceeded max recursion depth") - } -+ case json.EOF: -+ // This can only happen if there's a bug in Decoder.Read. -+ // Avoid an infinite loop if this does happen. -+ return errors.New("unexpected EOF") -+ } -+ if open == 0 { -+ return nil - } - } -- return nil - } - - // unmarshalAnyValue unmarshals the given custom-type message from the JSON -diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go -index d043a6e..d2b3ac0 100644 ---- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go -+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go -@@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { - - case ObjectClose: - if len(d.openStack) == 0 || -- d.lastToken.kind == comma || -+ d.lastToken.kind&(Name|comma) != 0 || - d.openStack[len(d.openStack)-1] != ObjectOpen { - return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) - } --- -2.39.4 - diff --git a/SPECS/containerized-data-importer/CVE-2024-28180.patch b/SPECS/containerized-data-importer/CVE-2024-28180.patch deleted file mode 100644 index 45d72463733..00000000000 --- a/SPECS/containerized-data-importer/CVE-2024-28180.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 886860405f81160c23e8e9e8c80694f094f0e104 Mon Sep 17 00:00:00 2001 -From: Kanishk Bansal -Date: Wed, 29 Jan 2025 14:11:18 +0000 -Subject: [PATCH] Address CVE-2024-28180 - ---- - vendor/gopkg.in/square/go-jose.v2/crypter.go | 6 ++++++ - vendor/gopkg.in/square/go-jose.v2/encoding.go | 20 +++++++++++++++---- - 2 files changed, 22 insertions(+), 4 deletions(-) - -diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go -index d24cabf..a628386 100644 ---- a/vendor/gopkg.in/square/go-jose.v2/crypter.go -+++ b/vendor/gopkg.in/square/go-jose.v2/crypter.go -@@ -405,6 +405,9 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { - // Decrypt and validate the object and return the plaintext. 
Note that this - // function does not support multi-recipient, if you desire multi-recipient - // decryption use DecryptMulti instead. -+// -+// Automatically decompresses plaintext, but returns an error if the decompressed -+// data would be >250kB or >10x the size of the compressed data, whichever is larger. - func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - -@@ -469,6 +472,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) - // with support for multiple recipients. It returns the index of the recipient - // for which the decryption was successful, the merged headers for that recipient, - // and the plaintext. -+// -+// Automatically decompresses plaintext, but returns an error if the decompressed -+// data would be >250kB or >3x the size of the compressed data, whichever is larger. - func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - -diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go -index 70f7385..2b92116 100644 ---- a/vendor/gopkg.in/square/go-jose.v2/encoding.go -+++ b/vendor/gopkg.in/square/go-jose.v2/encoding.go -@@ -21,6 +21,7 @@ import ( - "compress/flate" - "encoding/base64" - "encoding/binary" -+ "fmt" - "io" - "math/big" - "strings" -@@ -85,7 +86,7 @@ func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - } - } - --// Compress with DEFLATE -+// deflate compresses the input. - func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - -@@ -97,15 +98,26 @@ func deflate(input []byte) ([]byte, error) { - return output.Bytes(), err - } - --// Decompress with DEFLATE -+// inflate decompresses the input. -+// -+// Errors if the decompressed data would be >250kB or >10x the size of the -+// compressed data, whichever is larger. 
- func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - -- _, err := io.Copy(output, reader) -- if err != nil { -+ maxCompressedSize := 10 * int64(len(input)) -+ if maxCompressedSize < 250000 { -+ maxCompressedSize = 250000 -+ } -+ limit := maxCompressedSize + 1 -+ n, err := io.CopyN(output, reader, limit) -+ if err != nil && err != io.EOF { - return nil, err - } -+ if n == limit { -+ return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) -+ } - - err = reader.Close() - return output.Bytes(), err --- -2.43.0 - diff --git a/SPECS/containerized-data-importer/CVE-2024-3727.patch b/SPECS/containerized-data-importer/CVE-2024-3727.patch deleted file mode 100644 index 92f882851e9..00000000000 --- a/SPECS/containerized-data-importer/CVE-2024-3727.patch +++ /dev/null @@ -1,165 +0,0 @@ -From ea14d57b98cc37decad0c39ccbafb27994274b47 Mon Sep 17 00:00:00 2001 -From: Brian Fjeldstad -Date: Thu, 6 Jun 2024 21:13:36 +0000 -Subject: [PATCH] apply CVE-2024-3727 fix to v5.19.1 - ---- - vendor/github.com/containers/image/v5/docker/docker_client.go | 3 +++ - vendor/github.com/containers/image/v5/docker/docker_image.go | 8 ++++++-- - vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 15 ++++++++++++--- - vendor/github.com/containers/image/v5/docker/docker_image_src.go | 19 +++++++++++++++++-- - vendor/github.com/containers/image/v5/docker/lookaside.go | 7 +++++-- - 5 files changed, 43 insertions(+), 9 deletions(-) - -diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go -index 833323b4..99bde923 100644 ---- a/vendor/github.com/containers/image/v5/docker/docker_client.go -+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go -@@ -796,6 +796,9 @@ func (c *dockerClient) detectProperties(ctx context.Context) error { - // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, - // using the original data structures. - func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { -+ if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters -+ return nil, err -+ } - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) - res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { -diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go -index c84bb37d..0076d229 100644 ---- a/vendor/github.com/containers/image/v5/docker/docker_image.go -+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go -@@ -83,8 +83,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. - if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { - return nil, err - } -- tags = append(tags, tagsHolder.Tags...) 
-- -+ for _, tag := range tagsHolder.Tags { -+ if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values -+ return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err) -+ } -+ tags = append(tags, tag) -+ } - link := res.Header.Get("Link") - if link == "" { - break -diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go -index e7af8f93..1096c56f 100644 ---- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go -+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go -@@ -226,6 +226,9 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, - // If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); - // it returns a non-nil error only on an unexpected failure. - func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { -+ if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters -+ return false, -1, err -+ } - checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) - logrus.Debugf("Checking %s", checkPath) - res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope) -@@ -558,8 +561,11 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i, signature := range signatures { -- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i) -- err := d.putOneSignature(url, signature) -+ url, err := signatureStorageURL(d.c.signatureBase, manifestDigest, i) -+ if err != nil { -+ return err -+ } -+ err = d.putOneSignature(url, signature) - if err != nil { - return err - } -@@ -570,7 +576,10 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m - // is enough for dockerImageSource to stop looking for other signatures, so that - // is sufficient. - for i := len(signatures); ; i++ { -- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i) -+ url, err := signatureStorageURL(d.c.signatureBase, manifestDigest, i) -+ if err != nil { -+ return err -+ } - missing, err := d.c.deleteOneSignature(url) - if err != nil { - return err -diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go -index 314e9b39..43ca0c4f 100644 ---- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go -+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go -@@ -178,6 +178,9 @@ func simplifyContentType(contentType string) string { - // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
- func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { -+ if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters -+ return nil, "", err -+ } - return s.fetchManifest(ctx, instanceDigest.String()) - } - err := s.ensureManifestIsLoaded(ctx) -@@ -373,6 +376,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, - return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt") - } - -+ if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters -+ return nil, nil, err -+ } - path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) -@@ -425,6 +431,9 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca - } - } - -+ if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters -+ return nil, 0, err -+ } - path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) -@@ -486,7 +495,10 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst - // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := [][]byte{} - for i := 0; ; i++ { -- url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) -+ url, err := signatureStorageURL(s.c.signatureBase, manifestDigest, i) -+ if err != nil { -+ return nil, err -+ } - signature, missing, err := s.getOneSignature(ctx, url) - if err != nil { - return nil, err -@@ -627,7 +639,10 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere - } - - for i := 0; ; i++ { -- url := signatureStorageURL(c.signatureBase, manifestDigest, i) -+ url, err := signatureStorageURL(c.signatureBase, manifestDigest, i) -+ if err != nil { -+ return err -+ } - missing, err := c.deleteOneSignature(url) - if err != nil { - return err -diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go -index 515e5932..2e400c09 100644 ---- a/vendor/github.com/containers/image/v5/docker/lookaside.go -+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go -@@ -229,8 +229,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string { - // signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. - // base is not nil from the caller - // NOTE: Keep this in sync with docs/signature-protocols.md! --func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { -+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) { -+ if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly. 
-+ return nil, err -+ } - url := *base - url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) -- return &url -+ return &url, nil - } --- -2.34.1 - diff --git a/SPECS/containerized-data-importer/CVE-2024-45338.patch b/SPECS/containerized-data-importer/CVE-2024-45338.patch deleted file mode 100644 index b1a7b333044..00000000000 --- a/SPECS/containerized-data-importer/CVE-2024-45338.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 0c0cb82a7671b2aa12c5136ab9368245e3803985 Mon Sep 17 00:00:00 2001 -From: Rohit Rawat -Date: Thu, 2 Jan 2025 10:22:13 +0000 -Subject: [PATCH] Fix CVE CVE-2024-45338 in containerized-data-importer - ---- - .../vendor/golang.org/x/net/html/doctype.go | 2 +- - .../vendor/golang.org/x/net/html/foreign.go | 3 +-- - .../vendor/golang.org/x/net/html/parse.go | 4 ++-- - 3 files changed, 4 insertions(+), 5 deletions(-) - -diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go -index c484e5a..bca3ae9 100644 ---- a/vendor/golang.org/x/net/html/doctype.go -+++ b/vendor/golang.org/x/net/html/doctype.go -@@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && -- strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { -+ strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { - quirks = true - } - } -diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go -index 9da9e9d..e8515d8 100644 ---- a/vendor/golang.org/x/net/html/foreign.go -+++ b/vendor/golang.org/x/net/html/foreign.go -@@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { - if n.Data == "annotation-xml" { - for _, a := range n.Attr { - if a.Key == "encoding" { -- val := strings.ToLower(a.Val) -- if val == "text/html" || val == "application/xhtml+xml" { -+ if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { - return true - } - } -diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go -index 46a89ed..5b8374b 100644 ---- a/vendor/golang.org/x/net/html/parse.go -+++ b/vendor/golang.org/x/net/html/parse.go -@@ -1031,7 +1031,7 @@ func inBodyIM(p *parser) bool { - if p.tok.DataAtom == a.Input { - for _, t := range p.tok.Attr { - if t.Key == "type" { -- if strings.ToLower(t.Val) == "hidden" { -+ if strings.EqualFold(t.Val, "hidden") { - // Skip setting framesetOK = false - return true - } -@@ -1459,7 +1459,7 @@ func inTableIM(p *parser) bool { - return inHeadIM(p) - case a.Input: - for _, t := range p.tok.Attr { -- if t.Key == "type" && strings.ToLower(t.Val) == "hidden" { -+ if t.Key == "type" && strings.EqualFold(t.Val, "hidden") { - p.addElement() - p.oe.pop() - return true --- -2.39.4 - diff --git a/SPECS/containerized-data-importer/CVE-2025-22868.patch b/SPECS/containerized-data-importer/CVE-2025-22868.patch deleted file mode 100644 index c4f136f3ca1..00000000000 --- a/SPECS/containerized-data-importer/CVE-2025-22868.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 681b4d8edca1bcfea5bce685d77ea7b82ed3e7b3 Mon Sep 17 00:00:00 2001 -From: Neal Patel -Date: Thu, 30 Jan 2025 14:10:09 -0500 -Subject: [PATCH] jws: split token into fixed number of parts - -Thanks to 'jub0bs' for reporting this issue. 
- -Fixes #71490 -Fixes CVE-2025-22868 - -Change-Id: I2552731f46d4907f29aafe7863c558387b6bd6e2 -Reviewed-on: https://go-review.googlesource.com/c/oauth2/+/652155 -Auto-Submit: Gopher Robot -Reviewed-by: Damien Neil -Reviewed-by: Roland Shoemaker -LUCI-TryBot-Result: Go LUCI ---- - vendor/golang.org/x/oauth2/jws/jws.go | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go -index 95015648b..6f03a49d3 100644 ---- a/vendor/golang.org/x/oauth2/jws/jws.go -+++ b/vendor/golang.org/x/oauth2/jws/jws.go -@@ -165,11 +165,11 @@ func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - // Verify tests whether the provided JWT token's signature was produced by the private key - // associated with the supplied public key. - func Verify(token string, key *rsa.PublicKey) error { -- parts := strings.Split(token, ".") -- if len(parts) != 3 { -+ if strings.Count(token, ".") != 2 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - -+ parts := strings.SplitN(token, ".", 3) - signedContent := parts[0] + "." + parts[1] - signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { diff --git a/SPECS/containerized-data-importer/CVE-2025-27144.patch b/SPECS/containerized-data-importer/CVE-2025-27144.patch deleted file mode 100644 index 6015ed48ca9..00000000000 --- a/SPECS/containerized-data-importer/CVE-2025-27144.patch +++ /dev/null @@ -1,50 +0,0 @@ -From fa324fa38481f9d2da9109cb5983326f62ff7507 Mon Sep 17 00:00:00 2001 -From: Kanishk-Bansal -Date: Fri, 28 Feb 2025 07:45:53 +0000 -Subject: [PATCH] CVE-2025-27144 -Upstream Ref: https://github.com/go-jose/go-jose/commit/c9ed84d8f0cfadcfad817150158caca6fcbc518b - ---- - vendor/gopkg.in/square/go-jose.v2/jwe.go | 5 +++-- - vendor/gopkg.in/square/go-jose.v2/jws.go | 5 +++-- - 2 files changed, 6 insertions(+), 4 deletions(-) - -diff --git a/vendor/gopkg.in/square/go-jose.v2/jwe.go b/vendor/gopkg.in/square/go-jose.v2/jwe.go -index b5a6dcd..cd1de9e 100644 ---- a/vendor/gopkg.in/square/go-jose.v2/jwe.go -+++ b/vendor/gopkg.in/square/go-jose.v2/jwe.go -@@ -201,10 +201,11 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { - - // parseEncryptedCompact parses a message in compact format. - func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { -- parts := strings.Split(input, ".") -- if len(parts) != 5 { -+ // Five parts is four separators -+ if strings.Count(input, ".") != 4 { - return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts") - } -+ parts := strings.SplitN(input, ".", 5) - - rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) - if err != nil { -diff --git a/vendor/gopkg.in/square/go-jose.v2/jws.go b/vendor/gopkg.in/square/go-jose.v2/jws.go -index 7e261f9..a8d55fb 100644 ---- a/vendor/gopkg.in/square/go-jose.v2/jws.go -+++ b/vendor/gopkg.in/square/go-jose.v2/jws.go -@@ -275,10 +275,11 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { - - // parseSignedCompact parses a message in compact format. 
- func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) { -- parts := strings.Split(input, ".") -- if len(parts) != 3 { -+ // Three parts is two separators -+ if strings.Count(input, ".") != 2 { - return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts") - } -+ parts := strings.SplitN(input, ".", 3) - - if parts[1] != "" && payload != nil { - return nil, fmt.Errorf("square/go-jose: payload is not detached") --- -2.45.2 - diff --git a/SPECS/containerized-data-importer/CVE-2025-58058.patch b/SPECS/containerized-data-importer/CVE-2025-58058.patch index 3ab53122faf..f6ed74aa7e7 100644 --- a/SPECS/containerized-data-importer/CVE-2025-58058.patch +++ b/SPECS/containerized-data-importer/CVE-2025-58058.patch @@ -1,38 +1,3 @@ -From bef47b0f46c4200c1efe37fc10122cf462979eb2 Mon Sep 17 00:00:00 2001 -From: Ulrich Kunitz -Date: Mon, 12 Dec 2022 20:41:07 +0100 -Subject: [PATCH 1/3] lzma: fix handling of small dictionary sizes - -As Matt Dainty (@bodgit) reported there is an issue if the header of the -LZMA stream is less than the minimum dictionary size of 4096 byte. The -specification of the LZMA format says that in that case a dictionary -size of 4096 byte should be used, our code returns an error. - -This commit changes the behavior and adds a simple test case to test for -the right behavior. - -Fixes [#52](https://github.com/ulikunitz/xz/pull/52) ---- - vendor/github.com/ulikunitz/xz/lzma/reader.go | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go -index 2ed13c8..8d675a3 100644 ---- a/vendor/github.com/ulikunitz/xz/lzma/reader.go -+++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go -@@ -70,7 +70,7 @@ func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { - return nil, err - } - if r.h.dictCap < MinDictCap { -- return nil, errors.New("lzma: dictionary capacity too small") -+ r.h.dictCap = MinDictCap - } - dictCap := r.h.dictCap - if c.DictCap > dictCap { --- -2.45.4 - - From 497594b7b0e995ea8187c16ed1942f8005fc23d3 Mon Sep 17 00:00:00 2001 From: Ulrich Kunitz Date: Thu, 21 Aug 2025 17:57:47 +0200 diff --git a/SPECS/containerized-data-importer/containerized-data-importer.signatures.json b/SPECS/containerized-data-importer/containerized-data-importer.signatures.json index d5ded8e7035..27bd9437093 100644 --- a/SPECS/containerized-data-importer/containerized-data-importer.signatures.json +++ b/SPECS/containerized-data-importer/containerized-data-importer.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "containerized-data-importer-1.57.0.tar.gz": "71191e9e98df6d73490ae2bb74fa069bd2967a439f9a76d6bba1822fccc134ce" + "containerized-data-importer-1.62.0.tar.gz": "cdf830f40c1133214bf0567d385032b1ab9418d19b29f199fe836f88d35f4efd" } } diff --git a/SPECS/containerized-data-importer/containerized-data-importer.spec b/SPECS/containerized-data-importer/containerized-data-importer.spec index 2e8a7697199..b3213c97007 100644 --- a/SPECS/containerized-data-importer/containerized-data-importer.spec +++ b/SPECS/containerized-data-importer/containerized-data-importer.spec @@ -17,28 +17,18 @@ Summary: Container native virtualization Name: containerized-data-importer -Version: 1.57.0 -Release: 17%{?dist} +Version: 1.62.0 +Release: 1%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux Group: System/Packages URL: https://github.com/kubevirt/containerized-data-importer Source0: 
https://github.com/kubevirt/containerized-data-importer/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz -Patch0: CVE-2024-3727.patch -Patch1: CVE-2022-2879.patch -Patch2: CVE-2024-24786.patch -Patch3: CVE-2024-45338.patch -Patch4: CVE-2023-39325.patch -Patch5: CVE-2023-44487.patch -Patch6: CVE-2024-28180.patch -Patch7: CVE-2023-45288.patch -Patch8: CVE-2023-3978.patch -Patch9: CVE-2025-27144.patch -Patch10: CVE-2025-22868.patch -Patch11: CVE-2025-22872.patch -Patch12: CVE-2025-58058.patch -Patch13: CVE-2025-58183.patch +Patch0: CVE-2022-2879.patch +Patch1: CVE-2025-22872.patch +Patch2: CVE-2025-58058.patch +Patch3: CVE-2025-58183.patch BuildRequires: golang < 1.25 BuildRequires: golang-packaging BuildRequires: libnbd-devel @@ -233,6 +223,14 @@ install -m 0644 _out/manifests/release/cdi-cr.yaml %{buildroot}%{_datadir}/cdi/m %{_datadir}/cdi/manifests %changelog +* Tue Dec 09 2025 Harshit Gupta - 1.62.0-1 +- Upgrade to 1.62.0-1 +- Update patches CVE-2025-58058.patch, CVE-2022-2879.patch +- Remove old CVE patches + +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 1.57.0-18 +- Patch for CVE-2025-65637 + * Sat Nov 15 2025 Azure Linux Security Servicing Account - 1.57.0-17 - Patch for CVE-2025-58183 diff --git a/SPECS/coredns/CVE-2025-68151.patch b/SPECS/coredns/CVE-2025-68151.patch new file mode 100644 index 00000000000..3ba5a8a1715 --- /dev/null +++ b/SPECS/coredns/CVE-2025-68151.patch @@ -0,0 +1,1481 @@ +From 0d8cbb1a6bcb6bc9c1a489865278b8725fa20812 Mon Sep 17 00:00:00 2001 +From: Ville Vesilehto +Date: Thu, 18 Dec 2025 05:08:59 +0200 +Subject: [PATCH] Merge commit from fork + +Add configurable resource limits to prevent potential DoS vectors +via connection/stream exhaustion on gRPC, HTTPS, and HTTPS/3 servers. + +New configuration plugins: +- grpc_server: configure max_streams, max_connections +- https: configure max_connections +- https3: configure max_streams + +Changes: +- Use netutil.LimitListener for connection limiting +- Use gRPC MaxConcurrentStreams and message size limits +- Add QUIC MaxIncomingStreams for HTTPS/3 stream limiting +- Set secure defaults: 256 max streams, 200 max connections +- Setting any limit to 0 means unbounded/fallback to previous impl + +Defaults are applied automatically when plugins are omitted from +config. + +Includes tests and integration tests. 
+ +Signed-off-by: Ville Vesilehto + +Upstream Patch Reference: https://github.com/coredns/coredns/commit/0d8cbb1a6bcb6bc9c1a489865278b8725fa20812.patch +--- + core/dnsserver/config.go | 12 ++ + core/dnsserver/server_grpc.go | 69 +++++++-- + core/dnsserver/server_https.go | 32 +++- + core/dnsserver/server_https_test.go | 61 ++++++++ + core/dnsserver/server_quic.go | 29 +++- + core/dnsserver/zdirectives.go | 2 + + core/plugin/zplugin.go | 2 + + plugin.cfg | 2 + + plugin/chaos/zowners.go | 2 +- + plugin/grpc_server/README.md | 51 +++++++ + plugin/grpc_server/setup.go | 79 ++++++++++ + plugin/grpc_server/setup_test.go | 169 +++++++++++++++++++++ + plugin/https/README.md | 47 ++++++ + plugin/https/setup.go | 63 ++++++++ + plugin/https/setup_test.go | 144 ++++++++++++++++++ + test/grpc_test.go | 151 +++++++++++++++++- + test/https_test.go | 177 ++++++++++++++++++++++ + vendor/golang.org/x/net/netutil/listen.go | 87 +++++++++++ + vendor/modules.txt | 1 + + 19 files changed, 1156 insertions(+), 24 deletions(-) + create mode 100644 plugin/grpc_server/README.md + create mode 100644 plugin/grpc_server/setup.go + create mode 100644 plugin/grpc_server/setup_test.go + create mode 100644 plugin/https/README.md + create mode 100644 plugin/https/setup.go + create mode 100644 plugin/https/setup_test.go + create mode 100644 test/https_test.go + create mode 100644 vendor/golang.org/x/net/netutil/listen.go + +diff --git a/core/dnsserver/config.go b/core/dnsserver/config.go +index cba5795..9bdf4a7 100644 +--- a/core/dnsserver/config.go ++++ b/core/dnsserver/config.go +@@ -62,6 +62,18 @@ type Config struct { + // This is nil if not specified, allowing for a default to be used. + MaxQUICWorkerPoolSize *int + ++ // MaxGRPCStreams defines the maximum number of concurrent streams per gRPC connection. ++ // This is nil if not specified, allowing for a default to be used. ++ MaxGRPCStreams *int ++ ++ // MaxGRPCConnections defines the maximum number of concurrent gRPC connections. ++ // This is nil if not specified, allowing for a default to be used. ++ MaxGRPCConnections *int ++ ++ // MaxHTTPSConnections defines the maximum number of concurrent HTTPS connections. ++ // This is nil if not specified, allowing for a default to be used. ++ MaxHTTPSConnections *int ++ + // Timeouts for TCP, TLS and HTTPS servers. + ReadTimeout time.Duration + WriteTimeout time.Duration +diff --git a/core/dnsserver/server_grpc.go b/core/dnsserver/server_grpc.go +index 9d7a95a..5149436 100644 +--- a/core/dnsserver/server_grpc.go ++++ b/core/dnsserver/server_grpc.go +@@ -15,17 +15,35 @@ import ( + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + "github.com/miekg/dns" + "github.com/opentracing/opentracing-go" ++ "golang.org/x/net/netutil" + "google.golang.org/grpc" + "google.golang.org/grpc/peer" + ) + ++const ( ++ // maxDNSMessageBytes is the maximum size of a DNS message on the wire. ++ maxDNSMessageBytes = dns.MaxMsgSize ++ ++ // maxProtobufPayloadBytes accounts for protobuf overhead. ++ // Field tag=1 (1 byte) + length varint for 65535 (3 bytes) = 4 bytes total ++ maxProtobufPayloadBytes = maxDNSMessageBytes + 4 ++ ++ // DefaultGRPCMaxStreams is the default maximum number of concurrent streams per connection. ++ DefaultGRPCMaxStreams = 256 ++ ++ // DefaultGRPCMaxConnections is the default maximum number of concurrent connections. ++ DefaultGRPCMaxConnections = 200 ++) ++ + // ServergRPC represents an instance of a DNS-over-gRPC server. 
+ type ServergRPC struct { + *Server + *pb.UnimplementedDnsServiceServer +- grpcServer *grpc.Server +- listenAddr net.Addr +- tlsConfig *tls.Config ++ grpcServer *grpc.Server ++ listenAddr net.Addr ++ tlsConfig *tls.Config ++ maxStreams int ++ maxConnections int + } + + // NewServergRPC returns a new CoreDNS GRPC server and compiles all plugin in to it. +@@ -49,7 +67,22 @@ func NewServergRPC(addr string, group []*Config) (*ServergRPC, error) { + tlsConfig.NextProtos = []string{"h2"} + } + +- return &ServergRPC{Server: s, tlsConfig: tlsConfig}, nil ++ maxStreams := DefaultGRPCMaxStreams ++ if len(group) > 0 && group[0] != nil && group[0].MaxGRPCStreams != nil { ++ maxStreams = *group[0].MaxGRPCStreams ++ } ++ ++ maxConnections := DefaultGRPCMaxConnections ++ if len(group) > 0 && group[0] != nil && group[0].MaxGRPCConnections != nil { ++ maxConnections = *group[0].MaxGRPCConnections ++ } ++ ++ return &ServergRPC{ ++ Server: s, ++ tlsConfig: tlsConfig, ++ maxStreams: maxStreams, ++ maxConnections: maxConnections, ++ }, nil + } + + // Compile-time check to ensure Server implements the caddy.GracefulServer interface +@@ -61,21 +94,36 @@ func (s *ServergRPC) Serve(l net.Listener) error { + s.listenAddr = l.Addr() + s.m.Unlock() + ++ serverOpts := []grpc.ServerOption{ ++ grpc.MaxRecvMsgSize(maxProtobufPayloadBytes), ++ grpc.MaxSendMsgSize(maxProtobufPayloadBytes), ++ } ++ ++ // Only set MaxConcurrentStreams if not unbounded (0) ++ if s.maxStreams > 0 { ++ serverOpts = append(serverOpts, grpc.MaxConcurrentStreams(uint32(s.maxStreams))) ++ } ++ + if s.Tracer() != nil { + onlyIfParent := func(parentSpanCtx opentracing.SpanContext, method string, req, resp interface{}) bool { + return parentSpanCtx != nil + } +- intercept := otgrpc.OpenTracingServerInterceptor(s.Tracer(), otgrpc.IncludingSpans(onlyIfParent)) +- s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(intercept)) +- } else { +- s.grpcServer = grpc.NewServer() ++ serverOpts = append(serverOpts, grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(s.Tracer(), otgrpc.IncludingSpans(onlyIfParent)))) + } + ++ s.grpcServer = grpc.NewServer(serverOpts...) ++ + pb.RegisterDnsServiceServer(s.grpcServer, s) + + if s.tlsConfig != nil { + l = tls.NewListener(l, s.tlsConfig) + } ++ ++ // Wrap listener to limit concurrent connections ++ if s.maxConnections > 0 { ++ l = netutil.LimitListener(l, s.maxConnections) ++ } ++ + return s.grpcServer.Serve(l) + } + +@@ -122,7 +170,10 @@ func (s *ServergRPC) Stop() (err error) { + // any normal server. We use a custom responseWriter to pick up the bytes we need to write + // back to the client as a protobuf. + func (s *ServergRPC) Query(ctx context.Context, in *pb.DnsPacket) (*pb.DnsPacket, error) { +- msg := new(dns.Msg) ++ if len(in.GetMsg()) > dns.MaxMsgSize { ++ return nil, fmt.Errorf("dns message exceeds size limit: %d", len(in.GetMsg())) ++ } ++ msg := new(dns.Msg) + err := msg.Unpack(in.Msg) + if err != nil { + return nil, err +diff --git a/core/dnsserver/server_https.go b/core/dnsserver/server_https.go +index 09c7d62..f92b1b4 100644 +--- a/core/dnsserver/server_https.go ++++ b/core/dnsserver/server_https.go +@@ -18,15 +18,23 @@ import ( + "github.com/coredns/coredns/plugin/pkg/response" + "github.com/coredns/coredns/plugin/pkg/reuseport" + "github.com/coredns/coredns/plugin/pkg/transport" ++ ++ "golang.org/x/net/netutil" ++) ++ ++const ( ++ // DefaultHTTPSMaxConnections is the default maximum number of concurrent connections. 
++ DefaultHTTPSMaxConnections = 200 + ) + + // ServerHTTPS represents an instance of a DNS-over-HTTPS server. + type ServerHTTPS struct { + *Server +- httpsServer *http.Server +- listenAddr net.Addr +- tlsConfig *tls.Config +- validRequest func(*http.Request) bool ++ httpsServer *http.Server ++ listenAddr net.Addr ++ tlsConfig *tls.Config ++ validRequest func(*http.Request) bool ++ maxConnections int + } + + // loggerAdapter is a simple adapter around CoreDNS logger made to implement io.Writer in order to log errors from HTTP server +@@ -80,8 +88,17 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) { + IdleTimeout: s.idleTimeout, + ErrorLog: stdlog.New(&loggerAdapter{}, "", 0), + } ++ maxConnections := DefaultHTTPSMaxConnections ++ if len(group) > 0 && group[0] != nil && group[0].MaxHTTPSConnections != nil { ++ maxConnections = *group[0].MaxHTTPSConnections ++ } ++ + sh := &ServerHTTPS{ +- Server: s, tlsConfig: tlsConfig, httpsServer: srv, validRequest: validator, ++ Server: s, ++ tlsConfig: tlsConfig, ++ httpsServer: srv, ++ validRequest: validator, ++ maxConnections: maxConnections, + } + sh.httpsServer.Handler = sh + +@@ -97,6 +114,11 @@ func (s *ServerHTTPS) Serve(l net.Listener) error { + s.listenAddr = l.Addr() + s.m.Unlock() + ++ // Wrap listener to limit concurrent connections (before TLS) ++ if s.maxConnections > 0 { ++ l = netutil.LimitListener(l, s.maxConnections) ++ } ++ + if s.tlsConfig != nil { + l = tls.NewListener(l, s.tlsConfig) + } +diff --git a/core/dnsserver/server_https_test.go b/core/dnsserver/server_https_test.go +index 0099681..4a19513 100644 +--- a/core/dnsserver/server_https_test.go ++++ b/core/dnsserver/server_https_test.go +@@ -65,3 +65,64 @@ func TestCustomHTTPRequestValidator(t *testing.T) { + }) + } + } ++ ++func TestNewServerHTTPSWithCustomLimits(t *testing.T) { ++ maxConnections := 100 ++ c := Config{ ++ Zone: "example.com.", ++ Transport: "https", ++ TLSConfig: &tls.Config{}, ++ ListenHosts: []string{"127.0.0.1"}, ++ Port: "443", ++ MaxHTTPSConnections: &maxConnections, ++ } ++ ++ server, err := NewServerHTTPS("127.0.0.1:443", []*Config{&c}) ++ if err != nil { ++ t.Fatalf("NewServerHTTPS() with custom limits failed: %v", err) ++ } ++ ++ if server.maxConnections != maxConnections { ++ t.Errorf("Expected maxConnections = %d, got %d", maxConnections, server.maxConnections) ++ } ++} ++ ++func TestNewServerHTTPSDefaults(t *testing.T) { ++ c := Config{ ++ Zone: "example.com.", ++ Transport: "https", ++ TLSConfig: &tls.Config{}, ++ ListenHosts: []string{"127.0.0.1"}, ++ Port: "443", ++ } ++ ++ server, err := NewServerHTTPS("127.0.0.1:443", []*Config{&c}) ++ if err != nil { ++ t.Fatalf("NewServerHTTPS() failed: %v", err) ++ } ++ ++ if server.maxConnections != DefaultHTTPSMaxConnections { ++ t.Errorf("Expected default maxConnections = %d, got %d", DefaultHTTPSMaxConnections, server.maxConnections) ++ } ++} ++ ++func TestNewServerHTTPSZeroLimits(t *testing.T) { ++ zero := 0 ++ c := Config{ ++ Zone: "example.com.", ++ Transport: "https", ++ TLSConfig: &tls.Config{}, ++ ListenHosts: []string{"127.0.0.1"}, ++ Port: "443", ++ MaxHTTPSConnections: &zero, ++ } ++ ++ server, err := NewServerHTTPS("127.0.0.1:443", []*Config{&c}) ++ if err != nil { ++ t.Fatalf("NewServerHTTPS() with zero limits failed: %v", err) ++ } ++ ++ if server.maxConnections != 0 { ++ t.Errorf("Expected maxConnections = 0, got %d", server.maxConnections) ++ } ++} +diff --git a/core/dnsserver/server_quic.go b/core/dnsserver/server_quic.go +index a744cd0..ed362b5 100644 +--- 
a/core/dnsserver/server_quic.go ++++ b/core/dnsserver/server_quic.go +@@ -146,12 +146,29 @@ func (s *ServerQUIC) serveQUICConnection(conn quic.Connection) { + return + } + +- // Use a bounded worker pool +- s.streamProcessPool <- struct{}{} // Acquire a worker slot, may block +- go func(st quic.Stream, cn quic.Connection) { +- defer func() { <-s.streamProcessPool }() // Release worker slot +- s.serveQUICStream(st, cn) +- }(stream, conn) ++ // Use a bounded worker pool with context cancellation ++ select { ++ case s.streamProcessPool <- struct{}{}: ++ // Got worker slot immediately ++ go func(st quic.Stream, cn quic.Connection) { ++ defer func() { <-s.streamProcessPool }() // Release worker slot ++ s.serveQUICStream(st, cn) ++ }(stream, conn) ++ default: ++ // Worker pool full, check for context cancellation ++ go func(st quic.Stream, cn quic.Connection) { ++ select { ++ case s.streamProcessPool <- struct{}{}: ++ // Got worker slot after waiting ++ defer func() { <-s.streamProcessPool }() // Release worker slot ++ s.serveQUICStream(st, cn) ++ case <-conn.Context().Done(): ++ // Connection context was cancelled while waiting ++ st.Close() ++ return ++ } ++ }(stream, conn) ++ } + } + } + +diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go +index eb054c9..bc4b086 100644 +--- a/core/dnsserver/zdirectives.go ++++ b/core/dnsserver/zdirectives.go +@@ -16,6 +16,8 @@ var Directives = []string{ + "cancel", + "tls", + "quic", ++ "grpc_server", ++ "https", + "timeouts", + "reload", + "nsid", +diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go +index 5cdb101..a357ddc 100644 +--- a/core/plugin/zplugin.go ++++ b/core/plugin/zplugin.go +@@ -27,9 +27,11 @@ import ( + _ "github.com/coredns/coredns/plugin/forward" + _ "github.com/coredns/coredns/plugin/geoip" + _ "github.com/coredns/coredns/plugin/grpc" ++ _ "github.com/coredns/coredns/plugin/grpc_server" + _ "github.com/coredns/coredns/plugin/header" + _ "github.com/coredns/coredns/plugin/health" + _ "github.com/coredns/coredns/plugin/hosts" ++ _ "github.com/coredns/coredns/plugin/https" + _ "github.com/coredns/coredns/plugin/k8s_external" + _ "github.com/coredns/coredns/plugin/kubernetes" + _ "github.com/coredns/coredns/plugin/loadbalance" +diff --git a/plugin.cfg b/plugin.cfg +index a01852b..081a57e 100644 +--- a/plugin.cfg ++++ b/plugin.cfg +@@ -25,6 +25,8 @@ geoip:geoip + cancel:cancel + tls:tls + quic:quic ++grpc_server:grpc_server ++https:https + timeouts:timeouts + reload:reload + nsid:nsid +diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go +index 419ca3c..b9553f3 100644 +--- a/plugin/chaos/zowners.go ++++ b/plugin/chaos/zowners.go +@@ -1,4 +1,4 @@ + package chaos + + // Owners are all GitHub handlers of all maintainers. 
+-var Owners = []string{"Tantalor93", "bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "greenpau", "ihac", "inigohu", "isolus", "jameshartig", "johnbelamaric", "miekg", "mqasimsarfraz", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "snebel29", "stp-ip", "superq", "varyoo", "ykhr53", "yongtang", "zouyee"} ++var Owners = []string{"Tantalor93", "bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "greenpau", "ihac", "inigohu", "isolus", "jameshartig", "johnbelamaric", "miekg", "mqasimsarfraz", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "snebel29", "stp-ip", "superq", "thevilledev", "varyoo", "ykhr53", "yongtang", "zouyee"} +diff --git a/plugin/grpc_server/README.md b/plugin/grpc_server/README.md +new file mode 100644 +index 0000000..1a19bb1 +--- /dev/null ++++ b/plugin/grpc_server/README.md +@@ -0,0 +1,51 @@ ++# grpc_server ++ ++## Name ++ ++*grpc_server* - configures DNS-over-gRPC server options. ++ ++## Description ++ ++The *grpc_server* plugin allows you to configure parameters for the DNS-over-gRPC server to fine-tune the security posture and performance of the server. ++ ++This plugin can only be used once per gRPC listener block. ++ ++## Syntax ++ ++```txt ++grpc_server { ++ max_streams POSITIVE_INTEGER ++ max_connections POSITIVE_INTEGER ++} ++``` ++ ++* `max_streams` limits the number of concurrent gRPC streams per connection. This helps prevent unbounded streams on a single connection, exhausting server resources. The default value is 256 if not specified. Set to 0 for unbounded. ++* `max_connections` limits the number of concurrent TCP connections to the gRPC server. The default value is 200 if not specified. Set to 0 for unbounded. ++ ++## Examples ++ ++Set custom limits for maximum streams and connections: ++ ++``` ++grpc://.:8053 { ++ tls cert.pem key.pem ++ grpc_server { ++ max_streams 50 ++ max_connections 100 ++ } ++ whoami ++} ++``` ++ ++Set values to 0 for unbounded, matching CoreDNS behaviour before v1.14.0: ++ ++``` ++grpc://.:8053 { ++ tls cert.pem key.pem ++ grpc_server { ++ max_streams 0 ++ max_connections 0 ++ } ++ whoami ++} ++``` +diff --git a/plugin/grpc_server/setup.go b/plugin/grpc_server/setup.go +new file mode 100644 +index 0000000..0cecd7d +--- /dev/null ++++ b/plugin/grpc_server/setup.go +@@ -0,0 +1,79 @@ ++package grpc_server ++ ++import ( ++ "strconv" ++ ++ "github.com/coredns/caddy" ++ "github.com/coredns/coredns/core/dnsserver" ++ "github.com/coredns/coredns/plugin" ++) ++ ++func init() { ++ caddy.RegisterPlugin("grpc_server", caddy.Plugin{ ++ ServerType: "dns", ++ Action: setup, ++ }) ++} ++ ++func setup(c *caddy.Controller) error { ++ err := parseGRPCServer(c) ++ if err != nil { ++ return plugin.Error("grpc_server", err) ++ } ++ return nil ++} ++ ++func parseGRPCServer(c *caddy.Controller) error { ++ config := dnsserver.GetConfig(c) ++ ++ // Skip the "grpc_server" directive itself ++ c.Next() ++ ++ // Get any arguments on the "grpc_server" line ++ args := c.RemainingArgs() ++ if len(args) > 0 { ++ return c.ArgErr() ++ } ++ ++ // Process all nested directives in the block ++ for c.NextBlock() { ++ switch c.Val() { ++ case "max_streams": ++ args := c.RemainingArgs() ++ if len(args) != 1 { ++ return c.ArgErr() ++ } ++ val, err := strconv.Atoi(args[0]) ++ if err != nil { ++ return c.Errf("invalid max_streams value '%s': %v", args[0], err) ++ } ++ if val < 0 { ++ return c.Errf("max_streams must be a non-negative integer: %d", val) ++ } ++ if config.MaxGRPCStreams != 
nil { ++ return c.Err("max_streams already defined for this server block") ++ } ++ config.MaxGRPCStreams = &val ++ case "max_connections": ++ args := c.RemainingArgs() ++ if len(args) != 1 { ++ return c.ArgErr() ++ } ++ val, err := strconv.Atoi(args[0]) ++ if err != nil { ++ return c.Errf("invalid max_connections value '%s': %v", args[0], err) ++ } ++ if val < 0 { ++ return c.Errf("max_connections must be a non-negative integer: %d", val) ++ } ++ if config.MaxGRPCConnections != nil { ++ return c.Err("max_connections already defined for this server block") ++ } ++ config.MaxGRPCConnections = &val ++ default: ++ return c.Errf("unknown property '%s'", c.Val()) ++ } ++ } ++ ++ return nil ++} +diff --git a/plugin/grpc_server/setup_test.go b/plugin/grpc_server/setup_test.go +new file mode 100644 +index 0000000..75fb749 +--- /dev/null ++++ b/plugin/grpc_server/setup_test.go +@@ -0,0 +1,169 @@ ++package grpc_server ++ ++import ( ++ "fmt" ++ "strings" ++ "testing" ++ ++ "github.com/coredns/caddy" ++ "github.com/coredns/coredns/core/dnsserver" ++) ++ ++func TestSetup(t *testing.T) { ++ tests := []struct { ++ input string ++ shouldErr bool ++ expectedErrContent string ++ expectedMaxStreams *int ++ expectedMaxConnections *int ++ }{ ++ // Valid configurations ++ { ++ input: `grpc_server`, ++ shouldErr: false, ++ }, ++ { ++ input: `grpc_server { ++ }`, ++ shouldErr: false, ++ }, ++ { ++ input: `grpc_server { ++ max_streams 100 ++ }`, ++ shouldErr: false, ++ expectedMaxStreams: intPtr(100), ++ }, ++ { ++ input: `grpc_server { ++ max_connections 200 ++ }`, ++ shouldErr: false, ++ expectedMaxConnections: intPtr(200), ++ }, ++ { ++ input: `grpc_server { ++ max_streams 50 ++ max_connections 100 ++ }`, ++ shouldErr: false, ++ expectedMaxStreams: intPtr(50), ++ expectedMaxConnections: intPtr(100), ++ }, ++ // Zero values (unbounded) ++ { ++ input: `grpc_server { ++ max_streams 0 ++ }`, ++ shouldErr: false, ++ expectedMaxStreams: intPtr(0), ++ }, ++ { ++ input: `grpc_server { ++ max_connections 0 ++ }`, ++ shouldErr: false, ++ expectedMaxConnections: intPtr(0), ++ }, ++ // Error cases ++ { ++ input: `grpc_server { ++ max_streams ++ }`, ++ shouldErr: true, ++ expectedErrContent: "Wrong argument count", ++ }, ++ { ++ input: `grpc_server { ++ max_streams abc ++ }`, ++ shouldErr: true, ++ expectedErrContent: "invalid max_streams value", ++ }, ++ { ++ input: `grpc_server { ++ max_streams -1 ++ }`, ++ shouldErr: true, ++ expectedErrContent: "must be a non-negative integer", ++ }, ++ { ++ input: `grpc_server { ++ max_streams 100 ++ max_streams 200 ++ }`, ++ shouldErr: true, ++ expectedErrContent: "already defined", ++ }, ++ { ++ input: `grpc_server { ++ unknown_option 123 ++ }`, ++ shouldErr: true, ++ expectedErrContent: "unknown property", ++ }, ++ { ++ input: `grpc_server extra_arg`, ++ shouldErr: true, ++ expectedErrContent: "Wrong argument count", ++ }, ++ } ++ ++ for i, test := range tests { ++ c := caddy.NewTestController("dns", test.input) ++ err := setup(c) ++ ++ if test.shouldErr && err == nil { ++ t.Errorf("Test %d (%s): Expected error but got none", i, test.input) ++ continue ++ } ++ ++ if !test.shouldErr && err != nil { ++ t.Errorf("Test %d (%s): Expected no error but got: %v", i, test.input, err) ++ continue ++ } ++ ++ if test.shouldErr && test.expectedErrContent != "" { ++ if !strings.Contains(err.Error(), test.expectedErrContent) { ++ t.Errorf("Test %d (%s): Expected error containing '%s' but got: %v", ++ i, test.input, test.expectedErrContent, err) ++ } ++ continue ++ } ++ ++ if !test.shouldErr { ++ 
config := dnsserver.GetConfig(c) ++ assertIntPtrValue(t, i, test.input, "MaxGRPCStreams", config.MaxGRPCStreams, test.expectedMaxStreams) ++ assertIntPtrValue(t, i, test.input, "MaxGRPCConnections", config.MaxGRPCConnections, test.expectedMaxConnections) ++ } ++ } ++} ++ ++func intPtr(v int) *int { ++ return &v ++} ++ ++func assertIntPtrValue(t *testing.T, testIndex int, testInput, fieldName string, actual, expected *int) { ++ t.Helper() ++ if actual == nil && expected == nil { ++ return ++ } ++ ++ if (actual == nil) != (expected == nil) { ++ t.Errorf("Test %d (%s): Expected %s to be %v, but got %v", ++ testIndex, testInput, fieldName, formatNilableInt(expected), formatNilableInt(actual)) ++ return ++ } ++ ++ if *actual != *expected { ++ t.Errorf("Test %d (%s): Expected %s to be %d, but got %d", ++ testIndex, testInput, fieldName, *expected, *actual) ++ } ++} ++ ++func formatNilableInt(v *int) string { ++ if v == nil { ++ return "nil" ++ } ++ return fmt.Sprintf("%d", *v) ++} +diff --git a/plugin/https/README.md b/plugin/https/README.md +new file mode 100644 +index 0000000..938c2db +--- /dev/null ++++ b/plugin/https/README.md +@@ -0,0 +1,47 @@ ++# https ++ ++## Name ++ ++*https* - configures DNS-over-HTTPS (DoH) server options. ++ ++## Description ++ ++The *https* plugin allows you to configure parameters for the DNS-over-HTTPS (DoH) server to fine-tune the security posture and performance of the server. ++ ++This plugin can only be used once per HTTPS listener block. ++ ++## Syntax ++ ++```txt ++https { ++ max_connections POSITIVE_INTEGER ++} ++``` ++ ++* `max_connections` limits the number of concurrent TCP connections to the HTTPS server. The default value is 200 if not specified. Set to 0 for unbounded. ++ ++## Examples ++ ++Set custom limits for maximum connections: ++ ++``` ++https://.:443 { ++ tls cert.pem key.pem ++ https { ++ max_connections 100 ++ } ++ whoami ++} ++``` ++ ++Set values to 0 for unbounded, matching CoreDNS behaviour before v1.14.0: ++ ++``` ++https://.:443 { ++ tls cert.pem key.pem ++ https { ++ max_connections 0 ++ } ++ whoami ++} ++``` +diff --git a/plugin/https/setup.go b/plugin/https/setup.go +new file mode 100644 +index 0000000..727a378 +--- /dev/null ++++ b/plugin/https/setup.go +@@ -0,0 +1,63 @@ ++package https ++ ++import ( ++ "strconv" ++ ++ "github.com/coredns/caddy" ++ "github.com/coredns/coredns/core/dnsserver" ++ "github.com/coredns/coredns/plugin" ++) ++ ++func init() { ++ caddy.RegisterPlugin("https", caddy.Plugin{ ++ ServerType: "dns", ++ Action: setup, ++ }) ++} ++ ++func setup(c *caddy.Controller) error { ++ err := parseDOH(c) ++ if err != nil { ++ return plugin.Error("https", err) ++ } ++ return nil ++} ++ ++func parseDOH(c *caddy.Controller) error { ++ config := dnsserver.GetConfig(c) ++ ++ // Skip the "https" directive itself ++ c.Next() ++ ++ // Get any arguments on the "https" line ++ args := c.RemainingArgs() ++ if len(args) > 0 { ++ return c.ArgErr() ++ } ++ ++ // Process all nested directives in the block ++ for c.NextBlock() { ++ switch c.Val() { ++ case "max_connections": ++ args := c.RemainingArgs() ++ if len(args) != 1 { ++ return c.ArgErr() ++ } ++ val, err := strconv.Atoi(args[0]) ++ if err != nil { ++ return c.Errf("invalid max_connections value '%s': %v", args[0], err) ++ } ++ if val < 0 { ++ return c.Errf("max_connections must be a non-negative integer: %d", val) ++ } ++ if config.MaxHTTPSConnections != nil { ++ return c.Err("max_connections already defined for this server block") ++ } ++ config.MaxHTTPSConnections = &val ++ 
default: ++ return c.Errf("unknown property '%s'", c.Val()) ++ } ++ } ++ ++ return nil ++} +diff --git a/plugin/https/setup_test.go b/plugin/https/setup_test.go +new file mode 100644 +index 0000000..cb7020a +--- /dev/null ++++ b/plugin/https/setup_test.go +@@ -0,0 +1,144 @@ ++package https ++ ++import ( ++ "fmt" ++ "strings" ++ "testing" ++ ++ "github.com/coredns/caddy" ++ "github.com/coredns/coredns/core/dnsserver" ++) ++ ++func TestSetup(t *testing.T) { ++ tests := []struct { ++ input string ++ shouldErr bool ++ expectedErrContent string ++ expectedMaxConnections *int ++ }{ ++ // Valid configurations ++ { ++ input: `https`, ++ shouldErr: false, ++ }, ++ { ++ input: `https { ++ }`, ++ shouldErr: false, ++ }, ++ { ++ input: `https { ++ max_connections 200 ++ }`, ++ shouldErr: false, ++ expectedMaxConnections: intPtr(200), ++ }, ++ // Zero values (unbounded) ++ { ++ input: `https { ++ max_connections 0 ++ }`, ++ shouldErr: false, ++ expectedMaxConnections: intPtr(0), ++ }, ++ // Error cases ++ { ++ input: `https { ++ max_connections ++ }`, ++ shouldErr: true, ++ expectedErrContent: "Wrong argument count", ++ }, ++ { ++ input: `https { ++ max_connections abc ++ }`, ++ shouldErr: true, ++ expectedErrContent: "invalid max_connections value", ++ }, ++ { ++ input: `https { ++ max_connections -1 ++ }`, ++ shouldErr: true, ++ expectedErrContent: "must be a non-negative integer", ++ }, ++ { ++ input: `https { ++ max_connections 100 ++ max_connections 200 ++ }`, ++ shouldErr: true, ++ expectedErrContent: "already defined", ++ }, ++ { ++ input: `https { ++ unknown_option 123 ++ }`, ++ shouldErr: true, ++ expectedErrContent: "unknown property", ++ }, ++ { ++ input: `https extra_arg`, ++ shouldErr: true, ++ expectedErrContent: "Wrong argument count", ++ }, ++ } ++ ++ for i, test := range tests { ++ c := caddy.NewTestController("dns", test.input) ++ err := setup(c) ++ ++ if test.shouldErr && err == nil { ++ t.Errorf("Test %d (%s): Expected error but got none", i, test.input) ++ continue ++ } ++ ++ if !test.shouldErr && err != nil { ++ t.Errorf("Test %d (%s): Expected no error but got: %v", i, test.input, err) ++ continue ++ } ++ ++ if test.shouldErr && test.expectedErrContent != "" { ++ if !strings.Contains(err.Error(), test.expectedErrContent) { ++ t.Errorf("Test %d (%s): Expected error containing '%s' but got: %v", ++ i, test.input, test.expectedErrContent, err) ++ } ++ continue ++ } ++ ++ if !test.shouldErr { ++ config := dnsserver.GetConfig(c) ++ assertIntPtrValue(t, i, test.input, "MaxHTTPSConnections", config.MaxHTTPSConnections, test.expectedMaxConnections) ++ } ++ } ++} ++ ++func intPtr(v int) *int { ++ return &v ++} ++ ++func assertIntPtrValue(t *testing.T, testIndex int, testInput, fieldName string, actual, expected *int) { ++ t.Helper() ++ if actual == nil && expected == nil { ++ return ++ } ++ ++ if (actual == nil) != (expected == nil) { ++ t.Errorf("Test %d (%s): Expected %s to be %v, but got %v", ++ testIndex, testInput, fieldName, formatNilableInt(expected), formatNilableInt(actual)) ++ return ++ } ++ ++ if *actual != *expected { ++ t.Errorf("Test %d (%s): Expected %s to be %d, but got %d", ++ testIndex, testInput, fieldName, *expected, *actual) ++ } ++} ++ ++func formatNilableInt(v *int) string { ++ if v == nil { ++ return "nil" ++ } ++ return fmt.Sprintf("%d", *v) ++} +diff --git a/test/grpc_test.go b/test/grpc_test.go +index 8c3b032..3e35045 100644 +--- a/test/grpc_test.go ++++ b/test/grpc_test.go +@@ -2,19 +2,40 @@ package test + + import ( + "context" ++ "crypto/tls" ++ "net" + 
"testing" ++ "time" + + "github.com/coredns/coredns/pb" + + "github.com/miekg/dns" + "google.golang.org/grpc" ++ "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + ) + ++var grpcCorefile = `grpc://.:0 { ++ whoami ++}` ++ ++var grpcLimitCorefile = `grpc://.:0 { ++ grpc_server { ++ max_streams 2 ++ } ++ whoami ++}` ++ ++var grpcConnectionLimitCorefile = `grpc://.:0 { ++ tls ../plugin/tls/test_cert.pem ../plugin/tls/test_key.pem ../plugin/tls/test_ca.pem ++ grpc_server { ++ max_connections 2 ++ } ++ whoami ++}` ++ + func TestGrpc(t *testing.T) { +- corefile := `grpc://.:0 { +- whoami +- }` ++ corefile := grpcCorefile + + g, _, tcp, err := CoreDNSServerAndPorts(corefile) + if err != nil { +@@ -53,3 +74,127 @@ func TestGrpc(t *testing.T) { + t.Errorf("Expected 2 RRs in additional section, but got %d", len(d.Extra)) + } + } ++ ++// TestGRPCWithLimits tests that the server starts and works with configured limits ++func TestGRPCWithLimits(t *testing.T) { ++ g, _, tcp, err := CoreDNSServerAndPorts(grpcLimitCorefile) ++ if err != nil { ++ t.Fatalf("Could not get CoreDNS serving instance: %s", err) ++ } ++ defer g.Stop() ++ ++ conn, err := grpc.NewClient(tcp, grpc.WithTransportCredentials(insecure.NewCredentials())) ++ if err != nil { ++ t.Fatalf("Expected no error but got: %s", err) ++ } ++ defer conn.Close() ++ ++ client := pb.NewDnsServiceClient(conn) ++ ++ m := new(dns.Msg) ++ m.SetQuestion("whoami.example.org.", dns.TypeA) ++ msg, _ := m.Pack() ++ ++ reply, err := client.Query(context.Background(), &pb.DnsPacket{Msg: msg}) ++ if err != nil { ++ t.Fatalf("Query failed: %s", err) ++ } ++ ++ d := new(dns.Msg) ++ if err := d.Unpack(reply.GetMsg()); err != nil { ++ t.Fatalf("Failed to unpack: %s", err) ++ } ++ ++ if d.Rcode != dns.RcodeSuccess { ++ t.Errorf("Expected success but got %d", d.Rcode) ++ } ++} ++ ++// TestGRPCConnectionLimit tests that connection limits are enforced ++func TestGRPCConnectionLimit(t *testing.T) { ++ g, _, tcp, err := CoreDNSServerAndPorts(grpcConnectionLimitCorefile) ++ if err != nil { ++ t.Fatalf("Could not get CoreDNS serving instance: %s", err) ++ } ++ defer g.Stop() ++ ++ const maxConns = 2 ++ ++ // Create TLS connections to hold them open ++ tlsConfig := &tls.Config{InsecureSkipVerify: true} ++ conns := make([]net.Conn, 0, maxConns+1) ++ defer func() { ++ for _, c := range conns { ++ c.Close() ++ } ++ }() ++ ++ // Open connections up to the limit - these should succeed ++ for i := range maxConns { ++ conn, err := tls.Dial("tcp", tcp, tlsConfig) ++ if err != nil { ++ t.Fatalf("Connection %d failed (should succeed): %v", i+1, err) ++ } ++ conns = append(conns, conn) ++ } ++ ++ // Try to open more connections beyond the limit - should timeout ++ conn, err := tls.DialWithDialer( ++ &net.Dialer{Timeout: 100 * time.Millisecond}, ++ "tcp", tcp, tlsConfig, ++ ) ++ if err == nil { ++ conn.Close() ++ t.Fatal("Connection beyond limit should have timed out") ++ } ++ ++ // Close one connection and verify a new one can be established ++ conns[0].Close() ++ conns = conns[1:] ++ ++ time.Sleep(10 * time.Millisecond) ++ ++ conn, err = tls.Dial("tcp", tcp, tlsConfig) ++ if err != nil { ++ t.Fatalf("Connection after freeing slot failed: %v", err) ++ } ++ conns = append(conns, conn) ++} ++ ++// TestGRPCTLSWithLimits tests that gRPC with TLS starts and works with configured limits ++func TestGRPCTLSWithLimits(t *testing.T) { ++ g, _, tcp, err := CoreDNSServerAndPorts(grpcConnectionLimitCorefile) ++ if err != nil { ++ t.Fatalf("Could not get CoreDNS 
serving instance: %s", err) ++ } ++ defer g.Stop() ++ ++ tlsConfig := &tls.Config{InsecureSkipVerify: true} ++ creds := credentials.NewTLS(tlsConfig) ++ ++ conn, err := grpc.NewClient(tcp, grpc.WithTransportCredentials(creds)) ++ if err != nil { ++ t.Fatalf("Expected no error but got: %s", err) ++ } ++ defer conn.Close() ++ ++ client := pb.NewDnsServiceClient(conn) ++ ++ m := new(dns.Msg) ++ m.SetQuestion("whoami.example.org.", dns.TypeA) ++ msg, _ := m.Pack() ++ ++ reply, err := client.Query(context.Background(), &pb.DnsPacket{Msg: msg}) ++ if err != nil { ++ t.Fatalf("Query failed: %s", err) ++ } ++ ++ d := new(dns.Msg) ++ if err := d.Unpack(reply.GetMsg()); err != nil { ++ t.Fatalf("Failed to unpack: %s", err) ++ } ++ ++ if d.Rcode != dns.RcodeSuccess { ++ t.Errorf("Expected success but got %d", d.Rcode) ++ } ++} +diff --git a/test/https_test.go b/test/https_test.go +new file mode 100644 +index 0000000..2bf4940 +--- /dev/null ++++ b/test/https_test.go +@@ -0,0 +1,177 @@ ++package test ++ ++import ( ++ "bytes" ++ "crypto/tls" ++ "io" ++ "net" ++ "net/http" ++ "testing" ++ "time" ++ ++ "github.com/miekg/dns" ++) ++ ++var httpsCorefile = `https://.:0 { ++ tls ../plugin/tls/test_cert.pem ../plugin/tls/test_key.pem ../plugin/tls/test_ca.pem ++ whoami ++}` ++ ++var httpsLimitCorefile = `https://.:0 { ++ tls ../plugin/tls/test_cert.pem ../plugin/tls/test_key.pem ../plugin/tls/test_ca.pem ++ https { ++ max_connections 2 ++ } ++ whoami ++}` ++ ++func TestHTTPS(t *testing.T) { ++ s, _, tcp, err := CoreDNSServerAndPorts(httpsCorefile) ++ if err != nil { ++ t.Fatalf("Could not get CoreDNS serving instance: %s", err) ++ } ++ defer s.Stop() ++ ++ // Create HTTPS client with TLS config ++ tlsConfig := &tls.Config{ ++ InsecureSkipVerify: true, ++ } ++ client := &http.Client{ ++ Transport: &http.Transport{ ++ TLSClientConfig: tlsConfig, ++ }, ++ Timeout: 5 * time.Second, ++ } ++ ++ // Create DNS query ++ m := new(dns.Msg) ++ m.SetQuestion("whoami.example.org.", dns.TypeA) ++ msg, err := m.Pack() ++ if err != nil { ++ t.Fatalf("Failed to pack DNS message: %v", err) ++ } ++ ++ // Make DoH request ++ url := "https://" + tcp + "/dns-query" ++ req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(msg)) ++ if err != nil { ++ t.Fatalf("Failed to create request: %v", err) ++ } ++ req.Header.Set("Content-Type", "application/dns-message") ++ req.Header.Set("Accept", "application/dns-message") ++ ++ resp, err := client.Do(req) ++ if err != nil { ++ t.Fatalf("Failed to make request: %v", err) ++ } ++ defer resp.Body.Close() ++ ++ if resp.StatusCode != http.StatusOK { ++ t.Fatalf("Expected status 200, got %d", resp.StatusCode) ++ } ++ ++ body, err := io.ReadAll(resp.Body) ++ if err != nil { ++ t.Fatalf("Failed to read response: %v", err) ++ } ++ ++ d := new(dns.Msg) ++ err = d.Unpack(body) ++ if err != nil { ++ t.Fatalf("Failed to unpack response: %v", err) ++ } ++ ++ if d.Rcode != dns.RcodeSuccess { ++ t.Errorf("Expected success but got %d", d.Rcode) ++ } ++ ++ if len(d.Extra) != 2 { ++ t.Errorf("Expected 2 RRs in additional section, but got %d", len(d.Extra)) ++ } ++} ++ ++// TestHTTPSWithLimits tests that the server starts and works with configured limits ++func TestHTTPSWithLimits(t *testing.T) { ++ s, _, tcp, err := CoreDNSServerAndPorts(httpsLimitCorefile) ++ if err != nil { ++ t.Fatalf("Could not get CoreDNS serving instance: %s", err) ++ } ++ defer s.Stop() ++ ++ client := &http.Client{ ++ Transport: &http.Transport{ ++ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, ++ }, ++ Timeout: 5 * 
time.Second, ++ } ++ ++ m := new(dns.Msg) ++ m.SetQuestion("whoami.example.org.", dns.TypeA) ++ msg, _ := m.Pack() ++ ++ req, _ := http.NewRequest(http.MethodPost, "https://"+tcp+"/dns-query", bytes.NewReader(msg)) ++ req.Header.Set("Content-Type", "application/dns-message") ++ ++ resp, err := client.Do(req) ++ if err != nil { ++ t.Fatalf("Request failed: %s", err) ++ } ++ defer resp.Body.Close() ++ ++ if resp.StatusCode != http.StatusOK { ++ t.Fatalf("Expected status 200, got %d", resp.StatusCode) ++ } ++} ++ ++// TestHTTPSConnectionLimit tests that connection limits are enforced ++func TestHTTPSConnectionLimit(t *testing.T) { ++ s, _, tcp, err := CoreDNSServerAndPorts(httpsLimitCorefile) ++ if err != nil { ++ t.Fatalf("Could not get CoreDNS serving instance: %s", err) ++ } ++ defer s.Stop() ++ ++ const maxConns = 2 ++ const totalConns = 4 ++ ++ // Create raw TLS connections to hold them open ++ conns := make([]net.Conn, 0, totalConns) ++ defer func() { ++ for _, c := range conns { ++ c.Close() ++ } ++ }() ++ ++ // Open connections up to the limit - these should succeed ++ for i := range maxConns { ++ conn, err := tls.Dial("tcp", tcp, &tls.Config{InsecureSkipVerify: true}) ++ if err != nil { ++ t.Fatalf("Connection %d failed (should succeed): %v", i+1, err) ++ } ++ conns = append(conns, conn) ++ } ++ ++ // Try to open more connections beyond the limit ++ // The LimitListener blocks Accept() until a slot is free, so Dial with timeout should fail ++ conn, err := tls.DialWithDialer( ++ &net.Dialer{Timeout: 100 * time.Millisecond}, ++ "tcp", tcp, ++ &tls.Config{InsecureSkipVerify: true}, ++ ) ++ if err == nil { ++ conn.Close() ++ t.Fatal("Connection beyond limit should have timed out") ++ } ++ ++ // Close one connection and verify a new one can be established ++ conns[0].Close() ++ conns = conns[1:] ++ ++ time.Sleep(10 * time.Millisecond) // Give the listener time to accept ++ ++ conn, err = tls.Dial("tcp", tcp, &tls.Config{InsecureSkipVerify: true}) ++ if err != nil { ++ t.Fatalf("Connection after freeing slot failed: %v", err) ++ } ++ conns = append(conns, conn) ++} +diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go +new file mode 100644 +index 0000000..f8b779e +--- /dev/null ++++ b/vendor/golang.org/x/net/netutil/listen.go +@@ -0,0 +1,87 @@ ++// Copyright 2013 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package netutil provides network utility functions, complementing the more ++// common ones in the net package. ++package netutil // import "golang.org/x/net/netutil" ++ ++import ( ++ "net" ++ "sync" ++) ++ ++// LimitListener returns a Listener that accepts at most n simultaneous ++// connections from the provided Listener. ++func LimitListener(l net.Listener, n int) net.Listener { ++ return &limitListener{ ++ Listener: l, ++ sem: make(chan struct{}, n), ++ done: make(chan struct{}), ++ } ++} ++ ++type limitListener struct { ++ net.Listener ++ sem chan struct{} ++ closeOnce sync.Once // ensures the done chan is only closed once ++ done chan struct{} // no values sent; closed when Close is called ++} ++ ++// acquire acquires the limiting semaphore. Returns true if successfully ++// acquired, false if the listener is closed and the semaphore is not ++// acquired. 
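[Editor's aside — illustration only, not part of the vendored diff above or below.] The `LimitListener` helper being vendored here wraps a `net.Listener` in a semaphore so that at most n connections are accepted at once; further `Accept` calls block until a slot is released, which is the behaviour the connection-limit tests above rely on. A minimal, self-contained sketch of how a caller could use it follows; the address, limit, and handler are invented for the example and are not taken from the CoreDNS sources.

```go
// Sketch: capping concurrent connections with the vendored
// golang.org/x/net/netutil helper. Address, limit and handler are
// placeholders chosen for the example.
package main

import (
	"log"
	"net"
	"net/http"

	"golang.org/x/net/netutil"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	// At most 2 connections are served concurrently; a third dial is not
	// accepted by the server until one of the first two closes.
	limited := netutil.LimitListener(ln, 2)
	log.Fatal(http.Serve(limited, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok\n"))
	})))
}
```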
++func (l *limitListener) acquire() bool { ++ select { ++ case <-l.done: ++ return false ++ case l.sem <- struct{}{}: ++ return true ++ } ++} ++func (l *limitListener) release() { <-l.sem } ++ ++func (l *limitListener) Accept() (net.Conn, error) { ++ if !l.acquire() { ++ // If the semaphore isn't acquired because the listener was closed, expect ++ // that this call to accept won't block, but immediately return an error. ++ // If it instead returns a spurious connection (due to a bug in the ++ // Listener, such as https://golang.org/issue/50216), we immediately close ++ // it and try again. Some buggy Listener implementations (like the one in ++ // the aforementioned issue) seem to assume that Accept will be called to ++ // completion, and may otherwise fail to clean up the client end of pending ++ // connections. ++ for { ++ c, err := l.Listener.Accept() ++ if err != nil { ++ return nil, err ++ } ++ c.Close() ++ } ++ } ++ ++ c, err := l.Listener.Accept() ++ if err != nil { ++ l.release() ++ return nil, err ++ } ++ return &limitListenerConn{Conn: c, release: l.release}, nil ++} ++ ++func (l *limitListener) Close() error { ++ err := l.Listener.Close() ++ l.closeOnce.Do(func() { close(l.done) }) ++ return err ++} ++ ++type limitListenerConn struct { ++ net.Conn ++ releaseOnce sync.Once ++ release func() ++} ++ ++func (l *limitListenerConn) Close() error { ++ err := l.Conn.Close() ++ l.releaseOnce.Do(l.release) ++ return err ++} +diff --git a/vendor/modules.txt b/vendor/modules.txt +index bc85814..528f1e2 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -717,6 +717,7 @@ golang.org/x/net/internal/socket + golang.org/x/net/internal/timeseries + golang.org/x/net/ipv4 + golang.org/x/net/ipv6 ++golang.org/x/net/netutil + golang.org/x/net/trace + # golang.org/x/oauth2 v0.23.0 + ## explicit; go 1.18 +-- +2.45.4 + diff --git a/SPECS/coredns/CVE-2025-68156.patch b/SPECS/coredns/CVE-2025-68156.patch new file mode 100644 index 00000000000..bf8ff967023 --- /dev/null +++ b/SPECS/coredns/CVE-2025-68156.patch @@ -0,0 +1,147 @@ +From 45c789c5baed4906bf5ac28281d58a373f3045b9 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Fri, 19 Dec 2025 11:54:37 +0000 +Subject: [PATCH] fix(builtin): limit recursion depth + +Add builtin.MaxDepth (default 10k) to prevent stack overflows when +processing deeply nested or cyclic structures in builtin functions. +The functions flatten, min, max, mean, and median now return a +"recursion depth exceeded" error instead of crashing the runtime. + +Signed-off-by: Ville Vesilehto +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/expr-lang/expr/pull/870.patch +--- + .../expr-lang/expr/builtin/builtin.go | 13 ++++++++---- + .../github.com/expr-lang/expr/builtin/lib.go | 21 +++++++++++++------ + 2 files changed, 24 insertions(+), 10 deletions(-) + +diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go +index cc6f197..0b6d565 100644 +--- a/vendor/github.com/expr-lang/expr/builtin/builtin.go ++++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go +@@ -3,6 +3,7 @@ package builtin + import ( + "encoding/base64" + "encoding/json" ++ "errors" + "fmt" + "reflect" + "sort" +@@ -16,6 +17,10 @@ import ( + var ( + Index map[string]int + Names []string ++ ++ // MaxDepth limits the recursion depth for nested structures. 
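[Editor's aside — illustration only, not part of the CVE-2025-68156 patch hunks.] The fix threads an explicit depth counter through the recursive aggregate helpers (`minMax`, `mean`, `median`) and returns a "recursion depth exceeded" error once the counter passes `MaxDepth`, instead of letting deeply nested or cyclic input blow the stack. A toy version of the same guard, using hypothetical names rather than the expr-lang API, looks like this:

```go
// Toy depth-guard sketch (names are hypothetical, not from expr-lang/expr):
// recurse with an explicit depth parameter and stop once it passes a limit.
package main

import (
	"errors"
	"fmt"
)

const maxDepth = 10000

var errMaxDepth = errors.New("recursion depth exceeded")

// sumNested adds up ints in arbitrarily nested []any slices.
func sumNested(v any, depth int) (int, error) {
	if depth > maxDepth {
		return 0, errMaxDepth
	}
	switch t := v.(type) {
	case int:
		return t, nil
	case []any:
		total := 0
		for _, e := range t {
			n, err := sumNested(e, depth+1) // depth grows only when nesting does
			if err != nil {
				return 0, err
			}
			total += n
		}
		return total, nil
	default:
		return 0, fmt.Errorf("unsupported type %T", t)
	}
}

func main() {
	got, err := sumNested([]any{1, []any{2, 3}, 4}, 0)
	fmt.Println(got, err) // 10 <nil>
}
```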
++ MaxDepth = 10000 ++ ErrorMaxDepth = errors.New("recursion depth exceeded") + ) + + func init() { +@@ -377,7 +382,7 @@ var Builtins = []*Function{ + { + Name: "max", + Func: func(args ...any) (any, error) { +- return minMax("max", runtime.Less, args...) ++ return minMax("max", runtime.Less, 0, args...) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + return validateAggregateFunc("max", args) +@@ -386,7 +391,7 @@ var Builtins = []*Function{ + { + Name: "min", + Func: func(args ...any) (any, error) { +- return minMax("min", runtime.More, args...) ++ return minMax("min", runtime.More, 0, args...) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + return validateAggregateFunc("min", args) +@@ -395,7 +400,7 @@ var Builtins = []*Function{ + { + Name: "mean", + Func: func(args ...any) (any, error) { +- count, sum, err := mean(args...) ++ count, sum, err := mean(0, args...) + if err != nil { + return nil, err + } +@@ -411,7 +416,7 @@ var Builtins = []*Function{ + { + Name: "median", + Func: func(args ...any) (any, error) { +- values, err := median(args...) ++ values, err := median(0, args...) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go +index e3cd61b..4dca4ee 100644 +--- a/vendor/github.com/expr-lang/expr/builtin/lib.go ++++ b/vendor/github.com/expr-lang/expr/builtin/lib.go +@@ -258,7 +258,10 @@ func String(arg any) any { + return fmt.Sprintf("%v", arg) + } + +-func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { ++func minMax(name string, fn func(any, any) bool, depth int, args ...any) (any, error) { ++ if depth > MaxDepth { ++ return nil, ErrorMaxDepth ++ } + var val any + for _, arg := range args { + rv := reflect.ValueOf(deref.Deref(arg)) +@@ -266,7 +269,7 @@ func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { + case reflect.Array, reflect.Slice: + size := rv.Len() + for i := 0; i < size; i++ { +- elemVal, err := minMax(name, fn, rv.Index(i).Interface()) ++ elemVal, err := minMax(name, fn, depth+1, rv.Index(i).Interface()) + if err != nil { + return nil, err + } +@@ -299,7 +302,10 @@ func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { + return val, nil + } + +-func mean(args ...any) (int, float64, error) { ++func mean(depth int, args ...any) (int, float64, error) { ++ if depth > MaxDepth { ++ return 0, 0, ErrorMaxDepth ++ } + var total float64 + var count int + +@@ -309,7 +315,7 @@ func mean(args ...any) (int, float64, error) { + case reflect.Array, reflect.Slice: + size := rv.Len() + for i := 0; i < size; i++ { +- elemCount, elemSum, err := mean(rv.Index(i).Interface()) ++ elemCount, elemSum, err := mean(depth+1, rv.Index(i).Interface()) + if err != nil { + return 0, 0, err + } +@@ -332,7 +338,10 @@ func mean(args ...any) (int, float64, error) { + return count, total, nil + } + +-func median(args ...any) ([]float64, error) { ++func median(depth int, args ...any) ([]float64, error) { ++ if depth > MaxDepth { ++ return nil, ErrorMaxDepth ++ } + var values []float64 + + for _, arg := range args { +@@ -341,7 +350,7 @@ func median(args ...any) ([]float64, error) { + case reflect.Array, reflect.Slice: + size := rv.Len() + for i := 0; i < size; i++ { +- elems, err := median(rv.Index(i).Interface()) ++ elems, err := median(depth+1, rv.Index(i).Interface()) + if err != nil { + return nil, err + } +-- +2.45.4 + diff --git a/SPECS/coredns/coredns.spec b/SPECS/coredns/coredns.spec index 
7e93ab36196..722f09d1a9c 100644 --- a/SPECS/coredns/coredns.spec +++ b/SPECS/coredns/coredns.spec @@ -6,7 +6,7 @@ Summary: Fast and flexible DNS server Name: coredns Version: 1.11.4 -Release: 11%{?dist} +Release: 13%{?dist} License: Apache License 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -43,6 +43,8 @@ Patch4: CVE-2024-53259.patch Patch5: CVE-2025-47950.patch Patch6: CVE-2025-58063.patch Patch7: CVE-2025-59530.patch +Patch8: CVE-2025-68156.patch +Patch9: CVE-2025-68151.patch BuildRequires: golang < 1.25 @@ -84,6 +86,12 @@ go install github.com/fatih/faillint@latest && \ %{_bindir}/%{name} %changelog +* Thu Jan 15 2026 Aditya Singh - 1.11.4-13 +- Patch for CVE-2025-68151 + +* Fri Dec 19 2025 Azure Linux Security Servicing Account - 1.11.4-12 +- Patch for CVE-2025-68156 + * Mon Oct 27 2025 Azure Linux Security Servicing Account - 1.11.4-11 - Patch for CVE-2025-59530 @@ -111,7 +119,7 @@ go install github.com/fatih/faillint@latest && \ * Mon Mar 03 2025 Kanishk Bansal - 1.11.4-3 - Fix CVE-2025-22868 with an upstream patch -* Mon Feb 10 2025 Sam Meluch - 1.11.4-2 +* Mon Feb 17 2025 Sam Meluch - 1.11.4-2 - readd check section from 2.0 * Fri Feb 14 2025 CBL-Mariner Servicing Account - 1.11.4-1 diff --git a/SPECS/crash-gcore-command/crash-gcore-command.spec b/SPECS/crash-gcore-command/crash-gcore-command.spec index 9aa1e600b56..03be0c16cc9 100644 --- a/SPECS/crash-gcore-command/crash-gcore-command.spec +++ b/SPECS/crash-gcore-command/crash-gcore-command.spec @@ -1,6 +1,6 @@ Name: crash-gcore-command Version: 1.6.1 -Release: 2%{?dist} +Release: 3%{?dist} Summary: gcore extension module for crash utility Group: Development/Tools Vendor: Microsoft Corporation @@ -8,10 +8,11 @@ Distribution: Azure Linux URL: https://github.com/crash-utility/crash-extensions Source0: https://github.com/crash-utility/crash-extensions/raw/master/%{name}-%{version}.tar.gz Source1: gcore_defs.patch +Patch0: set_context-third-arg.patch License: GPLv2+ BuildRequires: zlib-devel -BuildRequires: crash-devel >= 7.2.5 -Requires: crash >= 7.2.5 +BuildRequires: crash-devel >= 9.0.0 +Requires: crash >= 9.0.0 BuildRoot: %{_tmppath}/%{name}-%{version}-root ExclusiveArch: x86_64 @@ -20,7 +21,11 @@ Command for creating a core dump file of a user-space task that was running in a kernel dumpfile. %prep -%setup -q -n %{name}-%{version} +# Note: we use -p2 here since patches come from upstream crash-gcore, but the +# source tarball comes from official crash-utilities/crash-extensions +# repository which already removes the top layer of directory nesting from the +# upstream crash-gcore code tree. 
+%autosetup -p2 -n %{name}-%{version} %build %ifarch x86_64 @@ -39,9 +44,11 @@ install -pm 755 gcore.so %{buildroot}%{_libdir}/crash/extensions/ %defattr(-,root,root) %license COPYING %{_libdir}/crash/extensions/gcore.so -%doc COPYING %changelog +* Tue Jan 06 2026 Chris Co - 1.6.1-3 +- add patch to fix build break with newer crash v9.0.0 + * Fri Jul 08 2022 Andrew Phelps 1.6.1-2 - Add ExclusiveArch: x86_64 * Fri Mar 04 2022 Andrew Phelps 1.6.1-1 diff --git a/SPECS/crash-gcore-command/set_context-third-arg.patch b/SPECS/crash-gcore-command/set_context-third-arg.patch new file mode 100644 index 00000000000..d6d2b6dd624 --- /dev/null +++ b/SPECS/crash-gcore-command/set_context-third-arg.patch @@ -0,0 +1,44 @@ +From 62b8d5005eaa9014467db85cfe268e65c6679f4c Mon Sep 17 00:00:00 2001 +From: Tao Liu +Date: Mon, 4 Nov 2024 17:11:19 +1300 +Subject: [PATCH] gcore: update set_context with upstream counterpart + +With the introduction of upstream commit "Preparing for gdb stack unwind +support" [1], the function set_context() is added by a 3rd parameter. Without +this patch, the compiliation of gcore will fail. + +The 3rd parameter of set_context() is used to sync the context of crash and +gdb, so gdb can hold the the value of registers of current crash's task +context, and gdb can output the stack unwinding for current task. + +This have nothing to do with gcore, so simply set the 3rd parameter as FALSE. + +[1]: https://github.com/lian-bo/crash/commit/d75d15d31b92f8882ccb15c960665e2c8a8d1c28 + +Signed-off-by: Tao Liu +--- + src/gcore.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/gcore.c b/src/gcore.c +index 47a9c0d..a79f69b 100644 +--- a/src/gcore.c ++++ b/src/gcore.c +@@ -306,7 +306,7 @@ static void do_gcore(char *arg) + + if (tc != CURRENT_CONTEXT()) { + gcore->orig_task = CURRENT_TASK(); +- (void) set_context(tc->task, NO_PID); ++ (void) set_context(tc->task, NO_PID, FALSE); + } + + snprintf(gcore->corename, CORENAME_MAX_SIZE + 1, "core.%lu.%s", +@@ -340,7 +340,7 @@ static void do_gcore(char *arg) + } + + if (gcore->orig_task) +- (void)set_context(gcore->orig_task, NO_PID); ++ (void)set_context(gcore->orig_task, NO_PID, FALSE); + + } + diff --git a/SPECS/crash/CVE-2022-37434.patch b/SPECS/crash/CVE-2022-37434.patch new file mode 100644 index 00000000000..15f56a640bb --- /dev/null +++ b/SPECS/crash/CVE-2022-37434.patch @@ -0,0 +1,41 @@ +From eff308af425b67093bab25f80f1ae950166bece1 Mon Sep 17 00:00:00 2001 +From: Mark Adler +Date: Sat, 30 Jul 2022 15:51:11 -0700 +Subject: [PATCH] Fix a bug when getting a gzip header extra field with + inflate(). + +NOTE change for azurelinux: +This patch comes from zlib upstream which is vendored into gdb sources in the +zlib directory. So this patch has been tweaked to apply to the zlib directory +inside gdb sources. + +Additionally, gdb does not use inflateGetHeader() so this bug does not affect gdb, but still patch the vendored source code to keep the source code patched. + +Original commit message below: + +If the extra field was larger than the space the user provided with +inflateGetHeader(), and if multiple calls of inflate() delivered +the extra header data, then there could be a buffer overflow of the +provided space. This commit assures that provided space is not +exceeded. 
+--- + zlib/inflate.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/zlib/inflate.c b/zlib/inflate.c +index 7be8c6366..7a7289749 100644 +--- a/zlib/inflate.c ++++ b/zlib/inflate.c +@@ -763,9 +763,10 @@ int flush; + copy = state->length; + if (copy > have) copy = have; + if (copy) { ++ len = state->head->extra_len - state->length; + if (state->head != Z_NULL && +- state->head->extra != Z_NULL) { +- len = state->head->extra_len - state->length; ++ state->head->extra != Z_NULL && ++ len < state->head->extra_max) { + zmemcpy(state->head->extra + len, next, + len + copy > state->head->extra_max ? + state->head->extra_max - len : copy); diff --git a/SPECS/crash/CVE-2025-11082.patch b/SPECS/crash/CVE-2025-11082.patch new file mode 100644 index 00000000000..804e37d7b5d --- /dev/null +++ b/SPECS/crash/CVE-2025-11082.patch @@ -0,0 +1,45 @@ +From ea1a0737c7692737a644af0486b71e4a392cbca8 Mon Sep 17 00:00:00 2001 +From: "H.J. Lu" +Date: Mon, 22 Sep 2025 15:20:34 +0800 +Subject: [PATCH] elf: Don't read beyond .eh_frame section size + + PR ld/33464 + * elf-eh-frame.c (_bfd_elf_parse_eh_frame): Don't read beyond + .eh_frame section size. + +Signed-off-by: H.J. Lu +--- + bfd/elf-eh-frame.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/bfd/elf-eh-frame.c b/bfd/elf-eh-frame.c +index dc0d2e097f5..30bb313489c 100644 +--- a/bfd/elf-eh-frame.c ++++ b/bfd/elf-eh-frame.c +@@ -737,6 +737,7 @@ _bfd_elf_parse_eh_frame (bfd *abfd, struct bfd_link_info *info, + if (hdr_id == 0) + { + unsigned int initial_insn_length; ++ char *null_byte; + + /* CIE */ + this_inf->cie = 1; +@@ -753,10 +754,13 @@ _bfd_elf_parse_eh_frame (bfd *abfd, struct bfd_link_info *info, + REQUIRE (cie->version == 1 + || cie->version == 3 + || cie->version == 4); +- REQUIRE (strlen ((char *) buf) < sizeof (cie->augmentation)); ++ null_byte = memchr ((char *) buf, 0, end - buf); ++ REQUIRE (null_byte != NULL); ++ REQUIRE ((size_t) (null_byte - (char *) buf) ++ < sizeof (cie->augmentation)); + + strcpy (cie->augmentation, (char *) buf); +- buf = (bfd_byte *) strchr ((char *) buf, '\0') + 1; ++ buf = (bfd_byte *) null_byte + 1; + this_inf->u.cie.aug_str_len = buf - start - 1; + ENSURE_NO_RELOCS (buf); + if (buf[0] == 'e' && buf[1] == 'h') +-- +2.43.7 + diff --git a/SPECS/crash/crash.signatures.json b/SPECS/crash/crash.signatures.json index da2a3f64430..baa65d15e9b 100644 --- a/SPECS/crash/crash.signatures.json +++ b/SPECS/crash/crash.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { - "gdb-10.2-4.tar.gz": "f2902cd89e725e0dd2e4ac007d4a31bf0237ad3b1a38191455d801ee6096246b", - "crash-8.0.4.tar.gz": "94df600c183301013787cd47112044e358fb37bb8e2b5544f40377dda98ee78f" + "gdb-16.2.tar.gz": "bdc1da4a033280ac752e7d34b0418efaa45bed093235cb88e62ea961752a37f8", + "crash-9.0.0.tar.gz": "86ccd8f78145b414e40e006bafe678f020360c174f2b771111754576f1427877" } } diff --git a/SPECS/crash/crash.spec b/SPECS/crash/crash.spec index a70b983f41a..7852597b619 100644 --- a/SPECS/crash/crash.spec +++ b/SPECS/crash/crash.spec @@ -1,20 +1,28 @@ -%global gdb_version 10.2 +%global gdb_version 16.2 Name: crash -Version: 8.0.4 -Release: 5%{?dist} +Version: 9.0.0 +Release: 1%{?dist} Summary: kernel crash analysis utility for live systems, netdump, diskdump, kdump, LKCD or mcore dumpfiles Group: Development/Tools Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://github.com/crash-utility/crash Source0: https://github.com/crash-utility/%{name}/archive/%{version}.tar.gz#/%{name}-%{version}.tar.gz -# crash requires 
gdb tarball for the build. There is no option to use the host gdb. For crash 8.0.1 the newest supported gdb version is 10.2. -# '-2' version of the tarball contains fix for CVE-2022-37434 which cannot be applied as a .patch because source1 is only untar'ed during crash make -# '-3' version of the tarball contains fix for CVE-2021-20197, CVE-2022-47673, CVE-2022-47696 which cannot be applied as a .patch because source1 is only untar'ed during crash make -# '-4' version of the tarball contains fix for CVE-2025-11082 which cannot be applied as a .patch because source1 is only untar'ed during crash make -Source1: gdb-%{gdb_version}-4.tar.gz +# crash requires gdb tarball for the build. There is no option to use the host gdb. For crash 9.0.0, the minimum required gdb version is 16.2. +Source1: gdb-%{gdb_version}.tar.gz # lzo patch sourced from https://src.fedoraproject.org/rpms/crash/blob/rawhide/f/lzo_snappy_zstd.patch + +# Since we have two source tarballs to patch, we use separate patch numbering +# to indicate where patches are applied where during the prep section. +# Patch 0-99 will automatically apply to Source0 (crash) +# Patch 100+ will automatically apply to Source1 (gdb) + +# Patches for crash sources Patch0: lzo_snappy_zstd.patch +# Patches for gdb sources +Patch100: CVE-2022-37434.patch +Patch101: CVE-2025-11082.patch + License: GPLv3+ BuildRequires: binutils BuildRequires: glibc-devel @@ -48,9 +56,24 @@ This package contains the "crash-target-arm64" binary for analyzing arm64 crash %endif %prep -%autosetup -n %{name}-%{version} -# make expect the gdb tarball to be named with its version only, gdb-[version].tar.gz, e.g.: gdb-10.2.tar.gz -cp %{SOURCE1} ./gdb-%{gdb_version}.tar.gz +# -N skips automatic patch application +%autosetup -n %{name}-%{version} -N + +# Apply only patches 0-99 to original crash source +%autopatch -p1 -M 99 + +# Extract and patch secondary gdb sources, re-tar and gzip them, and clean up the working directory. 
+# Note: crash's make expects the gdb tarball to be named with its version only, gdb-[version].tar.gz, e.g.: gdb-10.2.tar.gz +tar -xzf %{SOURCE1} +pushd gdb-%{gdb_version} +%autopatch -p1 -m 100 +popd +# Re-tar with consistent timestamps for reproducibility +tar --sort=name \ + --mtime="2021-04-26 00:00Z" \ + --owner=0 --group=0 --numeric-owner \ + -czf gdb-%{gdb_version}.tar.gz gdb-%{gdb_version} +rm -rf gdb-%{gdb_version}/ %build %ifarch x86_64 @@ -98,6 +121,9 @@ cp -p defs.h %{buildroot}%{_includedir}/crash %endif %changelog +* Thu Nov 20 2025 Chris Co - 9.0.0-1 +- Update to 9.0.0 + * Fri Oct 03 2025 Azure Linux Security Servicing Account - 8.0.4-5 - Update gdb-10.2-4.tar.gz to address CVE-2025-11082 diff --git a/SPECS/curl/CVE-2025-14017.patch b/SPECS/curl/CVE-2025-14017.patch new file mode 100644 index 00000000000..468c422d429 --- /dev/null +++ b/SPECS/curl/CVE-2025-14017.patch @@ -0,0 +1,117 @@ +From 477745dc74450c96f10afdacdcfecac67b50f138 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Fri, 9 Jan 2026 03:55:08 +0000 +Subject: [PATCH] ldap: call ldap_init() before setting the options; set + options on server; adjust CACERTFILE and REQUIRE_CERT; move init earlier and + remove duplicate init; update error message; consistent with upstream patch + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/curl/curl/commit/39d1976b7f709a516e324333.patch +--- + lib/ldap.c | 49 +++++++++++++++++++------------------------------ + 1 file changed, 19 insertions(+), 30 deletions(-) + +diff --git a/lib/ldap.c b/lib/ldap.c +index 2cbdb9c..a1e60b0 100644 +--- a/lib/ldap.c ++++ b/lib/ldap.c +@@ -367,16 +367,29 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + passwd = conn->passwd; + } + ++#ifdef USE_WIN32_LDAP ++ if(ldap_ssl) ++ server = ldap_sslinit(host, (curl_ldap_num_t)conn->primary.remote_port, 1); ++ else ++#else ++ server = ldap_init(host, (curl_ldap_num_t)conn->primary.remote_port); ++#endif ++ if(!server) { ++ failf(data, "LDAP: cannot setup connect to %s:%u", ++ conn->host.dispname, conn->primary.remote_port); ++ result = CURLE_COULDNT_CONNECT; ++ goto quit; ++ } ++ + #ifdef LDAP_OPT_NETWORK_TIMEOUT +- ldap_set_option(NULL, LDAP_OPT_NETWORK_TIMEOUT, &ldap_timeout); ++ ldap_set_option(server, LDAP_OPT_NETWORK_TIMEOUT, &ldap_timeout); + #endif +- ldap_set_option(NULL, LDAP_OPT_PROTOCOL_VERSION, &ldap_proto); ++ ldap_set_option(server, LDAP_OPT_PROTOCOL_VERSION, &ldap_proto); + + if(ldap_ssl) { + #ifdef HAVE_LDAP_SSL + #ifdef USE_WIN32_LDAP + /* Win32 LDAP SDK does not support insecure mode without CA! 
*/ +- server = ldap_sslinit(host, (curl_ldap_num_t)conn->primary.remote_port, 1); + ldap_set_option(server, LDAP_OPT_SSL, LDAP_OPT_ON); + #else + int ldap_option; +@@ -444,7 +457,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + goto quit; + } + infof(data, "LDAP local: using PEM CA cert: %s", ldap_ca); +- rc = ldap_set_option(NULL, LDAP_OPT_X_TLS_CACERTFILE, ldap_ca); ++ rc = ldap_set_option(server, LDAP_OPT_X_TLS_CACERTFILE, ldap_ca); + if(rc != LDAP_SUCCESS) { + failf(data, "LDAP local: ERROR setting PEM CA cert: %s", + ldap_err2string(rc)); +@@ -456,20 +469,13 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + else + ldap_option = LDAP_OPT_X_TLS_NEVER; + +- rc = ldap_set_option(NULL, LDAP_OPT_X_TLS_REQUIRE_CERT, &ldap_option); ++ rc = ldap_set_option(server, LDAP_OPT_X_TLS_REQUIRE_CERT, &ldap_option); + if(rc != LDAP_SUCCESS) { + failf(data, "LDAP local: ERROR setting cert verify mode: %s", + ldap_err2string(rc)); + result = CURLE_SSL_CERTPROBLEM; + goto quit; + } +- server = ldap_init(host, conn->primary.remote_port); +- if(!server) { +- failf(data, "LDAP local: Cannot connect to %s:%u", +- conn->host.dispname, conn->primary.remote_port); +- result = CURLE_COULDNT_CONNECT; +- goto quit; +- } + ldap_option = LDAP_OPT_X_TLS_HARD; + rc = ldap_set_option(server, LDAP_OPT_X_TLS, &ldap_option); + if(rc != LDAP_SUCCESS) { +@@ -478,15 +484,6 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + result = CURLE_SSL_CERTPROBLEM; + goto quit; + } +-/* +- rc = ldap_start_tls_s(server, NULL, NULL); +- if(rc != LDAP_SUCCESS) { +- failf(data, "LDAP local: ERROR starting SSL/TLS mode: %s", +- ldap_err2string(rc)); +- result = CURLE_SSL_CERTPROBLEM; +- goto quit; +- } +-*/ + #else + (void)ldap_option; + (void)ldap_ca; +@@ -505,15 +502,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) + result = CURLE_NOT_BUILT_IN; + goto quit; + } +- else { +- server = ldap_init(host, (curl_ldap_num_t)conn->primary.remote_port); +- if(!server) { +- failf(data, "LDAP local: Cannot connect to %s:%u", +- conn->host.dispname, conn->primary.remote_port); +- result = CURLE_COULDNT_CONNECT; +- goto quit; +- } +- } ++ + #ifdef USE_WIN32_LDAP + ldap_set_option(server, LDAP_OPT_PROTOCOL_VERSION, &ldap_proto); + rc = ldap_win_bind(data, server, user, passwd); +-- +2.45.4 + diff --git a/SPECS/curl/curl.spec b/SPECS/curl/curl.spec index 333ea5dcb46..1bcef69a899 100644 --- a/SPECS/curl/curl.spec +++ b/SPECS/curl/curl.spec @@ -1,7 +1,7 @@ Summary: An URL retrieval utility and library Name: curl Version: 8.11.1 -Release: 4%{?dist} +Release: 5%{?dist} License: curl Vendor: Microsoft Corporation Distribution: Azure Linux @@ -12,6 +12,7 @@ Patch0: CVE-2025-0665.patch Patch1: CVE-2025-0167.patch Patch2: CVE-2025-0725.patch Patch3: CVE-2025-10148.patch +Patch4: CVE-2025-14017.patch BuildRequires: cmake BuildRequires: krb5-devel BuildRequires: libnghttp2-devel @@ -102,6 +103,9 @@ find %{buildroot} -type f -name "*.la" -delete -print %{_libdir}/libcurl.so.* %changelog +* Fri Jan 09 2026 Azure Linux Security Servicing Account - 8.11.1-5 +- Patch for CVE-2025-14017 + * Thu Sep 11 2025 Azure Linux Security Servicing Account - 8.11.1-4 - Patch for CVE-2025-10148 diff --git a/SPECS/dcos-cli/CVE-2025-65637.patch b/SPECS/dcos-cli/CVE-2025-65637.patch new file mode 100644 index 00000000000..0cc2db84570 --- /dev/null +++ b/SPECS/dcos-cli/CVE-2025-65637.patch @@ -0,0 +1,136 @@ +From 38d4ac052f2b7dede31452232680ed1606450b68 Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 
-0800 +Subject: [PATCH 1/2] This commit fixes a potential denial of service + vulnerability in logrus.Writer() that could be triggered by logging text + longer than 64kb without newlines. Previously, the bufio.Scanner used by + Writer() would hang indefinitely when reading such text without newlines, + causing the application to become unresponsive. + +--- + vendor/github.com/sirupsen/logrus/writer.go | 33 ++++++++++++++++++++- + 1 file changed, 32 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 72e8e3a..36032d0 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -4,6 +4,7 @@ import ( + "bufio" + "io" + "runtime" ++ "strings" + ) + + // Writer at INFO level. See WriterLevel for details. +@@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) + } + ++// Writer returns an io.Writer that writes to the logger at the info log level + func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) + } + ++// WriterLevel returns an io.Writer that writes to the logger at the given log level + func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + ++ // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace +@@ -48,23 +52,50 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + printFunc = entry.Print + } + ++ // Start a new goroutine to scan the input and write it to the logger using the specified print function. ++ // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
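[Editor's aside — illustration only, separate from the logrus patch hunks.] The heart of the fix is a custom `bufio.Scanner` split function that hands back fixed-size chunks instead of waiting for a newline, so one very long line can no longer stall `Writer()`. The standalone sketch below uses a deliberately tiny chunk size so the chunking is easy to observe; the real patch does the same with a 64KB chunk, and the follow-up commit addresses flushing the remaining partial data, which the `atEOF` branch handles in this sketch.

```go
// Standalone sketch of the chunked split-function idea (tiny chunk size for
// readability; the real patch uses 64KB).
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	const chunkSize = 16
	scanner := bufio.NewScanner(strings.NewReader(strings.Repeat("x", 40))) // no newlines at all
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if len(data) >= chunkSize {
			return chunkSize, data[:chunkSize], nil // emit a full chunk
		}
		if atEOF && len(data) > 0 {
			return len(data), data, nil // flush the final partial chunk
		}
		return 0, nil, nil // ask for more input
	})
	for scanner.Scan() {
		fmt.Println(len(scanner.Bytes())) // prints 16, 16, 8
	}
}
```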
+ go entry.writerScanner(reader, printFunc) ++ ++ // Set a finalizer function to close the writer when it is garbage collected + runtime.SetFinalizer(writer, writerFinalizer) + + return writer + } + ++// writerScanner scans the input from the reader and writes it to the logger + func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) ++ ++ // Set the buffer size to the maximum token size to avoid buffer overflows ++ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) ++ ++ // Define a split function to split the input into chunks of up to 64KB ++ chunkSize := 64 * 1024 // 64KB ++ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { ++ if len(data) > chunkSize { ++ return chunkSize, data[:chunkSize], nil ++ } ++ return 0, nil, nil ++ } ++ ++ //Use the custom split function to split the input ++ scanner.Split(splitFunc) ++ ++ // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { +- printFunc(scanner.Text()) ++ printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } ++ ++ // If there was an error while scanning the input, log an error + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } ++ ++ // Close the reader when we are done + reader.Close() + } + ++// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected + func writerFinalizer(writer *io.PipeWriter) { + writer.Close() + } +-- +2.45.4 + + +From f143e9aeb300a7a4a7fb29e76f1edee1a7448d81 Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 2/2] Scan text in 64KB chunks + +This commit fixes a potential denial of service +vulnerability in logrus.Writer() that could be +triggered by logging text longer than 64KB +without newlines. Previously, the bufio.Scanner +used by Writer() would hang indefinitely when +reading such text without newlines, causing the +application to become unresponsive. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/sirupsen/logrus/pull/1376.patch +--- + vendor/github.com/sirupsen/logrus/writer.go | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 36032d0..7e7703c 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -75,7 +75,8 @@ func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ... 
+ if len(data) > chunkSize { + return chunkSize, data[:chunkSize], nil + } +- return 0, nil, nil ++ ++ return len(data), data, nil + } + + //Use the custom split function to split the input +-- +2.45.4 + diff --git a/SPECS/dcos-cli/dcos-cli.spec b/SPECS/dcos-cli/dcos-cli.spec index 6d484ea50c5..cbd09b31120 100644 --- a/SPECS/dcos-cli/dcos-cli.spec +++ b/SPECS/dcos-cli/dcos-cli.spec @@ -1,7 +1,7 @@ Summary: The command line for DC/OS Name: dcos-cli Version: 1.2.0 -Release: 19%{?dist} +Release: 20%{?dist} License: Apache-2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -12,6 +12,7 @@ Patch0: CVE-2020-26160.patch Patch1: CVE-2024-28180.patch Patch2: CVE-2025-27144.patch Patch3: CVE-2024-51744.patch +Patch4: CVE-2025-65637.patch BuildRequires: golang < 1.25 BuildRequires: git @@ -49,6 +50,9 @@ go test -mod=vendor %{_bindir}/dcos %changelog +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 1.2.0-20 +- Patch for CVE-2025-65637 + * Sun Aug 31 2025 Andrew Phelps - 1.2.0-19 - Set BR for golang to < 1.25 diff --git a/SPECS/edk2/CVE-2025-15467.patch b/SPECS/edk2/CVE-2025-15467.patch new file mode 100644 index 00000000000..05d6dc686a8 --- /dev/null +++ b/SPECS/edk2/CVE-2025-15467.patch @@ -0,0 +1,43 @@ +From 636fb597f7690441e1003b46e206f0ea96b4dc02 Mon Sep 17 00:00:00 2001 +From: Igor Ustinov +Date: Mon, 12 Jan 2026 12:13:35 +0100 +Subject: [PATCH] Correct handling of AEAD-encrypted CMS with inadmissibly long + IV +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Fixes CVE-2025-15467 + +Reviewed-by: Saša Nedvědický +Reviewed-by: Norbert Pocs +Reviewed-by: Eugene Syromiatnikov +Reviewed-by: Tomas Mraz +MergeDate: Mon Jan 26 19:31:45 2026 +(cherry picked from commit 0ddd6b6bcbdedbe2c8304af05771f8ab11939112) +Signed-off-by: rpm-build +Upstream-reference: https://github.com/openssl/openssl/commit/6ced0fe6b10faa560e410e3ee8d6c82f06c65ea3.patch +--- + CryptoPkg/Library/OpensslLib/openssl/crypto/evp/evp_lib.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/evp/evp_lib.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/evp/evp_lib.c +index d88066d..3eb80b8 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/evp/evp_lib.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/evp/evp_lib.c +@@ -249,10 +249,9 @@ int evp_cipher_get_asn1_aead_params(EVP_CIPHER_CTX *c, ASN1_TYPE *type, + if (type == NULL || asn1_params == NULL) + return 0; + +- i = ossl_asn1_type_get_octetstring_int(type, &tl, NULL, EVP_MAX_IV_LENGTH); +- if (i <= 0) ++ i = ossl_asn1_type_get_octetstring_int(type, &tl, iv, EVP_MAX_IV_LENGTH); ++ if (i <= 0 || i > EVP_MAX_IV_LENGTH) + return -1; +- ossl_asn1_type_get_octetstring_int(type, &tl, iv, i); + + memcpy(asn1_params->iv, iv, i); + asn1_params->iv_len = i; +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2025-2295.patch b/SPECS/edk2/CVE-2025-2295.patch new file mode 100644 index 00000000000..ecf7f9c6aa1 --- /dev/null +++ b/SPECS/edk2/CVE-2025-2295.patch @@ -0,0 +1,54 @@ +From 4029abaa6248f603007642077ee067131bb1c050 Mon Sep 17 00:00:00 2001 +From: Madhavan +Date: Fri, 14 Mar 2025 14:15:13 -0400 +Subject: [PATCH] NetworkPkg/IScsiDxe:Fix for Remote Memory Exposure in ISCSI + bz4206 + +Used SafeUint32Add to calculate and validate OutTransferLength with +boundary check in IScsiOnR2TRcvd to avoid integer overflow + +Signed-off-by: Madhavan +Signed-off-by: rpm-build +Upstream-reference: 
https://github.com/tianocore/edk2/commit/17cdc512f02a2dfd1b9e24133da56fdda099abda.patch +--- + NetworkPkg/IScsiDxe/IScsiProto.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/NetworkPkg/IScsiDxe/IScsiProto.c b/NetworkPkg/IScsiDxe/IScsiProto.c +index ef58764..fb48e63 100644 +--- a/NetworkPkg/IScsiDxe/IScsiProto.c ++++ b/NetworkPkg/IScsiDxe/IScsiProto.c +@@ -1,7 +1,7 @@ + /** @file + The implementation of iSCSI protocol based on RFC3720. + +-Copyright (c) 2004 - 2018, Intel Corporation. All rights reserved.
++Copyright (c) 2004 - 2025, Intel Corporation. All rights reserved.
+ SPDX-License-Identifier: BSD-2-Clause-Patent + + **/ +@@ -2682,6 +2682,7 @@ IScsiOnR2TRcvd ( + EFI_STATUS Status; + ISCSI_XFER_CONTEXT *XferContext; + UINT8 *Data; ++ UINT32 TransferLength; + + R2THdr = (ISCSI_READY_TO_TRANSFER *)NetbufGetByte (Pdu, 0, NULL); + if (R2THdr == NULL) { +@@ -2712,7 +2713,12 @@ IScsiOnR2TRcvd ( + XferContext->Offset = R2THdr->BufferOffset; + XferContext->DesiredLength = R2THdr->DesiredDataTransferLength; + +- if (((XferContext->Offset + XferContext->DesiredLength) > Packet->OutTransferLength) || ++ Status = SafeUint32Add (XferContext->Offset, XferContext->DesiredLength, &TransferLength); ++ if (EFI_ERROR (Status)) { ++ return EFI_PROTOCOL_ERROR; ++ } ++ ++ if ((TransferLength > Packet->OutTransferLength) || + (XferContext->DesiredLength > Tcb->Conn->Session->MaxBurstLength) + ) + { +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2025-2296.patch b/SPECS/edk2/CVE-2025-2296.patch new file mode 100644 index 00000000000..bace649c79b --- /dev/null +++ b/SPECS/edk2/CVE-2025-2296.patch @@ -0,0 +1,1218 @@ +From b8e7f492faace8ec5fc184280d74ee5977525921 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Tue, 14 Jan 2025 17:36:39 +0100 +Subject: [PATCH 01/10] OvmfPkg/QemuKernelLoaderFsDxe: rework direct kernel + boot filesystem + +Split KERNEL_BLOB struct into two: + + * One (KERNEL_BLOB_ITEMS) static array describing how to load (unnamed) + blobs from fw_cfg. + * And one (KERNEL_BLOB) dynamically allocated linked list carrying the + data blobs for the pseudo filesystem. + +Also add some debug logging. Prefix most functions with 'QemuKernel' +for consistency and easier log file grepping. Add some small helper +functions. + +This refactoring prepares for loading blobs in other ways. +No (intentional) change in filesystem protocol behavior. + +Signed-off-by: Gerd Hoffmann +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/tianocore/edk2/pull/10628.patch +--- + .../BlobVerifierSevHashes.c | 4 +- + OvmfPkg/Include/Library/BlobVerifierLib.h | 4 +- + .../BlobVerifierLibNull/BlobVerifierNull.c | 4 +- + .../GenericQemuLoadImageLib.c | 56 ++- + .../X86QemuLoadImageLib/X86QemuLoadImageLib.c | 104 ++++- + .../X86QemuLoadImageLib.inf | 1 + + .../QemuKernelLoaderFsDxe.c | 433 ++++++++++++------ + .../QemuKernelLoaderFsDxe.inf | 1 + + 8 files changed, 448 insertions(+), 159 deletions(-) + +diff --git a/OvmfPkg/AmdSev/BlobVerifierLibSevHashes/BlobVerifierSevHashes.c b/OvmfPkg/AmdSev/BlobVerifierLibSevHashes/BlobVerifierSevHashes.c +index 2e58794..f41f440 100644 +--- a/OvmfPkg/AmdSev/BlobVerifierLibSevHashes/BlobVerifierSevHashes.c ++++ b/OvmfPkg/AmdSev/BlobVerifierLibSevHashes/BlobVerifierSevHashes.c +@@ -80,6 +80,7 @@ FindBlobEntryGuid ( + @param[in] BlobName The name of the blob + @param[in] Buf The data of the blob + @param[in] BufSize The size of the blob in bytes ++ @param[in] FetchStatus The status of the previous blob fetch + + @retval EFI_SUCCESS The blob was verified successfully. 
+ @retval EFI_ACCESS_DENIED The blob could not be verified, and therefore +@@ -90,7 +91,8 @@ EFIAPI + VerifyBlob ( + IN CONST CHAR16 *BlobName, + IN CONST VOID *Buf, +- IN UINT32 BufSize ++ IN UINT32 BufSize, ++ IN EFI_STATUS FetchStatus + ) + { + CONST GUID *Guid; +diff --git a/OvmfPkg/Include/Library/BlobVerifierLib.h b/OvmfPkg/Include/Library/BlobVerifierLib.h +index 7e1af27..286b564 100644 +--- a/OvmfPkg/Include/Library/BlobVerifierLib.h ++++ b/OvmfPkg/Include/Library/BlobVerifierLib.h +@@ -22,6 +22,7 @@ + @param[in] BlobName The name of the blob + @param[in] Buf The data of the blob + @param[in] BufSize The size of the blob in bytes ++ @param[in] FetchStatus The status of the previous blob fetch + + @retval EFI_SUCCESS The blob was verified successfully. + @retval EFI_ACCESS_DENIED The blob could not be verified, and therefore +@@ -32,7 +33,8 @@ EFIAPI + VerifyBlob ( + IN CONST CHAR16 *BlobName, + IN CONST VOID *Buf, +- IN UINT32 BufSize ++ IN UINT32 BufSize, ++ IN EFI_STATUS FetchStatus + ); + + #endif +diff --git a/OvmfPkg/Library/BlobVerifierLibNull/BlobVerifierNull.c b/OvmfPkg/Library/BlobVerifierLibNull/BlobVerifierNull.c +index e817c3c..2b80348 100644 +--- a/OvmfPkg/Library/BlobVerifierLibNull/BlobVerifierNull.c ++++ b/OvmfPkg/Library/BlobVerifierLibNull/BlobVerifierNull.c +@@ -16,6 +16,7 @@ + @param[in] BlobName The name of the blob + @param[in] Buf The data of the blob + @param[in] BufSize The size of the blob in bytes ++ @param[in] FetchStatus The status of the previous blob fetch + + @retval EFI_SUCCESS The blob was verified successfully. + @retval EFI_ACCESS_DENIED The blob could not be verified, and therefore +@@ -26,7 +27,8 @@ EFIAPI + VerifyBlob ( + IN CONST CHAR16 *BlobName, + IN CONST VOID *Buf, +- IN UINT32 BufSize ++ IN UINT32 BufSize, ++ IN EFI_STATUS FetchStatus + ) + { + return EFI_SUCCESS; +diff --git a/OvmfPkg/Library/GenericQemuLoadImageLib/GenericQemuLoadImageLib.c b/OvmfPkg/Library/GenericQemuLoadImageLib/GenericQemuLoadImageLib.c +index b99fb35..9d0ba77 100644 +--- a/OvmfPkg/Library/GenericQemuLoadImageLib/GenericQemuLoadImageLib.c ++++ b/OvmfPkg/Library/GenericQemuLoadImageLib/GenericQemuLoadImageLib.c +@@ -57,6 +57,25 @@ STATIC CONST KERNEL_VENMEDIA_FILE_DEVPATH mKernelDevicePath = { + } + }; + ++STATIC CONST KERNEL_VENMEDIA_FILE_DEVPATH mShimDevicePath = { ++ { ++ { ++ MEDIA_DEVICE_PATH, MEDIA_VENDOR_DP, ++ { sizeof (VENDOR_DEVICE_PATH) } ++ }, ++ QEMU_KERNEL_LOADER_FS_MEDIA_GUID ++ }, { ++ { ++ MEDIA_DEVICE_PATH, MEDIA_FILEPATH_DP, ++ { sizeof (KERNEL_FILE_DEVPATH) } ++ }, ++ L"shim", ++ }, { ++ END_DEVICE_PATH_TYPE, END_ENTIRE_DEVICE_PATH_SUBTYPE, ++ { sizeof (EFI_DEVICE_PATH_PROTOCOL) } ++ } ++}; ++ + STATIC CONST SINGLE_VENMEDIA_NODE_DEVPATH mQemuKernelLoaderFsDevicePath = { + { + { +@@ -174,6 +193,7 @@ QemuLoadKernelImage ( + UINTN CommandLineSize; + CHAR8 *CommandLine; + UINTN InitrdSize; ++ BOOLEAN Shim; + + // + // Load the image. This should call back into the QEMU EFI loader file system. 
+@@ -181,11 +201,35 @@ QemuLoadKernelImage ( + Status = gBS->LoadImage ( + FALSE, // BootPolicy: exact match required + gImageHandle, // ParentImageHandle +- (EFI_DEVICE_PATH_PROTOCOL *)&mKernelDevicePath, ++ (EFI_DEVICE_PATH_PROTOCOL *)&mShimDevicePath, + NULL, // SourceBuffer + 0, // SourceSize + &KernelImageHandle + ); ++ if (Status == EFI_SUCCESS) { ++ Shim = TRUE; ++ DEBUG ((DEBUG_INFO, "%a: booting via shim\n", __func__)); ++ } else { ++ Shim = FALSE; ++ if (Status == EFI_SECURITY_VIOLATION) { ++ gBS->UnloadImage (KernelImageHandle); ++ } ++ ++ if (Status != EFI_NOT_FOUND) { ++ DEBUG ((DEBUG_INFO, "%a: LoadImage(shim): %r\n", __func__, Status)); ++ return Status; ++ } ++ ++ Status = gBS->LoadImage ( ++ FALSE, // BootPolicy: exact match required ++ gImageHandle, // ParentImageHandle ++ (EFI_DEVICE_PATH_PROTOCOL *)&mKernelDevicePath, ++ NULL, // SourceBuffer ++ 0, // SourceSize ++ &KernelImageHandle ++ ); ++ } ++ + switch (Status) { + case EFI_SUCCESS: + break; +@@ -303,6 +347,13 @@ QemuLoadKernelImage ( + KernelLoadedImage->LoadOptionsSize += sizeof (L" initrd=initrd") - 2; + } + ++ if (Shim) { ++ // ++ // Prefix 'kernel ' in UTF-16. ++ // ++ KernelLoadedImage->LoadOptionsSize += sizeof (L"kernel ") - 2; ++ } ++ + if (KernelLoadedImage->LoadOptionsSize == 0) { + KernelLoadedImage->LoadOptions = NULL; + } else { +@@ -323,7 +374,8 @@ QemuLoadKernelImage ( + UnicodeSPrintAsciiFormat ( + KernelLoadedImage->LoadOptions, + KernelLoadedImage->LoadOptionsSize, +- "%a%a", ++ "%a%a%a", ++ (Shim == FALSE) ? "" : "kernel ", + (CommandLineSize == 0) ? "" : CommandLine, + (InitrdSize == 0) ? "" : " initrd=initrd" + ); +diff --git a/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.c b/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.c +index a7ab43c..2d610f6 100644 +--- a/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.c ++++ b/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.c +@@ -19,8 +19,10 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + #include + #include +@@ -57,6 +59,25 @@ STATIC CONST KERNEL_VENMEDIA_FILE_DEVPATH mKernelDevicePath = { + } + }; + ++STATIC CONST KERNEL_VENMEDIA_FILE_DEVPATH mShimDevicePath = { ++ { ++ { ++ MEDIA_DEVICE_PATH, MEDIA_VENDOR_DP, ++ { sizeof (VENDOR_DEVICE_PATH) } ++ }, ++ QEMU_KERNEL_LOADER_FS_MEDIA_GUID ++ }, { ++ { ++ MEDIA_DEVICE_PATH, MEDIA_FILEPATH_DP, ++ { sizeof (KERNEL_FILE_DEVPATH) } ++ }, ++ L"shim", ++ }, { ++ END_DEVICE_PATH_TYPE, END_ENTIRE_DEVICE_PATH_SUBTYPE, ++ { sizeof (EFI_DEVICE_PATH_PROTOCOL) } ++ } ++}; ++ + STATIC + VOID + FreeLegacyImage ( +@@ -339,6 +360,7 @@ QemuLoadKernelImage ( + UINTN CommandLineSize; + CHAR8 *CommandLine; + UINTN InitrdSize; ++ BOOLEAN Shim; + + // + // Redundant assignment to work around GCC48/GCC49 limitations. 
+@@ -351,11 +373,35 @@ QemuLoadKernelImage ( + Status = gBS->LoadImage ( + FALSE, // BootPolicy: exact match required + gImageHandle, // ParentImageHandle +- (EFI_DEVICE_PATH_PROTOCOL *)&mKernelDevicePath, ++ (EFI_DEVICE_PATH_PROTOCOL *)&mShimDevicePath, + NULL, // SourceBuffer + 0, // SourceSize + &KernelImageHandle + ); ++ if (Status == EFI_SUCCESS) { ++ Shim = TRUE; ++ DEBUG ((DEBUG_INFO, "%a: booting via shim\n", __func__)); ++ } else { ++ Shim = FALSE; ++ if (Status == EFI_SECURITY_VIOLATION) { ++ gBS->UnloadImage (KernelImageHandle); ++ } ++ ++ if (Status != EFI_NOT_FOUND) { ++ DEBUG ((DEBUG_INFO, "%a: LoadImage(shim): %r\n", __func__, Status)); ++ return Status; ++ } ++ ++ Status = gBS->LoadImage ( ++ FALSE, // BootPolicy: exact match required ++ gImageHandle, // ParentImageHandle ++ (EFI_DEVICE_PATH_PROTOCOL *)&mKernelDevicePath, ++ NULL, // SourceBuffer ++ 0, // SourceSize ++ &KernelImageHandle ++ ); ++ } ++ + switch (Status) { + case EFI_SUCCESS: + break; +@@ -377,13 +423,45 @@ QemuLoadKernelImage ( + // Fall through + // + case EFI_ACCESS_DENIED: +- // +- // We are running with UEFI secure boot enabled, and the image failed to +- // authenticate. For compatibility reasons, we fall back to the legacy +- // loader in this case. +- // +- // Fall through +- // ++ // ++ // We are running with UEFI secure boot enabled, and the image failed to ++ // authenticate. For compatibility reasons, we fall back to the legacy ++ // loader in this case (unless disabled via fw_cfg). ++ // ++ { ++ EFI_STATUS RetStatus; ++ BOOLEAN Enabled = TRUE; ++ ++ AsciiPrint ( ++ "OVMF: Secure boot image verification failed. Consider using the '-shim'\n" ++ "OVMF: command line switch for qemu (available in version 10.0 + newer).\n" ++ "\n" ++ ); ++ ++ RetStatus = QemuFwCfgParseBool ( ++ "opt/org.tianocore/EnableLegacyLoader", ++ &Enabled ++ ); ++ if (EFI_ERROR (RetStatus)) { ++ Enabled = TRUE; ++ } ++ ++ if (!Enabled) { ++ AsciiPrint ( ++ "OVMF: Fallback to insecure legacy linux kernel loader is disabled.\n" ++ "\n" ++ ); ++ return EFI_ACCESS_DENIED; ++ } else { ++ AsciiPrint ( ++ "OVMF: Using legacy linux kernel loader (insecure and deprecated).\n" ++ "\n" ++ ); ++ // ++ // Fall through ++ // ++ } ++ } + case EFI_UNSUPPORTED: + // + // The image is not natively supported or cross-type supported. Let's try +@@ -465,6 +543,13 @@ QemuLoadKernelImage ( + KernelLoadedImage->LoadOptionsSize += sizeof (L" initrd=initrd") - 2; + } + ++ if (Shim) { ++ // ++ // Prefix 'kernel ' in UTF-16. ++ // ++ KernelLoadedImage->LoadOptionsSize += sizeof (L"kernel ") - 2; ++ } ++ + if (KernelLoadedImage->LoadOptionsSize == 0) { + KernelLoadedImage->LoadOptions = NULL; + } else { +@@ -485,7 +570,8 @@ QemuLoadKernelImage ( + UnicodeSPrintAsciiFormat ( + KernelLoadedImage->LoadOptions, + KernelLoadedImage->LoadOptionsSize, +- "%a%a", ++ "%a%a%a", ++ (Shim == FALSE) ? "" : "kernel ", + (CommandLineSize == 0) ? "" : CommandLine, + (InitrdSize == 0) ? 
"" : " initrd=initrd" + ); +diff --git a/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.inf b/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.inf +index c7ec041..09babd3 100644 +--- a/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.inf ++++ b/OvmfPkg/Library/X86QemuLoadImageLib/X86QemuLoadImageLib.inf +@@ -33,6 +33,7 @@ + LoadLinuxLib + PrintLib + QemuFwCfgLib ++ QemuFwCfgSimpleParserLib + ReportStatusCodeLib + UefiBootServicesTableLib + +diff --git a/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.c b/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.c +index e473c0b..bbe1aa3 100644 +--- a/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.c ++++ b/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -32,25 +33,24 @@ + // + // Static data that hosts the fw_cfg blobs and serves file requests. + // +-typedef enum { +- KernelBlobTypeKernel, +- KernelBlobTypeInitrd, +- KernelBlobTypeCommandLine, +- KernelBlobTypeMax +-} KERNEL_BLOB_TYPE; +- + typedef struct { +- CONST CHAR16 Name[8]; ++ CHAR16 Name[48]; + struct { +- FIRMWARE_CONFIG_ITEM CONST SizeKey; +- FIRMWARE_CONFIG_ITEM CONST DataKey; +- UINT32 Size; +- } FwCfgItem[2]; +- UINT32 Size; +- UINT8 *Data; +-} KERNEL_BLOB; +- +-STATIC KERNEL_BLOB mKernelBlob[KernelBlobTypeMax] = { ++ FIRMWARE_CONFIG_ITEM SizeKey; ++ FIRMWARE_CONFIG_ITEM DataKey; ++ UINT32 Size; ++ } FwCfgItem[2]; ++} KERNEL_BLOB_ITEMS; ++ ++typedef struct KERNEL_BLOB KERNEL_BLOB; ++struct KERNEL_BLOB { ++ CHAR16 Name[48]; ++ UINT32 Size; ++ UINT8 *Data; ++ KERNEL_BLOB *Next; ++}; ++ ++STATIC KERNEL_BLOB_ITEMS mKernelBlobItems[] = { + { + L"kernel", + { +@@ -70,7 +70,10 @@ STATIC KERNEL_BLOB mKernelBlob[KernelBlobTypeMax] = { + } + }; + +-STATIC UINT64 mTotalBlobBytes; ++STATIC KERNEL_BLOB *mKernelBlobs; ++STATIC UINT64 mKernelBlobCount; ++STATIC UINT64 mKernelNamedBlobCount; ++STATIC UINT64 mTotalBlobBytes; + + // + // Device path for the handle that incorporates our "EFI stub filesystem". +@@ -118,7 +121,7 @@ STATIC EFI_TIME mInitTime; + typedef struct { + UINT64 Signature; // Carries STUB_FILE_SIG. + +- KERNEL_BLOB_TYPE BlobType; // Index into mKernelBlob. KernelBlobTypeMax ++ KERNEL_BLOB *Blob; // Index into mKernelBlob. KernelBlobTypeMax + // denotes the root directory of the filesystem. + + UINT64 Position; // Byte position for regular files; +@@ -178,7 +181,7 @@ typedef struct { + STATIC + EFI_STATUS + EFIAPI +-StubFileOpen ( ++QemuKernelStubFileOpen ( + IN EFI_FILE_PROTOCOL *This, + OUT EFI_FILE_PROTOCOL **NewHandle, + IN CHAR16 *FileName, +@@ -197,7 +200,7 @@ StubFileOpen ( + STATIC + EFI_STATUS + EFIAPI +-StubFileClose ( ++QemuKernelStubFileClose ( + IN EFI_FILE_PROTOCOL *This + ) + { +@@ -220,7 +223,7 @@ StubFileClose ( + STATIC + EFI_STATUS + EFIAPI +-StubFileDelete ( ++QemuKernelStubFileDelete ( + IN EFI_FILE_PROTOCOL *This + ) + { +@@ -230,18 +233,17 @@ StubFileDelete ( + + /** + Helper function that formats an EFI_FILE_INFO structure into the +- user-allocated buffer, for any valid KERNEL_BLOB_TYPE value (including +- KernelBlobTypeMax, which stands for the root directory). ++ user-allocated buffer, for any valid KERNEL_BLOB (including NULL, ++ which stands for the root directory). + + The interface follows the EFI_FILE_GET_INFO -- and for directories, the + EFI_FILE_READ -- interfaces. 
+ +- @param[in] BlobType The KERNEL_BLOB_TYPE value identifying the fw_cfg ++ @param[in] Blob The KERNEL_BLOB identifying the fw_cfg + blob backing the STUB_FILE that information is +- being requested about. If BlobType equals +- KernelBlobTypeMax, then information will be +- provided about the root directory of the +- filesystem. ++ being requested about. If Blob is NULL, ++ then information will be provided about the root ++ directory of the filesystem. + + @param[in,out] BufferSize On input, the size of Buffer. On output, the + amount of data returned in Buffer. In both cases, +@@ -258,10 +260,10 @@ StubFileDelete ( + **/ + STATIC + EFI_STATUS +-ConvertKernelBlobTypeToFileInfo ( +- IN KERNEL_BLOB_TYPE BlobType, +- IN OUT UINTN *BufferSize, +- OUT VOID *Buffer ++QemuKernelBlobTypeToFileInfo ( ++ IN KERNEL_BLOB *Blob, ++ IN OUT UINTN *BufferSize, ++ OUT VOID *Buffer + ) + { + CONST CHAR16 *Name; +@@ -273,17 +275,16 @@ ConvertKernelBlobTypeToFileInfo ( + EFI_FILE_INFO *FileInfo; + UINTN OriginalBufferSize; + +- if (BlobType == KernelBlobTypeMax) { ++ if (Blob == NULL) { + // + // getting file info about the root directory + // ++ DEBUG ((DEBUG_INFO, "%a: file info: directory\n", __func__)); + Name = L"\\"; +- FileSize = KernelBlobTypeMax; ++ FileSize = mKernelBlobCount; + Attribute = EFI_FILE_READ_ONLY | EFI_FILE_DIRECTORY; + } else { +- CONST KERNEL_BLOB *Blob; +- +- Blob = &mKernelBlob[BlobType]; ++ DEBUG ((DEBUG_INFO, "%a: file info: \"%s\"\n", __func__, Blob->Name)); + Name = Blob->Name; + FileSize = Blob->Size; + Attribute = EFI_FILE_READ_ONLY; +@@ -291,7 +292,6 @@ ConvertKernelBlobTypeToFileInfo ( + + NameSize = (StrLen (Name) + 1) * 2; + FileInfoSize = OFFSET_OF (EFI_FILE_INFO, FileName) + NameSize; +- ASSERT (FileInfoSize >= sizeof *FileInfo); + + OriginalBufferSize = *BufferSize; + *BufferSize = FileInfoSize; +@@ -313,6 +313,23 @@ ConvertKernelBlobTypeToFileInfo ( + return EFI_SUCCESS; + } + ++STATIC ++KERNEL_BLOB * ++FindKernelBlob ( ++ CHAR16 *FileName ++ ) ++{ ++ KERNEL_BLOB *Blob; ++ ++ for (Blob = mKernelBlobs; Blob != NULL; Blob = Blob->Next) { ++ if (StrCmp (FileName, Blob->Name) == 0) { ++ return Blob; ++ } ++ } ++ ++ return NULL; ++} ++ + /** + Reads data from a file, or continues scanning a directory. + +@@ -350,25 +367,25 @@ ConvertKernelBlobTypeToFileInfo ( + STATIC + EFI_STATUS + EFIAPI +-StubFileRead ( ++QemuKernelStubFileRead ( + IN EFI_FILE_PROTOCOL *This, + IN OUT UINTN *BufferSize, + OUT VOID *Buffer + ) + { +- STUB_FILE *StubFile; +- CONST KERNEL_BLOB *Blob; +- UINT64 Left; ++ STUB_FILE *StubFile; ++ KERNEL_BLOB *Blob; ++ UINT64 Left, Pos; + + StubFile = STUB_FILE_FROM_FILE (This); + + // + // Scanning the root directory? + // +- if (StubFile->BlobType == KernelBlobTypeMax) { ++ if (StubFile->Blob == NULL) { + EFI_STATUS Status; + +- if (StubFile->Position == KernelBlobTypeMax) { ++ if (StubFile->Position == mKernelBlobCount) { + // + // Scanning complete. + // +@@ -376,8 +393,16 @@ StubFileRead ( + return EFI_SUCCESS; + } + +- Status = ConvertKernelBlobTypeToFileInfo ( +- (KERNEL_BLOB_TYPE)StubFile->Position, ++ for (Pos = 0, Blob = mKernelBlobs; ++ Pos < StubFile->Position; ++ Pos++, Blob = Blob->Next) ++ { ++ } ++ ++ DEBUG ((DEBUG_INFO, "%a: file list: #%d \"%s\"\n", __func__, Pos, Blob->Name)); ++ ++ Status = QemuKernelBlobTypeToFileInfo ( ++ Blob, + BufferSize, + Buffer + ); +@@ -392,7 +417,7 @@ StubFileRead ( + // + // Reading a file. 
+ // +- Blob = &mKernelBlob[StubFile->BlobType]; ++ Blob = StubFile->Blob; + if (StubFile->Position > Blob->Size) { + return EFI_DEVICE_ERROR; + } +@@ -403,6 +428,7 @@ StubFileRead ( + } + + if (Blob->Data != NULL) { ++ DEBUG ((DEBUG_INFO, "%a: file read: \"%s\", %d bytes\n", __func__, Blob->Name, *BufferSize)); + CopyMem (Buffer, Blob->Data + StubFile->Position, *BufferSize); + } + +@@ -436,7 +462,7 @@ StubFileRead ( + STATIC + EFI_STATUS + EFIAPI +-StubFileWrite ( ++QemuKernelStubFileWrite ( + IN EFI_FILE_PROTOCOL *This, + IN OUT UINTN *BufferSize, + IN VOID *Buffer +@@ -445,7 +471,7 @@ StubFileWrite ( + STUB_FILE *StubFile; + + StubFile = STUB_FILE_FROM_FILE (This); +- return (StubFile->BlobType == KernelBlobTypeMax) ? ++ return (StubFile->Blob == NULL) ? + EFI_UNSUPPORTED : + EFI_WRITE_PROTECTED; + } +@@ -467,7 +493,7 @@ StubFileWrite ( + STATIC + EFI_STATUS + EFIAPI +-StubFileGetPosition ( ++QemuKernelStubFileGetPosition ( + IN EFI_FILE_PROTOCOL *This, + OUT UINT64 *Position + ) +@@ -475,7 +501,7 @@ StubFileGetPosition ( + STUB_FILE *StubFile; + + StubFile = STUB_FILE_FROM_FILE (This); +- if (StubFile->BlobType == KernelBlobTypeMax) { ++ if (StubFile->Blob == NULL) { + return EFI_UNSUPPORTED; + } + +@@ -502,7 +528,7 @@ StubFileGetPosition ( + STATIC + EFI_STATUS + EFIAPI +-StubFileSetPosition ( ++QemuKernelStubFileSetPosition ( + IN EFI_FILE_PROTOCOL *This, + IN UINT64 Position + ) +@@ -512,7 +538,7 @@ StubFileSetPosition ( + + StubFile = STUB_FILE_FROM_FILE (This); + +- if (StubFile->BlobType == KernelBlobTypeMax) { ++ if (StubFile->Blob == NULL) { + if (Position == 0) { + // + // rewinding a directory scan is allowed +@@ -527,7 +553,7 @@ StubFileSetPosition ( + // + // regular file seek + // +- Blob = &mKernelBlob[StubFile->BlobType]; ++ Blob = StubFile->Blob; + if (Position == MAX_UINT64) { + // + // seek to end +@@ -584,7 +610,7 @@ StubFileSetPosition ( + STATIC + EFI_STATUS + EFIAPI +-StubFileGetInfo ( ++QemuKernelStubFileGetInfo ( + IN EFI_FILE_PROTOCOL *This, + IN EFI_GUID *InformationType, + IN OUT UINTN *BufferSize, +@@ -597,8 +623,8 @@ StubFileGetInfo ( + StubFile = STUB_FILE_FROM_FILE (This); + + if (CompareGuid (InformationType, &gEfiFileInfoGuid)) { +- return ConvertKernelBlobTypeToFileInfo ( +- StubFile->BlobType, ++ return QemuKernelBlobTypeToFileInfo ( ++ StubFile->Blob, + BufferSize, + Buffer + ); +@@ -686,7 +712,7 @@ StubFileGetInfo ( + STATIC + EFI_STATUS + EFIAPI +-StubFileSetInfo ( ++QemuKernelStubFileSetInfo ( + IN EFI_FILE_PROTOCOL *This, + IN EFI_GUID *InformationType, + IN UINTN BufferSize, +@@ -713,7 +739,7 @@ StubFileSetInfo ( + STATIC + EFI_STATUS + EFIAPI +-StubFileFlush ( ++QemuKernelStubFileFlush ( + IN EFI_FILE_PROTOCOL *This + ) + { +@@ -725,16 +751,16 @@ StubFileFlush ( + // + STATIC CONST EFI_FILE_PROTOCOL mEfiFileProtocolTemplate = { + EFI_FILE_PROTOCOL_REVISION, // revision 1 +- StubFileOpen, +- StubFileClose, +- StubFileDelete, +- StubFileRead, +- StubFileWrite, +- StubFileGetPosition, +- StubFileSetPosition, +- StubFileGetInfo, +- StubFileSetInfo, +- StubFileFlush, ++ QemuKernelStubFileOpen, ++ QemuKernelStubFileClose, ++ QemuKernelStubFileDelete, ++ QemuKernelStubFileRead, ++ QemuKernelStubFileWrite, ++ QemuKernelStubFileGetPosition, ++ QemuKernelStubFileSetPosition, ++ QemuKernelStubFileGetInfo, ++ QemuKernelStubFileSetInfo, ++ QemuKernelStubFileFlush, + NULL, // OpenEx, revision 2 + NULL, // ReadEx, revision 2 + NULL, // WriteEx, revision 2 +@@ -744,7 +770,7 @@ STATIC CONST EFI_FILE_PROTOCOL mEfiFileProtocolTemplate = { + STATIC + EFI_STATUS + 
EFIAPI +-StubFileOpen ( ++QemuKernelStubFileOpen ( + IN EFI_FILE_PROTOCOL *This, + OUT EFI_FILE_PROTOCOL **NewHandle, + IN CHAR16 *FileName, +@@ -753,7 +779,7 @@ StubFileOpen ( + ) + { + CONST STUB_FILE *StubFile; +- UINTN BlobType; ++ KERNEL_BLOB *Blob; + STUB_FILE *NewStubFile; + + // +@@ -775,21 +801,25 @@ StubFileOpen ( + // Only the root directory supports opening files in it. + // + StubFile = STUB_FILE_FROM_FILE (This); +- if (StubFile->BlobType != KernelBlobTypeMax) { ++ if (StubFile->Blob != NULL) { + return EFI_UNSUPPORTED; + } + + // + // Locate the file. + // +- for (BlobType = 0; BlobType < KernelBlobTypeMax; ++BlobType) { +- if (StrCmp (FileName, mKernelBlob[BlobType].Name) == 0) { +- break; +- } ++ if (FileName[0] == '\\') { ++ // also accept absolute paths, i.e. '\kernel' for 'kernel' ++ FileName++; + } + +- if (BlobType == KernelBlobTypeMax) { ++ Blob = FindKernelBlob (FileName); ++ ++ if (Blob == NULL) { ++ DEBUG ((DEBUG_INFO, "%a: file not found: \"%s\"\n", __func__, FileName)); + return EFI_NOT_FOUND; ++ } else { ++ DEBUG ((DEBUG_INFO, "%a: file opened: \"%s\"\n", __func__, FileName)); + } + + // +@@ -801,7 +831,7 @@ StubFileOpen ( + } + + NewStubFile->Signature = STUB_FILE_SIG; +- NewStubFile->BlobType = (KERNEL_BLOB_TYPE)BlobType; ++ NewStubFile->Blob = Blob; + NewStubFile->Position = 0; + CopyMem ( + &NewStubFile->File, +@@ -843,7 +873,7 @@ StubFileOpen ( + STATIC + EFI_STATUS + EFIAPI +-StubFileSystemOpenVolume ( ++QemuKernelStubFileSystemOpenVolume ( + IN EFI_SIMPLE_FILE_SYSTEM_PROTOCOL *This, + OUT EFI_FILE_PROTOCOL **Root + ) +@@ -856,7 +886,7 @@ StubFileSystemOpenVolume ( + } + + StubFile->Signature = STUB_FILE_SIG; +- StubFile->BlobType = KernelBlobTypeMax; ++ StubFile->Blob = NULL; + StubFile->Position = 0; + CopyMem ( + &StubFile->File, +@@ -870,13 +900,13 @@ StubFileSystemOpenVolume ( + + STATIC CONST EFI_SIMPLE_FILE_SYSTEM_PROTOCOL mFileSystem = { + EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_REVISION, +- StubFileSystemOpenVolume ++ QemuKernelStubFileSystemOpenVolume + }; + + STATIC + EFI_STATUS + EFIAPI +-InitrdLoadFile2 ( ++QemuKernelInitrdLoadFile2 ( + IN EFI_LOAD_FILE2_PROTOCOL *This, + IN EFI_DEVICE_PATH_PROTOCOL *FilePath, + IN BOOLEAN BootPolicy, +@@ -884,8 +914,11 @@ InitrdLoadFile2 ( + OUT VOID *Buffer OPTIONAL + ) + { +- CONST KERNEL_BLOB *InitrdBlob = &mKernelBlob[KernelBlobTypeInitrd]; ++ KERNEL_BLOB *InitrdBlob; + ++ DEBUG ((DEBUG_INFO, "%a: initrd read\n", __func__)); ++ InitrdBlob = FindKernelBlob (L"initrd"); ++ ASSERT (InitrdBlob != NULL); + ASSERT (InitrdBlob->Size > 0); + + if (BootPolicy) { +@@ -914,17 +947,33 @@ InitrdLoadFile2 ( + } + + STATIC CONST EFI_LOAD_FILE2_PROTOCOL mInitrdLoadFile2 = { +- InitrdLoadFile2, ++ QemuKernelInitrdLoadFile2, + }; + + // + // Utility functions. + // + ++STATIC VOID ++QemuKernelChunkedRead ( ++ UINT8 *Dest, ++ UINT32 Bytes ++ ) ++{ ++ UINT32 Chunk; ++ ++ while (Bytes > 0) { ++ Chunk = (Bytes < SIZE_1MB) ? Bytes : SIZE_1MB; ++ QemuFwCfgReadBytes (Chunk, Dest); ++ Bytes -= Chunk; ++ Dest += Chunk; ++ } ++} ++ + /** + Populate a blob in mKernelBlob. + +- param[in,out] Blob Pointer to the KERNEL_BLOB element in mKernelBlob that is ++ param[in,out] Blob Pointer to the KERNEL_BLOB_ITEMS that is + to be filled from fw_cfg. + + @retval EFI_SUCCESS Blob has been populated. 
If fw_cfg reported a +@@ -935,35 +984,54 @@ STATIC CONST EFI_LOAD_FILE2_PROTOCOL mInitrdLoadFile2 = { + **/ + STATIC + EFI_STATUS +-FetchBlob ( +- IN OUT KERNEL_BLOB *Blob ++QemuKernelFetchBlob ( ++ IN KERNEL_BLOB_ITEMS *BlobItems + ) + { +- UINT32 Left; +- UINTN Idx; +- UINT8 *ChunkData; ++ UINT32 Size; ++ UINTN Idx; ++ UINT8 *ChunkData; ++ KERNEL_BLOB *Blob; ++ EFI_STATUS Status; + + // + // Read blob size. ++ // Size != 0 -> use size as-is ++ // SizeKey != 0 -> read size from fw_cfg ++ // both are 0 -> unused entry + // +- Blob->Size = 0; +- for (Idx = 0; Idx < ARRAY_SIZE (Blob->FwCfgItem); Idx++) { +- if (Blob->FwCfgItem[Idx].SizeKey == 0) { ++ for (Size = 0, Idx = 0; Idx < ARRAY_SIZE (BlobItems->FwCfgItem); Idx++) { ++ if ((BlobItems->FwCfgItem[Idx].SizeKey == 0) && ++ (BlobItems->FwCfgItem[Idx].Size == 0)) ++ { + break; + } + +- QemuFwCfgSelectItem (Blob->FwCfgItem[Idx].SizeKey); +- Blob->FwCfgItem[Idx].Size = QemuFwCfgRead32 (); +- Blob->Size += Blob->FwCfgItem[Idx].Size; ++ if (BlobItems->FwCfgItem[Idx].SizeKey) { ++ QemuFwCfgSelectItem (BlobItems->FwCfgItem[Idx].SizeKey); ++ BlobItems->FwCfgItem[Idx].Size = QemuFwCfgRead32 (); ++ } ++ ++ Size += BlobItems->FwCfgItem[Idx].Size; + } + +- if (Blob->Size == 0) { ++ if (Size == 0) { + return EFI_SUCCESS; + } + ++ Blob = AllocatePool (sizeof (*Blob)); ++ if (Blob->Data == NULL) { ++ return EFI_OUT_OF_RESOURCES; ++ } ++ ++ ZeroMem (Blob, sizeof (*Blob)); ++ + // + // Read blob. + // ++ Status = StrCpyS (Blob->Name, sizeof (Blob->Name), BlobItems->Name); ++ ASSERT (!EFI_ERROR (Status)); ++ Blob->Size = Size; + Blob->Data = AllocatePool (Blob->Size); + if (Blob->Data == NULL) { + DEBUG (( +@@ -973,6 +1041,7 @@ FetchBlob ( + (INT64)Blob->Size, + Blob->Name + )); ++ FreePool (Blob); + return EFI_OUT_OF_RESOURCES; + } + +@@ -985,33 +1054,98 @@ FetchBlob ( + )); + + ChunkData = Blob->Data; +- for (Idx = 0; Idx < ARRAY_SIZE (Blob->FwCfgItem); Idx++) { +- if (Blob->FwCfgItem[Idx].DataKey == 0) { ++ for (Idx = 0; Idx < ARRAY_SIZE (BlobItems->FwCfgItem); Idx++) { ++ if (BlobItems->FwCfgItem[Idx].DataKey == 0) { + break; + } + +- QemuFwCfgSelectItem (Blob->FwCfgItem[Idx].DataKey); ++ QemuFwCfgSelectItem (BlobItems->FwCfgItem[Idx].DataKey); ++ QemuKernelChunkedRead (ChunkData, BlobItems->FwCfgItem[Idx].Size); ++ ChunkData += BlobItems->FwCfgItem[Idx].Size; ++ } ++ ++ Blob->Next = mKernelBlobs; ++ mKernelBlobs = Blob; ++ mKernelBlobCount++; ++ mTotalBlobBytes += Blob->Size; ++ return EFI_SUCCESS; ++} + +- Left = Blob->FwCfgItem[Idx].Size; +- while (Left > 0) { +- UINT32 Chunk; ++STATIC ++EFI_STATUS ++QemuKernelVerifyBlob ( ++ CHAR16 *FileName, ++ EFI_STATUS FetchStatus ++ ) ++{ ++ KERNEL_BLOB *Blob; ++ EFI_STATUS Status; + +- Chunk = (Left < SIZE_1MB) ? Left : SIZE_1MB; +- QemuFwCfgReadBytes (Chunk, ChunkData + Blob->FwCfgItem[Idx].Size - Left); +- Left -= Chunk; +- DEBUG (( +- DEBUG_VERBOSE, +- "%a: %Ld bytes remaining for \"%s\" (%d)\n", +- __func__, +- (INT64)Left, +- Blob->Name, +- (INT32)Idx +- )); ++ if ((StrCmp (FileName, L"kernel") != 0) && ++ (StrCmp (FileName, L"initrd") != 0) && ++ (StrCmp (FileName, L"cmdline") != 0)) ++ { ++ return EFI_SUCCESS; ++ } ++ ++ Blob = FindKernelBlob (FileName); ++ Status = VerifyBlob ( ++ FileName, ++ Blob ? Blob->Data : NULL, ++ Blob ? 
Blob->Size : 0, ++ FetchStatus ++ ); ++ return Status; ++} ++ ++STATIC ++EFI_STATUS ++QemuKernelFetchNamedBlobs ( ++ VOID ++ ) ++{ ++ struct { ++ UINT32 FileSize; ++ UINT16 FileSelect; ++ UINT16 Reserved; ++ CHAR8 FileName[QEMU_FW_CFG_FNAME_SIZE]; ++ } *DirEntry; ++ KERNEL_BLOB_ITEMS Items; ++ EFI_STATUS Status; ++ EFI_STATUS FetchStatus; ++ UINT32 Count; ++ UINT32 Idx; ++ ++ QemuFwCfgSelectItem (QemuFwCfgItemFileDir); ++ Count = SwapBytes32 (QemuFwCfgRead32 ()); ++ ++ DirEntry = AllocatePool (sizeof (*DirEntry) * Count); ++ QemuFwCfgReadBytes (sizeof (*DirEntry) * Count, DirEntry); ++ ++ for (Idx = 0; Idx < Count; ++Idx) { ++ if (AsciiStrnCmp (DirEntry[Idx].FileName, "etc/boot/", 9) != 0) { ++ continue; + } + +- ChunkData += Blob->FwCfgItem[Idx].Size; ++ ZeroMem (&Items, sizeof (Items)); ++ UnicodeSPrint (Items.Name, sizeof (Items.Name), L"%a", DirEntry[Idx].FileName + 9); ++ Items.FwCfgItem[0].DataKey = SwapBytes16 (DirEntry[Idx].FileSelect); ++ Items.FwCfgItem[0].Size = SwapBytes32 (DirEntry[Idx].FileSize); ++ ++ FetchStatus = QemuKernelFetchBlob (&Items); ++ Status = QemuKernelVerifyBlob ( ++ (CHAR16 *)Items.Name, ++ FetchStatus ++ ); ++ if (EFI_ERROR (Status)) { ++ FreePool (DirEntry); ++ return Status; ++ } ++ ++ mKernelNamedBlobCount++; + } + ++ FreePool (DirEntry); + return EFI_SUCCESS; + } + +@@ -1039,12 +1173,13 @@ QemuKernelLoaderFsDxeEntrypoint ( + IN EFI_SYSTEM_TABLE *SystemTable + ) + { +- UINTN BlobType; +- KERNEL_BLOB *CurrentBlob; +- KERNEL_BLOB *KernelBlob; +- EFI_STATUS Status; +- EFI_HANDLE FileSystemHandle; +- EFI_HANDLE InitrdLoadFile2Handle; ++ UINTN BlobIdx; ++ KERNEL_BLOB_ITEMS *BlobItems; ++ KERNEL_BLOB *Blob; ++ EFI_STATUS Status; ++ EFI_STATUS FetchStatus; ++ EFI_HANDLE FileSystemHandle; ++ EFI_HANDLE InitrdLoadFile2Handle; + + if (!QemuFwCfgIsAvailable ()) { + return EFI_NOT_FOUND; +@@ -1057,30 +1192,38 @@ QemuKernelLoaderFsDxeEntrypoint ( + } + + // +- // Fetch all blobs. ++ // Fetch named blobs. + // +- for (BlobType = 0; BlobType < KernelBlobTypeMax; ++BlobType) { +- CurrentBlob = &mKernelBlob[BlobType]; +- Status = FetchBlob (CurrentBlob); +- if (EFI_ERROR (Status)) { +- goto FreeBlobs; ++ DEBUG ((DEBUG_INFO, "%a: named blobs (etc/boot/*)\n", __func__)); ++ Status = QemuKernelFetchNamedBlobs (); ++ if (EFI_ERROR (Status)) { ++ goto FreeBlobs; ++ } ++ ++ // ++ // Fetch traditional blobs. 
++ // ++ DEBUG ((DEBUG_INFO, "%a: traditional blobs\n", __func__)); ++ for (BlobIdx = 0; BlobIdx < ARRAY_SIZE (mKernelBlobItems); ++BlobIdx) { ++ BlobItems = &mKernelBlobItems[BlobIdx]; ++ if (FindKernelBlob (BlobItems->Name)) { ++ continue; + } + +- Status = VerifyBlob ( +- CurrentBlob->Name, +- CurrentBlob->Data, +- CurrentBlob->Size ++ FetchStatus = QemuKernelFetchBlob (BlobItems); ++ ++ Status = QemuKernelVerifyBlob ( ++ (CHAR16 *)BlobItems->Name, ++ FetchStatus + ); + if (EFI_ERROR (Status)) { + goto FreeBlobs; + } +- +- mTotalBlobBytes += CurrentBlob->Size; + } + +- KernelBlob = &mKernelBlob[KernelBlobTypeKernel]; +- +- if (KernelBlob->Data == NULL) { ++ Blob = FindKernelBlob (L"kernel"); ++ if ((Blob == NULL) && (mKernelNamedBlobCount == 0)) { ++ DEBUG ((DEBUG_INFO, "%a: no kernel and no named blobs present -> quit\n", __func__)); + Status = EFI_NOT_FOUND; + #if defined (MDE_CPU_AARCH64) + // +@@ -1125,7 +1268,9 @@ QemuKernelLoaderFsDxeEntrypoint ( + goto FreeBlobs; + } + +- if (KernelBlob[KernelBlobTypeInitrd].Size > 0) { ++ Blob = FindKernelBlob (L"initrd"); ++ if (Blob != NULL) { ++ DEBUG ((DEBUG_INFO, "%a: initrd setup\n", __func__)); + InitrdLoadFile2Handle = NULL; + Status = gBS->InstallMultipleProtocolInterfaces ( + &InitrdLoadFile2Handle, +@@ -1160,13 +1305,11 @@ UninstallFileSystemHandle: + ASSERT_EFI_ERROR (Status); + + FreeBlobs: +- while (BlobType > 0) { +- CurrentBlob = &mKernelBlob[--BlobType]; +- if (CurrentBlob->Data != NULL) { +- FreePool (CurrentBlob->Data); +- CurrentBlob->Size = 0; +- CurrentBlob->Data = NULL; +- } ++ while (mKernelBlobs != NULL) { ++ Blob = mKernelBlobs; ++ mKernelBlobs = Blob->Next; ++ FreePool (Blob->Data); ++ FreePool (Blob); + } + + return Status; +diff --git a/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.inf b/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.inf +index e0331c6..4445407 100644 +--- a/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.inf ++++ b/OvmfPkg/QemuKernelLoaderFsDxe/QemuKernelLoaderFsDxe.inf +@@ -31,6 +31,7 @@ + DebugPrintErrorLevelLib + DevicePathLib + MemoryAllocationLib ++ PrintLib + QemuFwCfgLib + UefiBootServicesTableLib + UefiDriverEntryPoint +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2025-68160.patch b/SPECS/edk2/CVE-2025-68160.patch new file mode 100644 index 00000000000..b5fe84e03b7 --- /dev/null +++ b/SPECS/edk2/CVE-2025-68160.patch @@ -0,0 +1,81 @@ +From 8433c3ab7b435f3ee12a7177e85cb8b16f93aa39 Mon Sep 17 00:00:00 2001 +From: Neil Horman +Date: Wed, 7 Jan 2026 11:52:09 -0500 +Subject: [PATCH] Fix heap buffer overflow in BIO_f_linebuffer +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +When a FIO_f_linebuffer is part of a bio chain, and the next BIO +preforms short writes, the remainder of the unwritten buffer is copied +unconditionally to the internal buffer ctx->obuf, which may not be +sufficiently sized to handle the remaining data, resulting in a buffer +overflow. + +Fix it by only copying data when ctx->obuf has space, flushing to the +next BIO to increase available storage if needed. 
+ +Fixes openssl/srt#48 + +Fixes CVE-2025-68160 + +Reviewed-by: Nikola Pajkovsky +Reviewed-by: Eugene Syromiatnikov +Reviewed-by: Saša Nedvědický +Reviewed-by: Tomas Mraz +MergeDate: Mon Jan 26 19:41:40 2026 +(cherry picked from commit b21663c35a6f0ed4c8de06855bdc7a6a21f00c2f) +Signed-off-by: rpm-build +Upstream-reference: https://github.com/openssl/openssl/commit/475c466ef2fbd8fc1df6fae1c3eed9c813fc8ff6.patch +--- + .../OpensslLib/openssl/crypto/bio/bf_lbuf.c | 32 +++++++++++++++---- + 1 file changed, 26 insertions(+), 6 deletions(-) + +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/bio/bf_lbuf.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/bio/bf_lbuf.c +index 73f1216..a471b28 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/bio/bf_lbuf.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/bio/bf_lbuf.c +@@ -189,14 +189,34 @@ static int linebuffer_write(BIO *b, const char *in, int inl) + while (foundnl && inl > 0); + /* + * We've written as much as we can. The rest of the input buffer, if +- * any, is text that doesn't and with a NL and therefore needs to be +- * saved for the next trip. ++ * any, is text that doesn't end with a NL and therefore we need to try ++ * free up some space in our obuf so we can make forward progress. + */ +- if (inl > 0) { +- memcpy(&(ctx->obuf[ctx->obuf_len]), in, inl); +- ctx->obuf_len += inl; +- num += inl; ++ while (inl > 0) { ++ size_t avail = (size_t)ctx->obuf_size - (size_t)ctx->obuf_len; ++ size_t to_copy; ++ ++ if (avail == 0) { ++ /* Flush buffered data to make room */ ++ i = BIO_write(b->next_bio, ctx->obuf, ctx->obuf_len); ++ if (i <= 0) { ++ BIO_copy_next_retry(b); ++ return num > 0 ? num : i; ++ } ++ if (i < ctx->obuf_len) ++ memmove(ctx->obuf, ctx->obuf + i, ctx->obuf_len - i); ++ ctx->obuf_len -= i; ++ continue; ++ } ++ ++ to_copy = inl > (int)avail ? avail : (size_t)inl; ++ memcpy(&(ctx->obuf[ctx->obuf_len]), in, to_copy); ++ ctx->obuf_len += (int)to_copy; ++ in += to_copy; ++ inl -= (int)to_copy; ++ num += (int)to_copy; + } ++ + return num; + } + +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2025-69418.patch b/SPECS/edk2/CVE-2025-69418.patch new file mode 100644 index 00000000000..b0feaefdd15 --- /dev/null +++ b/SPECS/edk2/CVE-2025-69418.patch @@ -0,0 +1,78 @@ +From 3adfe1f39f64b4cde3b4c2b2f3c3a1bc50ad4ffe Mon Sep 17 00:00:00 2001 +From: Norbert Pocs +Date: Thu, 8 Jan 2026 15:04:54 +0100 +Subject: [PATCH] Fix OCB AES-NI/HW stream path unauthenticated/unencrypted + trailing bytes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +When ctx->stream (e.g., AES‑NI or ARMv8 CE) is available, the fast path +encrypts/decrypts full blocks but does not advance in/out pointers. The +tail-handling code then operates on the base pointers, effectively reprocessing +the beginning of the buffer while leaving the actual trailing bytes +unencrypted (encryption) or using the wrong plaintext (decryption). The +authentication checksum excludes the true tail. 
+ +CVE-2025-69418 + +Fixes: https://github.com/openssl/srt/issues/58 + +Signed-off-by: Norbert Pocs + +Reviewed-by: Saša Nedvědický +Reviewed-by: Eugene Syromiatnikov +Reviewed-by: Tomas Mraz +MergeDate: Mon Jan 26 19:48:35 2026 +(cherry picked from commit be9375d5d45dfaf897b56ef148a0b58402491fcb) +Signed-off-by: rpm-build +Upstream-reference: https://github.com/openssl/openssl/commit/52d23c86a54adab5ee9f80e48b242b52c4cc2347.patch +--- + .../Library/OpensslLib/openssl/crypto/modes/ocb128.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/modes/ocb128.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/modes/ocb128.c +index b5202ba..95601da 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/modes/ocb128.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/modes/ocb128.c +@@ -342,7 +342,7 @@ int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx, + + if (num_blocks && all_num_blocks == (size_t)all_num_blocks + && ctx->stream != NULL) { +- size_t max_idx = 0, top = (size_t)all_num_blocks; ++ size_t max_idx = 0, top = (size_t)all_num_blocks, processed_bytes = 0; + + /* + * See how many L_{i} entries we need to process data at hand +@@ -356,6 +356,9 @@ int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx, + ctx->stream(in, out, num_blocks, ctx->keyenc, + (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c, + (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c); ++ processed_bytes = num_blocks * 16; ++ in += processed_bytes; ++ out += processed_bytes; + } else { + /* Loop through all full blocks to be encrypted */ + for (i = ctx->sess.blocks_processed + 1; i <= all_num_blocks; i++) { +@@ -434,7 +437,7 @@ int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx, + + if (num_blocks && all_num_blocks == (size_t)all_num_blocks + && ctx->stream != NULL) { +- size_t max_idx = 0, top = (size_t)all_num_blocks; ++ size_t max_idx = 0, top = (size_t)all_num_blocks, processed_bytes = 0; + + /* + * See how many L_{i} entries we need to process data at hand +@@ -448,6 +451,9 @@ int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx, + ctx->stream(in, out, num_blocks, ctx->keydec, + (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c, + (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c); ++ processed_bytes = num_blocks * 16; ++ in += processed_bytes; ++ out += processed_bytes; + } else { + OCB_BLOCK tmp; + +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2025-69420.patch b/SPECS/edk2/CVE-2025-69420.patch new file mode 100644 index 00000000000..72619b2bc5f --- /dev/null +++ b/SPECS/edk2/CVE-2025-69420.patch @@ -0,0 +1,51 @@ +From 2a7b27649a32878b91c1f0632cca1f502b7ac349 Mon Sep 17 00:00:00 2001 +From: Bob Beck +Date: Wed, 7 Jan 2026 11:29:48 -0700 +Subject: [PATCH] Verify ASN1 object's types before attempting to access them + as a particular type +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Issue was reported in ossl_ess_get_signing_cert but is also present in +ossl_ess_get_signing_cert_v2. 
+ +Fixes: https://github.com/openssl/srt/issues/61 +Fixes CVE-2025-69420 + +Reviewed-by: Norbert Pocs +Reviewed-by: Saša Nedvědický +Reviewed-by: Tomas Mraz +MergeDate: Mon Jan 26 19:53:36 2026 +(cherry picked from commit ea8fc4c345fbd749048809c9f7c881ea656b0b94) +Signed-off-by: rpm-build +Upstream-reference: https://github.com/openssl/openssl/commit/4e254b48ad93cc092be3dd62d97015f33f73133a.patch +--- + .../Library/OpensslLib/openssl/crypto/ts/ts_rsp_verify.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/ts/ts_rsp_verify.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/ts/ts_rsp_verify.c +index 792a27c..d940c49 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/ts/ts_rsp_verify.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/ts/ts_rsp_verify.c +@@ -209,7 +209,7 @@ static ESS_SIGNING_CERT *ossl_ess_get_signing_cert(const PKCS7_SIGNER_INFO *si) + const unsigned char *p; + + attr = PKCS7_get_signed_attribute(si, NID_id_smime_aa_signingCertificate); +- if (attr == NULL) ++ if (attr == NULL || attr->type != V_ASN1_SEQUENCE) + return NULL; + p = attr->value.sequence->data; + return d2i_ESS_SIGNING_CERT(NULL, &p, attr->value.sequence->length); +@@ -222,7 +222,7 @@ ESS_SIGNING_CERT_V2 *ossl_ess_get_signing_cert_v2(const PKCS7_SIGNER_INFO *si) + const unsigned char *p; + + attr = PKCS7_get_signed_attribute(si, NID_id_smime_aa_signingCertificateV2); +- if (attr == NULL) ++ if (attr == NULL || attr->type != V_ASN1_SEQUENCE) + return NULL; + p = attr->value.sequence->data; + return d2i_ESS_SIGNING_CERT_V2(NULL, &p, attr->value.sequence->length); +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2025-69421.patch b/SPECS/edk2/CVE-2025-69421.patch new file mode 100644 index 00000000000..48308f977c1 --- /dev/null +++ b/SPECS/edk2/CVE-2025-69421.patch @@ -0,0 +1,41 @@ +From 8743dcc66b5c38ce0f8eac69fd800001f3868337 Mon Sep 17 00:00:00 2001 +From: Andrew Dinh +Date: Thu, 8 Jan 2026 01:24:30 +0900 +Subject: [PATCH] PKCS12_item_decrypt_d2i_ex(): Check oct argument for NULL +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Fixes CVE-2025-69421 + +Reviewed-by: Nikola Pajkovsky +Reviewed-by: Saša Nedvědický +Reviewed-by: Eugene Syromiatnikov +Reviewed-by: Tomas Mraz +MergeDate: Mon Jan 26 19:56:08 2026 +(cherry picked from commit 2c13bf15286328641a805eb3b7c97e27d42881fb) +Signed-off-by: rpm-build +Upstream-reference: https://github.com/openssl/openssl/commit/36ecb4960872a4ce04bf6f1e1f4e78d75ec0c0c7.patch +--- + .../Library/OpensslLib/openssl/crypto/pkcs12/p12_decr.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_decr.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_decr.c +index a5adafa..2e14a49 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_decr.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_decr.c +@@ -137,6 +137,11 @@ void *PKCS12_item_decrypt_d2i_ex(const X509_ALGOR *algor, const ASN1_ITEM *it, + void *ret; + int outlen = 0; + ++ if (oct == NULL) { ++ ERR_raise(ERR_LIB_PKCS12, ERR_R_PASSED_NULL_PARAMETER); ++ return NULL; ++ } ++ + if (!PKCS12_pbe_crypt_ex(algor, pass, passlen, oct->data, oct->length, + &out, &outlen, 0, libctx, propq)) + return NULL; +-- +2.45.4 + diff --git a/SPECS/edk2/CVE-2026-22796.patch b/SPECS/edk2/CVE-2026-22796.patch new file mode 100644 index 00000000000..3d8b45a7078 --- /dev/null +++ b/SPECS/edk2/CVE-2026-22796.patch @@ -0,0 +1,77 @@ +From 
f581ce76c38647656736e8f56912ee65b51ae584 Mon Sep 17 00:00:00 2001 +From: Bob Beck +Date: Wed, 7 Jan 2026 11:29:48 -0700 +Subject: [PATCH] Ensure ASN1 types are checked before use. + +Some of these were fixed by LibreSSL in commit https://github.com/openbsd/src/commit/aa1f637d454961d22117b4353f98253e984b3ba8 +this fix includes the other fixes in that commit, as well as fixes for others found by a scan +for a similar unvalidated access paradigm in the tree. + +Reviewed-by: Kurt Roeckx +Reviewed-by: Shane Lontis +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/29582) + +Signed-off-by: rpm-build +Upstream-reference: https://github.com/openssl/openssl/commit/572844beca95068394c916626a6d3a490f831a49.patch +--- + CryptoPkg/Library/OpensslLib/openssl/apps/s_client.c | 3 ++- + .../OpensslLib/openssl/crypto/pkcs12/p12_kiss.c | 10 ++++++++-- + .../Library/OpensslLib/openssl/crypto/pkcs7/pk7_doit.c | 2 ++ + 3 files changed, 12 insertions(+), 3 deletions(-) + +diff --git a/CryptoPkg/Library/OpensslLib/openssl/apps/s_client.c b/CryptoPkg/Library/OpensslLib/openssl/apps/s_client.c +index a914238..a21e0a6 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/apps/s_client.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/apps/s_client.c +@@ -2650,8 +2650,9 @@ int s_client_main(int argc, char **argv) + goto end; + } + atyp = ASN1_generate_nconf(genstr, cnf); +- if (atyp == NULL) { ++ if (atyp == NULL || atyp->type != V_ASN1_SEQUENCE) { + NCONF_free(cnf); ++ ASN1_TYPE_free(atyp); + BIO_printf(bio_err, "ASN1_generate_nconf failed\n"); + goto end; + } +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_kiss.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_kiss.c +index 229b34c..d7e5f2c 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_kiss.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs12/p12_kiss.c +@@ -190,11 +190,17 @@ static int parse_bag(PKCS12_SAFEBAG *bag, const char *pass, int passlen, + ASN1_BMPSTRING *fname = NULL; + ASN1_OCTET_STRING *lkid = NULL; + +- if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_friendlyName))) ++ if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_friendlyName))) { ++ if (attrib->type != V_ASN1_BMPSTRING) ++ return 0; + fname = attrib->value.bmpstring; ++ } + +- if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_localKeyID))) ++ if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_localKeyID))) { ++ if (attrib->type != V_ASN1_OCTET_STRING) ++ return 0; + lkid = attrib->value.octet_string; ++ } + + switch (PKCS12_SAFEBAG_get_nid(bag)) { + case NID_keyBag: +diff --git a/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs7/pk7_doit.c b/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs7/pk7_doit.c +index f52d64a..f05ed5e 100644 +--- a/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs7/pk7_doit.c ++++ b/CryptoPkg/Library/OpensslLib/openssl/crypto/pkcs7/pk7_doit.c +@@ -1189,6 +1189,8 @@ ASN1_OCTET_STRING *PKCS7_digest_from_attributes(STACK_OF(X509_ATTRIBUTE) *sk) + ASN1_TYPE *astype; + if ((astype = get_attribute(sk, NID_pkcs9_messageDigest)) == NULL) + return NULL; ++ if (astype->type != V_ASN1_OCTET_STRING) ++ return NULL; + return astype->value.octet_string; + } + +-- +2.45.4 + diff --git a/SPECS/edk2/edk2.spec b/SPECS/edk2/edk2.spec index 3a5e7ab3640..5184da8c1a8 100644 --- a/SPECS/edk2/edk2.spec +++ b/SPECS/edk2/edk2.spec @@ -55,7 +55,7 @@ ExclusiveArch: x86_64 Name: edk2 Version: %{GITDATE}git%{GITCOMMIT} -Release: 10%{?dist} +Release: 14%{?dist} Summary: UEFI firmware for 64-bit virtual machines License: Apache-2.0 
AND (BSD-2-Clause OR GPL-2.0-or-later) AND BSD-2-Clause-Patent AND BSD-3-Clause AND BSD-4-Clause AND ISC AND MIT AND LicenseRef-Fedora-Public-Domain URL: https://www.tianocore.org @@ -130,6 +130,7 @@ Patch0017: 0017-silence-.-has-a-LOAD-segment-with-RWX-permissions-wa.patch Patch0018: 0018-NetworkPkg-TcpDxe-Fixed-system-stuck-on-PXE-boot-flo.patch Patch0019: 0019-NetworkPkg-DxeNetLib-adjust-PseudoRandom-error-loggi.patch Patch0020: CVE-2024-38796.patch +Patch0021: CVE-2025-2296.patch # Patches for the vendored OpenSSL are in the range from 1000 to 1999 (inclusive). Patch1000: CVE-2022-3996.patch @@ -140,6 +141,13 @@ Patch1004: CVE-2024-2511.patch Patch1005: CVE-2024-4603.patch Patch1006: CVE-2025-3770.patch Patch1007: CVE-2025-9230.patch +Patch1008: CVE-2025-15467.patch +Patch1009: CVE-2025-2295.patch +Patch1010: CVE-2025-68160.patch +Patch1011: CVE-2025-69418.patch +Patch1012: CVE-2025-69420.patch +Patch1013: CVE-2025-69421.patch +Patch1014: CVE-2026-22796.patch # python3-devel and libuuid-devel are required for building tools. # python3-devel is also needed for varstore template generation and @@ -801,6 +809,18 @@ done /boot/efi/HvLoader.efi %changelog +* Tue Feb 03 2026 Azure Linux Security Servicing Account - 20240524git3e722403cd16-14 +- Patch for CVE-2026-22796, CVE-2025-69421, CVE-2025-69420, CVE-2025-69418, CVE-2025-68160 + +* Sun Feb 01 2026 Azure Linux Security Servicing Account - 20240524git3e722403cd16-13 +- Patch for CVE-2025-2295 + +* Fri Jan 30 2026 Azure Linux Security Servicing Account - 20240524git3e722403cd16-12 +- Patch for CVE-2025-15467 + +* Thu Dec 18 2025 Jyoti kanase - 20240524git3e722403cd16-11 +- Patch for CVE-2025-2296 + * Fri Oct 03 2025 Azure Linux Security Servicing Account - 20240524git3e722403cd16-10 - Patch for CVE-2025-9230 diff --git a/SPECS/ethtool/ethtool.spec b/SPECS/ethtool/ethtool.spec index 53bff41002f..017e1ebeee3 100644 --- a/SPECS/ethtool/ethtool.spec +++ b/SPECS/ethtool/ethtool.spec @@ -1,7 +1,7 @@ Summary: Standard Linux utility for controlling network drivers and hardware Name: ethtool Version: 6.4 -Release: 2%{?dist} +Release: 3%{?dist} License: GPLv2 URL: https://www.kernel.org/pub/software/network/ethtool/ Group: Productivity/Networking/Diagnostic @@ -11,6 +11,9 @@ Source0: https://www.kernel.org/pub/software/network/%{name}/%{name}-%{versi BuildRequires: libmnl-devel +# To avoid file conflicts +Conflicts: mlnx-ethtool + %description ethtool is the standard Linux utility for controlling network drivers and hardware, particularly for wired Ethernet devices @@ -38,6 +41,13 @@ make %{?_smp_mflags} check %{_datadir}/bash-completion/completions/ethtool %changelog +* Tue Nov 11 2025 Mayank Singh - 6.4-3 +- Updated dependency handling for kexec-tools: + Changed from hard dependency on a single package. + Allows installation to satisfy dependency with either `ethtool` or `mlnx-ethtool`. + Ensures flexibility for image builds and user choice at install time. + Added mutual exclusivity between providers to prevent file conflicts. 
+ * Thu May 16 2024 Daniel McIlvaney - 6.4-2 - Sanitize license files diff --git a/SPECS/expat/CVE-2026-24515.patch b/SPECS/expat/CVE-2026-24515.patch new file mode 100644 index 00000000000..f83bc89770a --- /dev/null +++ b/SPECS/expat/CVE-2026-24515.patch @@ -0,0 +1,169 @@ +From 8b3805af3dde1f5424bdd704da688def3a81ff9f Mon Sep 17 00:00:00 2001 +From: Sebastian Pipping +Date: Sun, 18 Jan 2026 17:53:37 +0100 +Subject: [PATCH 1/3] lib: Make XML_ExternalEntityParserCreate copy unknown + encoding handler user data + +Patch suggested by Artiphishell Inc. +--- + lib/xmlparse.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/lib/xmlparse.c b/lib/xmlparse.c +index e2847b1..d804753 100644 +--- a/lib/xmlparse.c ++++ b/lib/xmlparse.c +@@ -1742,6 +1742,7 @@ XML_ExternalEntityParserCreate(XML_Parser oldParser, const XML_Char *context, + XML_ExternalEntityRefHandler oldExternalEntityRefHandler; + XML_SkippedEntityHandler oldSkippedEntityHandler; + XML_UnknownEncodingHandler oldUnknownEncodingHandler; ++ void *oldUnknownEncodingHandlerData; + XML_ElementDeclHandler oldElementDeclHandler; + XML_AttlistDeclHandler oldAttlistDeclHandler; + XML_EntityDeclHandler oldEntityDeclHandler; +@@ -1787,6 +1788,7 @@ XML_ExternalEntityParserCreate(XML_Parser oldParser, const XML_Char *context, + oldExternalEntityRefHandler = parser->m_externalEntityRefHandler; + oldSkippedEntityHandler = parser->m_skippedEntityHandler; + oldUnknownEncodingHandler = parser->m_unknownEncodingHandler; ++ oldUnknownEncodingHandlerData = parser->m_unknownEncodingHandlerData; + oldElementDeclHandler = parser->m_elementDeclHandler; + oldAttlistDeclHandler = parser->m_attlistDeclHandler; + oldEntityDeclHandler = parser->m_entityDeclHandler; +@@ -1847,6 +1849,7 @@ XML_ExternalEntityParserCreate(XML_Parser oldParser, const XML_Char *context, + parser->m_externalEntityRefHandler = oldExternalEntityRefHandler; + parser->m_skippedEntityHandler = oldSkippedEntityHandler; + parser->m_unknownEncodingHandler = oldUnknownEncodingHandler; ++ parser->m_unknownEncodingHandlerData = oldUnknownEncodingHandlerData; + parser->m_elementDeclHandler = oldElementDeclHandler; + parser->m_attlistDeclHandler = oldAttlistDeclHandler; + parser->m_entityDeclHandler = oldEntityDeclHandler; +-- +2.45.4 + + +From 0b2599c6eff2f0f9d20ec7423621f2928f989c1a Mon Sep 17 00:00:00 2001 +From: Sebastian Pipping +Date: Sun, 18 Jan 2026 17:26:31 +0100 +Subject: [PATCH 2/3] tests: Cover effect of XML_SetUnknownEncodingHandler user + data + +--- + tests/basic_tests.c | 42 ++++++++++++++++++++++++++++++++++++++++++ + tests/handlers.c | 10 ++++++++++ + tests/handlers.h | 3 +++ + 3 files changed, 55 insertions(+) + +diff --git a/tests/basic_tests.c b/tests/basic_tests.c +index da5c0d4..2db2a76 100644 +--- a/tests/basic_tests.c ++++ b/tests/basic_tests.c +@@ -4440,6 +4440,46 @@ START_TEST(test_unknown_encoding_invalid_attr_value) { + } + END_TEST + ++START_TEST(test_unknown_encoding_user_data_primary) { ++ // This test is based on ideas contributed by Artiphishell Inc. ++ const char *const text = "\n" ++ "\n"; ++ XML_Parser parser = XML_ParserCreate(NULL); ++ XML_SetUnknownEncodingHandler(parser, ++ user_data_checking_unknown_encoding_handler, ++ (void *)(intptr_t)0xC0FFEE); ++ ++ assert_true(_XML_Parse_SINGLE_BYTES(parser, text, (int)strlen(text), XML_TRUE) ++ == XML_STATUS_OK); ++ ++ XML_ParserFree(parser); ++} ++END_TEST ++ ++START_TEST(test_unknown_encoding_user_data_secondary) { ++ // This test is based on ideas contributed by Artiphishell Inc. 
++ const char *const text_main = "\n" ++ "]>\n" ++ "&ext;\n"; ++ const char *const text_external = "\n" ++ "data"; ++ ExtTest2 test_data = {text_external, (int)strlen(text_external), NULL, NULL}; ++ XML_Parser parser = XML_ParserCreate(NULL); ++ XML_SetExternalEntityRefHandler(parser, external_entity_loader2); ++ XML_SetUnknownEncodingHandler(parser, ++ user_data_checking_unknown_encoding_handler, ++ (void *)(intptr_t)0xC0FFEE); ++ XML_SetUserData(parser, &test_data); ++ ++ assert_true(_XML_Parse_SINGLE_BYTES(parser, text_main, (int)strlen(text_main), ++ XML_TRUE) ++ == XML_STATUS_OK); ++ ++ XML_ParserFree(parser); ++} ++END_TEST ++ + /* Test an external entity parser set to use latin-1 detects UTF-16 + * BOMs correctly. + */ +@@ -6284,6 +6324,8 @@ make_basic_test_case(Suite *s) { + tcase_add_test(tc_basic, test_unknown_encoding_invalid_surrogate); + tcase_add_test(tc_basic, test_unknown_encoding_invalid_high); + tcase_add_test(tc_basic, test_unknown_encoding_invalid_attr_value); ++ tcase_add_test(tc_basic, test_unknown_encoding_user_data_primary); ++ tcase_add_test(tc_basic, test_unknown_encoding_user_data_secondary); + tcase_add_test__if_xml_ge(tc_basic, test_ext_entity_latin1_utf16le_bom); + tcase_add_test__if_xml_ge(tc_basic, test_ext_entity_latin1_utf16be_bom); + tcase_add_test__if_xml_ge(tc_basic, test_ext_entity_latin1_utf16le_bom2); +diff --git a/tests/handlers.c b/tests/handlers.c +index bdb5b0e..5078014 100644 +--- a/tests/handlers.c ++++ b/tests/handlers.c +@@ -45,6 +45,7 @@ + # undef NDEBUG /* because test suite relies on assert(...) at the moment */ + #endif + ++#include + #include + #include + #include +@@ -407,6 +408,15 @@ long_encoding_handler(void *userData, const XML_Char *encoding, + return XML_STATUS_OK; + } + ++int XMLCALL ++user_data_checking_unknown_encoding_handler(void *userData, ++ const XML_Char *encoding, ++ XML_Encoding *info) { ++ const intptr_t number = (intptr_t)userData; ++ assert_true(number == 0xC0FFEE); ++ return long_encoding_handler(userData, encoding, info); ++} ++ + /* External Entity Handlers */ + + int XMLCALL +diff --git a/tests/handlers.h b/tests/handlers.h +index 4d6a08d..ac4ca94 100644 +--- a/tests/handlers.h ++++ b/tests/handlers.h +@@ -159,6 +159,9 @@ extern int XMLCALL long_encoding_handler(void *userData, + const XML_Char *encoding, + XML_Encoding *info); + ++extern int XMLCALL user_data_checking_unknown_encoding_handler( ++ void *userData, const XML_Char *encoding, XML_Encoding *info); ++ + /* External Entity Handlers */ + + typedef struct ExtOption { +-- +2.45.4 + + +From dea97bc0c894b5494f2e4d46d6076d49dc227e8f Mon Sep 17 00:00:00 2001 +From: Sebastian Pipping +Date: Sun, 18 Jan 2026 18:19:25 +0100 +Subject: [PATCH 3/3] Changes: Document CVE-2026-24515 + +-- +2.45.4 + diff --git a/SPECS/expat/CVE-2026-25210.patch b/SPECS/expat/CVE-2026-25210.patch new file mode 100644 index 00000000000..204ac03cca5 --- /dev/null +++ b/SPECS/expat/CVE-2026-25210.patch @@ -0,0 +1,93 @@ +From 5ffd029337a8db6b3bef77ecd0a040b3e1e573f2 Mon Sep 17 00:00:00 2001 +From: Matthew Fernandez +Date: Thu, 2 Oct 2025 17:15:15 -0700 +Subject: [PATCH 1/3] lib: Make a doubling more readable + +Suggested-by: Sebastian Pipping +--- + lib/xmlparse.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/xmlparse.c b/lib/xmlparse.c +index d804753..a48acd2 100644 +--- a/lib/xmlparse.c ++++ b/lib/xmlparse.c +@@ -3492,7 +3492,7 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc, + tag->name.strLen = convLen; + break; + } +- bufSize = 
(int)(tag->bufEnd - tag->buf) << 1; ++ bufSize = (int)(tag->bufEnd - tag->buf) * 2; + { + char *temp = (char *)REALLOC(parser, tag->buf, bufSize); + if (temp == NULL) +-- +2.45.4 + + +From 07d55b4f18ded4740946a9a436e787b3c178176c Mon Sep 17 00:00:00 2001 +From: Matthew Fernandez +Date: Thu, 2 Oct 2025 17:15:15 -0700 +Subject: [PATCH 2/3] lib: Realign a size with the `REALLOC` type signature it + is passed into + +Note that this implicitly assumes `tag->bufEnd >= tag->buf`, which should +already be guaranteed true. +--- + lib/xmlparse.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/lib/xmlparse.c b/lib/xmlparse.c +index a48acd2..ed505b7 100644 +--- a/lib/xmlparse.c ++++ b/lib/xmlparse.c +@@ -3481,7 +3481,6 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc, + const char *fromPtr = tag->rawName; + toPtr = (XML_Char *)tag->buf; + for (;;) { +- int bufSize; + int convLen; + const enum XML_Convert_Result convert_res + = XmlConvert(enc, &fromPtr, rawNameEnd, (ICHAR **)&toPtr, +@@ -3492,7 +3491,7 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc, + tag->name.strLen = convLen; + break; + } +- bufSize = (int)(tag->bufEnd - tag->buf) * 2; ++ const size_t bufSize = (size_t)(tag->bufEnd - tag->buf) * 2; + { + char *temp = (char *)REALLOC(parser, tag->buf, bufSize); + if (temp == NULL) +-- +2.45.4 + + +From 3776e1554b8b9506387ec8a0591560898fb1ef87 Mon Sep 17 00:00:00 2001 +From: Matthew Fernandez +Date: Thu, 2 Oct 2025 17:15:15 -0700 +Subject: [PATCH 3/3] lib: Introduce an integer overflow check for tag buffer + reallocation + +Suggested-by: Sebastian Pipping +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/libexpat/libexpat/pull/1075.patch +--- + lib/xmlparse.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/xmlparse.c b/lib/xmlparse.c +index ed505b7..0bf913c 100644 +--- a/lib/xmlparse.c ++++ b/lib/xmlparse.c +@@ -3491,6 +3491,8 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc, + tag->name.strLen = convLen; + break; + } ++ if (SIZE_MAX / 2 < (size_t)(tag->bufEnd - tag->buf)) ++ return XML_ERROR_NO_MEMORY; + const size_t bufSize = (size_t)(tag->bufEnd - tag->buf) * 2; + { + char *temp = (char *)REALLOC(parser, tag->buf, bufSize); +-- +2.45.4 + diff --git a/SPECS/expat/expat.spec b/SPECS/expat/expat.spec index 7b2096bedeb..271161533bd 100644 --- a/SPECS/expat/expat.spec +++ b/SPECS/expat/expat.spec @@ -2,7 +2,7 @@ Summary: An XML parser library Name: expat Version: 2.6.4 -Release: 2%{?dist} +Release: 4%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -11,6 +11,8 @@ URL: https://libexpat.github.io/ Source0: https://github.com/libexpat/libexpat/releases/download/R_%{underscore_version}/%{name}-%{version}.tar.bz2 Patch0: CVE-2024-8176.patch Patch1: CVE-2025-59375.patch +Patch2: CVE-2026-24515.patch +Patch3: CVE-2026-25210.patch Requires: %{name}-libs = %{version}-%{release} %description @@ -68,6 +70,12 @@ rm -rf %{buildroot}/%{_docdir}/%{name} %{_libdir}/libexpat.so.1* %changelog +* Mon Feb 02 2026 Azure Linux Security Servicing Account - 2.6.4-4 +- Patch for CVE-2026-25210 + +* Tue Jan 27 2026 Azure Linux Security Servicing Account - 2.6.4-3 +- Patch for CVE-2026-24515 + * Tue Sep 23 2025 Akhila Guruju - 2.6.4-2 - Fix CVE-2025-59375 with a patch diff --git a/SPECS/flannel/CVE-2025-65637.patch b/SPECS/flannel/CVE-2025-65637.patch new file mode 100644 index 00000000000..09ae386d966 --- /dev/null +++ 
b/SPECS/flannel/CVE-2025-65637.patch @@ -0,0 +1,136 @@ +From ea59e8e56bd6658b7ac5398ae1e5d1afa508e51d Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 1/2] This commit fixes a potential denial of service + vulnerability in logrus.Writer() that could be triggered by logging text + longer than 64kb without newlines. Previously, the bufio.Scanner used by + Writer() would hang indefinitely when reading such text without newlines, + causing the application to become unresponsive. + +--- + vendor/github.com/sirupsen/logrus/writer.go | 33 ++++++++++++++++++++- + 1 file changed, 32 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 72e8e3a..36032d0 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -4,6 +4,7 @@ import ( + "bufio" + "io" + "runtime" ++ "strings" + ) + + // Writer at INFO level. See WriterLevel for details. +@@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) + } + ++// Writer returns an io.Writer that writes to the logger at the info log level + func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) + } + ++// WriterLevel returns an io.Writer that writes to the logger at the given log level + func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + ++ // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace +@@ -48,23 +52,50 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + printFunc = entry.Print + } + ++ // Start a new goroutine to scan the input and write it to the logger using the specified print function. ++ // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
+ go entry.writerScanner(reader, printFunc) ++ ++ // Set a finalizer function to close the writer when it is garbage collected + runtime.SetFinalizer(writer, writerFinalizer) + + return writer + } + ++// writerScanner scans the input from the reader and writes it to the logger + func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) ++ ++ // Set the buffer size to the maximum token size to avoid buffer overflows ++ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) ++ ++ // Define a split function to split the input into chunks of up to 64KB ++ chunkSize := 64 * 1024 // 64KB ++ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { ++ if len(data) > chunkSize { ++ return chunkSize, data[:chunkSize], nil ++ } ++ return 0, nil, nil ++ } ++ ++ //Use the custom split function to split the input ++ scanner.Split(splitFunc) ++ ++ // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { +- printFunc(scanner.Text()) ++ printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } ++ ++ // If there was an error while scanning the input, log an error + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } ++ ++ // Close the reader when we are done + reader.Close() + } + ++// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected + func writerFinalizer(writer *io.PipeWriter) { + writer.Close() + } +-- +2.45.4 + + +From c673bd5bba11b57488a4aa0478caa78893c772dc Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 2/2] Scan text in 64KB chunks + +This commit fixes a potential denial of service +vulnerability in logrus.Writer() that could be +triggered by logging text longer than 64KB +without newlines. Previously, the bufio.Scanner +used by Writer() would hang indefinitely when +reading such text without newlines, causing the +application to become unresponsive. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/sirupsen/logrus/pull/1376.patch +--- + vendor/github.com/sirupsen/logrus/writer.go | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 36032d0..7e7703c 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -75,7 +75,8 @@ func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ... 
+ if len(data) > chunkSize { + return chunkSize, data[:chunkSize], nil + } +- return 0, nil, nil ++ ++ return len(data), data, nil + } + + //Use the custom split function to split the input +-- +2.45.4 + diff --git a/SPECS/flannel/flannel.spec b/SPECS/flannel/flannel.spec index f332b8786ea..cbaab6e41cd 100644 --- a/SPECS/flannel/flannel.spec +++ b/SPECS/flannel/flannel.spec @@ -3,7 +3,7 @@ Summary: Simple and easy way to configure a layer 3 network fabric designed for Kubernetes Name: flannel Version: 0.24.2 -Release: 21%{?dist} +Release: 24%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -16,9 +16,10 @@ Patch1: CVE-2023-44487.patch Patch2: CVE-2023-45288.patch Patch3: CVE-2025-30204.patch Patch4: CVE-2024-51744.patch +Patch5: CVE-2025-65637.patch BuildRequires: gcc BuildRequires: glibc-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: golang < 1.25 BuildRequires: kernel-headers @@ -52,6 +53,15 @@ install -p -m 755 -t %{buildroot}%{_bindir} ./dist/flanneld %{_bindir}/flanneld %changelog +* Thu Jan 22 2026 Kanishk Bansal - 0.24.2-24 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 0.24.2-23 +- Bump to rebuild with updated glibc + +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 0.24.2-22 +- Patch for CVE-2025-65637 + * Mon Nov 10 2025 Andrew Phelps - 0.24.2-21 - Bump to rebuild with updated glibc diff --git a/SPECS/fluent-bit/CVE-2025-12969.patch b/SPECS/fluent-bit/CVE-2025-12969.patch new file mode 100644 index 00000000000..1515d2a49cb --- /dev/null +++ b/SPECS/fluent-bit/CVE-2025-12969.patch @@ -0,0 +1,321 @@ +From e42b5cebbdd3d9c702babf446a3143abf78e2d87 Mon Sep 17 00:00:00 2001 +From: Hiroshi Hatake +Date: Fri, 3 Oct 2025 16:14:34 +0900 +Subject: [PATCH] in_forward: Handle shared_key lifetime correctly + +Signed-off-by: Hiroshi Hatake + +Upstream Patch reference: https://patch-diff.githubusercontent.com/raw/fluent/fluent-bit/pull/10973.diff +--- + plugins/in_forward/fw.c | 10 +++ + plugins/in_forward/fw.h | 4 +- + plugins/in_forward/fw_config.c | 41 +++++++-- + plugins/in_forward/fw_conn.c | 13 ++- + tests/runtime/in_forward.c | 156 +++++++++++++++++++++++++++++++++ + 5 files changed, 215 insertions(+), 9 deletions(-) + +diff --git a/plugins/in_forward/fw.c b/plugins/in_forward/fw.c +index 20c3946..9b55409 100644 +--- a/plugins/in_forward/fw.c ++++ b/plugins/in_forward/fw.c +@@ -334,6 +334,16 @@ static int in_fw_init(struct flb_input_instance *ins, + return -1; + } + ++ /* Users-only configuration must be rejected unless a (possibly empty) shared key is enabled. 
*/ ++ if (mk_list_size(&ctx->users) > 0 && ++ ctx->shared_key == NULL && ++ ctx->empty_shared_key == FLB_FALSE) { ++ flb_plg_error(ctx->ins, "security.users is set but no shared_key or empty_shared_key"); ++ delete_users(ctx); ++ fw_config_destroy(ctx); ++ return -1; ++ } ++ + flb_input_downstream_set(ctx->downstream, ctx->ins); + + flb_net_socket_nonblocking(ctx->downstream->server_fd); +diff --git a/plugins/in_forward/fw.h b/plugins/in_forward/fw.h +index 4cd270e..455826a 100644 +--- a/plugins/in_forward/fw.h ++++ b/plugins/in_forward/fw.h +@@ -60,9 +60,11 @@ struct flb_in_fw_config { + flb_sds_t unix_perm_str; /* Permission (config map) */ + + /* secure forward */ +- flb_sds_t shared_key; /* shared key */ ++ flb_sds_t shared_key; /* shared key */ ++ int owns_shared_key; /* own flag of shared key */ + flb_sds_t self_hostname; /* hostname used in certificate */ + struct mk_list users; /* username and password pairs */ ++ int empty_shared_key; /* use an empty string as shared key */ + + int coll_fd; + struct flb_downstream *downstream; /* Client manager */ +diff --git a/plugins/in_forward/fw_config.c b/plugins/in_forward/fw_config.c +index b3a2293..235ee8c 100644 +--- a/plugins/in_forward/fw_config.c ++++ b/plugins/in_forward/fw_config.c +@@ -26,6 +26,35 @@ + #include "fw_conn.h" + #include "fw_config.h" + ++static void fw_destroy_shared_key(struct flb_in_fw_config *config) ++{ ++ if (config->owns_shared_key && config->shared_key) { ++ flb_sds_destroy(config->shared_key); ++ } ++ ++ config->shared_key = NULL; ++ config->owns_shared_key = FLB_FALSE; ++} ++ ++static int fw_create_empty_shared_key(struct flb_in_fw_config *config, ++ struct flb_input_instance *i_ins) ++{ ++ flb_sds_t empty_key = flb_sds_create(""); ++ if (!empty_key) { ++ flb_plg_error(i_ins, "empty shared_key alloc failed"); ++ return -1; ++ } ++ else { ++ if (config->owns_shared_key && config->shared_key) { ++ flb_sds_destroy(config->shared_key); ++ } ++ config->shared_key = empty_key; ++ config->owns_shared_key = FLB_TRUE; ++ } ++ ++ return 0; ++} ++ + struct flb_in_fw_config *fw_config_init(struct flb_input_instance *i_ins) + { + char tmp[16]; +@@ -86,12 +115,10 @@ struct flb_in_fw_config *fw_config_init(struct flb_input_instance *i_ins) + } + + /* Shared Key */ +- p = flb_input_get_property("shared_key", i_ins); +- if (p) { +- config->shared_key = flb_sds_create(p); +- } +- else { +- config->shared_key = NULL; ++ if (config->empty_shared_key) { ++ if (fw_create_empty_shared_key(config, i_ins) == -1) { ++ return NULL; ++ } + } + + /* Self Hostname */ +@@ -132,7 +159,7 @@ int fw_config_destroy(struct flb_in_fw_config *config) + flb_free(config->tcp_port); + } + +- flb_sds_destroy(config->shared_key); ++ fw_destroy_shared_key(config); + flb_sds_destroy(config->self_hostname); + + flb_free(config); +diff --git a/plugins/in_forward/fw_conn.c b/plugins/in_forward/fw_conn.c +index 9446525..90219d0 100644 +--- a/plugins/in_forward/fw_conn.c ++++ b/plugins/in_forward/fw_conn.c +@@ -142,7 +142,18 @@ struct fw_conn *fw_conn_add(struct flb_connection *connection, struct flb_in_fw_ + } + + conn->handshake_status = FW_HANDSHAKE_ESTABLISHED; +- if (ctx->shared_key != NULL) { ++ /* ++ * Always force the secure-forward handshake when: ++ * - a shared key is configured, or ++ * - empty_shared_key is enabled (empty string shared key), or ++ * - user authentication is configured (users > 0). ++ * ++ * This closes the gap where "users-only" previously skipped authentication entirely. 
++ */ ++ conn->handshake_status = FW_HANDSHAKE_ESTABLISHED; /* default */ ++ if (ctx->shared_key != NULL || ++ ctx->empty_shared_key == FLB_TRUE || ++ mk_list_size(&ctx->users) > 0) { + conn->handshake_status = FW_HANDSHAKE_HELO; + helo = flb_malloc(sizeof(struct flb_in_fw_helo)); + if (!helo) { +diff --git a/tests/runtime/in_forward.c b/tests/runtime/in_forward.c +index 6cabfa9..fdef739 100644 +--- a/tests/runtime/in_forward.c ++++ b/tests/runtime/in_forward.c +@@ -566,6 +566,158 @@ void flb_test_unix_perm() + #endif /* FLB_HAVE_UNIX_SOCKET */ + + ++static int cb_count_only(void *record, size_t size, void *data) ++{ ++ int n = get_output_num(); ++ set_output_num(n + 1); ++ flb_free(record); ++ return 0; ++} ++ ++ ++static flb_ctx_t *fw_make_ctx_with_forward(int *in_ffd_out, int *out_ffd_out) ++{ ++ struct flb_lib_out_cb cb = {0}; ++ flb_ctx_t *ctx; ++ int in_ffd, out_ffd, ret; ++ ++ ctx = flb_create(); ++ TEST_CHECK(ctx != NULL); ++ if (!ctx) { return NULL; } ++ ++ flb_service_set(ctx, ++ "Flush", "0.200000000", ++ "Grace", "1", ++ "Log_Level", "error", ++ NULL); ++ ++ /* forward input */ ++ in_ffd = flb_input(ctx, (char *) "forward", NULL); ++ TEST_CHECK(in_ffd >= 0); ++ if (in_ffd < 0) { flb_destroy(ctx); return NULL; } ++ ++ /* lib output: count only (no payload check) */ ++ cb.cb = cb_count_only; ++ cb.data = NULL; ++ out_ffd = flb_output(ctx, (char *) "lib", (void *) &cb); ++ TEST_CHECK(out_ffd >= 0); ++ if (out_ffd < 0) { ++ flb_destroy(ctx); ++ return NULL; ++ } ++ ret = flb_output_set(ctx, out_ffd, ++ "match", "*", ++ "format", "json", ++ NULL); ++ TEST_CHECK(ret == 0); ++ ++ if (in_ffd_out) *in_ffd_out = in_ffd; ++ if (out_ffd_out) *out_ffd_out = out_ffd; ++ return ctx; ++} ++ ++/* 1) users-only => must fail to start (fail-close) */ ++void flb_test_fw_auth_users_only_fail_start() ++{ ++ flb_ctx_t *ctx; ++ int in_ffd, out_ffd, ret; ++ ++ ctx = fw_make_ctx_with_forward(&in_ffd, &out_ffd); ++ TEST_CHECK(ctx != NULL); ++ if (!ctx) { ++ return; ++ } ++ ++ ret = flb_input_set(ctx, in_ffd, ++ "tag", "test", ++ "security.users", "alice s3cr3t", ++ NULL); ++ TEST_CHECK(ret == 0); ++ ++ ret = flb_start(ctx); ++ TEST_CHECK(ret != 0); ++ if (ret == 0) { ++ TEST_MSG("users-only config unexpectedly started; fail-close not enforced"); ++ flb_stop(ctx); ++ } ++ flb_destroy(ctx); ++} ++ ++/* 2) empty_shared_key + users => start OK */ ++void flb_test_fw_auth_empty_shared_key_plus_users_start_ok() ++{ ++ flb_ctx_t *ctx; ++ int in_ffd, out_ffd, ret; ++ ++ ctx = fw_make_ctx_with_forward(&in_ffd, &out_ffd); ++ TEST_CHECK(ctx != NULL); ++ if (!ctx) { return; } ++ ++ ret = flb_input_set(ctx, in_ffd, ++ "tag", "test", ++ "empty_shared_key", "true", ++ "security.users", "alice s3cr3t", ++ NULL); ++ TEST_CHECK(ret == 0); ++ ++ ret = flb_start(ctx); ++ TEST_CHECK(ret == 0); ++ if (ret == 0) { ++ flb_stop(ctx); ++ } ++ flb_destroy(ctx); ++} ++ ++/* 3) shared_key only => start OK (backward compatible) */ ++void flb_test_fw_auth_shared_key_only_start_ok() ++{ ++ flb_ctx_t *ctx; ++ int in_ffd, out_ffd, ret; ++ ++ ctx = fw_make_ctx_with_forward(&in_ffd, &out_ffd); ++ TEST_CHECK(ctx != NULL); ++ if (!ctx) { return; } ++ ++ ret = flb_input_set(ctx, in_ffd, ++ "tag", "test", ++ "shared_key", "k", ++ NULL); ++ TEST_CHECK(ret == 0); ++ ++ ret = flb_start(ctx); ++ TEST_CHECK(ret == 0); ++ if (ret == 0) { ++ flb_stop(ctx); ++ } ++ flb_destroy(ctx); ++} ++ ++/* 4) shared_key + users => start OK (both checks) */ ++void flb_test_fw_auth_shared_key_plus_users_start_ok() ++{ ++ flb_ctx_t *ctx; ++ int in_ffd, out_ffd, ret; 
++ ++ ctx = fw_make_ctx_with_forward(&in_ffd, &out_ffd); ++ TEST_CHECK(ctx != NULL); ++ if (!ctx) { return; } ++ ++ ret = flb_input_set(ctx, in_ffd, ++ "tag", "test", ++ "shared_key", "k", ++ "security.users", "alice s3cr3t", ++ NULL); ++ TEST_CHECK(ret == 0); ++ ++ ret = flb_start(ctx); ++ TEST_CHECK(ret == 0); ++ if (ret == 0) { ++ flb_stop(ctx); ++ } ++ flb_destroy(ctx); ++} ++ ++ + TEST_LIST = { + {"forward", flb_test_forward}, + {"forward_port", flb_test_forward_port}, +@@ -574,6 +726,10 @@ TEST_LIST = { + {"unix_path", flb_test_unix_path}, + {"unix_perm", flb_test_unix_perm}, + #endif ++ {"fw_auth_users_only_fail_start", flb_test_fw_auth_users_only_fail_start}, ++ {"fw_auth_empty_shared_key_plus_users_start_ok", flb_test_fw_auth_empty_shared_key_plus_users_start_ok}, ++ {"fw_auth_shared_key_only_start_ok", flb_test_fw_auth_shared_key_only_start_ok}, ++ {"fw_auth_shared_key_plus_users_start_ok", flb_test_fw_auth_shared_key_plus_users_start_ok}, + {NULL, NULL} + }; + +-- +2.45.4 + diff --git a/SPECS/fluent-bit/CVE-2025-12977.patch b/SPECS/fluent-bit/CVE-2025-12977.patch new file mode 100644 index 00000000000..fd7c70f1d14 --- /dev/null +++ b/SPECS/fluent-bit/CVE-2025-12977.patch @@ -0,0 +1,595 @@ +From d9fa9ab839ec684efd0132d6d672a0cf961790a1 Mon Sep 17 00:00:00 2001 +From: Eduardo Silva +Date: Thu, 2 Oct 2025 10:40:39 -0600 +Subject: [PATCH] in_elasticsearch: reimplement tag_key lookup with record + accessor api + +Replace manual key lookup with record accessor pattern for better +performance and support for nested/complex key patterns. + +Signed-off-by: Eduardo Silva + +Upstream Patch reference: https://patch-diff.githubusercontent.com/raw/fluent/fluent-bit/pull/10967.diff +--- + plugins/in_elasticsearch/in_elasticsearch.h | 6 +- + .../in_elasticsearch_bulk_prot.c | 77 ++++--------- + .../in_elasticsearch_config.c | 13 +++ + plugins/in_http/http.h | 4 +- + plugins/in_http/http_config.c | 17 ++- + plugins/in_http/http_prot.c | 104 +++++++----------- + plugins/in_splunk/splunk.c | 6 + + plugins/in_splunk/splunk.h | 4 +- + plugins/in_splunk/splunk_config.c | 14 +++ + plugins/in_splunk/splunk_prot.c | 76 ++++--------- + 10 files changed, 143 insertions(+), 178 deletions(-) + +diff --git a/plugins/in_elasticsearch/in_elasticsearch.h b/plugins/in_elasticsearch/in_elasticsearch.h +index 10deb62..7911cb7 100644 +--- a/plugins/in_elasticsearch/in_elasticsearch.h ++++ b/plugins/in_elasticsearch/in_elasticsearch.h +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -35,14 +36,15 @@ + struct flb_in_elasticsearch { + flb_sds_t listen; + flb_sds_t tcp_port; +- const char *tag_key; +- const char *meta_key; ++ flb_sds_t tag_key; ++ flb_sds_t meta_key; + flb_sds_t hostname; + flb_sds_t es_version; + char cluster_name[16]; + char node_name[12]; + + struct flb_log_event_encoder *log_encoder; ++ struct flb_record_accessor *ra_tag_key; + + struct flb_input_instance *ins; + +diff --git a/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c b/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c +index c705af6..f7414a7 100644 +--- a/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c ++++ b/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c +@@ -22,6 +22,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -245,67 +247,32 @@ static int send_response(struct in_elasticsearch_bulk_conn *conn, int http_statu + /* implements functionality to get tag from key in record */ + static flb_sds_t tag_key(struct flb_in_elasticsearch *ctx, 
msgpack_object *map) + { +- size_t map_size = map->via.map.size; +- msgpack_object_kv *kv; +- msgpack_object key; +- msgpack_object val; +- char *key_str = NULL; +- char *val_str = NULL; +- size_t key_str_size = 0; +- size_t val_str_size = 0; +- int j; +- int check = FLB_FALSE; +- int found = FLB_FALSE; +- flb_sds_t tag; +- +- kv = map->via.map.ptr; ++ flb_sds_t tag = NULL; ++ struct flb_ra_value *ra_val; + +- for(j=0; j < map_size; j++) { +- check = FLB_FALSE; +- found = FLB_FALSE; +- key = (kv+j)->key; +- if (key.type == MSGPACK_OBJECT_BIN) { +- key_str = (char *) key.via.bin.ptr; +- key_str_size = key.via.bin.size; +- check = FLB_TRUE; +- } +- if (key.type == MSGPACK_OBJECT_STR) { +- key_str = (char *) key.via.str.ptr; +- key_str_size = key.via.str.size; +- check = FLB_TRUE; +- } +- +- if (check == FLB_TRUE) { +- if (strncmp(ctx->tag_key, key_str, key_str_size) == 0) { +- val = (kv+j)->val; +- if (val.type == MSGPACK_OBJECT_BIN) { +- val_str = (char *) val.via.bin.ptr; +- val_str_size = val.via.str.size; +- found = FLB_TRUE; +- break; +- } +- if (val.type == MSGPACK_OBJECT_STR) { +- val_str = (char *) val.via.str.ptr; +- val_str_size = val.via.str.size; +- found = FLB_TRUE; +- break; +- } +- } +- } ++ /* If no record accessor is configured, return NULL */ ++ if (!ctx->ra_tag_key) { ++ return NULL; + } + +- if (found == FLB_TRUE) { +- tag = flb_sds_create_len(val_str, val_str_size); +- if (!tag) { +- flb_errno(); +- return NULL; +- } +- return tag; ++ /* Use record accessor to get the value */ ++ ra_val = flb_ra_get_value_object(ctx->ra_tag_key, *map); ++ if (!ra_val) { ++ flb_plg_warn(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key); ++ return NULL; + } + ++ /* Convert the value to string */ ++ if (ra_val->type == FLB_RA_STRING) { ++ tag = flb_sds_create_len(ra_val->o.via.str.ptr, ra_val->o.via.str.size); ++ } ++ else { ++ flb_plg_error(ctx->ins, "tag_key %s value is not a string or binary", ctx->tag_key); ++ } + +- flb_plg_error(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key); +- return NULL; ++ /* Clean up the record accessor value */ ++ flb_ra_key_value_destroy(ra_val); ++ return tag; + } + + static int get_write_op(struct flb_in_elasticsearch *ctx, msgpack_object *map, flb_sds_t *out_write_op, size_t *out_key_size) +diff --git a/plugins/in_elasticsearch/in_elasticsearch_config.c b/plugins/in_elasticsearch/in_elasticsearch_config.c +index 56b5a4e..18da268 100644 +--- a/plugins/in_elasticsearch/in_elasticsearch_config.c ++++ b/plugins/in_elasticsearch/in_elasticsearch_config.c +@@ -67,12 +67,25 @@ struct flb_in_elasticsearch *in_elasticsearch_config_create(struct flb_input_ins + return NULL; + } + ++ /* Create record accessor for tag_key if specified */ ++ if (ctx->tag_key) { ++ ctx->ra_tag_key = flb_ra_create(ctx->tag_key, FLB_TRUE); ++ if (!ctx->ra_tag_key) { ++ flb_plg_error(ctx->ins, "invalid record accessor pattern for tag_key: %s", ctx->tag_key); ++ in_elasticsearch_config_destroy(ctx); ++ return NULL; ++ } ++ } + + return ctx; + } + + int in_elasticsearch_config_destroy(struct flb_in_elasticsearch *ctx) + { ++ if (ctx->ra_tag_key) { ++ flb_ra_destroy(ctx->ra_tag_key); ++ } ++ + flb_log_event_encoder_destroy(ctx->log_encoder); + + /* release all connections */ +diff --git a/plugins/in_http/http.h b/plugins/in_http/http.h +index 4298a37..2e37967 100644 +--- a/plugins/in_http/http.h ++++ b/plugins/in_http/http.h +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -36,7 +37,8 @@ struct flb_http { + int 
successful_response_code; + flb_sds_t listen; + flb_sds_t tcp_port; +- const char *tag_key; ++ flb_sds_t tag_key; ++ struct flb_record_accessor *ra_tag_key; + + /* Success HTTP headers */ + struct mk_list *success_headers; +diff --git a/plugins/in_http/http_config.c b/plugins/in_http/http_config.c +index 343e699..eb3afc8 100644 +--- a/plugins/in_http/http_config.c ++++ b/plugins/in_http/http_config.c +@@ -69,9 +69,7 @@ struct flb_http *http_config_create(struct flb_input_instance *ins) + + if (ret != FLB_EVENT_ENCODER_SUCCESS) { + flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret); +- + http_config_destroy(ctx); +- + return NULL; + } + +@@ -79,10 +77,19 @@ struct flb_http *http_config_create(struct flb_input_instance *ins) + + if (ctx->success_headers_str == NULL) { + http_config_destroy(ctx); +- + return NULL; + } + ++ /* Create record accessor for tag_key if specified */ ++ if (ctx->tag_key) { ++ ctx->ra_tag_key = flb_ra_create(ctx->tag_key, FLB_TRUE); ++ if (!ctx->ra_tag_key) { ++ flb_plg_error(ctx->ins, "invalid record accessor pattern for tag_key: %s", ctx->tag_key); ++ http_config_destroy(ctx); ++ return NULL; ++ } ++ } ++ + flb_config_map_foreach(header_iterator, header_pair, ctx->success_headers) { + header_name = mk_list_entry_first(header_pair->val.list, + struct flb_slist_entry, +@@ -126,6 +133,10 @@ struct flb_http *http_config_create(struct flb_input_instance *ins) + + int http_config_destroy(struct flb_http *ctx) + { ++ if (ctx->ra_tag_key) { ++ flb_ra_destroy(ctx->ra_tag_key); ++ } ++ + /* release all connections */ + http_conn_release_all(ctx); + +diff --git a/plugins/in_http/http_prot.c b/plugins/in_http/http_prot.c +index f38264e..5d9bc8a 100644 +--- a/plugins/in_http/http_prot.c ++++ b/plugins/in_http/http_prot.c +@@ -21,6 +21,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -36,8 +38,8 @@ static inline char hex2nibble(char c) + if ((c >= 0x30) && (c <= '9')) { + return c - 0x30; + } +- // 0x30-0x39 are digits, 0x41-0x46 A-F, +- // so there is a gap at 0x40 ++ ++ /* 0x30-0x39 are digits, 0x41-0x46 A-F, so there is a gap at 0x40 */ + if ((c >= 'A') && (c <= 'F')) { + return (c - 'A') + 10; + } +@@ -145,70 +147,54 @@ static int send_response(struct http_conn *conn, int http_status, char *message) + return 0; + } + +-/* implements functionality to get tag from key in record */ +-static flb_sds_t tag_key(struct flb_http *ctx, msgpack_object *map) ++static void sanitize_tag(flb_sds_t tag) + { +- size_t map_size = map->via.map.size; +- msgpack_object_kv *kv; +- msgpack_object key; +- msgpack_object val; +- char *key_str = NULL; +- char *val_str = NULL; +- size_t key_str_size = 0; +- size_t val_str_size = 0; +- int j; +- int check = FLB_FALSE; +- int found = FLB_FALSE; +- flb_sds_t tag; ++ size_t i; + +- kv = map->via.map.ptr; ++ if (!tag) { ++ return; ++ } + +- for(j=0; j < map_size; j++) { +- check = FLB_FALSE; +- found = FLB_FALSE; +- key = (kv+j)->key; +- if (key.type == MSGPACK_OBJECT_BIN) { +- key_str = (char *) key.via.bin.ptr; +- key_str_size = key.via.bin.size; +- check = FLB_TRUE; +- } +- if (key.type == MSGPACK_OBJECT_STR) { +- key_str = (char *) key.via.str.ptr; +- key_str_size = key.via.str.size; +- check = FLB_TRUE; ++ for (i = 0; i < flb_sds_len(tag); i++) { ++ if (!isalnum(tag[i]) && tag[i] != '_' && tag[i] != '.') { ++ tag[i] = '_'; + } ++ } ++} + +- if (check == FLB_TRUE) { +- if (strncmp(ctx->tag_key, key_str, key_str_size) == 0) { +- val = (kv+j)->val; +- if (val.type == MSGPACK_OBJECT_BIN) { +- 
val_str = (char *) val.via.bin.ptr; +- val_str_size = val.via.str.size; +- found = FLB_TRUE; +- break; +- } +- if (val.type == MSGPACK_OBJECT_STR) { +- val_str = (char *) val.via.str.ptr; +- val_str_size = val.via.str.size; +- found = FLB_TRUE; +- break; +- } +- } +- } ++/* implements functionality to get tag from key in record */ ++static flb_sds_t tag_key(struct flb_http *ctx, msgpack_object *map) ++{ ++ struct flb_ra_value *ra_val; ++ flb_sds_t tag = NULL; ++ ++ /* If no record accessor is configured, return NULL */ ++ if (!ctx->ra_tag_key) { ++ return NULL; + } + +- if (found == FLB_TRUE) { +- tag = flb_sds_create_len(val_str, val_str_size); +- if (!tag) { +- flb_errno(); +- return NULL; ++ /* Use record accessor to get the value */ ++ ra_val = flb_ra_get_value_object(ctx->ra_tag_key, *map); ++ if (!ra_val) { ++ flb_plg_debug(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key); ++ return NULL; ++ } ++ ++ /* Convert the value to string */ ++ if (ra_val->type == FLB_RA_STRING) { ++ tag = flb_sds_create_len(ra_val->o.via.str.ptr, ra_val->o.via.str.size); ++ if (tag) { ++ sanitize_tag(tag); + } +- return tag; ++ } ++ else { ++ flb_plg_debug(ctx->ins, "tag_key %s value is not a string", ctx->tag_key); + } + ++ /* Clean up the record accessor value */ ++ flb_ra_key_value_destroy(ra_val); + +- flb_plg_error(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key); +- return NULL; ++ return tag; + } + + static int process_pack_record(struct flb_http *ctx, struct flb_time *tm, +@@ -586,7 +572,6 @@ int http_prot_handle(struct flb_http *ctx, struct http_conn *conn, + struct mk_http_session *session, + struct mk_http_request *request) + { +- int i; + int ret; + int len; + char *uri; +@@ -635,12 +620,7 @@ int http_prot_handle(struct flb_http *ctx, struct http_conn *conn, + /* New tag skipping the URI '/' */ + flb_sds_cat_safe(&tag, uri + 1, len - 1); + +- /* Sanitize, only allow alphanum chars */ +- for (i = 0; i < flb_sds_len(tag); i++) { +- if (!isalnum(tag[i]) && tag[i] != '_' && tag[i] != '.') { +- tag[i] = '_'; +- } +- } ++ sanitize_tag(tag); + } + + mk_mem_free(uri); +diff --git a/plugins/in_splunk/splunk.c b/plugins/in_splunk/splunk.c +index b10dfa8..edab642 100644 +--- a/plugins/in_splunk/splunk.c ++++ b/plugins/in_splunk/splunk.c +@@ -133,6 +133,9 @@ static int in_splunk_init(struct flb_input_instance *ins, + ctx->http_server.request_callback = splunk_prot_handle_ng; + + flb_input_downstream_set(ctx->http_server.downstream, ctx->ins); ++ ++ flb_plg_info(ctx->ins, "listening on %s:%u", ++ ins->host.listen, ins->host.port); + } + else { + ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP, +@@ -155,6 +158,8 @@ static int in_splunk_init(struct flb_input_instance *ins, + + flb_input_downstream_set(ctx->downstream, ctx->ins); + ++ flb_plg_info(ctx->ins, "listening on %s:%s", ctx->listen, ctx->tcp_port); ++ + /* Collect upon data available on the standard input */ + ret = flb_input_set_collector_socket(ins, + in_splunk_collect, +@@ -170,6 +175,7 @@ static int in_splunk_init(struct flb_input_instance *ins, + ctx->collector_id = ret; + } + ++ + return 0; + } + +diff --git a/plugins/in_splunk/splunk.h b/plugins/in_splunk/splunk.h +index 5dc4645..ef04edf 100644 +--- a/plugins/in_splunk/splunk.h ++++ b/plugins/in_splunk/splunk.h +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -41,7 +42,8 @@ struct flb_splunk_tokens { + struct flb_splunk { + flb_sds_t listen; + flb_sds_t tcp_port; +- const char *tag_key; ++ flb_sds_t tag_key; ++ struct 
flb_record_accessor *ra_tag_key; + + /* Success HTTP headers */ + struct mk_list *success_headers; +diff --git a/plugins/in_splunk/splunk_config.c b/plugins/in_splunk/splunk_config.c +index a7c5886..3907d5a 100644 +--- a/plugins/in_splunk/splunk_config.c ++++ b/plugins/in_splunk/splunk_config.c +@@ -229,11 +229,25 @@ struct flb_splunk *splunk_config_create(struct flb_input_instance *ins) + } + } + ++ /* Create record accessor for tag_key if specified */ ++ if (ctx->tag_key) { ++ ctx->ra_tag_key = flb_ra_create(ctx->tag_key, FLB_TRUE); ++ if (!ctx->ra_tag_key) { ++ flb_plg_error(ctx->ins, "invalid record accessor pattern for tag_key: %s", ctx->tag_key); ++ splunk_config_destroy(ctx); ++ return NULL; ++ } ++ } ++ + return ctx; + } + + int splunk_config_destroy(struct flb_splunk *ctx) + { ++ if (ctx->ra_tag_key) { ++ flb_ra_destroy(ctx->ra_tag_key); ++ } ++ + /* release all connections */ + splunk_conn_release_all(ctx); + +diff --git a/plugins/in_splunk/splunk_prot.c b/plugins/in_splunk/splunk_prot.c +index cf614b0..f53f24c 100644 +--- a/plugins/in_splunk/splunk_prot.c ++++ b/plugins/in_splunk/splunk_prot.c +@@ -22,6 +22,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -149,67 +151,33 @@ static int send_json_message_response(struct splunk_conn *conn, int http_status, + /* implements functionality to get tag from key in record */ + static flb_sds_t tag_key(struct flb_splunk *ctx, msgpack_object *map) + { +- size_t map_size = map->via.map.size; +- msgpack_object_kv *kv; +- msgpack_object key; +- msgpack_object val; +- char *key_str = NULL; +- char *val_str = NULL; +- size_t key_str_size = 0; +- size_t val_str_size = 0; +- int j; +- int check = FLB_FALSE; +- int found = FLB_FALSE; +- flb_sds_t tag; ++ flb_sds_t tag = NULL; ++ struct flb_ra_value *ra_val; + +- kv = map->via.map.ptr; +- +- for(j=0; j < map_size; j++) { +- check = FLB_FALSE; +- found = FLB_FALSE; +- key = (kv+j)->key; +- if (key.type == MSGPACK_OBJECT_BIN) { +- key_str = (char *) key.via.bin.ptr; +- key_str_size = key.via.bin.size; +- check = FLB_TRUE; +- } +- if (key.type == MSGPACK_OBJECT_STR) { +- key_str = (char *) key.via.str.ptr; +- key_str_size = key.via.str.size; +- check = FLB_TRUE; +- } ++ /* If no record accessor is configured, return NULL */ ++ if (!ctx->ra_tag_key) { ++ return NULL; ++ } + +- if (check == FLB_TRUE) { +- if (strncmp(ctx->tag_key, key_str, key_str_size) == 0) { +- val = (kv+j)->val; +- if (val.type == MSGPACK_OBJECT_BIN) { +- val_str = (char *) val.via.bin.ptr; +- val_str_size = val.via.str.size; +- found = FLB_TRUE; +- break; +- } +- if (val.type == MSGPACK_OBJECT_STR) { +- val_str = (char *) val.via.str.ptr; +- val_str_size = val.via.str.size; +- found = FLB_TRUE; +- break; +- } +- } +- } ++ /* Use record accessor to get the value */ ++ ra_val = flb_ra_get_value_object(ctx->ra_tag_key, *map); ++ if (!ra_val) { ++ flb_plg_debug(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key); ++ return NULL; + } + +- if (found == FLB_TRUE) { +- tag = flb_sds_create_len(val_str, val_str_size); +- if (!tag) { +- flb_errno(); +- return NULL; +- } +- return tag; ++ /* Convert the value to string */ ++ if (ra_val->type == FLB_RA_STRING) { ++ tag = flb_sds_create_len(ra_val->o.via.str.ptr, ra_val->o.via.str.size); ++ } ++ else { ++ flb_plg_debug(ctx->ins, "tag_key %s value is not a string", ctx->tag_key); + } + ++ /* Clean up the record accessor value */ ++ flb_ra_key_value_destroy(ra_val); + +- flb_plg_error(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key); 
+- return NULL; ++ return tag; + } + + /* +-- +2.45.4 + diff --git a/SPECS/fluent-bit/CVE-2025-62408.patch b/SPECS/fluent-bit/CVE-2025-62408.patch new file mode 100644 index 00000000000..1e00daf2a52 --- /dev/null +++ b/SPECS/fluent-bit/CVE-2025-62408.patch @@ -0,0 +1,357 @@ +From 714bf5675c541bd1e668a8db8e67ce012651e618 Mon Sep 17 00:00:00 2001 +From: Brad House +Date: Mon, 8 Dec 2025 10:12:08 -0500 +Subject: [PATCH] Merge commit from fork + +* reproducer test case + +* enqueue callbacks to be processed within process_answer rather than calling directly as that may lead to the connection being destroyed + +* combine requeue and endqueue into a single queue with a flag to simplify the implementation + +* add back queue notification + +Modified to apply to Azure Linux +Upstream Patch Reference: https://github.com/c-ares/c-ares/commit/714bf5675c541bd1e668a8db8e67ce012651e618.patch +--- + lib/c-ares-1.33.1/src/lib/ares_private.h | 10 +-- + lib/c-ares-1.33.1/src/lib/ares_process.c | 97 ++++++++++++++++----- + lib/c-ares-1.33.1/src/lib/ares_qcache.c | 20 +++-- + lib/c-ares-1.33.1/test/ares-test-mock-ai.cc | 32 +++++++ + 4 files changed, 125 insertions(+), 34 deletions(-) + +diff --git a/lib/c-ares-1.33.1/src/lib/ares_private.h b/lib/c-ares-1.33.1/src/lib/ares_private.h +index 2605c9e..c1bf536 100644 +--- a/lib/c-ares-1.33.1/src/lib/ares_private.h ++++ b/lib/c-ares-1.33.1/src/lib/ares_private.h +@@ -466,7 +466,7 @@ ares_status_t ares__requeue_query(ares_query_t *query, + const ares_timeval_t *now, + ares_status_t status, + ares_bool_t inc_try_count, +- const ares_dns_record_t *dnsrec, ++ ares_dns_record_t *dnsrec, + ares__array_t **requeue); + + /*! Count the number of labels (dots+1) in a domain */ +@@ -764,10 +764,10 @@ ares_status_t ares__qcache_create(ares_rand_state *rand_state, + unsigned int max_ttl, + ares__qcache_t **cache_out); + void ares__qcache_flush(ares__qcache_t *cache); +-ares_status_t ares_qcache_insert(ares_channel_t *channel, +- const ares_timeval_t *now, +- const ares_query_t *query, +- ares_dns_record_t *dnsrec); ++ares_status_t ares_qcache_insert(ares_channel_t *channel, ++ const ares_timeval_t *now, ++ const ares_query_t *query, ++ const ares_dns_record_t *dnsrec); + ares_status_t ares_qcache_fetch(ares_channel_t *channel, + const ares_timeval_t *now, + const ares_dns_record_t *dnsrec, +diff --git a/lib/c-ares-1.33.1/src/lib/ares_process.c b/lib/c-ares-1.33.1/src/lib/ares_process.c +index e84c36a..fa042dd 100644 +--- a/lib/c-ares-1.33.1/src/lib/ares_process.c ++++ b/lib/c-ares-1.33.1/src/lib/ares_process.c +@@ -66,7 +66,8 @@ static ares_bool_t same_address(const struct sockaddr *sa, + const struct ares_addr *aa); + static void end_query(ares_channel_t *channel, ares_server_t *server, + ares_query_t *query, ares_status_t status, +- const ares_dns_record_t *dnsrec); ++ ares_dns_record_t *dnsrec, ++ ares__array_t **requeue); + + static void ares__query_disassociate_from_conn(ares_query_t *query) + { +@@ -300,16 +301,27 @@ static void write_tcp_data(ares_channel_t *channel, fd_set *write_fds, + } + } + ++typedef enum { ++ REQUEUE_REQUEUE = 1, ++ REQUEUE_ENDQUERY = 2 ++} requeue_type_t; ++ + /* Simple data structure to store a query that needs to be requeued with + * optional server */ + typedef struct { +- unsigned short qid; +- ares_server_t *server; /* optional */ ++ requeue_type_t type; /* type of entry, requeue or endquery */ ++ unsigned short qid; /* query id */ ++ ares_server_t *server; /* requeue only: optional */ ++ ares_status_t status; /* endquery only */ ++ 
ares_dns_record_t *dnsrec; /* endquery only: optional */ + } ares_requeue_t; + +-static ares_status_t ares_append_requeue(ares__array_t **requeue, +- ares_query_t *query, +- ares_server_t *server) ++static ares_status_t ares_append_requeue_int(ares__array_t **requeue, ++ requeue_type_t type, ++ ares_query_t *query, ++ ares_server_t *server, ++ ares_status_t status, ++ ares_dns_record_t *dnsrec) + { + ares_requeue_t entry; + +@@ -322,11 +334,30 @@ static ares_status_t ares_append_requeue(ares__array_t **requeue, + + ares__query_disassociate_from_conn(query); + ++ entry.type = type; + entry.qid = query->qid; + entry.server = server; ++ entry.status = status; ++ entry.dnsrec = dnsrec; + return ares__array_insertdata_last(*requeue, &entry); + } + ++static ares_status_t ares_append_requeue(ares__array_t **requeue, ++ ares_query_t *query, ++ ares_server_t *server) ++{ ++ return ares_append_requeue_int(requeue, REQUEUE_REQUEUE, query, server, 0, ++ NULL); ++} ++ ++static ares_status_t ares_append_endqueue(ares__array_t **requeue, ++ ares_query_t *query, ++ ares_status_t status, ++ ares_dns_record_t *dnsrec) ++{ ++ return ares_append_requeue_int(requeue, REQUEUE_ENDQUERY, query, NULL, status, ++ dnsrec); ++} + + /* If any TCP socket selects true for reading, read some data, + * allocate a buffer if we finish reading the length word, and process +@@ -423,13 +454,24 @@ cleanup: + break; + } + +- /* Query disappeared */ + query = ares__htable_szvp_get_direct(channel->queries_by_qid, entry.qid); +- if (query == NULL) { +- continue; ++ if (entry.type == REQUEUE_REQUEUE) { ++ /* query disappeared */ ++ if (query == NULL) { ++ continue; ++ } ++ ares__send_query(query, now); ++ } else { /* REQUEUE_ENDQUERY */ ++ if (query != NULL) { ++ query->callback(query->arg, entry.status, query->timeouts, entry.dnsrec); ++ ares__free_query(query); ++ } ++ ares_dns_record_destroy(entry.dnsrec); + } +- +- ares__send_query(query, now); ++ } ++ /* Don't forget to send notification if queue emptied */ ++ if (requeue != NULL) { ++ ares_queue_notify_empty(channel); + } + ares__array_destroy(requeue); + } +@@ -749,7 +791,7 @@ static ares_status_t process_answer(ares_channel_t *channel, + ares_dns_get_opt_rr_const(rdnsrec) == NULL) { + status = rewrite_without_edns(query); + if (status != ARES_SUCCESS) { +- end_query(channel, server, query, status, NULL); ++ end_query(channel, server, query, status, NULL, NULL); + goto cleanup; + } + +@@ -795,6 +837,7 @@ static ares_status_t process_answer(ares_channel_t *channel, + server_increment_failures(server, query->using_tcp); + status = ares__requeue_query(query, now, status, ARES_TRUE, rdnsrec, + requeue); ++ rdnsrec = NULL; /* Free'd by ares_requeue_query() */ + + if (status != ARES_ENOMEM) { + /* Should any of these cause a connection termination? +@@ -807,12 +850,11 @@ static ares_status_t process_answer(ares_channel_t *channel, + + /* If cache insertion was successful, it took ownership. We ignore + * other cache insertion failures. 
*/ +- if (ares_qcache_insert(channel, now, query, rdnsrec) == ARES_SUCCESS) { +- is_cached = ARES_TRUE; +- } ++ ares_qcache_insert(channel, now, query, rdnsrec); + + server_set_good(server, query->using_tcp); +- end_query(channel, server, query, ARES_SUCCESS, rdnsrec); ++ end_query(channel, server, query, ARES_SUCCESS, rdnsrec, requeue); ++ rdnsrec = NULL; /* Free'd by the requeue */ + + status = ARES_SUCCESS; + +@@ -845,7 +887,7 @@ ares_status_t ares__requeue_query(ares_query_t *query, + const ares_timeval_t *now, + ares_status_t status, + ares_bool_t inc_try_count, +- const ares_dns_record_t *dnsrec, ++ ares_dns_record_t *dnsrec, + ares__array_t **requeue) + { + ares_channel_t *channel = query->channel; +@@ -862,6 +904,7 @@ ares_status_t ares__requeue_query(ares_query_t *query, + } + + if (query->try_count < max_tries && !query->no_retries) { ++ ares_dns_record_destroy(dnsrec); + if (requeue != NULL) { + return ares_append_requeue(requeue, query, NULL); + } +@@ -873,7 +916,7 @@ ares_status_t ares__requeue_query(ares_query_t *query, + query->error_status = ARES_ETIMEOUT; + } + +- end_query(channel, NULL, query, query->error_status, dnsrec); ++ end_query(channel, NULL, query, query->error_status, dnsrec, requeue); + return ARES_ETIMEOUT; + } + +@@ -1151,7 +1194,7 @@ ares_status_t ares__send_query(ares_query_t *query, const ares_timeval_t *now) + } + + if (server == NULL) { +- end_query(channel, server, query, ARES_ENOSERVER /* ? */, NULL); ++ end_query(channel, server, query, ARES_ENOSERVER /* ? */, NULL, NULL); + return ARES_ENOSERVER; + } + +@@ -1172,7 +1215,7 @@ ares_status_t ares__send_query(ares_query_t *query, const ares_timeval_t *now) + + /* Anything else is not retryable, likely ENOMEM */ + default: +- end_query(channel, server, query, status, NULL); ++ end_query(channel, server, query, status, NULL, NULL); + return status; + } + } +@@ -1186,7 +1229,7 @@ ares_status_t ares__send_query(ares_query_t *query, const ares_timeval_t *now) + + case ARES_ENOMEM: + /* Not retryable */ +- end_query(channel, server, query, status, NULL); ++ end_query(channel, server, query, status, NULL, NULL); + return status; + + /* These conditions are retryable as they are server-specific +@@ -1220,7 +1263,7 @@ ares_status_t ares__send_query(ares_query_t *query, const ares_timeval_t *now) + ares__slist_insert(channel->queries_by_timeout, query); + if (!query->node_queries_by_timeout) { + /* LCOV_EXCL_START: OutOfMemory */ +- end_query(channel, server, query, ARES_ENOMEM, NULL); ++ end_query(channel, server, query, ARES_ENOMEM, NULL, NULL); + return ARES_ENOMEM; + /* LCOV_EXCL_STOP */ + } +@@ -1233,7 +1276,7 @@ ares_status_t ares__send_query(ares_query_t *query, const ares_timeval_t *now) + + if (query->node_queries_to_conn == NULL) { + /* LCOV_EXCL_START: OutOfMemory */ +- end_query(channel, server, query, ARES_ENOMEM, NULL); ++ end_query(channel, server, query, ARES_ENOMEM, NULL, NULL); + return ARES_ENOMEM; + /* LCOV_EXCL_STOP */ + } +@@ -1344,10 +1387,16 @@ static void ares_detach_query(ares_query_t *query) + + static void end_query(ares_channel_t *channel, ares_server_t *server, + ares_query_t *query, ares_status_t status, +- const ares_dns_record_t *dnsrec) ++ ares_dns_record_t *dnsrec, ares__array_t **requeue) + { + ares_metrics_record(query, server, status, dnsrec); + ++ /* Delay calling the query callback */ ++ if (requeue != NULL) { ++ ares_append_endqueue(requeue, query, status, dnsrec); ++ return; ++ } ++ + /* Invoke the callback. 
*/ + query->callback(query->arg, status, query->timeouts, dnsrec); + ares__free_query(query); +diff --git a/lib/c-ares-1.33.1/src/lib/ares_qcache.c b/lib/c-ares-1.33.1/src/lib/ares_qcache.c +index 9725212..8a5b4b0 100644 +--- a/lib/c-ares-1.33.1/src/lib/ares_qcache.c ++++ b/lib/c-ares-1.33.1/src/lib/ares_qcache.c +@@ -422,10 +422,20 @@ done: + return status; + } + +-ares_status_t ares_qcache_insert(ares_channel_t *channel, +- const ares_timeval_t *now, +- const ares_query_t *query, +- ares_dns_record_t *dnsrec) ++ares_status_t ares_qcache_insert(ares_channel_t *channel, ++ const ares_timeval_t *now, ++ const ares_query_t *query, ++ const ares_dns_record_t *dnsrec) + { +- return ares__qcache_insert(channel->qcache, dnsrec, query->query, now); ++ ares_dns_record_t *dupdns = ares_dns_record_duplicate(dnsrec); ++ ares_status_t status; ++ ++ if (dupdns == NULL) { ++ return ARES_ENOMEM; ++ } ++ status = ares__qcache_insert(channel->qcache, dupdns, query->query, now); ++ if (status != ARES_SUCCESS) { ++ ares_dns_record_destroy(dupdns); ++ } ++ return status; + } +diff --git a/lib/c-ares-1.33.1/test/ares-test-mock-ai.cc b/lib/c-ares-1.33.1/test/ares-test-mock-ai.cc +index ad61b04..9771a21 100644 +--- a/lib/c-ares-1.33.1/test/ares-test-mock-ai.cc ++++ b/lib/c-ares-1.33.1/test/ares-test-mock-ai.cc +@@ -805,7 +805,39 @@ TEST_P(MockUDPChannelTestAI, TriggerResendThenConnFailEDNS) { + EXPECT_THAT(result.ai_, IncludesV6Address("2121:0000:0000:0000:0000:0000:0000:0303")); + } + ++TEST_P(MockUDPChannelTestAI, ConnectionRefusedOnSearchDomainRetry) { ++ DNSPacket badrsp4; ++ badrsp4.set_response().set_aa() ++ .add_question(new DNSQuestion("www.google.com", T_A)) ++ .set_rcode(NXDOMAIN); ++ ++ EXPECT_CALL(server_, OnRequest("www.google.com", T_A)) ++ .WillOnce(SetReplyAndFailSend(&server_, &badrsp4)); ++ ++ DNSPacket goodrsp4; ++ goodrsp4.set_response().set_aa() ++ .add_question(new DNSQuestion("www.google.com.first.com", T_A)) ++ .add_answer(new DNSARR("www.google.com.first.com", 0x0100, {0x01, 0x02, 0x03, 0x04})); ++ ++ EXPECT_CALL(server_, OnRequest("www.google.com.first.com", T_A)) ++ .WillOnce(SetReply(&server_, &goodrsp4)); ++ ++ ares_socket_functions sock_funcs; ++ memset(&sock_funcs, 0, sizeof(sock_funcs)); + ++ sock_funcs.asendv = ares_sendv_fail; ++ ++ ares_set_socket_functions(channel_, &sock_funcs, NULL); ++ ++ AddrInfoResult result; ++ struct ares_addrinfo_hints hints = {0, 0, 0, 0}; ++ hints.ai_family = AF_INET; ++ hints.ai_flags = ARES_AI_NOSORT; ++ ares_getaddrinfo(channel_, "www.google.com", NULL, &hints, ++ AddrInfoCallback, &result); ++ ++ Process(); ++} + + class MockEDNSChannelTestAI : public MockFlagsChannelOptsTestAI { + public: +-- +2.43.0 + diff --git a/SPECS/fluent-bit/fluent-bit.spec b/SPECS/fluent-bit/fluent-bit.spec index b2f7852f214..1d6a694d634 100644 --- a/SPECS/fluent-bit/fluent-bit.spec +++ b/SPECS/fluent-bit/fluent-bit.spec @@ -1,7 +1,7 @@ Summary: Fast and Lightweight Log processor and forwarder for Linux, BSD and OSX Name: fluent-bit Version: 3.1.10 -Release: 2%{?dist} +Release: 4%{?dist} License: Apache-2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -16,6 +16,9 @@ Patch5: CVE-2025-31498.patch Patch6: CVE-2025-54126.patch Patch7: CVE-2025-58749.patch Patch8: CVE-2025-12970.patch +Patch9: CVE-2025-12977.patch +Patch10: CVE-2025-12969.patch +Patch11: CVE-2025-62408.patch BuildRequires: bison BuildRequires: cmake BuildRequires: cyrus-sasl-devel @@ -90,6 +93,12 @@ Development files for %{name} %{_libdir}/fluent-bit/*.so %changelog +* Wed Dec 17 2025 Azure Linux 
Security Servicing Account - 3.1.10-4 +- Patch for CVE-2025-62408 + +* Mon Dec 08 2025 BinduSri Adabala - 3.1.10-3 +- Patch for CVE-2025-12977 and CVE-2025-12969 + * Mon Dec 01 2025 Azure Linux Security Servicing Account - 3.1.10-2 - Patch for CVE-2025-12970 diff --git a/SPECS/frr/0001-Fix-frr-c90-complaint-error.patch b/SPECS/frr/0001-Fix-frr-c90-complaint-error.patch new file mode 100644 index 00000000000..6bd3af55d87 --- /dev/null +++ b/SPECS/frr/0001-Fix-frr-c90-complaint-error.patch @@ -0,0 +1,50 @@ +From e954ed6417af175f02bb9a80c3ba4b31b423b89b Mon Sep 17 00:00:00 2001 +From: Archana Shettigar +Date: Sat, 27 Dec 2025 22:38:28 +0530 +Subject: [PATCH] Fix frr c90 complaint error + +--- + isisd/isis_snmp.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/isisd/isis_snmp.c b/isisd/isis_snmp.c +index f9e3780..9328cf1 100644 +--- a/isisd/isis_snmp.c ++++ b/isisd/isis_snmp.c +@@ -841,6 +841,7 @@ static int isis_snmp_area_addr_lookup_exact(oid *oid_idx, size_t oid_idx_len, + struct iso_address *addr = NULL; + struct listnode *addr_node; + struct isis *isis = isis_lookup_by_vrfid(VRF_DEFAULT); ++ int res; + + if (isis == NULL) + return 0; +@@ -852,7 +853,7 @@ static int isis_snmp_area_addr_lookup_exact(oid *oid_idx, size_t oid_idx_len, + + area = listgetdata(listhead(isis->area_list)); + +- int res = isis_snmp_conv_exact(cmp_buf, sizeof(cmp_buf), &addr_len, ++ res = isis_snmp_conv_exact(cmp_buf, sizeof(cmp_buf), &addr_len, + oid_idx, oid_idx_len); + + +@@ -891,6 +892,7 @@ static int isis_snmp_area_addr_lookup_next(oid *oid_idx, size_t oid_idx_len, + struct iso_address *addr = NULL; + struct listnode *addr_node; + struct isis *isis = isis_lookup_by_vrfid(VRF_DEFAULT); ++ int res; + + if (isis == NULL) + return 0; +@@ -902,7 +904,7 @@ static int isis_snmp_area_addr_lookup_next(oid *oid_idx, size_t oid_idx_len, + + area = listgetdata(listhead(isis->area_list)); + +- int res = isis_snmp_conv_next(cmp_buf, sizeof(cmp_buf), &addr_len, ++ res = isis_snmp_conv_next(cmp_buf, sizeof(cmp_buf), &addr_len, + &try_exact, oid_idx, oid_idx_len); + + if (!res) +-- +2.45.4 + diff --git a/SPECS/frr/CVE-2025-61099.patch b/SPECS/frr/CVE-2025-61099.patch new file mode 100644 index 00000000000..05c7328ded5 --- /dev/null +++ b/SPECS/frr/CVE-2025-61099.patch @@ -0,0 +1,587 @@ +From b7d9b7aa47627b31e4b50795284408ab6de98660 Mon Sep 17 00:00:00 2001 +From: s1awwhy +Date: Sun, 24 Aug 2025 21:17:55 +0800 +Subject: [PATCH] Address CVE-2025-61099 +[PATCH 1/4] ospfd: Add null check for vty_out in check_tlv_size +[PATCH 2/4] ospfd: Fix NULL Pointer Deference when dumping link info +[PATCH 3/4] ospfd: skip subsequent tlvs after invalid length +[PATCH 4/4] ospfd: reformat check_tlv_size macro + +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/FRRouting/frr/pull/19983.patch +--- + ospfd/ospf_ext.c | 334 +++++++++++++++++++++++++++++++++++------------ + ospfd/ospf_ri.c | 23 ++-- + ospfd/ospf_te.c | 23 ++-- + 3 files changed, 278 insertions(+), 102 deletions(-) + +diff --git a/ospfd/ospf_ext.c b/ospfd/ospf_ext.c +index d82c214..60eaf71 100644 +--- a/ospfd/ospf_ext.c ++++ b/ospfd/ospf_ext.c +@@ -31,6 +31,7 @@ + #include "network.h" + #include "if.h" + #include "libospf.h" /* for ospf interface types */ ++#include + + #include "ospfd/ospfd.h" + #include "ospfd/ospf_interface.h" +@@ -1704,34 +1705,49 @@ static void ospf_ext_lsa_schedule(struct ext_itf *exti, enum lsa_opcode op) + * ------------------------------------ + */ + +-#define check_tlv_size(size, msg) \ +- do { \ +- if 
(ntohs(tlvh->length) != size) { \ +- vty_out(vty, " Wrong %s TLV size: %d(%d). Abort!\n", \ +- msg, ntohs(tlvh->length), size); \ +- return size + TLV_HDR_SIZE; \ +- } \ ++/* Check NULL for vty. If vty is not available, dump info via zlog */ ++#define check_tlv_size(size, msg) \ ++ do { \ ++ if (ntohs(tlvh->length) != size) { \ ++ if (vty != NULL) \ ++ vty_out(vty, \ ++ " Wrong %s TLV size: %d(expected %d). Skip subsequent TLVs!\n", \ ++ msg, ntohs(tlvh->length), size); \ ++ else \ ++ zlog_debug(" Wrong %s TLV size: %d(expected %d). Skip subsequent TLVs!", \ ++ msg, ntohs(tlvh->length), size); \ ++ return OSPF_MAX_LSA_SIZE + 1; \ ++ } \ + } while (0) + + /* Cisco experimental SubTLV */ + static uint16_t show_vty_ext_link_rmt_itf_addr(struct vty *vty, +- struct tlv_header *tlvh) ++ struct tlv_header *tlvh, json_object *json) + { + struct ext_subtlv_rmt_itf_addr *top = + (struct ext_subtlv_rmt_itf_addr *)tlvh; + + check_tlv_size(EXT_SUBTLV_RMT_ITF_ADDR_SIZE, "Remote Itf. Address"); + +- vty_out(vty, +- " Remote Interface Address Sub-TLV: Length %u\n Address: %pI4\n", +- ntohs(top->header.length), &top->value); +- ++ if (!json) ++ if (vty != NULL) { ++ vty_out(vty, ++ " Remote Interface Address Sub-TLV: Length %u\n Address: %pI4\n", ++ ntohs(top->header.length), &top->value); ++ } else { ++ zlog_debug(" Remote Interface Address Sub-TLV: Length %u", ++ ntohs(top->header.length)); ++ zlog_debug(" Address: %pI4", &top->value); ++ } ++ else ++ json_object_string_addf(json, "remoteInterfaceAddress", "%pI4", ++ &top->value); + return TLV_SIZE(tlvh); + } + + /* Adjacency SID SubTLV */ + static uint16_t show_vty_ext_link_adj_sid(struct vty *vty, +- struct tlv_header *tlvh) ++ struct tlv_header *tlvh, json_object *json) + { + struct ext_subtlv_adj_sid *top = (struct ext_subtlv_adj_sid *)tlvh; + uint8_t tlv_size; +@@ -1741,21 +1757,46 @@ static uint16_t show_vty_ext_link_adj_sid(struct vty *vty, + : SID_INDEX_SIZE(EXT_SUBTLV_ADJ_SID_SIZE); + check_tlv_size(tlv_size, "Adjacency SID"); + +- vty_out(vty, +- " Adj-SID Sub-TLV: Length %u\n\tFlags: 0x%x\n\tMT-ID:0x%x\n\tWeight: 0x%x\n\t%s: %u\n", +- ntohs(top->header.length), top->flags, top->mtid, top->weight, +- CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ? "Label" +- : "Index", +- CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) +- ? GET_LABEL(ntohl(top->value)) +- : ntohl(top->value)); ++ if (!json) { ++ /* Add security check for vty_out. If vty is not available, dump info via zlog.*/ ++ if (vty != NULL) ++ vty_out(vty, ++ " Adj-SID Sub-TLV: Length %u\n\tFlags: 0x%x\n\tMT-ID:0x%x\n\tWeight: 0x%x\n\t%s: %u\n", ++ ntohs(top->header.length), top->flags, top->mtid, top->weight, ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ? "Label" ++ : "Index", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ++ ? GET_LABEL(ntohl(top->value)) ++ : ntohl(top->value)); ++ else { ++ zlog_debug(" Adj-SID Sub-TLV: Length %u", ntohs(top->header.length)); ++ zlog_debug(" Flags: 0x%x", top->flags); ++ zlog_debug(" MT-ID:0x%x", top->mtid); ++ zlog_debug(" Weight: 0x%x", top->weight); ++ zlog_debug(" %s: %u", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ? "Label" ++ : "Index", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ++ ? 
GET_LABEL(ntohl(top->value)) ++ : ntohl(top->value)); ++ } ++ } else { ++ json_object_string_addf(json, "flags", "0x%x", top->flags); ++ json_object_string_addf(json, "mtID", "0x%x", top->mtid); ++ json_object_string_addf(json, "weight", "0x%x", top->weight); ++ if (CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG)) ++ json_object_int_add(json, "label", ++ GET_LABEL(ntohl(top->value))); ++ else ++ json_object_int_add(json, "index", ntohl(top->value)); ++ } + + return TLV_SIZE(tlvh); + } + + /* LAN Adjacency SubTLV */ + static uint16_t show_vty_ext_link_lan_adj_sid(struct vty *vty, +- struct tlv_header *tlvh) ++ struct tlv_header *tlvh, json_object *json) + { + struct ext_subtlv_lan_adj_sid *top = + (struct ext_subtlv_lan_adj_sid *)tlvh; +@@ -1766,57 +1807,128 @@ static uint16_t show_vty_ext_link_lan_adj_sid(struct vty *vty, + : SID_INDEX_SIZE(EXT_SUBTLV_LAN_ADJ_SID_SIZE); + check_tlv_size(tlv_size, "LAN-Adjacency SID"); + +- vty_out(vty, +- " LAN-Adj-SID Sub-TLV: Length %u\n\tFlags: 0x%x\n\tMT-ID:0x%x\n\tWeight: 0x%x\n\tNeighbor ID: %pI4\n\t%s: %u\n", +- ntohs(top->header.length), top->flags, top->mtid, top->weight, +- &top->neighbor_id, +- CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ? "Label" +- : "Index", +- CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) +- ? GET_LABEL(ntohl(top->value)) +- : ntohl(top->value)); ++ if (!json) { ++ /* Add security check for vty_out. If vty is not available, dump info via zlog. */ ++ if (vty != NULL) { ++ vty_out(vty, ++ " LAN-Adj-SID Sub-TLV: Length %u\n\tFlags: 0x%x\n\tMT-ID:0x%x\n\tWeight: 0x%x\n\tNeighbor ID: %pI4\n\t%s: %u\n", ++ ntohs(top->header.length), top->flags, top->mtid, top->weight, ++ &top->neighbor_id, ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ? "Label" ++ : "Index", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ++ ? GET_LABEL(ntohl(top->value)) ++ : ntohl(top->value)); ++ } else { ++ zlog_debug(" LAN-Adj-SID Sub-TLV: Length %u", ntohs(top->header.length)); ++ zlog_debug(" Flags: 0x%x", top->flags); ++ zlog_debug(" MT-ID:0x%x", top->mtid); ++ zlog_debug(" Weight: 0x%x", top->weight); ++ zlog_debug(" Neighbor ID: %pI4", &top->neighbor_id); ++ zlog_debug(" %s: %u", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ? "Label" ++ : "Index", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG) ++ ? GET_LABEL(ntohl(top->value)) ++ : ntohl(top->value)); ++ } ++ } else { ++ json_object_string_addf(json, "flags", "0x%x", top->flags); ++ json_object_string_addf(json, "mtID", "0x%x", top->mtid); ++ json_object_string_addf(json, "weight", "0x%x", top->weight); ++ json_object_string_addf(json, "neighborID", "%pI4", ++ &top->neighbor_id); ++ if (CHECK_FLAG(top->flags, EXT_SUBTLV_LINK_ADJ_SID_VFLG)) ++ json_object_int_add(json, "label", ++ GET_LABEL(ntohl(top->value))); ++ else ++ json_object_int_add(json, "index", ntohl(top->value)); ++ } + + return TLV_SIZE(tlvh); + } + + static uint16_t show_vty_unknown_tlv(struct vty *vty, struct tlv_header *tlvh, +- size_t buf_size) ++ size_t buf_size, json_object *json) + { ++ json_object *obj; ++ ++ /* Add security check for vty_out. If vty is not available, dump info via zlog. */ + if (TLV_SIZE(tlvh) > buf_size) { +- vty_out(vty, " TLV size %d exceeds buffer size. Abort!", +- TLV_SIZE(tlvh)); ++ if (vty != NULL) ++ vty_out(vty, " TLV size %d exceeds buffer size. Abort!", TLV_SIZE(tlvh)); ++ else ++ zlog_debug(" TLV size %d exceeds buffer size. 
Abort!", TLV_SIZE(tlvh)); ++ + return buf_size; + } +- +- vty_out(vty, " Unknown TLV: [type(0x%x), length(0x%x)]\n", +- ntohs(tlvh->type), ntohs(tlvh->length)); ++ if (!json) ++ if (vty != NULL) { ++ vty_out(vty, " Unknown TLV: [type(0x%x), length(0x%x)]\n", ++ ntohs(tlvh->type), ntohs(tlvh->length)); ++ } else { ++ zlog_debug(" Unknown TLV: [type(0x%x), length(0x%x)]", ++ ntohs(tlvh->type), ntohs(tlvh->length)); ++ } ++ else { ++ obj = json_object_new_object(); ++ json_object_string_addf(obj, "type", "0x%x", ++ ntohs(tlvh->type)); ++ json_object_string_addf(obj, "length", "0x%x", ++ ntohs(tlvh->length)); ++ json_object_object_add(json, "unknownTLV", obj); ++ } + + return TLV_SIZE(tlvh); + } + + /* Extended Link Sub TLVs */ + static uint16_t show_vty_link_info(struct vty *vty, struct tlv_header *ext, +- size_t buf_size) ++ size_t buf_size, json_object *json) + { + struct ext_tlv_link *top = (struct ext_tlv_link *)ext; + struct tlv_header *tlvh; + uint16_t length = ntohs(top->header.length); + uint16_t sum = 0; ++ json_object *jadj = NULL, *obj = NULL; + + /* Verify that TLV length is valid against remaining buffer size */ + if (length > buf_size) { +- vty_out(vty, +- " Extended Link TLV size %d exceeds buffer size. Abort!\n", +- length); ++ /* Add security check for vty_out. If vty is not available, dump info via zlog. */ ++ if (vty != NULL) { ++ vty_out(vty, " Extended Link TLV size %d exceeds buffer size. Abort!\n", ++ length); ++ } else { ++ zlog_debug(" Extended Link TLV size %d exceeds buffer size. Abort!", ++ length); ++ } + return buf_size; + } + +- vty_out(vty, +- " Extended Link TLV: Length %u\n Link Type: 0x%x\n" +- " Link ID: %pI4\n", +- ntohs(top->header.length), top->link_type, +- &top->link_id); +- vty_out(vty, " Link data: %pI4\n", &top->link_data); ++ if (!json) { ++ /* Add security check for vty_out. If vty is not available, dump info via zlog. 
*/ ++ if (vty != NULL) { ++ vty_out(vty, ++ " Extended Link TLV: Length %u\n Link Type: 0x%x\n" ++ " Link ID: %pI4\n", ++ ntohs(top->header.length), top->link_type, &top->link_id); ++ vty_out(vty, " Link data: %pI4\n", &top->link_data); ++ } else { ++ zlog_debug(" Extended Link TLV: Length %u", ntohs(top->header.length)); ++ zlog_debug(" Link Type: 0x%x", top->link_type); ++ zlog_debug(" Link ID: %pI4", &top->link_id); ++ zlog_debug(" Link data: %pI4", &top->link_data); ++ } ++ } else { ++ json_object_string_addf(json, "linkType", "0x%x", ++ top->link_type); ++ json_object_string_addf(json, "linkID", "%pI4", &top->link_id); ++ json_object_string_addf(json, "linkData", "%pI4", ++ &top->link_data); ++ jadj = json_object_new_array(); ++ json_object_object_add(json, "adjacencySID", jadj); ++ } ++ + + /* Skip Extended TLV and parse sub-TLVs */ + length -= EXT_TLV_LINK_SIZE; +@@ -1825,16 +1937,26 @@ static uint16_t show_vty_link_info(struct vty *vty, struct tlv_header *ext, + for (; sum < length && tlvh; tlvh = TLV_HDR_NEXT(tlvh)) { + switch (ntohs(tlvh->type)) { + case EXT_SUBTLV_ADJ_SID: +- sum += show_vty_ext_link_adj_sid(vty, tlvh); ++ if (json) { ++ obj = json_object_new_object(); ++ json_object_array_add(jadj, obj); ++ } else ++ obj = NULL; ++ sum += show_vty_ext_link_adj_sid(vty, tlvh, obj); + break; + case EXT_SUBTLV_LAN_ADJ_SID: +- sum += show_vty_ext_link_lan_adj_sid(vty, tlvh); ++ if (json) { ++ obj = json_object_new_object(); ++ json_object_array_add(jadj, obj); ++ } else ++ obj = NULL; ++ sum += show_vty_ext_link_lan_adj_sid(vty, tlvh, obj); + break; + case EXT_SUBTLV_RMT_ITF_ADDR: +- sum += show_vty_ext_link_rmt_itf_addr(vty, tlvh); ++ sum += show_vty_ext_link_rmt_itf_addr(vty, tlvh, json); + break; + default: +- sum += show_vty_unknown_tlv(vty, tlvh, length - sum); ++ sum += show_vty_unknown_tlv(vty, tlvh, length - sum, json); + break; + } + } +@@ -1849,9 +1971,12 @@ static void ospf_ext_link_show_info(struct vty *vty, struct json_object *json, + struct lsa_header *lsah = lsa->data; + struct tlv_header *tlvh; + uint16_t length = 0, sum = 0; ++ json_object *jlink = NULL; + +- if (json) +- return; ++ if (json) { ++ jlink = json_object_new_object(); ++ json_object_object_add(json, "extendedLink", jlink); ++ } + + /* Initialize TLV browsing */ + length = lsa->size - OSPF_LSA_HEADER_SIZE; +@@ -1860,10 +1985,10 @@ static void ospf_ext_link_show_info(struct vty *vty, struct json_object *json, + tlvh = TLV_HDR_NEXT(tlvh)) { + switch (ntohs(tlvh->type)) { + case EXT_TLV_LINK: +- sum += show_vty_link_info(vty, tlvh, length - sum); ++ sum += show_vty_link_info(vty, tlvh, length - sum, jlink); + break; + default: +- sum += show_vty_unknown_tlv(vty, tlvh, length - sum); ++ sum += show_vty_unknown_tlv(vty, tlvh, length - sum, jlink); + break; + } + } +@@ -1871,7 +1996,7 @@ static void ospf_ext_link_show_info(struct vty *vty, struct json_object *json, + + /* Prefix SID SubTLV */ + static uint16_t show_vty_ext_pref_pref_sid(struct vty *vty, +- struct tlv_header *tlvh) ++ struct tlv_header *tlvh, json_object *json) + { + struct ext_subtlv_prefix_sid *top = + (struct ext_subtlv_prefix_sid *)tlvh; +@@ -1882,41 +2007,87 @@ static uint16_t show_vty_ext_pref_pref_sid(struct vty *vty, + : SID_INDEX_SIZE(EXT_SUBTLV_PREFIX_SID_SIZE); + check_tlv_size(tlv_size, "Prefix SID"); + +- vty_out(vty, +- " Prefix SID Sub-TLV: Length %u\n\tAlgorithm: %u\n\tFlags: 0x%x\n\tMT-ID:0x%x\n\t%s: %u\n", +- ntohs(top->header.length), top->algorithm, top->flags, +- top->mtid, +- CHECK_FLAG(top->flags, 
EXT_SUBTLV_PREFIX_SID_VFLG) ? "Label" +- : "Index", +- CHECK_FLAG(top->flags, EXT_SUBTLV_PREFIX_SID_VFLG) +- ? GET_LABEL(ntohl(top->value)) +- : ntohl(top->value)); ++ if (!json) { ++ if (vty != NULL) { ++ vty_out(vty, ++ " Prefix SID Sub-TLV: Length %u\n\tAlgorithm: %u\n\tFlags: 0x%x\n\tMT-ID:0x%x\n\t%s: %u\n", ++ ntohs(top->header.length), top->algorithm, top->flags, top->mtid, ++ CHECK_FLAG(top->flags, EXT_SUBTLV_PREFIX_SID_VFLG) ? "Label" ++ : "Index", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_PREFIX_SID_VFLG) ++ ? GET_LABEL(ntohl(top->value)) ++ : ntohl(top->value)); ++ } else { ++ zlog_debug(" Prefix SID Sub-TLV: Length %u", ntohs(top->header.length)); ++ zlog_debug(" Algorithm: %u", top->algorithm); ++ zlog_debug(" Flags: 0x%x", top->flags); ++ zlog_debug(" MT-ID:0x%x", top->mtid); ++ zlog_debug(" %s: %u", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_PREFIX_SID_VFLG) ? "Label" ++ : "Index", ++ CHECK_FLAG(top->flags, EXT_SUBTLV_PREFIX_SID_VFLG) ++ ? GET_LABEL(ntohl(top->value)) ++ : ntohl(top->value)); ++ } ++ } else { ++ json_object_int_add(json, "algorithm", top->algorithm); ++ json_object_string_addf(json, "flags", "0x%x", top->flags); ++ json_object_string_addf(json, "mtID", "0x%x", top->mtid); ++ if (CHECK_FLAG(top->flags, EXT_SUBTLV_PREFIX_SID_VFLG)) ++ json_object_int_add(json, "label", ++ GET_LABEL(ntohl(top->value))); ++ else ++ json_object_int_add(json, "index", ntohl(top->value)); ++ } + + return TLV_SIZE(tlvh); + } + + /* Extended Prefix SubTLVs */ + static uint16_t show_vty_pref_info(struct vty *vty, struct tlv_header *ext, +- size_t buf_size) ++ size_t buf_size, json_object *json) + { + struct ext_tlv_prefix *top = (struct ext_tlv_prefix *)ext; + struct tlv_header *tlvh; + uint16_t length = ntohs(top->header.length); + uint16_t sum = 0; ++ json_object *jsid = NULL; + + /* Verify that TLV length is valid against remaining buffer size */ + if (length > buf_size) { +- vty_out(vty, +- " Extended Link TLV size %d exceeds buffer size. Abort!\n", +- length); ++ if (vty != NULL) { ++ vty_out(vty, " Extended Link TLV size %d exceeds buffer size. Abort!\n", ++ length); ++ } else { ++ zlog_debug(" Extended Link TLV size %d exceeds buffer size. 
Abort!", ++ length); ++ } + return buf_size; + } + +- vty_out(vty, +- " Extended Prefix TLV: Length %u\n\tRoute Type: %u\n" +- "\tAddress Family: 0x%x\n\tFlags: 0x%x\n\tAddress: %pI4/%u\n", +- ntohs(top->header.length), top->route_type, top->af, top->flags, +- &top->address, top->pref_length); ++ if (!json) { ++ if (vty != NULL) { ++ vty_out(vty, ++ " Extended Prefix TLV: Length %u\n\tRoute Type: %u\n" ++ "\tAddress Family: 0x%x\n\tFlags: 0x%x\n\tAddress: %pI4/%u\n", ++ ntohs(top->header.length), top->route_type, top->af, top->flags, ++ &top->address, top->pref_length); ++ } else { ++ zlog_debug(" Extended Prefix TLV: Length %u", ntohs(top->header.length)); ++ zlog_debug(" Route Type: %u", top->route_type); ++ zlog_debug(" Address Family: 0x%x", top->af); ++ zlog_debug(" Flags: 0x%x", top->flags); ++ zlog_debug(" Address: %pI4/%u", &top->address, top->pref_length); ++ } ++ } else { ++ json_object_int_add(json, "routeType", top->route_type); ++ json_object_string_addf(json, "addressFamily", "0x%x", top->af); ++ json_object_string_addf(json, "flags", "0x%x", top->flags); ++ json_object_string_addf(json, "address", "%pI4", &top->address); ++ json_object_int_add(json, "prefixLength", top->pref_length); ++ jsid = json_object_new_object(); ++ json_object_object_add(json, "prefixSID", jsid); ++ } + + /* Skip Extended Prefix TLV and parse sub-TLVs */ + length -= EXT_TLV_PREFIX_SIZE; +@@ -1925,10 +2096,10 @@ static uint16_t show_vty_pref_info(struct vty *vty, struct tlv_header *ext, + for (; sum < length && tlvh; tlvh = TLV_HDR_NEXT(tlvh)) { + switch (ntohs(tlvh->type)) { + case EXT_SUBTLV_PREFIX_SID: +- sum += show_vty_ext_pref_pref_sid(vty, tlvh); ++ sum += show_vty_ext_pref_pref_sid(vty, tlvh, jsid); + break; + default: +- sum += show_vty_unknown_tlv(vty, tlvh, length - sum); ++ sum += show_vty_unknown_tlv(vty, tlvh, length - sum, json); + break; + } + } +@@ -1943,9 +2114,12 @@ static void ospf_ext_pref_show_info(struct vty *vty, struct json_object *json, + struct lsa_header *lsah = lsa->data; + struct tlv_header *tlvh; + uint16_t length = 0, sum = 0; ++ json_object *jpref = NULL; + +- if (json) +- return; ++ if (json) { ++ jpref = json_object_new_object(); ++ json_object_object_add(json, "extendedPrefix", jpref); ++ } + + /* Initialize TLV browsing */ + length = lsa->size - OSPF_LSA_HEADER_SIZE; +@@ -1954,10 +2128,10 @@ static void ospf_ext_pref_show_info(struct vty *vty, struct json_object *json, + tlvh = TLV_HDR_NEXT(tlvh)) { + switch (ntohs(tlvh->type)) { + case EXT_TLV_PREFIX: +- sum += show_vty_pref_info(vty, tlvh, length - sum); ++ sum += show_vty_pref_info(vty, tlvh, length - sum, jpref); + break; + default: +- sum += show_vty_unknown_tlv(vty, tlvh, length - sum); ++ sum += show_vty_unknown_tlv(vty, tlvh, length - sum, jpref); + break; + } + } +diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c +index 80e7f59..40fc006 100644 +--- a/ospfd/ospf_ri.c ++++ b/ospfd/ospf_ri.c +@@ -1202,17 +1202,18 @@ static int ospf_router_info_lsa_update(struct ospf_lsa *lsa) + * Following are vty session control functions. 
+ *------------------------------------------------------------------------*/ + +-#define check_tlv_size(size, msg) \ +- do { \ +- if (ntohs(tlvh->length) > size) { \ +- if (vty != NULL) \ +- vty_out(vty, " Wrong %s TLV size: %d(%d)\n", \ +- msg, ntohs(tlvh->length), size); \ +- else \ +- zlog_debug(" Wrong %s TLV size: %d(%d)", \ +- msg, ntohs(tlvh->length), size); \ +- return size + TLV_HDR_SIZE; \ +- } \ ++#define check_tlv_size(size, msg) \ ++ do { \ ++ if (ntohs(tlvh->length) > size) { \ ++ if (vty != NULL) \ ++ vty_out(vty, \ ++ " Wrong %s TLV size: %d(expected %d). Skip subsequent TLVs!\n", \ ++ msg, ntohs(tlvh->length), size); \ ++ else \ ++ zlog_debug(" Wrong %s TLV size: %d(expected %d). Skip subsequent TLVs!", \ ++ msg, ntohs(tlvh->length), size); \ ++ return OSPF_MAX_LSA_SIZE + 1; \ ++ } \ + } while (0) + + static uint16_t show_vty_router_cap(struct vty *vty, struct tlv_header *tlvh) +diff --git a/ospfd/ospf_te.c b/ospfd/ospf_te.c +index 844b28d..851b48d 100644 +--- a/ospfd/ospf_te.c ++++ b/ospfd/ospf_te.c +@@ -3185,17 +3185,18 @@ static void ospf_te_init_ted(struct ls_ted *ted, struct ospf *ospf) + /*------------------------------------------------------------------------* + * Following are vty session control functions. + *------------------------------------------------------------------------*/ +-#define check_tlv_size(size, msg) \ +- do { \ +- if (ntohs(tlvh->length) > size) { \ +- if (vty != NULL) \ +- vty_out(vty, " Wrong %s TLV size: %d(%d)\n", \ +- msg, ntohs(tlvh->length), size); \ +- else \ +- zlog_debug(" Wrong %s TLV size: %d(%d)", \ +- msg, ntohs(tlvh->length), size); \ +- return size + TLV_HDR_SIZE; \ +- } \ ++ #define check_tlv_size(size, msg) \ ++ do { \ ++ if (ntohs(tlvh->length) > size) { \ ++ if (vty != NULL) \ ++ vty_out(vty, \ ++ " Wrong %s TLV size: %d(expected %d). Skip subsequent TLVs!\n", \ ++ msg, ntohs(tlvh->length), size); \ ++ else \ ++ zlog_debug(" Wrong %s TLV size: %d(expected %d). 
Skip subsequent TLVs!", \ ++ msg, ntohs(tlvh->length), size); \ ++ return OSPF_MAX_LSA_SIZE + 1; \ ++ } \ + } while (0) + + static uint16_t show_vty_router_addr(struct vty *vty, struct tlv_header *tlvh) +-- +2.45.4 + diff --git a/SPECS/frr/CVE-2025-61100.nopatch b/SPECS/frr/CVE-2025-61100.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61101.nopatch b/SPECS/frr/CVE-2025-61101.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61102.nopatch b/SPECS/frr/CVE-2025-61102.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61103.nopatch b/SPECS/frr/CVE-2025-61103.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61104.nopatch b/SPECS/frr/CVE-2025-61104.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61105.nopatch b/SPECS/frr/CVE-2025-61105.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61106.nopatch b/SPECS/frr/CVE-2025-61106.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/CVE-2025-61107.nopatch b/SPECS/frr/CVE-2025-61107.nopatch new file mode 100644 index 00000000000..e69de29bb2d diff --git a/SPECS/frr/frr.spec b/SPECS/frr/frr.spec index 64f6026a82d..a279f7fdc4b 100644 --- a/SPECS/frr/frr.spec +++ b/SPECS/frr/frr.spec @@ -3,7 +3,7 @@ Summary: Routing daemon Name: frr Version: 9.1.1 -Release: 3%{?dist} +Release: 5%{?dist} License: GPL-2.0-or-later Vendor: Microsoft Corporation Distribution: Azure Linux @@ -18,7 +18,10 @@ Patch3: 0003-fips-mode.patch Patch4: 0004-remove-grpc-test.patch Patch5: CVE-2024-44070.patch Patch6: CVE-2024-55553.patch - +Patch7: 0001-Fix-frr-c90-complaint-error.patch +# Following CVE-2025-61099 fixes CVE-2025-61100, CVE-2025-61101, CVE-2025-61102, +# CVE-2025-61103, CVE-2025-61104, CVE-2025-61106 and CVE-2025-61107. +Patch8: CVE-2025-61099.patch BuildRequires: autoconf BuildRequires: automake BuildRequires: bison @@ -200,6 +203,13 @@ rm tests/lib/*grpc* %{_sysusersdir}/%{name}.conf %changelog +* Wed Jan 21 2026 Archana Shettigar - 9.1.1-5 +- Patch CVE-2025-61099, CVE-2025-61100, CVE-2025-61101, CVE-2025-61102, + CVE-2025-61103, CVE-2025-61104, CVE-2025-61105, CVE-2025-61106 and CVE-2025-61107 + +* Mon Dec 29 2025 Archana Shettigar - 9.1.1-4 +- Rebuilt for net-snmp version up with c90 fix + * Tue Jun 17 2025 Kanishk Bansal - 9.1.1-3 - Backport Patch CVE-2024-55553 diff --git a/SPECS/fwctl-hwe/fwctl-hwe.signatures.json b/SPECS/fwctl-hwe/fwctl-hwe.signatures.json deleted file mode 100644 index e5d4ed40f30..00000000000 --- a/SPECS/fwctl-hwe/fwctl-hwe.signatures.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "Signatures": { - "fwctl-24.10.tgz": "ec00a549851d9c506a8e2aed365db2506e3d8bb31dad970da82f8f665191deec" - } -} \ No newline at end of file diff --git a/SPECS/fwctl-hwe/fwctl-hwe.spec b/SPECS/fwctl-hwe/fwctl-hwe.spec deleted file mode 100644 index 622a0616760..00000000000 --- a/SPECS/fwctl-hwe/fwctl-hwe.spec +++ /dev/null @@ -1,329 +0,0 @@ -# -# Copyright (c) 2024 Nvidia Inc. All rights reserved. -# -# This software is available to you under a choice of one of two -# licenses. 
You may choose to be licensed under the terms of the GNU -# General Public License (GPL) Version 2, available from the file -# COPYING in the main directory of this source tree, or the -# OpenIB.org BSD license below: -# -# Redistribution and use in source and binary forms, with or -# without modification, are permitted provided that the following -# conditions are met: -# -# - Redistributions of source code must retain the above -# copyright notice, this list of conditions and the following -# disclaimer. -# -# - Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials -# provided with the distribution. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# - -%if 0%{azl} -# hard code versions due to ADO bug:58993948 -%global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 -%global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} -%global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} -%else -%global target_kernel_version_full f.a.k.e -%endif - -%global KVERSION %{target_kernel_version_full} -%global K_SRC /lib/modules/%{target_kernel_version_full}/build - -%{!?_name: %define _name fwctl-hwe} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-24%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} - -# KMP is disabled by default -%{!?KMP: %global KMP 0} - -# take kernel version or default to uname -r -%{!?KVERSION: %global KVERSION %{target_kernel_version_full}} -%global kernel_version %{KVERSION} -%global krelver %(echo -n %{KVERSION} | sed -e 's/-/_/g') - -# take path to kernel sources if provided, otherwise look in default location (for non KMP rpms). -%{!?K_SRC: %global K_SRC /lib/modules/%{KVERSION}/build} - -# define release version -%{!?src_release: %global src_release %{_release}_%{krelver}} -%if "%{KMP}" != "1" -%global _release1 %{src_release} -%else -%global _release1 %{_release} -%endif -%global _kmp_rel %{_release1}%{?_kmp_build_num}%{?_dist} - -Summary: %{_name} Driver -Name: fwctl-hwe -Version: 24.10 -Release: 24%{release_suffix}%{?dist} -License: GPLv2 -Url: http://nvidia.com -Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/fwctl-24.10.tgz#/fwctl-%{_version}.tgz -BuildRoot: /var/tmp/%{name}-%{version}-build -Vendor: Microsoft Corporation -Distribution: Azure Linux -ExclusiveArch: aarch64 - -BuildRequires: gcc -BuildRequires: make -BuildRequires: kernel-hwe-devel = %{target_kernel_version_full} -BuildRequires: binutils -BuildRequires: systemd -BuildRequires: kmod -BuildRequires: mlnx-ofa_kernel-hwe-devel = %{_mofed_full_version} - -Requires: mlnx-ofa_kernel -Requires: mlnx-ofa_kernel-hwe-modules = %{_mofed_full_version} -Requires: kernel-hwe = %{target_kernel_version_full} -Requires: kmod -Conflicts: fwctl - -%description -%{name} kernel modules - -# build KMP rpms? 
-%if "%{KMP}" == "1" -%global kernel_release() $(make -s -C %{1} kernelrelease M=$PWD) -BuildRequires: %kernel_module_package_buildreqs -%(mkdir -p %{buildroot}) -%(echo '%defattr (-,root,root)' > %{buildroot}/file_list) -%(echo '/lib/modules/%2-%1' >> %{buildroot}/file_list) -%(echo '%config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{_name}-*-%1.conf' >> %{buildroot}/file_list) -%{kernel_module_package -f %{buildroot}/file_list -x xen -r %{_kmp_rel} } -%else -%global kernel_source() %{K_SRC} -%global kernel_release() %{KVERSION} -%global flavors_to_build default -%endif - -# -# setup module sign scripts if paths to the keys are given -# -%global WITH_MOD_SIGN %(if ( test -f "$MODULE_SIGN_PRIV_KEY" && test -f "$MODULE_SIGN_PUB_KEY" ); \ - then \ - echo -n '1'; \ - else \ - echo -n '0'; fi) - -%if "%{WITH_MOD_SIGN}" == "1" -# call module sign script -%global __modsign_install_post \ - %{_builddir}/%{name}-%{version}/source/tools/sign-modules %{buildroot}/lib/modules/ %{kernel_source default} || exit 1 \ -%{nil} - -%global __debug_package 1 -%global buildsubdir %{name}-%{version} -# Disgusting hack alert! We need to ensure we sign modules *after* all -# invocations of strip occur, which is in __debug_install_post if -# find-debuginfo.sh runs, and __os_install_post if not. -# -%global __spec_install_post \ - %{?__debug_package:%{__debug_install_post}} \ - %{__arch_install_post} \ - %{__os_install_post} \ - %{__modsign_install_post} \ -%{nil} - -%endif # end of setup module sign scripts -# - -%if "%{_vendor}" == "suse" -%debug_package -%endif - -%if 0%{?anolis} == 8 -%global __find_requires %{nil} -%endif - -# set modules dir -%if "%{_vendor}" == "redhat" || ("%{_vendor}" == "openEuler") -%if 0%{?fedora} -%global install_mod_dir updates/%{name} -%else -%global install_mod_dir extra/%{name} -%endif -%endif - -%if "%{_vendor}" == "suse" -%global install_mod_dir updates/%{name} -%endif - -%{!?install_mod_dir: %global install_mod_dir updates/%{name}} - -%prep -%setup -n fwctl-%{_version} -set -- * -mkdir source -mv "$@" source/ -mkdir obj - -%build -export EXTRA_CFLAGS='-DVERSION=\"%version\"' -export INSTALL_MOD_DIR=%{install_mod_dir} -export CONF_OPTIONS="%{configure_options}" -for flavor in %{flavors_to_build}; do - export K_BUILD=%{kernel_source $flavor} - export KVER=%{kernel_release $K_BUILD} - export LIB_MOD_DIR=/lib/modules/$KVER/$INSTALL_MOD_DIR - rm -rf obj/$flavor - cp -r source obj/$flavor - cd $PWD/obj/$flavor - make - cd - -done - -%install -export INSTALL_MOD_PATH=%{buildroot} -export INSTALL_MOD_DIR=%{install_mod_dir} -export PREFIX=%{_prefix} -for flavor in %flavors_to_build; do - export K_BUILD=%{kernel_source $flavor} - export KVER=%{kernel_release $K_BUILD} - cd $PWD/obj/$flavor - make install KERNELRELEASE=$KVER - # Cleanup unnecessary kernel-generated module dependency files. - find $INSTALL_MOD_PATH/lib/modules -iname 'modules.*' -exec rm {} \; - cd - -done - -# Set the module(s) to be executable, so that they will be stripped when packaged. 
-find %{buildroot} \( -type f -name '*.ko' -o -name '*ko.gz' \) -exec %{__chmod} u+x \{\} \; - -%{__install} -d %{buildroot}%{_sysconfdir}/depmod.d/ -for module in `find %{buildroot}/ -name '*.ko' -o -name '*.ko.gz' | sort` -do -ko_name=${module##*/} -mod_name=${ko_name/.ko*/} -mod_path=${module/*\/%{name}} -mod_path=${mod_path/\/${ko_name}} -%if "%{_vendor}" == "suse" - for flavor in %{flavors_to_build}; do - if [[ $module =~ $flavor ]] || [ "X%{KMP}" != "X1" ];then - echo "override ${mod_name} * updates/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{_name}-${mod_name}-$flavor.conf - fi - done -%else - %if 0%{?fedora} - echo "override ${mod_name} * updates/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{_name}-${mod_name}.conf - %else - %if "%{_vendor}" == "redhat" || ("%{_vendor}" == "openEuler") - echo "override ${mod_name} * weak-updates/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{_name}-${mod_name}.conf - %endif - echo "override ${mod_name} * extra/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{_name}-${mod_name}.conf - %endif -%endif -done - - -%clean -rm -rf %{buildroot} - -%post -if [ $1 -ge 1 ]; then # 1 : This package is being installed or reinstalled - /sbin/depmod %{KVERSION} -fi # 1 : closed -# END of post - -%postun -/sbin/depmod %{KVERSION} - -%if "%{KMP}" != "1" -%files -%defattr(-,root,root,-) -%license source/debian/copyright -/lib/modules/%{KVERSION}/%{install_mod_dir}/ -%config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{_name}-*.conf -%endif - -%changelog -* Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 -- Bump to match kernel-hwe - -* Fri Oct 10 2025 Pawel Winogrodzki - 24.10-23_6.12.50.2-1 -- Adjusted package dependencies on user space components. - -* Fri Oct 06 2025 Siddharth Chintamaneni - 24.10-22_6.12.50.2-1 -- Bump to match kernel-hwe - -* Fri Sep 12 2025 Rachel Menge - 24.10-21 -- Bump to match kernel-hwe - -* Mon Sep 08 2025 Elaheh Dehghani - 24.10-20 -- Build using kernel-hwe for aarch64 architecture - -* Fri May 23 2025 CBL-Mariner Servicing Account - 24.10-19 -- Bump release to rebuild for new kernel release - -* Tue May 13 2025 Siddharth Chintamaneni - 24.10-18 -- Bump release to rebuild for new kernel release - -* Tue Apr 29 2025 Siddharth Chintamaneni - 24.10-17 -- Bump release to rebuild for new kernel release - -* Fri Apr 25 2025 Chris Co - 24.10-16 -- Bump release to rebuild for new kernel release - -* Tue Apr 08 2025 Pawel Winogrodzki - 24.10-15 -- Bump release to match "signed" spec changes. 
- -* Sat Apr 05 2025 CBL-Mariner Servicing Account - 24.10-14 -- Bump release to rebuild for new kernel release - -* Fri Mar 14 2025 CBL-Mariner Servicing Account - 24.10-13 -- Bump release to rebuild for new kernel release - -* Tue Mar 11 2025 CBL-Mariner Servicing Account - 24.10-12 -- Bump release to rebuild for new kernel release - -* Mon Mar 10 2025 Chris Co - 24.10-11 -- Bump release to rebuild for new kernel release - -* Wed Mar 05 2025 Rachel Menge - 24.10-10 -- Bump release to rebuild for new kernel release - -* Tue Mar 04 2025 Rachel Menge - 24.10-9 -- Bump release to rebuild for new kernel release - -* Wed Feb 19 2025 Chris Co - 24.10-8 -- Bump release to rebuild for new kernel release - -* Tue Feb 11 2025 Rachel Menge - 24.10-7 -- Bump release to rebuild for new kernel release - -* Wed Feb 05 2025 Tobias Brick - 24.10-6 -- Bump release to rebuild for new kernel release - -* Tue Feb 04 2025 Alberto David Perez Guevara - 24.10-5 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-4 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-3 -- Bump release to match kernel - -* Thu Jan 30 2025 Rachel Menge - 24.10-2 -- Bump release to match kernel - -* Tue Dec 17 2024 Binu Jose Philip - 24.10-1 -- Initial Azure Linux import from NVIDIA (license: GPLv2) -- License verified -* Mon Jul 29 2024 -- Initial packaging diff --git a/SPECS/fwctl/fwctl.signatures.json b/SPECS/fwctl/fwctl.signatures.json deleted file mode 100644 index e5d4ed40f30..00000000000 --- a/SPECS/fwctl/fwctl.signatures.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "Signatures": { - "fwctl-24.10.tgz": "ec00a549851d9c506a8e2aed365db2506e3d8bb31dad970da82f8f665191deec" - } -} \ No newline at end of file diff --git a/SPECS/fwctl/fwctl.spec b/SPECS/fwctl/fwctl.spec deleted file mode 100644 index 328ef424393..00000000000 --- a/SPECS/fwctl/fwctl.spec +++ /dev/null @@ -1,320 +0,0 @@ -# -# Copyright (c) 2024 Nvidia Inc. All rights reserved. -# -# This software is available to you under a choice of one of two -# licenses. You may choose to be licensed under the terms of the GNU -# General Public License (GPL) Version 2, available from the file -# COPYING in the main directory of this source tree, or the -# OpenIB.org BSD license below: -# -# Redistribution and use in source and binary forms, with or -# without modification, are permitted provided that the following -# conditions are met: -# -# - Redistributions of source code must retain the above -# copyright notice, this list of conditions and the following -# disclaimer. -# -# - Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials -# provided with the distribution. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# - -%if 0%{azl} -%global target_kernel_version_full %(/bin/rpm -q --queryformat '%{RPMTAG_VERSION}-%{RPMTAG_RELEASE}' $(/bin/rpm -q --whatprovides kernel-headers)) -%global target_azl_build_kernel_version %(/bin/rpm -q --queryformat '%{RPMTAG_VERSION}' $(/bin/rpm -q --whatprovides kernel-headers)) -%global target_kernel_release %(/bin/rpm -q --queryformat '%{RPMTAG_RELEASE}' $(/bin/rpm -q --whatprovides kernel-headers) | /bin/cut -d . -f 1) -%global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} -%else -%global target_kernel_version_full f.a.k.e -%endif - -%global KVERSION %{target_kernel_version_full} -%global K_SRC /lib/modules/%{target_kernel_version_full}/build - -%{!?_name: %define _name fwctl} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-21%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} - -# KMP is disabled by default -%{!?KMP: %global KMP 0} - -# take kernel version or default to uname -r -%{!?KVERSION: %global KVERSION %{target_kernel_version_full}} -%global kernel_version %{KVERSION} -%global krelver %(echo -n %{KVERSION} | sed -e 's/-/_/g') - -# take path to kernel sources if provided, otherwise look in default location (for non KMP rpms). -%{!?K_SRC: %global K_SRC /lib/modules/%{KVERSION}/build} - -# define release version -%{!?src_release: %global src_release %{_release}_%{krelver}} -%if "%{KMP}" != "1" -%global _release1 %{src_release} -%else -%global _release1 %{_release} -%endif -%global _kmp_rel %{_release1}%{?_kmp_build_num}%{?_dist} - -Summary: %{_name} Driver -Name: fwctl -Version: 24.10 -Release: 21%{release_suffix}%{?dist} -License: GPLv2 -Url: http://nvidia.com -Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/fwctl-24.10.tgz#/%{_name}-%{_version}.tgz -BuildRoot: /var/tmp/%{name}-%{version}-build -Vendor: Microsoft Corporation -Distribution: Azure Linux -ExclusiveArch: x86_64 - -BuildRequires: gcc -BuildRequires: make -BuildRequires: kernel-devel = %{target_kernel_version_full} -BuildRequires: kernel-headers = %{target_kernel_version_full} -BuildRequires: binutils -BuildRequires: systemd -BuildRequires: kmod -BuildRequires: mlnx-ofa_kernel-devel = %{_mofed_full_version} -BuildRequires: mlnx-ofa_kernel-source = %{_mofed_full_version} - -Requires: mlnx-ofa_kernel = %{_mofed_full_version} -Requires: mlnx-ofa_kernel-modules = %{_mofed_full_version} -Requires: kernel = %{target_kernel_version_full} -Requires: kmod - -%description -%{name} kernel modules - -# build KMP rpms? 
-%if "%{KMP}" == "1" -%global kernel_release() $(make -s -C %{1} kernelrelease M=$PWD) -BuildRequires: %kernel_module_package_buildreqs -%(mkdir -p %{buildroot}) -%(echo '%defattr (-,root,root)' > %{buildroot}/file_list) -%(echo '/lib/modules/%2-%1' >> %{buildroot}/file_list) -%(echo '%config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{name}-*-%1.conf' >> %{buildroot}/file_list) -%{kernel_module_package -f %{buildroot}/file_list -x xen -r %{_kmp_rel} } -%else -%global kernel_source() %{K_SRC} -%global kernel_release() %{KVERSION} -%global flavors_to_build default -%endif - -# -# setup module sign scripts if paths to the keys are given -# -%global WITH_MOD_SIGN %(if ( test -f "$MODULE_SIGN_PRIV_KEY" && test -f "$MODULE_SIGN_PUB_KEY" ); \ - then \ - echo -n '1'; \ - else \ - echo -n '0'; fi) - -%if "%{WITH_MOD_SIGN}" == "1" -# call module sign script -%global __modsign_install_post \ - %{_builddir}/%{name}-%{version}/source/tools/sign-modules %{buildroot}/lib/modules/ %{kernel_source default} || exit 1 \ -%{nil} - -%global __debug_package 1 -%global buildsubdir %{name}-%{version} -# Disgusting hack alert! We need to ensure we sign modules *after* all -# invocations of strip occur, which is in __debug_install_post if -# find-debuginfo.sh runs, and __os_install_post if not. -# -%global __spec_install_post \ - %{?__debug_package:%{__debug_install_post}} \ - %{__arch_install_post} \ - %{__os_install_post} \ - %{__modsign_install_post} \ -%{nil} - -%endif # end of setup module sign scripts -# - -%if "%{_vendor}" == "suse" -%debug_package -%endif - -%if 0%{?anolis} == 8 -%global __find_requires %{nil} -%endif - -# set modules dir -%if "%{_vendor}" == "redhat" || ("%{_vendor}" == "openEuler") -%if 0%{?fedora} -%global install_mod_dir updates/%{name} -%else -%global install_mod_dir extra/%{name} -%endif -%endif - -%if "%{_vendor}" == "suse" -%global install_mod_dir updates/%{name} -%endif - -%{!?install_mod_dir: %global install_mod_dir updates/%{name}} - -%prep -%setup -set -- * -mkdir source -mv "$@" source/ -mkdir obj - -%build -export EXTRA_CFLAGS='-DVERSION=\"%version\"' -export INSTALL_MOD_DIR=%{install_mod_dir} -export CONF_OPTIONS="%{configure_options}" -for flavor in %{flavors_to_build}; do - export K_BUILD=%{kernel_source $flavor} - export KVER=%{kernel_release $K_BUILD} - export LIB_MOD_DIR=/lib/modules/$KVER/$INSTALL_MOD_DIR - rm -rf obj/$flavor - cp -r source obj/$flavor - cd $PWD/obj/$flavor - make - cd - -done - -%install -export INSTALL_MOD_PATH=%{buildroot} -export INSTALL_MOD_DIR=%{install_mod_dir} -export PREFIX=%{_prefix} -for flavor in %flavors_to_build; do - export K_BUILD=%{kernel_source $flavor} - export KVER=%{kernel_release $K_BUILD} - cd $PWD/obj/$flavor - make install KERNELRELEASE=$KVER - # Cleanup unnecessary kernel-generated module dependency files. - find $INSTALL_MOD_PATH/lib/modules -iname 'modules.*' -exec rm {} \; - cd - -done - -# Set the module(s) to be executable, so that they will be stripped when packaged. 
-find %{buildroot} \( -type f -name '*.ko' -o -name '*ko.gz' \) -exec %{__chmod} u+x \{\} \; - -%{__install} -d %{buildroot}%{_sysconfdir}/depmod.d/ -for module in `find %{buildroot}/ -name '*.ko' -o -name '*.ko.gz' | sort` -do -ko_name=${module##*/} -mod_name=${ko_name/.ko*/} -mod_path=${module/*\/%{name}} -mod_path=${mod_path/\/${ko_name}} -%if "%{_vendor}" == "suse" - for flavor in %{flavors_to_build}; do - if [[ $module =~ $flavor ]] || [ "X%{KMP}" != "X1" ];then - echo "override ${mod_name} * updates/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{name}-${mod_name}-$flavor.conf - fi - done -%else - %if 0%{?fedora} - echo "override ${mod_name} * updates/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{name}-${mod_name}.conf - %else - %if "%{_vendor}" == "redhat" || ("%{_vendor}" == "openEuler") - echo "override ${mod_name} * weak-updates/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{name}-${mod_name}.conf - %endif - echo "override ${mod_name} * extra/%{name}${mod_path}" >> %{buildroot}%{_sysconfdir}/depmod.d/zz02-%{name}-${mod_name}.conf - %endif -%endif -done - - -%clean -rm -rf %{buildroot} - -%post -if [ $1 -ge 1 ]; then # 1 : This package is being installed or reinstalled - /sbin/depmod %{KVERSION} -fi # 1 : closed -# END of post - -%postun -/sbin/depmod %{KVERSION} - -%if "%{KMP}" != "1" -%files -%defattr(-,root,root,-) -%license source/debian/copyright -/lib/modules/%{KVERSION}/%{install_mod_dir}/ -%config(noreplace) %{_sysconfdir}/depmod.d/zz02-%{name}-*.conf -%endif - -%changelog -* Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 -- Bump mofed release number - -* Thu May 29 2025 Nicolas Guibourge - 24.10-20 -- Add kernel version and release nb into release nb - -* Fri May 23 2025 CBL-Mariner Servicing Account - 24.10-19 -- Bump release to rebuild for new kernel release - -* Tue May 13 2025 Siddharth Chintamaneni - 24.10-18 -- Bump release to rebuild for new kernel release - -* Tue Apr 29 2025 Siddharth Chintamaneni - 24.10-17 -- Bump release to rebuild for new kernel release - -* Fri Apr 25 2025 Chris Co - 24.10-16 -- Bump release to rebuild for new kernel release - -* Tue Apr 08 2025 Pawel Winogrodzki - 24.10-15 -- Bump release to match "signed" spec changes. 
- -* Sat Apr 05 2025 CBL-Mariner Servicing Account - 24.10-14 -- Bump release to rebuild for new kernel release - -* Fri Mar 14 2025 CBL-Mariner Servicing Account - 24.10-13 -- Bump release to rebuild for new kernel release - -* Tue Mar 11 2025 CBL-Mariner Servicing Account - 24.10-12 -- Bump release to rebuild for new kernel release - -* Mon Mar 10 2025 Chris Co - 24.10-11 -- Bump release to rebuild for new kernel release - -* Wed Mar 05 2025 Rachel Menge - 24.10-10 -- Bump release to rebuild for new kernel release - -* Tue Mar 04 2025 Rachel Menge - 24.10-9 -- Bump release to rebuild for new kernel release - -* Wed Feb 19 2025 Chris Co - 24.10-8 -- Bump release to rebuild for new kernel release - -* Tue Feb 11 2025 Rachel Menge - 24.10-7 -- Bump release to rebuild for new kernel release - -* Wed Feb 05 2025 Tobias Brick - 24.10-6 -- Bump release to rebuild for new kernel release - -* Tue Feb 04 2025 Alberto David Perez Guevara - 24.10-5 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-4 -- Bump release to rebuild for new kernel release - -* Fri Jan 31 2025 Alberto David Perez Guevara - 24.10-3 -- Bump release to match kernel - -* Thu Jan 30 2025 Rachel Menge - 24.10-2 -- Bump release to match kernel - -* Tue Dec 17 2024 Binu Jose Philip - 24.10-1 -- Initial Azure Linux import from NVIDIA (license: GPLv2) -- License verified -* Mon Jul 29 2024 -- Initial packaging diff --git a/SPECS/glib/CVE-2025-14087.patch b/SPECS/glib/CVE-2025-14087.patch new file mode 100644 index 00000000000..490616026e0 --- /dev/null +++ b/SPECS/glib/CVE-2025-14087.patch @@ -0,0 +1,70 @@ +From 992b2a40e60981d1028928f9658d9827b2cb52fe Mon Sep 17 00:00:00 2001 +From: Philip Withnall +Date: Tue, 25 Nov 2025 19:02:56 +0000 +Subject: [PATCH] gvariant-parser: Fix potential integer overflow parsing + (byte)strings + +The termination condition for parsing string and bytestring literals in +GVariant text format input was subject to an integer overflow for input +string (or bytestring) literals longer than `INT_MAX`. + +Fix that by counting as a `size_t` rather than as an `int`. The counter +can never correctly be negative. + +Spotted by treeplus. Thanks to the Sovereign Tech Resilience programme +from the Sovereign Tech Agency. ID: #YWH-PGM9867-145 + +Signed-off-by: Philip Withnall +Fixes: #3834 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://gitlab.gnome.org/GNOME/glib/-/commit/3e72fe0fbb32c18a66486c4da8bc851f656af287.patch +--- + glib/gvariant-parser.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/glib/gvariant-parser.c b/glib/gvariant-parser.c +index 7973ecf..4074c2c 100644 +--- a/glib/gvariant-parser.c ++++ b/glib/gvariant-parser.c +@@ -597,7 +597,7 @@ ast_resolve (AST *ast, + { + GVariant *value; + gchar *pattern; +- gint i, j = 0; ++ size_t i, j = 0; + + pattern = ast_get_pattern (ast, error); + +@@ -1621,9 +1621,9 @@ string_free (AST *ast) + * No leading/trailing space allowed. 
*/ + static gboolean + unicode_unescape (const gchar *src, +- gint *src_ofs, ++ size_t *src_ofs, + gchar *dest, +- gint *dest_ofs, ++ size_t *dest_ofs, + gsize length, + SourceRef *ref, + GError **error) +@@ -1684,7 +1684,7 @@ string_parse (TokenStream *stream, + gsize length; + gchar quote; + gchar *str; +- gint i, j; ++ size_t i, j; + + token_stream_start_ref (stream, &ref); + token = token_stream_get (stream); +@@ -1814,7 +1814,7 @@ bytestring_parse (TokenStream *stream, + gsize length; + gchar quote; + gchar *str; +- gint i, j; ++ size_t i, j; + + token_stream_start_ref (stream, &ref); + token = token_stream_get (stream); +-- +2.45.4 + diff --git a/SPECS/glib/CVE-2025-14512.patch b/SPECS/glib/CVE-2025-14512.patch new file mode 100644 index 00000000000..09ea82109ca --- /dev/null +++ b/SPECS/glib/CVE-2025-14512.patch @@ -0,0 +1,71 @@ +From 504f3b9709a098936bbcbaa75daa5a7dc7d3de45 Mon Sep 17 00:00:00 2001 +From: Philip Withnall +Date: Thu, 4 Dec 2025 16:37:19 +0000 +Subject: [PATCH] gfileattribute: Fix integer overflow calculating escaping for + byte strings + +The number of invalid characters in the byte string (characters which +would have to be percent-encoded) was only stored in an `int`, which +gave the possibility of a long string largely full of invalid +characters overflowing this and allowing an attacker-controlled buffer +size to be allocated. + +This could be triggered by an attacker controlled file attribute (of +type `G_FILE_ATTRIBUTE_TYPE_BYTE_STRING`), such as +`G_FILE_ATTRIBUTE_THUMBNAIL_PATH` or `G_FILE_ATTRIBUTE_STANDARD_NAME`, +being read by user code. + +Spotted by Codean Labs. + +Signed-off-by: Philip Withnall + +Fixes: #3845 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://gitlab.gnome.org/GNOME/glib/-/commit/4f0399c0aaf3ffc86b5625424580294bc7460404.patch +--- + gio/gfileattribute.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/gio/gfileattribute.c b/gio/gfileattribute.c +index 124eb4d..d4a0412 100644 +--- a/gio/gfileattribute.c ++++ b/gio/gfileattribute.c +@@ -22,6 +22,7 @@ + + #include "config.h" + ++#include + #include + + #include "gfileattribute.h" +@@ -273,11 +274,12 @@ valid_char (char c) + return c >= 32 && c <= 126 && c != '\\'; + } + ++/* Returns NULL on error */ + static char * + escape_byte_string (const char *str) + { + size_t i, len; +- int num_invalid; ++ size_t num_invalid; + char *escaped_val, *p; + unsigned char c; + const char hex_digits[] = "0123456789abcdef"; +@@ -295,7 +297,12 @@ escape_byte_string (const char *str) + return g_strdup (str); + else + { +- escaped_val = g_malloc (len + num_invalid*3 + 1); ++ /* Check for overflow. 
We want to check the inequality: ++ * !(len + num_invalid * 3 + 1 > SIZE_MAX) */ ++ if (num_invalid >= (SIZE_MAX - len) / 3) ++ return NULL; ++ ++ escaped_val = g_malloc (len + num_invalid * 3 + 1); + + p = escaped_val; + for (i = 0; i < len; i++) +-- +2.45.4 + diff --git a/SPECS/glib/CVE-2026-1484.patch b/SPECS/glib/CVE-2026-1484.patch new file mode 100644 index 00000000000..41b50946373 --- /dev/null +++ b/SPECS/glib/CVE-2026-1484.patch @@ -0,0 +1,95 @@ +From fe3ce5d676a1b64a6fe071b21119e06a7ccb7046 Mon Sep 17 00:00:00 2001 +From: Marco Trevisan +Date: Fri, 23 Jan 2026 18:48:30 +0100 +Subject: [PATCH 1/2] gbase64: Use gsize to prevent potential overflow +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Both g_base64_encode_step() and g_base64_encode_close() return gsize +values, but these are summed to an int value. + +If the sum of these returned values is bigger than MAXINT, we overflow +while doing the null byte write. + +Spotted by treeplus. +Thanks to the Sovereign Tech Resilience programme from the Sovereign +Tech Agency. + +ID: #YWH-PGM9867-168 +Closes: #3870 + +(cherry picked from commit 6845f7776982849a2be1d8c9b0495e389092bff2) + +Co-authored-by: Marco Trevisan (Treviño) +--- + glib/gbase64.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/glib/gbase64.c b/glib/gbase64.c +index 3c427f8..60c8560 100644 +--- a/glib/gbase64.c ++++ b/glib/gbase64.c +@@ -264,8 +264,9 @@ g_base64_encode (const guchar *data, + gsize len) + { + gchar *out; +- gint state = 0, outlen; ++ gint state = 0; + gint save = 0; ++ gsize outlen; + + g_return_val_if_fail (data != NULL || len == 0, NULL); + +-- +2.45.4 + + +From e9754df0897c47f2b5a6fe2a65e6facf362ee614 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marco=20Trevisan=20=28Trevi=C3=B1o=29?= +Date: Wed, 21 Jan 2026 20:09:44 +0100 +Subject: [PATCH 2/2] gbase64: Ensure that the out value is within allocated + size + +We do not want to deference or write to it + +Related to: #3870 + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://gitlab.gnome.org/GNOME/glib/-/merge_requests/4979.patch +--- + glib/gbase64.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/glib/gbase64.c b/glib/gbase64.c +index 60c8560..0827e83 100644 +--- a/glib/gbase64.c ++++ b/glib/gbase64.c +@@ -267,6 +267,7 @@ g_base64_encode (const guchar *data, + gint state = 0; + gint save = 0; + gsize outlen; ++ gsize allocsize; + + g_return_val_if_fail (data != NULL || len == 0, NULL); + +@@ -274,10 +275,15 @@ g_base64_encode (const guchar *data, + +1 is needed for trailing \0, also check for unlikely integer overflow */ + g_return_val_if_fail (len < ((G_MAXSIZE - 1) / 4 - 1) * 3, NULL); + +- out = g_malloc ((len / 3 + 1) * 4 + 1); ++ allocsize = (len / 3 + 1) * 4 + 1; ++ out = g_malloc (allocsize); + + outlen = g_base64_encode_step (data, len, FALSE, out, &state, &save); ++ g_assert (outlen <= allocsize); ++ + outlen += g_base64_encode_close (FALSE, out + outlen, &state, &save); ++ g_assert (outlen <= allocsize); ++ + out[outlen] = '\0'; + + return (gchar *) out; +-- +2.45.4 + diff --git a/SPECS/glib/glib.spec b/SPECS/glib/glib.spec index 9f09acbdbe2..0021a1ce02d 100644 --- a/SPECS/glib/glib.spec +++ b/SPECS/glib/glib.spec @@ -2,7 +2,7 @@ Summary: Low-level libraries useful for providing data structure handling for C. 
Name: glib Version: 2.78.6 -Release: 5%{?dist} +Release: 7%{?dist} License: LGPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -15,6 +15,9 @@ Patch2: CVE-2025-4373.patch Patch3: CVE-2025-6052.patch Patch4: CVE-2025-7039.patch Patch5: CVE-2025-13601.patch +Patch6: CVE-2025-14087.patch +Patch7: CVE-2025-14512.patch +Patch8: CVE-2026-1484.patch BuildRequires: cmake BuildRequires: gtk-doc BuildRequires: libffi-devel @@ -114,6 +117,7 @@ touch %{buildroot}%{_libdir}/gio/modules/giomodule.cache %{_libdir}/glib-*/* %{_includedir}/* %{_datadir}/* +%license %{_datadir}/licenses/glib/LGPL-2.1-or-later.txt %exclude %{_datadir}/gtk-doc/html/ %exclude %{_datadir}/glib-2.0/schemas/ @@ -127,6 +131,12 @@ touch %{buildroot}%{_libdir}/gio/modules/giomodule.cache %doc %{_datadir}/gtk-doc/html/* %changelog +* Mon Feb 02 2026 Azure Linux Security Servicing Account - 2.78.6-7 +- Patch for CVE-2026-1484 + +* Mon Dec 15 2025 Azure Linux Security Servicing Account - 2.78.6-6 +- Patch for CVE-2025-14087, CVE-2025-14512 + * Sat Nov 29 2025 Azure Linux Security Servicing Account - 2.78.6-5 - Patch for CVE-2025-13601 diff --git a/SPECS/glibc/CVE-2025-15281.patch b/SPECS/glibc/CVE-2025-15281.patch new file mode 100644 index 00000000000..31ecc94c68b --- /dev/null +++ b/SPECS/glibc/CVE-2025-15281.patch @@ -0,0 +1,177 @@ +From bf108a5fe1a992e5a6057fefa5d3d54f06300d3b Mon Sep 17 00:00:00 2001 +From: Adhemerval Zanella +Date: Thu, 15 Jan 2026 10:32:19 -0300 +Subject: [PATCH] posix: Reset wordexp_t fields with WRDE_REUSE (CVE-2025-15281 + / BZ 33814) + +The wordexp fails to properly initialize the input wordexp_t when +WRDE_REUSE is used. The wordexp_t struct is properly freed, but +reuses the old wc_wordc value and updates the we_wordv in the +wrong position. A later wordfree will then call free with an +invalid pointer. + +Checked on x86_64-linux-gnu and i686-linux-gnu. 
+ +Reviewed-by: Carlos O'Donell +(cherry picked from commit 80cc58ea2de214f85b0a1d902a3b668ad2ecb302) +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/bminor/glibc/commit/d5409a1be010699794264162c551ba60f05ee6c3.patch +--- + posix/Makefile | 11 +++++ + posix/tst-wordexp-reuse.c | 89 +++++++++++++++++++++++++++++++++++++++ + posix/wordexp.c | 2 + + 3 files changed, 102 insertions(+) + create mode 100644 posix/tst-wordexp-reuse.c + +diff --git a/posix/Makefile b/posix/Makefile +index 1fc0f565..7db396fb 100644 +--- a/posix/Makefile ++++ b/posix/Makefile +@@ -328,6 +328,7 @@ tests := \ + tst-wait4 \ + tst-waitid \ + tst-wordexp-nocmd \ ++ tst-wordexp-reuse \ + tstgetopt \ + # tests + +@@ -453,6 +454,8 @@ generated += \ + tst-rxspencer-no-utf8.mtrace \ + tst-vfork3-mem.out \ + tst-vfork3.mtrace \ ++ tst-wordexp-reuse-mem.out \ ++ tst-wordexp-reuse.mtrace \ + wordexp-tst.out \ + # generated + +@@ -484,6 +487,7 @@ tests-special += \ + $(objpfx)tst-pcre-mem.out \ + $(objpfx)tst-rxspencer-no-utf8-mem.out \ + $(objpfx)tst-vfork3-mem.out \ ++ $(objpfx)tst-wordexp-reuse.out \ + # tests-special + endif + +@@ -765,3 +769,10 @@ $(objpfx)posix-conf-vars-def.h: $(..)scripts/gen-posix-conf-vars.awk \ + $(make-target-directory) + $(AWK) -f $(filter-out Makefile, $^) > $@.tmp + mv -f $@.tmp $@ ++ ++tst-wordexp-reuse-ENV += MALLOC_TRACE=$(objpfx)tst-wordexp-reuse.mtrace \ ++ LD_PRELOAD=$(common-objpfx)/malloc/libc_malloc_debug.so ++ ++$(objpfx)tst-wordexp-reuse-mem.out: $(objpfx)tst-wordexp-reuse.out ++ $(common-objpfx)malloc/mtrace $(objpfx)tst-wordexp-reuse.mtrace > $@; \ ++ $(evaluate-test) +diff --git a/posix/tst-wordexp-reuse.c b/posix/tst-wordexp-reuse.c +new file mode 100644 +index 00000000..3926b9f5 +--- /dev/null ++++ b/posix/tst-wordexp-reuse.c +@@ -0,0 +1,89 @@ ++/* Test for wordexp with WRDE_REUSE flag. ++ Copyright (C) 2026 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ . 
*/ ++ ++#include ++#include ++ ++#include ++ ++static int ++do_test (void) ++{ ++ mtrace (); ++ ++ { ++ wordexp_t p = { 0 }; ++ TEST_COMPARE (wordexp ("one", &p, 0), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[0], "one"); ++ TEST_COMPARE (wordexp ("two", &p, WRDE_REUSE), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[0], "two"); ++ wordfree (&p); ++ } ++ ++ { ++ wordexp_t p = { .we_offs = 2 }; ++ TEST_COMPARE (wordexp ("one", &p, 0), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[0], "one"); ++ TEST_COMPARE (wordexp ("two", &p, WRDE_REUSE | WRDE_DOOFFS), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[p.we_offs + 0], "two"); ++ wordfree (&p); ++ } ++ ++ { ++ wordexp_t p = { 0 }; ++ TEST_COMPARE (wordexp ("one", &p, 0), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[0], "one"); ++ TEST_COMPARE (wordexp ("two", &p, WRDE_REUSE | WRDE_APPEND), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[0], "two"); ++ wordfree (&p); ++ } ++ ++ { ++ wordexp_t p = { .we_offs = 2 }; ++ TEST_COMPARE (wordexp ("one", &p, WRDE_DOOFFS), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[p.we_offs + 0], "one"); ++ TEST_COMPARE (wordexp ("two", &p, WRDE_REUSE ++ | WRDE_DOOFFS), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[p.we_offs + 0], "two"); ++ wordfree (&p); ++ } ++ ++ { ++ wordexp_t p = { .we_offs = 2 }; ++ TEST_COMPARE (wordexp ("one", &p, WRDE_DOOFFS), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[p.we_offs + 0], "one"); ++ TEST_COMPARE (wordexp ("two", &p, WRDE_REUSE ++ | WRDE_DOOFFS | WRDE_APPEND), 0); ++ TEST_COMPARE (p.we_wordc, 1); ++ TEST_COMPARE_STRING (p.we_wordv[p.we_offs + 0], "two"); ++ wordfree (&p); ++ } ++ ++ return 0; ++} ++ ++#include +diff --git a/posix/wordexp.c b/posix/wordexp.c +index 994d7916..5c5863b5 100644 +--- a/posix/wordexp.c ++++ b/posix/wordexp.c +@@ -2216,7 +2216,9 @@ wordexp (const char *words, wordexp_t *pwordexp, int flags) + { + /* Minimal implementation of WRDE_REUSE for now */ + wordfree (pwordexp); ++ old_word.we_wordc = 0; + old_word.we_wordv = NULL; ++ pwordexp->we_wordc = 0; + } + + if ((flags & WRDE_APPEND) == 0) +-- +2.45.4 + diff --git a/SPECS/glibc/CVE-2026-0861.patch b/SPECS/glibc/CVE-2026-0861.patch new file mode 100644 index 00000000000..406dd57efae --- /dev/null +++ b/SPECS/glibc/CVE-2026-0861.patch @@ -0,0 +1,90 @@ +From 320a9e12e379c819fb3bfe14590d0f8bdff20115 Mon Sep 17 00:00:00 2001 +From: Siddhesh Poyarekar +Date: Thu, 15 Jan 2026 06:06:40 -0500 +Subject: [PATCH] memalign: reinstate alignment overflow check (CVE-2026-0861) + +The change to cap valid sizes to PTRDIFF_MAX inadvertently dropped the +overflow check for alignment in memalign functions, _mid_memalign and +_int_memalign. Reinstate the overflow check in _int_memalign, aligned +with the PTRDIFF_MAX change since that is directly responsible for the +CVE. The missing _mid_memalign check is not relevant (and does not have +a security impact) and may need a different approach to fully resolve, +so it has been omitted. 
+ +CVE-Id: CVE-2026-0861 +Vulnerable-Commit: 9bf8e29ca136094f73f69f725f15c51facc97206 +Reported-by: Igor Morgenstern, Aisle Research +Fixes: BZ #33796 +Reviewed-by: Wilco Dijkstra +Signed-off-by: Siddhesh Poyarekar +(cherry picked from commit c9188d333717d3ceb7e3020011651f424f749f93) +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/bminor/glibc/commit/744b63026a29f7eedbbc8e3a01a7f48a6eb0a085.patch +--- + malloc/malloc.c | 7 +++++-- + malloc/tst-malloc-too-large.c | 10 ++-------- + 2 files changed, 7 insertions(+), 10 deletions(-) + +diff --git a/malloc/malloc.c b/malloc/malloc.c +index d0bbbf37..70bf56d1 100644 +--- a/malloc/malloc.c ++++ b/malloc/malloc.c +@@ -5042,7 +5042,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes) + INTERNAL_SIZE_T size; + + nb = checked_request2size (bytes); +- if (nb == 0) ++ if (nb == 0 || alignment > PTRDIFF_MAX) + { + __set_errno (ENOMEM); + return NULL; +@@ -5058,7 +5058,10 @@ _int_memalign (mstate av, size_t alignment, size_t bytes) + we don't find anything in those bins, the common malloc code will + scan starting at 2x. */ + +- /* Call malloc with worst case padding to hit alignment. */ ++ /* Call malloc with worst case padding to hit alignment. ALIGNMENT is a ++ power of 2, so it tops out at (PTRDIFF_MAX >> 1) + 1, leaving plenty of ++ space to add MINSIZE and whatever checked_request2size adds to BYTES to ++ get NB. Consequently, total below also does not overflow. */ + m = (char *) (_int_malloc (av, nb + alignment + MINSIZE)); + + if (m == 0) +diff --git a/malloc/tst-malloc-too-large.c b/malloc/tst-malloc-too-large.c +index 5be6800b..206184ac 100644 +--- a/malloc/tst-malloc-too-large.c ++++ b/malloc/tst-malloc-too-large.c +@@ -151,7 +151,6 @@ test_large_allocations (size_t size) + } + + +-static long pagesize; + + /* This function tests the following aligned memory allocation functions + using several valid alignments and precedes each allocation test with a +@@ -170,8 +169,8 @@ test_large_aligned_allocations (size_t size) + + /* All aligned memory allocation functions expect an alignment that is a + power of 2. Given this, we test each of them with every valid +- alignment from 1 thru PAGESIZE. */ +- for (align = 1; align <= pagesize; align *= 2) ++ alignment for the type of ALIGN, i.e. until it wraps to 0. */ ++ for (align = 1; align > 0; align <<= 1) + { + test_setup (); + #if __GNUC_PREREQ (7, 0) +@@ -264,11 +263,6 @@ do_test (void) + DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than="); + #endif + +- /* Aligned memory allocation functions need to be tested up to alignment +- size equivalent to page size, which should be a power of 2. */ +- pagesize = sysconf (_SC_PAGESIZE); +- TEST_VERIFY_EXIT (powerof2 (pagesize)); +- + /* Loop 1: Ensure that all allocations with SIZE close to SIZE_MAX, i.e. + in the range (SIZE_MAX - 2^14, SIZE_MAX], fail. + +-- +2.45.4 + diff --git a/SPECS/glibc/CVE-2026-0915.patch b/SPECS/glibc/CVE-2026-0915.patch new file mode 100644 index 00000000000..8e923b6e3cd --- /dev/null +++ b/SPECS/glibc/CVE-2026-0915.patch @@ -0,0 +1,79 @@ +From ab36888721c2d315887b6056f996218dc689168f Mon Sep 17 00:00:00 2001 +From: Carlos O'Donell +Date: Thu, 15 Jan 2026 15:09:38 -0500 +Subject: [PATCH] resolv: Fix NSS DNS backend for getnetbyaddr (CVE-2026-0915) + +The default network value of zero for net was never tested for and +results in a DNS query constructed from uninitialized stack bytes. +The solution is to provide a default query for the case where net +is zero. 
+ +Adding a test case for this was straight forward given the existence of +tst-resolv-network and if the test is added without the fix you observe +this failure: + +FAIL: resolv/tst-resolv-network +original exit status 1 +error: tst-resolv-network.c:174: invalid QNAME: \146\218\129\128 +error: 1 test failures + +With a random QNAME resulting from the use of uninitialized stack bytes. + +After the fix the test passes. + +Additionally verified using wireshark before and after to ensure +on-the-wire bytes for the DNS query were as expected. + +No regressions on x86_64. + +Reviewed-by: Florian Weimer +(cherry picked from commit e56ff82d5034ec66c6a78f517af6faa427f65b0b) +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/bminor/glibc/commit/49125ffc8e1674dc2a100dfdc5b78796f22e16f2.patch +--- + resolv/nss_dns/dns-network.c | 4 ++++ + resolv/tst-resolv-network.c | 6 ++++++ + 2 files changed, 10 insertions(+) + +diff --git a/resolv/nss_dns/dns-network.c b/resolv/nss_dns/dns-network.c +index 1e6511a4..4c365660 100644 +--- a/resolv/nss_dns/dns-network.c ++++ b/resolv/nss_dns/dns-network.c +@@ -207,6 +207,10 @@ _nss_dns_getnetbyaddr_r (uint32_t net, int type, struct netent *result, + sprintf (qbuf, "%u.%u.%u.%u.in-addr.arpa", net_bytes[3], net_bytes[2], + net_bytes[1], net_bytes[0]); + break; ++ default: ++ /* Default network (net is originally zero). */ ++ strcpy (qbuf, "0.0.0.0.in-addr.arpa"); ++ break; + } + + net_buffer.buf = orig_net_buffer = (querybuf *) alloca (1024); +diff --git a/resolv/tst-resolv-network.c b/resolv/tst-resolv-network.c +index ada71371..19b847d8 100644 +--- a/resolv/tst-resolv-network.c ++++ b/resolv/tst-resolv-network.c +@@ -46,6 +46,9 @@ handle_code (const struct resolv_response_context *ctx, + { + switch (code) + { ++ case 0: ++ send_ptr (b, qname, qclass, qtype, "0.in-addr.arpa"); ++ break; + case 1: + send_ptr (b, qname, qclass, qtype, "1.in-addr.arpa"); + break; +@@ -265,6 +268,9 @@ do_test (void) + "error: TRY_AGAIN\n"); + + /* Lookup by address, success cases. 
*/ ++ check_reverse (0, ++ "name: 0.in-addr.arpa\n" ++ "net: 0x00000000\n"); + check_reverse (1, + "name: 1.in-addr.arpa\n" + "net: 0x00000001\n"); +-- +2.45.4 + diff --git a/SPECS/glibc/glibc.spec b/SPECS/glibc/glibc.spec index b1b849ac3a0..016480cfd34 100644 --- a/SPECS/glibc/glibc.spec +++ b/SPECS/glibc/glibc.spec @@ -10,7 +10,7 @@ Summary: Main C library Name: glibc Version: 2.38 -Release: 16%{?dist} +Release: 18%{?dist} License: BSD AND GPLv2+ AND Inner-Net AND ISC AND LGPLv2+ AND MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -53,6 +53,9 @@ Patch22: CVE-2025-8058.patch # Patches for testing Patch100: 0001-Remove-Wno-format-cflag-from-tests.patch +Patch101: CVE-2026-0861.patch +Patch102: CVE-2026-0915.patch +Patch103: CVE-2025-15281.patch BuildRequires: bison BuildRequires: gawk @@ -382,6 +385,12 @@ grep "^FAIL: string/test-mempcpy" tests.sum >/dev/null && n=$((n+1)) ||: %exclude %{_libdir}/locale/C.utf8 %changelog +* Thu Jan 22 2026 Azure Linux Security Servicing Account - 2.38-18 +- Patch for CVE-2025-15281 + +* Mon Jan 19 2026 Azure Linux Security Servicing Account - 2.38-17 +- Patch for CVE-2026-0861, CVE-2026-0915 + * Fri Nov 07 2025 Andrew Phelps - 2.38-16 - Ignore additional expected package test failures diff --git a/SPECS/gnupg2/CVE-2026-24882.patch b/SPECS/gnupg2/CVE-2026-24882.patch new file mode 100644 index 00000000000..d1548c18b15 --- /dev/null +++ b/SPECS/gnupg2/CVE-2026-24882.patch @@ -0,0 +1,64 @@ +From b23aa406f96b773fc2c8cc2cf63451f59a435350 Mon Sep 17 00:00:00 2001 +From: Werner Koch +Date: Mon, 26 Jan 2026 11:13:44 +0100 +Subject: [PATCH] tpm: Fix possible buffer overflow in PKDECRYPT + +* tpm2d/tpm2.c (tpm2_ecc_decrypt): Bail out on too long CIPHERTEXT. +(tpm2_rsa_decrypt): Ditto. +-- + +GnuPG-bug-id: 8045 +Co-authored-by: NIIBE Yutaka +Reported-by: OpenAI Security Research +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/gpg/gnupg/commit/93fa34d9a346.patch +--- + tpm2d/tpm2.c | 22 +++++++++++++++++++++- + 1 file changed, 21 insertions(+), 1 deletion(-) + +diff --git a/tpm2d/tpm2.c b/tpm2d/tpm2.c +index 3e908dd..cd0347c 100644 +--- a/tpm2d/tpm2.c ++++ b/tpm2d/tpm2.c +@@ -917,10 +917,20 @@ tpm2_ecc_decrypt (ctrl_t ctrl, TSS_CONTEXT *tssc, TPM_HANDLE key, + size_t len; + int ret; + ++#if defined(TPM2_MAX_ECC_KEY_BYTES) /* Intel stack */ ++ if (ciphertext_len > 2*TPM2_MAX_ECC_KEY_BYTES + 1) ++ return GPG_ERR_TOO_LARGE; ++#elif defined(MAX_ECC_KEY_BYTES) /* IBM stack */ ++ if (ciphertext_len > 2*MAX_ECC_KEY_BYTES + 1) ++ return GPG_ERR_TOO_LARGE; ++#else ++# error TMP2 header are not correctly installed ++#endif ++ + /* This isn't really a decryption per se. The ciphertext actually + * contains an EC Point which we must multiply by the private key number. + * +- * The reason is to generate a diffe helman agreement on a shared ++ * The reason is to generate a diffie-hellman agreement on a shared + * point. This shared point is then used to generate the per + * session encryption key. 
+ */ +@@ -976,6 +986,16 @@ tpm2_rsa_decrypt (ctrl_t ctrl, TSS_CONTEXT *tssc, TPM_HANDLE key, + TPM_HANDLE ah; + char *auth; + ++#if defined(TPM2_MAX_RSA_KEY_BYTES) /* Intel stack */ ++ if (ciphertext_len > TPM2_MAX_RSA_KEY_BYTES) ++ return GPG_ERR_TOO_LARGE; ++#elif defined(MAX_RSA_KEY_BYTES) /* IBM stack */ ++ if (ciphertext_len > MAX_RSA_KEY_BYTES) ++ return GPG_ERR_TOO_LARGE; ++#else ++# error TMP2 header are not correctly installed ++#endif ++ + inScheme.scheme = TPM_ALG_RSAES; + /* + * apparent gcrypt error: occasionally rsa ciphertext will +-- +2.45.4 + diff --git a/SPECS/gnupg2/gnupg2.signatures.json b/SPECS/gnupg2/gnupg2.signatures.json index eff1528cffc..253cb6ed082 100644 --- a/SPECS/gnupg2/gnupg2.signatures.json +++ b/SPECS/gnupg2/gnupg2.signatures.json @@ -1,5 +1,5 @@ { - "Signatures": { - "gnupg-2.4.7.tar.bz2": "7b24706e4da7e0e3b06ca068231027401f238102c41c909631349dcc3b85eb46" - } -} \ No newline at end of file + "Signatures": { + "gnupg-2.4.9.tar.bz2": "dd17ab2e9a04fd79d39d853f599cbc852062ddb9ab52a4ddeb4176fd8b302964" + } +} diff --git a/SPECS/gnupg2/gnupg2.spec b/SPECS/gnupg2/gnupg2.spec index 23b380ed71a..d62ae77da0e 100644 --- a/SPECS/gnupg2/gnupg2.spec +++ b/SPECS/gnupg2/gnupg2.spec @@ -1,13 +1,14 @@ Summary: OpenPGP standard implementation used for encrypted communication and data storage. Name: gnupg2 -Version: 2.4.7 -Release: 1%{?dist} +Version: 2.4.9 +Release: 2%{?dist} License: BSD and CC0 and GPLv2+ and LGPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux Group: Applications/Cryptography. URL: https://gnupg.org/index.html Source0: https://gnupg.org/ftp/gcrypt/gnupg/gnupg-%{version}.tar.bz2 +Patch0: CVE-2026-24882.patch BuildRequires: zlib-devel BuildRequires: bzip2-devel BuildRequires: readline-devel @@ -48,7 +49,7 @@ Requires: %{name} = %{version}-%{release} These are the additional language files of gnupg2 %prep -%autosetup -n gnupg-%{version} +%autosetup -p1 -n gnupg-%{version} %build # Prevent GnuPG from using keyboxd for storing keys. 
@@ -93,7 +94,6 @@ ln -s $(pwd)/bin/gpg $(pwd)/bin/gpg2 %{_mandir}/man1/* %{_mandir}/man7/* %{_mandir}/man8/* -%{_mandir}/manh/* %{_infodir}/gnupg* %{_libexecdir}/* %{_datadir}/gnupg/* @@ -105,6 +105,15 @@ ln -s $(pwd)/bin/gpg $(pwd)/bin/gpg2 %defattr(-,root,root) %changelog +* Tue Feb 03 2026 Azure Linux Security Servicing Account - 2.4.9-2 +- Patch for CVE-2026-24882 + +* Mon Jan 05 2026 CBL-Mariner Servicing Account - 2.4.9-1 +- Auto-upgrade to 2.4.9 - for CVE-2025-68973 + +* Mon Dec 22 2025 Ratiranjan Behera - 2.4.8-1 +- Upgrade gnupg2 to 2.4.8 for CVE-2025-30258 + * Mon Jun 23 2025 Kavya Sree Kaitepalli - 2.4.7-1 - Upgrade to version 2.4.7 diff --git a/SPECS/gnutls/CVE-2025-13151.patch b/SPECS/gnutls/CVE-2025-13151.patch new file mode 100644 index 00000000000..5ad15b7ba72 --- /dev/null +++ b/SPECS/gnutls/CVE-2025-13151.patch @@ -0,0 +1,42 @@ +From eef7621da1f785e18481bf746af4e08e5f21495e Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Mon, 12 Jan 2026 16:41:13 +0000 +Subject: [PATCH] Fix CVE-2025-13151: Prevent stack-based buffer overflow by + increasing buffer size in decoding.c; Update NEWS with vulnerability fix note + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://gitlab.com/gnutls/libtasn1/-/commit/d276cc495a2a32b182c3c39851f1ba58f2d9f9b8.patch +--- + NEWS | 2 ++ + lib/minitasn1/decoding.c | 2 +- + 2 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/NEWS b/NEWS +index 5e32a3b..c9852ba 100644 +--- a/NEWS ++++ b/NEWS +@@ -5,6 +5,8 @@ Copyright (C) 2000-2016 Free Software Foundation, Inc. + Copyright (C) 2013-2019 Nikos Mavrogiannopoulos + See the end for copying conditions. + ++- Fix for vulnerbaility CVE-2025-13151 Stack-based buffer overflow ++ + * Version 3.8.3 (released 2024-01-16) + + ** libgnutls: Fix more timing side-channel inside RSA-PSK key exchange +diff --git a/lib/minitasn1/decoding.c b/lib/minitasn1/decoding.c +index b9245c4..bc45138 100644 +--- a/lib/minitasn1/decoding.c ++++ b/lib/minitasn1/decoding.c +@@ -1976,7 +1976,7 @@ int + asn1_expand_octet_string (asn1_node_const definitions, asn1_node * element, + const char *octetName, const char *objectName) + { +- char name[2 * ASN1_MAX_NAME_SIZE + 1], value[ASN1_MAX_NAME_SIZE]; ++ char name[2 * ASN1_MAX_NAME_SIZE + 2], value[ASN1_MAX_NAME_SIZE]; + int retCode = ASN1_SUCCESS, result; + int len, len2, len3; + asn1_node_const p2; +-- +2.45.4 + diff --git a/SPECS/gnutls/CVE-2025-9820.patch b/SPECS/gnutls/CVE-2025-9820.patch new file mode 100644 index 00000000000..c31dc2d8c31 --- /dev/null +++ b/SPECS/gnutls/CVE-2025-9820.patch @@ -0,0 +1,236 @@ +From 1d56f96f6ab5034d677136b9d50b5a75dff0faf5 Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Tue, 18 Nov 2025 13:17:55 +0900 +Subject: [PATCH] pkcs11: avoid stack overwrite when initializing a token + +If gnutls_pkcs11_token_init is called with label longer than 32 +characters, the internal storage used to blank-fill it would +overflow. This adds a guard to prevent that. 
+ +Signed-off-by: Daiki Ueno + +Upstream Patch reference: +https://gitlab.com/gnutls/gnutls/-/commit/1d56f96f6ab5034d677136b9d50b5a75dff0faf5.patch +--- + lib/pkcs11_write.c | 5 +- + tests/Makefile.am | 2 +- + tests/pkcs11/long-label.c | 164 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 168 insertions(+), 3 deletions(-) + create mode 100644 tests/pkcs11/long-label.c + +diff --git a/lib/pkcs11_write.c b/lib/pkcs11_write.c +index a3201dd..e923dcd 100644 +--- a/lib/pkcs11_write.c ++++ b/lib/pkcs11_write.c +@@ -28,6 +28,7 @@ + #include "pkcs11x.h" + #include "x509/common.h" + #include "pk.h" ++#include "minmax.h" + + static const ck_bool_t tval = 1; + static const ck_bool_t fval = 0; +@@ -1170,7 +1171,7 @@ int gnutls_pkcs11_delete_url(const char *object_url, unsigned int flags) + * gnutls_pkcs11_token_init: + * @token_url: A PKCS #11 URL specifying a token + * @so_pin: Security Officer's PIN +- * @label: A name to be used for the token ++ * @label: A name to be used for the token, at most 32 characters + * + * This function will initialize (format) a token. If the token is + * at a factory defaults state the security officer's PIN given will be +@@ -1208,7 +1209,7 @@ int gnutls_pkcs11_token_init(const char *token_url, const char *so_pin, + /* so it seems memset has other uses than zeroing! */ + memset(flabel, ' ', sizeof(flabel)); + if (label != NULL) +- memcpy(flabel, label, strlen(label)); ++ memcpy(flabel, label, MIN(sizeof(flabel), strlen(label))); + + rv = pkcs11_init_token(module, slot, (uint8_t *)so_pin, strlen(so_pin), + (uint8_t *)flabel); +diff --git a/tests/Makefile.am b/tests/Makefile.am +index babf3be..5367ff2 100644 +--- a/tests/Makefile.am ++++ b/tests/Makefile.am +@@ -493,7 +493,7 @@ pathbuf_CPPFLAGS = $(AM_CPPFLAGS) \ + if ENABLE_PKCS11 + if !WINDOWS + ctests += tls13/post-handshake-with-cert-pkcs11 pkcs11/tls-neg-pkcs11-no-key \ +- global-init-override pkcs11/distrust-after ++ global-init-override pkcs11/distrust-after pkcs11/long-label + tls13_post_handshake_with_cert_pkcs11_DEPENDENCIES = libpkcs11mock2.la libutils.la + tls13_post_handshake_with_cert_pkcs11_LDADD = $(LDADD) $(LIBDL) + pkcs11_tls_neg_pkcs11_no_key_DEPENDENCIES = libpkcs11mock2.la libutils.la +diff --git a/tests/pkcs11/long-label.c b/tests/pkcs11/long-label.c +new file mode 100644 +index 0000000..a70bc97 +--- /dev/null ++++ b/tests/pkcs11/long-label.c +@@ -0,0 +1,164 @@ ++/* ++ * Copyright (C) 2025 Red Hat, Inc. ++ * ++ * Author: Daiki Ueno ++ * ++ * This file is part of GnuTLS. ++ * ++ * GnuTLS is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * GnuTLS is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this program. If not, see ++ */ ++ ++#ifdef HAVE_CONFIG_H ++#include "config.h" ++#endif ++ ++#include ++#include ++#include ++ ++#if defined(_WIN32) ++ ++int main(void) ++{ ++ exit(77); ++} ++ ++#else ++ ++#include ++#include ++#include ++ ++#include "cert-common.h" ++#include "pkcs11/softhsm.h" ++#include "utils.h" ++ ++/* This program tests that a token can be initialized with ++ * a label longer than 32 characters. 
++ */ ++ ++static void tls_log_func(int level, const char *str) ++{ ++ fprintf(stderr, "server|<%d>| %s", level, str); ++} ++ ++#define PIN "1234" ++ ++#define CONFIG_NAME "softhsm-long-label" ++#define CONFIG CONFIG_NAME ".config" ++ ++static int pin_func(void *userdata, int attempt, const char *url, ++ const char *label, unsigned flags, char *pin, ++ size_t pin_max) ++{ ++ if (attempt == 0) { ++ strcpy(pin, PIN); ++ return 0; ++ } ++ return -1; ++} ++ ++static void test(const char *provider) ++{ ++ int ret; ++ size_t i; ++ ++ gnutls_pkcs11_init(GNUTLS_PKCS11_FLAG_MANUAL, NULL); ++ ++ success("test with %s\n", provider); ++ ++ if (debug) { ++ gnutls_global_set_log_function(tls_log_func); ++ gnutls_global_set_log_level(4711); ++ } ++ ++ /* point to SoftHSM token that libpkcs11mock4.so internally uses */ ++ setenv(SOFTHSM_ENV, CONFIG, 1); ++ ++ gnutls_pkcs11_set_pin_function(pin_func, NULL); ++ ++ ret = gnutls_pkcs11_add_provider(provider, "trusted"); ++ if (ret != 0) { ++ fail("gnutls_pkcs11_add_provider: %s\n", gnutls_strerror(ret)); ++ } ++ ++ /* initialize softhsm token */ ++ ret = gnutls_pkcs11_token_init( ++ SOFTHSM_URL, PIN, ++ "this is a very long label whose length exceeds 32"); ++ if (ret < 0) { ++ fail("gnutls_pkcs11_token_init: %s\n", gnutls_strerror(ret)); ++ } ++ ++ for (i = 0;; i++) { ++ char *url = NULL; ++ ++ ret = gnutls_pkcs11_token_get_url(i, 0, &url); ++ if (ret < 0) ++ break; ++ if (strstr(url, ++ "token=this%20is%20a%20very%20long%20label%20whose")) ++ break; ++ } ++ if (ret < 0) ++ fail("gnutls_pkcs11_token_get_url: %s\n", gnutls_strerror(ret)); ++ ++ gnutls_pkcs11_deinit(); ++} ++ ++void doit(void) ++{ ++ const char *bin; ++ const char *lib; ++ char buf[128]; ++ ++ if (gnutls_fips140_mode_enabled()) ++ exit(77); ++ ++ /* this must be called once in the program */ ++ global_init(); ++ ++ /* we call gnutls_pkcs11_init manually */ ++ gnutls_pkcs11_deinit(); ++ ++ /* check if softhsm module is loadable */ ++ lib = softhsm_lib(); ++ ++ /* initialize SoftHSM token that libpkcs11mock4.so internally uses */ ++ bin = softhsm_bin(); ++ ++ set_softhsm_conf(CONFIG); ++ snprintf(buf, sizeof(buf), ++ "%s --init-token --slot 0 --label test --so-pin " PIN ++ " --pin " PIN, ++ bin); ++ system(buf); ++ ++ test(lib); ++ ++ lib = getenv("P11MOCKLIB4"); ++ if (lib == NULL) { ++ fail("P11MOCKLIB4 is not set\n"); ++ } ++ ++ set_softhsm_conf(CONFIG); ++ snprintf(buf, sizeof(buf), ++ "%s --init-token --slot 0 --label test --so-pin " PIN ++ " --pin " PIN, ++ bin); ++ system(buf); ++ ++ test(lib); ++} ++#endif /* _WIN32 */ +-- +2.43.0 + diff --git a/SPECS/gnutls/gnutls.spec b/SPECS/gnutls/gnutls.spec index 40142f7d226..1e7de6aa25d 100644 --- a/SPECS/gnutls/gnutls.spec +++ b/SPECS/gnutls/gnutls.spec @@ -1,7 +1,7 @@ Summary: The GnuTLS Transport Layer Security Library Name: gnutls Version: 3.8.3 -Release: 6%{?dist} +Release: 8%{?dist} License: GPLv3+ AND LGPLv2.1+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -18,6 +18,8 @@ Patch5: CVE-2025-32990.patch Patch6: CVE-2025-32989.patch Patch7: CVE-2025-32988.patch Patch8: CVE-2025-6395.patch +Patch9: CVE-2025-13151.patch +Patch10: CVE-2025-9820.patch BuildRequires: autogen-libopts-devel BuildRequires: gc-devel BuildRequires: libtasn1-devel @@ -99,6 +101,12 @@ sed -i 's/TESTS += test-ciphers-openssl.sh//' tests/slow/Makefile.am %{_mandir}/man3/* %changelog +* Wed Jan 28 2026 Akhila Guruju - 3.8.3-8 +- Patch CVE-2025-9820 + +* Mon Jan 12 2026 Azure Linux Security Servicing Account - 3.8.3-7 +- Patch for CVE-2025-13151 + * Tue Jul 15 2025 
Azure Linux Security Servicing Account - 3.8.3-6 - Patch for CVE-2025-6395 diff --git a/SPECS/golang/golang-1.24.signatures.json b/SPECS/golang/golang-1.24.signatures.json index d9f98f24ad7..a313ebdedbb 100644 --- a/SPECS/golang/golang-1.24.signatures.json +++ b/SPECS/golang/golang-1.24.signatures.json @@ -3,7 +3,7 @@ "go.20230802.5.src.tar.gz": "56b9e0e0c3c13ca95d5efa6de4e7d49a9d190eca77919beff99d33cd3fa74e95", "go.20240206.2.src.tar.gz": "7982e0011aa9ab95fd0530404060410af4ba57326d26818690f334fdcb6451cd", "go1.22.12-20250211.4.src.tar.gz": "e1cc3bff8fdf1f24843ffc9f0eaddfd344eb40fd9ca0d9ba2965165be519eeb7", - "go1.24.11-20251202.3.src.tar.gz": "7867b785bb3eb9c581f5d2a5f4bcf3b9d5111805ceec222065a9c31c049a8e05", + "go1.24.12-20260116.10.src.tar.gz": "d449be4b3ec37831f8515da709764c9a17e8ad758be981b71b9f79b2ed6e38bd", "go1.4-bootstrap-20171003.tar.gz": "f4ff5b5eb3a3cae1c993723f3eab519c5bae18866b5e5f96fe1102f0cb5c3e52" } } diff --git a/SPECS/golang/golang-1.24.spec b/SPECS/golang/golang-1.24.spec index 8af2e8d898f..6a0551cfdf8 100644 --- a/SPECS/golang/golang-1.24.spec +++ b/SPECS/golang/golang-1.24.spec @@ -1,6 +1,6 @@ %global goroot %{_libdir}/golang %global gopath %{_datadir}/gocode -%global ms_go_filename go1.24.11-20251202.3.src.tar.gz +%global ms_go_filename go1.24.12-20260116.10.src.tar.gz %global ms_go_revision 1 %ifarch aarch64 %global gohostarch arm64 @@ -14,7 +14,7 @@ %define __find_requires %{nil} Summary: Go Name: golang -Version: 1.24.11 +Version: 1.24.12 Release: 1%{?dist} License: BSD-3-Clause Vendor: Microsoft Corporation @@ -160,6 +160,9 @@ fi %{_bindir}/* %changelog +* Tue Jan 20 2026 bot-for-go[bot] <199222863+bot-for-go[bot]@users.noreply.github.com> - 1.24.12-1 +- Bump version to 1.24.12-1 + * Tue Dec 02 2025 bot-for-go[bot] <199222863+bot-for-go[bot]@users.noreply.github.com> - 1.24.11-1 - Bump version to 1.24.11-1 diff --git a/SPECS/golang/golang.signatures.json b/SPECS/golang/golang.signatures.json index 686ca395338..11abfdb80ef 100644 --- a/SPECS/golang/golang.signatures.json +++ b/SPECS/golang/golang.signatures.json @@ -3,7 +3,7 @@ "go.20230802.5.src.tar.gz": "56b9e0e0c3c13ca95d5efa6de4e7d49a9d190eca77919beff99d33cd3fa74e95", "go.20240206.2.src.tar.gz": "7982e0011aa9ab95fd0530404060410af4ba57326d26818690f334fdcb6451cd", "go1.22.12-20250211.4.src.tar.gz": "e1cc3bff8fdf1f24843ffc9f0eaddfd344eb40fd9ca0d9ba2965165be519eeb7", - "go1.25.5-20251202.2.src.tar.gz": "7632d7b5bee467330560019413aa2c1c9f6464ff2ac5f88fbbe3fda2d8a0f542", + "go1.25.6-20260116.7.src.tar.gz": "31e05b6f41bd327a9c9503a5b4fd3039d3a60923575d4942cf31eedbaf258db0", "go1.4-bootstrap-20171003.tar.gz": "f4ff5b5eb3a3cae1c993723f3eab519c5bae18866b5e5f96fe1102f0cb5c3e52" } } diff --git a/SPECS/golang/golang.spec b/SPECS/golang/golang.spec index 713603138c8..4fd6f9bdb83 100644 --- a/SPECS/golang/golang.spec +++ b/SPECS/golang/golang.spec @@ -1,6 +1,6 @@ %global goroot %{_libdir}/golang %global gopath %{_datadir}/gocode -%global ms_go_filename go1.25.5-20251202.2.src.tar.gz +%global ms_go_filename go1.25.6-20260116.7.src.tar.gz %global ms_go_revision 1 %ifarch aarch64 %global gohostarch arm64 @@ -14,7 +14,7 @@ %define __find_requires %{nil} Summary: Go Name: golang -Version: 1.25.5 +Version: 1.25.6 Release: 1%{?dist} License: BSD-3-Clause Vendor: Microsoft Corporation @@ -160,6 +160,9 @@ fi %{_bindir}/* %changelog +* Tue Jan 20 2026 bot-for-go[bot] <199222863+bot-for-go[bot]@users.noreply.github.com> - 1.25.6-1 +- Bump version to 1.25.6-1 + * Tue Dec 02 2025 bot-for-go[bot] 
<199222863+bot-for-go[bot]@users.noreply.github.com> - 1.25.5-1 - Bump version to 1.25.5-1 diff --git a/SPECS/grub2/CVE-2025-61661.patch b/SPECS/grub2/CVE-2025-61661.patch new file mode 100644 index 00000000000..355cf91cfb0 --- /dev/null +++ b/SPECS/grub2/CVE-2025-61661.patch @@ -0,0 +1,35 @@ +From 549a9cc372fd0b96a4ccdfad0e12140476cc62a3 Mon Sep 17 00:00:00 2001 +From: Jamie +Date: Mon, 14 Jul 2025 09:52:59 +0100 +Subject: [PATCH] commands/usbtest: Use correct string length field + +An incorrect length field is used for buffer allocation. This leads to +grub_utf16_to_utf8() receiving an incorrect/different length and possibly +causing OOB write. This makes sure to use the correct length. + +Fixes: CVE-2025-61661 + +Reported-by: Jamie +Signed-off-by: Jamie +Reviewed-by: Daniel Kiper + +Upstream Patch Reference: https://gitweb.git.savannah.gnu.org/gitweb/?p=grub.git;a=patch;h=549a9cc372fd0b96a4ccdfad0e12140476cc62a3 +--- + grub-core/commands/usbtest.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/grub-core/commands/usbtest.c b/grub-core/commands/usbtest.c +index 2c6d93fe6..8ef187a9a 100644 +--- a/grub-core/commands/usbtest.c ++++ b/grub-core/commands/usbtest.c +@@ -99,7 +99,7 @@ grub_usb_get_string (grub_usb_device_t dev, grub_uint8_t index, int langid, + return GRUB_USB_ERR_NONE; + } + +- *string = grub_malloc (descstr.length * 2 + 1); ++ *string = grub_malloc (descstrp->length * 2 + 1); + if (! *string) + { + grub_free (descstrp); +-- +2.39.5 diff --git a/SPECS/grub2/CVE-2025-61662.patch b/SPECS/grub2/CVE-2025-61662.patch new file mode 100644 index 00000000000..e5a1fe21dda --- /dev/null +++ b/SPECS/grub2/CVE-2025-61662.patch @@ -0,0 +1,67 @@ +From 8ed78fd9f0852ab218cc1f991c38e5a229e43807 Mon Sep 17 00:00:00 2001 +From: Alec Brown +Date: Thu, 21 Aug 2025 21:14:06 +0000 +Subject: [PATCH] gettext/gettext: Unregister gettext command on module unload + +When the gettext module is loaded, the gettext command is registered but +isn't unregistered when the module is unloaded. We need to add a call to +grub_unregister_command() when unloading the module. + +Fixes: CVE-2025-61662 + +Reported-by: Alec Brown +Signed-off-by: Alec Brown +Reviewed-by: Daniel Kiper + +Upstream Patch Reference: https://gitweb.git.savannah.gnu.org/gitweb/?p=grub.git;a=patch;h=8ed78fd9f0852ab218cc1f991c38e5a229e43807 +--- + grub-core/gettext/gettext.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +diff --git a/grub-core/gettext/gettext.c b/grub-core/gettext/gettext.c +index 4d02e62..47509c3 100644 +--- a/grub-core/gettext/gettext.c ++++ b/grub-core/gettext/gettext.c +@@ -497,6 +497,8 @@ grub_cmd_translate (grub_command_t cmd __attribute__ ((unused)), + return 0; + } + ++static grub_command_t cmd; ++ + GRUB_MOD_INIT (gettext) + { + const char *lang; +@@ -516,13 +518,14 @@ GRUB_MOD_INIT (gettext) + grub_register_variable_hook ("locale_dir", NULL, read_main); + grub_register_variable_hook ("secondary_locale_dir", NULL, read_secondary); + +- grub_register_command_p1 ("gettext", grub_cmd_translate, +- N_("STRING"), +- /* TRANSLATORS: It refers to passing the string through gettext. +- So it's "translate" in the same meaning as in what you're +- doing now. +- */ +- N_("Translates the string with the current settings.")); ++ cmd = grub_register_command_p1 ("gettext", grub_cmd_translate, ++ N_("STRING"), ++ /* ++ * TRANSLATORS: It refers to passing the string through gettext. ++ * So it's "translate" in the same meaning as in what you're ++ * doing now. 
++ */ ++ N_("Translates the string with the current settings.")); + + /* Reload .mo file information if lang changes. */ + grub_register_variable_hook ("lang", NULL, grub_gettext_env_write_lang); +@@ -535,6 +538,8 @@ GRUB_MOD_INIT (gettext) + + GRUB_MOD_FINI (gettext) + { ++ grub_unregister_command (cmd); ++ + grub_gettext_delete_list (&main_context); + grub_gettext_delete_list (&secondary_context); + +-- +2.43.0 + diff --git a/SPECS/grub2/CVE-2025-61663.patch b/SPECS/grub2/CVE-2025-61663.patch new file mode 100644 index 00000000000..7d265f1ecff --- /dev/null +++ b/SPECS/grub2/CVE-2025-61663.patch @@ -0,0 +1,60 @@ +From 05d3698b8b03eccc49e53491bbd75dba15f40917 Mon Sep 17 00:00:00 2001 +From: Alec Brown +Date: Thu, 21 Aug 2025 21:14:07 +0000 +Subject: [PATCH] normal/main: Unregister commands on module unload + +When the normal module is loaded, the normal and normal_exit commands +are registered but aren't unregistered when the module is unloaded. We +need to add calls to grub_unregister_command() when unloading the module +for these commands. + +Fixes: CVE-2025-61663 +Fixes: CVE-2025-61664 + +Reported-by: Alec Brown +Signed-off-by: Alec Brown +Reviewed-by: Daniel Kiper + +Upstream Patch Reference: https://gitweb.git.savannah.gnu.org/gitweb/?p=grub.git;a=patch;h=05d3698b8b03eccc49e53491bbd75dba15f40917 +--- + grub-core/normal/main.c | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/grub-core/normal/main.c b/grub-core/normal/main.c +index c4ebe9e..65fa95b 100644 +--- a/grub-core/normal/main.c ++++ b/grub-core/normal/main.c +@@ -499,7 +499,7 @@ grub_mini_cmd_clear (struct grub_command *cmd __attribute__ ((unused)), + return 0; + } + +-static grub_command_t cmd_clear; ++static grub_command_t cmd_clear, cmd_normal, cmd_normal_exit; + + static void (*grub_xputs_saved) (const char *str); + static const char *features[] = { +@@ -541,10 +541,10 @@ GRUB_MOD_INIT(normal) + grub_env_export ("pager"); + + /* Register a command "normal" for the rescue mode. */ +- grub_register_command ("normal", grub_cmd_normal, +- 0, N_("Enter normal mode.")); +- grub_register_command ("normal_exit", grub_cmd_normal_exit, +- 0, N_("Exit from normal mode.")); ++ cmd_normal = grub_register_command ("normal", grub_cmd_normal, ++ 0, N_("Enter normal mode.")); ++ cmd_normal_exit = grub_register_command ("normal_exit", grub_cmd_normal_exit, ++ 0, N_("Exit from normal mode.")); + + /* Reload terminal colors when these variables are written to. 
*/ + grub_register_variable_hook ("color_normal", NULL, grub_env_write_color_normal); +@@ -584,4 +584,6 @@ GRUB_MOD_FINI(normal) + grub_register_variable_hook ("pager", 0, 0); + grub_fs_autoload_hook = 0; + grub_unregister_command (cmd_clear); ++ grub_unregister_command (cmd_normal); ++ grub_unregister_command (cmd_normal_exit); + } +-- +2.43.0 + diff --git a/SPECS/grub2/grub2.spec b/SPECS/grub2/grub2.spec index 97fcdc4cdc8..6b32d4e0e50 100644 --- a/SPECS/grub2/grub2.spec +++ b/SPECS/grub2/grub2.spec @@ -7,7 +7,7 @@ Summary: GRand Unified Bootloader Name: grub2 Version: 2.06 -Release: 25%{?dist} +Release: 26%{?dist} License: GPLv3+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -127,6 +127,9 @@ Patch: CVE-2025-0690.patch Patch: CVE-2024-45778.patch Patch: CVE-2025-0678.patch Patch: CVE-2024-45780.patch +Patch: CVE-2025-61661.patch +Patch: CVE-2025-61662.patch +Patch: CVE-2025-61663.patch # Following are included as part of above CVEs and kept here as nopatch # and commented out, because from patch command perspective, these files # have garbage content. @@ -464,6 +467,9 @@ cp $GRUB_PXE_MODULE_SOURCE $EFI_BOOT_DIR/$GRUB_PXE_MODULE_NAME %config(noreplace) %{_sysconfdir}/grub.d/41_custom %changelog +* Mon Nov 24 2025 Akhila Guruju - 2.06-26 +- Patch CVE-2025-61661, CVE-2025-61662 & CVE-2025-61663 + * Tue Jun 17 2025 Kshitiz Godara - 2.06-25 - Addressed following grub2 CVEs - CVE-2025-0684 diff --git a/SPECS/harfbuzz/CVE-2026-22693.patch b/SPECS/harfbuzz/CVE-2026-22693.patch new file mode 100644 index 00000000000..2a20c19bcd1 --- /dev/null +++ b/SPECS/harfbuzz/CVE-2026-22693.patch @@ -0,0 +1,31 @@ +From bb423ce7d5d1a7b8c111dc52a08020df0dc5bf67 Mon Sep 17 00:00:00 2001 +From: Behdad Esfahbod +Date: Fri, 9 Jan 2026 04:54:42 -0700 +Subject: [PATCH] malloc fail test (#5710) + +Fixes https://github.com/harfbuzz/harfbuzz/security/advisories/GHSA-xvjr-f2r9-c7ww + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/harfbuzz/harfbuzz/commit/1265ff8d990284f04d8768f35b0e20ae5f60daae.patch +--- + src/hb-ot-cmap-table.hh | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/src/hb-ot-cmap-table.hh b/src/hb-ot-cmap-table.hh +index e2e2581..2f7d727 100644 +--- a/src/hb-ot-cmap-table.hh ++++ b/src/hb-ot-cmap-table.hh +@@ -1534,6 +1534,10 @@ struct SubtableUnicodesCache { + { + SubtableUnicodesCache* cache = + (SubtableUnicodesCache*) hb_malloc (sizeof(SubtableUnicodesCache)); ++ ++ if (unlikely (!cache)) ++ return nullptr; ++ + new (cache) SubtableUnicodesCache (source_table); + return cache; + } +-- +2.45.4 + diff --git a/SPECS/harfbuzz/harfbuzz.spec b/SPECS/harfbuzz/harfbuzz.spec index 93362b045ec..01b68255a0c 100644 --- a/SPECS/harfbuzz/harfbuzz.spec +++ b/SPECS/harfbuzz/harfbuzz.spec @@ -1,7 +1,7 @@ Summary: opentype text shaping engine Name: harfbuzz Version: 8.3.0 -Release: 3%{?dist} +Release: 4%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -9,6 +9,7 @@ Group: System Environment/Libraries URL: https://harfbuzz.github.io/ Source0: https://github.com/%{name}/%{name}/releases/download/%{version}/%{name}-%{version}.tar.xz Patch0: CVE-2024-56732.patch +Patch1: CVE-2026-22693.patch BuildRequires: pkgconfig(cairo) BuildRequires: pkgconfig(freetype2) BuildRequires: pkgconfig(glib-2.0) @@ -91,6 +92,9 @@ find . 
-type f -name "*.py" -exec sed -i'' -e '1 s|^#!\s*/usr/bin/env\s\+python3 %{_libdir}/libharfbuzz-icu.so.* %changelog +* Mon Jan 12 2026 Azure Linux Security Servicing Account - 8.3.0-4 +- Patch for CVE-2026-22693 + * Wed Jan 08 2025 Sudipta Pandit - 8.3.0-3 - Patch for CVE-2024-56732 diff --git a/SPECS/hdf5/CVE-2025-2153.patch b/SPECS/hdf5/CVE-2025-2153.patch new file mode 100644 index 00000000000..5e0a5a0261e --- /dev/null +++ b/SPECS/hdf5/CVE-2025-2153.patch @@ -0,0 +1,43 @@ +From 4be883f34d8906bd907dcf0ddb17d47dad5357d3 Mon Sep 17 00:00:00 2001 +From: Glenn Song +Date: Mon, 8 Sep 2025 17:06:52 -0500 +Subject: [PATCH 01/14] Add release text + +Upstream patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5795.patch +--- + src/H5Ocache.c | 4 ++-- + src/H5Omessage.c | 3 +++ + 2 files changed, 5 insertions(+), 2 deletions(-) + +diff --git a/src/H5Ocache.c b/src/H5Ocache.c +index 87f321c..12c30cf 100644 +--- a/src/H5Ocache.c ++++ b/src/H5Ocache.c +@@ -1399,8 +1399,8 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t + else { + /* Check for message of unshareable class marked as "shareable" + */ +- if ((flags & H5O_MSG_FLAG_SHAREABLE) && H5O_msg_class_g[id] && +- !(H5O_msg_class_g[id]->share_flags & H5O_SHARE_IS_SHARABLE)) ++ if (((flags & H5O_MSG_FLAG_SHARED) || (flags & H5O_MSG_FLAG_SHAREABLE)) && ++ H5O_msg_class_g[id] && !(H5O_msg_class_g[id]->share_flags & H5O_SHARE_IS_SHARABLE)) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, + "message of unshareable class flagged as shareable"); + +diff --git a/src/H5Omessage.c b/src/H5Omessage.c +index 7190e46..fb9006c 100644 +--- a/src/H5Omessage.c ++++ b/src/H5Omessage.c +@@ -354,6 +354,9 @@ H5O__msg_write_real(H5F_t *f, H5O_t *oh, const H5O_msg_class_t *type, unsigned m + */ + assert(!(mesg_flags & H5O_MSG_FLAG_DONTSHARE)); + ++ /* Sanity check to see if the type is not sharable */ ++ assert(type->share_flags & H5O_SHARE_IS_SHARABLE); ++ + /* Remove the old message from the SOHM index */ + /* (It would be more efficient to try to share the message first, then + * delete it (avoiding thrashing the index in the case the ref. 
+-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-2310.patch b/SPECS/hdf5/CVE-2025-2310.patch new file mode 100644 index 00000000000..9a834ac8239 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-2310.patch @@ -0,0 +1,37 @@ +From 2af87ef880bf562f1607aa7b6559e5c596cc0233 Mon Sep 17 00:00:00 2001 +From: Matthew Larson +Date: Wed, 24 Sep 2025 15:26:20 -0500 +Subject: [PATCH 1/4] Add null-termination check during attr decode + +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5872.patch +--- + src/H5Oattr.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/hdf5-1.14.6/src/H5Oattr.c b/hdf5-1.14.6/src/H5Oattr.c +index 6d1d237..7bdaef7 100644 +--- a/src/H5Oattr.c ++++ b/src/H5Oattr.c +@@ -167,6 +167,11 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, name_len); /* Including null */ ++ ++ /* Verify that retrieved name length (including null byte) is valid */ ++ if (name_len <= 1) ++ HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, NULL, "decoded name length is invalid"); ++ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, attr->shared->dt_size); +@@ -190,6 +195,7 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u + */ + if (H5_IS_BUFFER_OVERFLOW(p, name_len, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); ++ + if (NULL == (attr->shared->name = H5MM_strndup((const char *)p, name_len - 1))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); + +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-2914.patch b/SPECS/hdf5/CVE-2025-2914.patch new file mode 100644 index 00000000000..2c978d35fe5 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-2914.patch @@ -0,0 +1,47 @@ +From 54f404b5ad8e63d99e3283646b543b2842a22fd3 Mon Sep 17 00:00:00 2001 +From: Binh-Minh +Date: Tue, 12 Aug 2025 20:06:42 -0400 +Subject: [PATCH] Refix of the attempts in PR-5209 + +This PR addresses the root cause of the issue by adding a sanity-check immediately +after reading the file space page size from the file. + +The same fuzzer in GH-5376 was used to verify that the assert before the vulnerability +had occurred and that an error indicating a corrupted file space page size replaced it. 
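
/*
 * Illustrative sketch (not HDF5 code) of the validate-right-after-decode
 * pattern the fix below applies to the file space page size.
 * decode_page_size() and the PAGE_SIZE_MAX bound are assumptions made for
 * this example only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_MAX (1024u * 1024u)    /* assumed upper bound for the sketch */

/* Returns 0 on success, -1 if the input is truncated or the value is implausible. */
static int decode_page_size(const uint8_t *buf, size_t buf_len, uint64_t *out)
{
    uint64_t v = 0;

    if (buf_len < sizeof(v))
        return -1;                       /* truncated input */
    memcpy(&v, buf, sizeof(v));          /* byte order ignored for brevity */

    if (v == 0 || v > PAGE_SIZE_MAX)
        return -1;                       /* corrupted size: fail, don't assert */
    *out = v;
    return 0;
}

int main(void)
{
    const uint8_t corrupt[8] = { 0 };    /* encodes page size 0 */
    uint64_t page_size;

    if (decode_page_size(corrupt, sizeof(corrupt), &page_size) != 0)
        fprintf(stderr, "invalid page size in file space info\n");
    return 0;
}
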
+ +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5722.patch +--- + src/H5Fsuper.c | 2 ++ + src/H5Ofsinfo.c | 3 +++ + 2 files changed, 5 insertions(+) + +diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c +index d9fe3a7..1c8dc6c 100644 +--- a/src/H5Fsuper.c ++++ b/src/H5Fsuper.c +@@ -746,6 +746,8 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, bool initial_read) + if (!(flags & H5O_MSG_FLAG_WAS_UNKNOWN)) { + H5O_fsinfo_t fsinfo; /* File space info message from superblock extension */ + ++ memset(&fsinfo, 0, sizeof(H5O_fsinfo_t)); ++ + /* f->shared->null_fsm_addr: Whether to drop free-space to the floor */ + /* The h5clear tool uses this property to tell the library + * to drop free-space to the floor +diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c +index 5b69235..2bb6ea6 100644 +--- a/src/H5Ofsinfo.c ++++ b/src/H5Ofsinfo.c +@@ -182,6 +182,9 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_DECODE_LENGTH(f, p, fsinfo->page_size); /* File space page size */ ++ /* Basic sanity check */ ++ if (fsinfo->page_size == 0 || fsinfo->page_size > H5F_FILE_SPACE_PAGE_SIZE_MAX) ++ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid page size in file space info"); + + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-2924.patch b/SPECS/hdf5/CVE-2025-2924.patch new file mode 100644 index 00000000000..0916a4f3b4f --- /dev/null +++ b/SPECS/hdf5/CVE-2025-2924.patch @@ -0,0 +1,36 @@ +From 422035e1c0a30f3b363a3994e62ac46f92db9b75 Mon Sep 17 00:00:00 2001 +From: Glenn Song +Date: Thu, 11 Sep 2025 16:24:33 -0500 +Subject: [PATCH 1/4] Add to sanity check + +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5814.patch +--- + src/H5HLcache.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/src/H5HLcache.c b/src/H5HLcache.c +index d0836fe..7f412d2 100644 +--- a/src/H5HLcache.c ++++ b/src/H5HLcache.c +@@ -225,6 +225,7 @@ H5HL__fl_deserialize(H5HL_t *heap) + /* check arguments */ + assert(heap); + assert(!heap->freelist); ++ HDcompile_assert(sizeof(hsize_t) == sizeof(uint64_t)); + + /* Build free list */ + free_block = heap->free_block; +@@ -232,6 +233,10 @@ H5HL__fl_deserialize(H5HL_t *heap) + const uint8_t *image; /* Pointer into image buffer */ + + /* Sanity check */ ++ ++ if (free_block > UINT64_MAX - (2 * heap->sizeof_size)) ++ HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, FAIL, "decoded heap block address overflow"); ++ + if ((free_block + (2 * heap->sizeof_size)) > heap->dblk_size) + HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, FAIL, "bad heap free list"); + +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-2925.patch b/SPECS/hdf5/CVE-2025-2925.patch new file mode 100644 index 00000000000..964805e8838 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-2925.patch @@ -0,0 +1,43 @@ +From c731305ad3717924a9f48d4e4929956e80ce2cb3 Mon Sep 17 00:00:00 2001 +From: Glenn Song +Date: Thu, 21 Aug 2025 11:36:23 -0500 +Subject: [PATCH 01/10] Fix issue5383 + +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5739.patch +--- + src/H5Centry.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/src/H5Centry.c b/src/H5Centry.c +index 1ca7479..aedcad8 100644 +--- a/src/H5Centry.c ++++ b/src/H5Centry.c +@@ 
-1051,9 +1051,14 @@ H5C__load_entry(H5F_t *f, + */ + do { + if (actual_len != len) { ++ /* Verify that the length isn't a bad value */ ++ if (len == 0) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "len is a bad value"); ++ + if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()"); + image = (uint8_t *)new_image; ++ + #if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); + #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ +@@ -1104,6 +1109,10 @@ H5C__load_entry(H5F_t *f, + if (H5C__verify_len_eoa(f, type, addr, &actual_len, true) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA"); + ++ /* Verify that the length isn't 0 */ ++ if (actual_len == 0) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len is a bad value"); ++ + /* Expand buffer to new size */ + if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()"); +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-2926.patch b/SPECS/hdf5/CVE-2025-2926.patch new file mode 100644 index 00000000000..23ccd33c56a --- /dev/null +++ b/SPECS/hdf5/CVE-2025-2926.patch @@ -0,0 +1,31 @@ +From b36c123a68f9f67f5a6de07fcd9caaf8586289c8 Mon Sep 17 00:00:00 2001 +From: Binh-Minh +Date: Tue, 16 Sep 2025 11:57:03 -0400 +Subject: [PATCH 1/7] Fix CVE-2025-2926, CVE-2025-2913 + +An image size was corrupted and decoded as 0 resulting in a NULL image buffer, +which caused a NULL pointer dereference when the image being copied to the buffer. +The invalid image size was caught in the PR #5710. This change catches right +before the copying. + +Fixes GH issue #5384 +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5841.patch +--- + src/H5Ocache.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/H5Ocache.c b/src/H5Ocache.c +index 12c30cf..4337d6e 100644 +--- a/src/H5Ocache.c ++++ b/src/H5Ocache.c +@@ -602,6 +602,7 @@ H5O__cache_chk_get_initial_load_size(void *_udata, size_t *image_len) + assert(udata); + assert(udata->oh); + assert(image_len); ++ assert(udata->size); + + /* Set the image length size */ + *image_len = udata->size; +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-44905.patch b/SPECS/hdf5/CVE-2025-44905.patch new file mode 100644 index 00000000000..f2fbc357d18 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-44905.patch @@ -0,0 +1,38 @@ +From 28ab45329218d9e41bd77929fd3e9cd8a80bd3c7 Mon Sep 17 00:00:00 2001 +From: Christian Wojek +Date: Sat, 11 Oct 2025 12:43:06 +0200 +Subject: [PATCH 1/5] Fixing CVE-2025-44905. A malformed HDF5 can cause reading + beyond a heap allocation. 
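
/*
 * Illustrative sketch (not HDF5 code) of the bounds check the fix below adds
 * before the scale-offset filter reads its 5-byte header and variable-length
 * minimum value. read_filter_header() is a hypothetical name.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, -1 if the buffer is too short for the declared field. */
static int read_filter_header(const uint8_t *buf, size_t buf_size)
{
    size_t minval_size;

    if (buf_size < 5)                    /* 4 bytes of minbits + 1 size byte */
        return -1;
    minval_size = buf[4];
    if (buf_size < 5 + minval_size)      /* would read past the allocation */
        return -1;

    /* ... safe to decode minbits and the minimum value here ... */
    return 0;
}

int main(void)
{
    uint8_t hdr[5] = { 0, 0, 0, 0, 200 };    /* claims a 200-byte minval */

    if (read_filter_header(hdr, sizeof(hdr)) != 0)
        fprintf(stderr, "buffer too short\n");
    return 0;
}
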
+ +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5915.patch +--- + src/H5Zscaleoffset.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c +index 048344b..fbf12d6 100644 +--- a/src/H5Zscaleoffset.c ++++ b/src/H5Zscaleoffset.c +@@ -1205,6 +1205,9 @@ H5Z__filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_valu + unsigned minval_size = 0; + + minbits = 0; ++ if (H5_IS_BUFFER_OVERFLOW((unsigned char *)*buf, 5, (unsigned char *)*buf + *buf_size - 1)) ++ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0, "buffer too short"); ++ + for (i = 0; i < 4; i++) { + minbits_mask = ((unsigned char *)*buf)[i]; + minbits_mask <<= i * 8; +@@ -1220,6 +1223,9 @@ H5Z__filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_valu + minval_size = sizeof(unsigned long long) <= ((unsigned char *)*buf)[4] ? sizeof(unsigned long long) + : ((unsigned char *)*buf)[4]; + minval = 0; ++ if (H5_IS_BUFFER_OVERFLOW((unsigned char *)*buf, 5 + minval_size, ++ (unsigned char *)*buf + *buf_size - 1)) ++ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0, "buffer too short"); + for (i = 0; i < minval_size; i++) { + minval_mask = ((unsigned char *)*buf)[5 + i]; + minval_mask <<= i * 8; +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-6269.patch b/SPECS/hdf5/CVE-2025-6269.patch new file mode 100644 index 00000000000..325faedcf65 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-6269.patch @@ -0,0 +1,279 @@ +From 2597b336272fa2d8e277826cd5a96507bad54cd6 Mon Sep 17 00:00:00 2001 +From: Binh-Minh +Date: Mon, 1 Sep 2025 03:23:38 -0400 +Subject: [PATCH 1/9] Fix security issue CVE-2025-6269 + +The GitHub issue #5579 included several security vulnerabilities in function +H5C__reconstruct_cache_entry(). + +This PR addressed them by: +- adding buffer size argument to the function +- adding buffer overflow checks +- adding input validations +- releasing allocated resource on failure + +These changes addressed the crashes reported. However, there is a skiplist +crash during the unwinding process that has to be investigated. 
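
/*
 * Illustrative sketch (not HDF5 code) of the general pattern the fix below
 * follows: carry the remaining buffer length next to the read cursor and
 * check it before every field is consumed. cursor_t, take_u8() and take_u16()
 * are hypothetical helpers invented for this example.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    const uint8_t *p;    /* current position */
    size_t         left; /* bytes remaining  */
} cursor_t;

static int take_u8(cursor_t *c, uint8_t *out)
{
    if (c->left < 1)
        return -1;                       /* would run off the end */
    *out = *c->p++;
    c->left -= 1;
    return 0;
}

static int take_u16(cursor_t *c, uint16_t *out)
{
    if (c->left < 2)
        return -1;
    *out = (uint16_t)(c->p[0] | (c->p[1] << 8));   /* little-endian for the sketch */
    c->p += 2;
    c->left -= 2;
    return 0;
}

int main(void)
{
    const uint8_t image[3] = { 0x05, 0x34, 0x12 };
    cursor_t c = { image, sizeof(image) };
    uint8_t type;
    uint16_t child_count;

    if (take_u8(&c, &type) < 0 || take_u16(&c, &child_count) < 0) {
        fprintf(stderr, "ran off end of input buffer while decoding\n");
        return 1;
    }
    printf("type=%u children=%u, %zu byte(s) left\n",
           (unsigned)type, (unsigned)child_count, c.left);
    return 0;
}
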
+ +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5850.patch +--- + src/H5Cimage.c | 90 ++++++++++++++++++++++++++++++++++++++------------ + src/H5Ocont.c | 5 +-- + 2 files changed, 71 insertions(+), 24 deletions(-) + +diff --git a/src/H5Cimage.c b/src/H5Cimage.c +index ec1af78..cca65fd 100644 +--- a/src/H5Cimage.c ++++ b/src/H5Cimage.c +@@ -118,7 +118,7 @@ do { \ + /* Helper routines */ + static size_t H5C__cache_image_block_entry_header_size(const H5F_t *f); + static size_t H5C__cache_image_block_header_size(const H5F_t *f); +-static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf); ++static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf, size_t buf_size); + #ifndef NDEBUG /* only used in assertions */ + static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, + unsigned entry_num); +@@ -131,7 +131,8 @@ static void H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_ + static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr); + static herr_t H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr); + static herr_t H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr); +-static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf); ++static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size, ++ const uint8_t **buf); + static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, bool create); + static herr_t H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr); + static herr_t H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr); +@@ -299,7 +300,7 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) + /* needed for sanity checks */ + fake_cache_ptr->image_len = cache_ptr->image_len; + q = (const uint8_t *)cache_ptr->image_buffer; +- status = H5C__decode_cache_image_header(f, fake_cache_ptr, &q); ++ status = H5C__decode_cache_image_header(f, fake_cache_ptr, &q, cache_ptr->image_len + 1); + assert(status >= 0); + + assert(NULL != p); +@@ -1269,7 +1270,7 @@ H5C__cache_image_block_header_size(const H5F_t *f) + *------------------------------------------------------------------------- + */ + static herr_t +-H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf) ++H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf, size_t buf_size) + { + uint8_t version; + uint8_t flags; +@@ -2372,6 +2373,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) + { + H5C_cache_entry_t *pf_entry_ptr; /* Pointer to prefetched entry */ + H5C_cache_entry_t *parent_ptr; /* Pointer to parent of prefetched entry */ ++ hsize_t image_len; /* Image length */ + const uint8_t *p; /* Pointer into image buffer */ + unsigned u, v; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ +@@ -2387,10 +2389,11 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) + assert(cache_ptr->image_len > 0); + + /* Decode metadata cache image header */ +- p = (uint8_t *)cache_ptr->image_buffer; +- if (H5C__decode_cache_image_header(f, cache_ptr, &p) < 0) ++ p = (uint8_t *)cache_ptr->image_buffer; ++ image_len = cache_ptr->image_len; ++ if (H5C__decode_cache_image_header(f, cache_ptr, &p, image_len + 1) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "cache image header decode failed"); 
+- assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_len); ++ assert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < image_len); + + /* The image_data_len and # of entries should be defined now */ + assert(cache_ptr->image_data_len > 0); +@@ -2402,7 +2405,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) + /* Create the prefetched entry described by the ith + * entry in cache_ptr->image_entrise. + */ +- if (NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &p))) ++ if (NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &image_len, &p))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed"); + + /* Note that we make no checks on available cache space before +@@ -2558,20 +2561,21 @@ done: + *------------------------------------------------------------------------- + */ + static H5C_cache_entry_t * +-H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **buf) ++H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, hsize_t *buf_size, const uint8_t **buf) + { + H5C_cache_entry_t *pf_entry_ptr = NULL; /* Reconstructed cache entry */ + uint8_t flags = 0; + bool is_dirty = false; ++ haddr_t eoa; ++ bool is_fd_parent = false; + #ifndef NDEBUG /* only used in assertions */ +- bool in_lru = false; +- bool is_fd_parent = false; +- bool is_fd_child = false; ++ bool in_lru = false; ++ bool is_fd_child = false; + #endif +- const uint8_t *p; + bool file_is_rw; +- H5C_cache_entry_t *ret_value = NULL; /* Return value */ +- ++ const uint8_t *p; ++ const uint8_t *p_end = *buf + *buf_size - 1; /* Pointer to last valid byte in buffer */ ++ H5C_cache_entry_t *ret_value = NULL; /* Return value */ + FUNC_ENTER_PACKAGE + + /* Sanity checks */ +@@ -2590,9 +2594,15 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b + p = *buf; + + /* Decode type id */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + pf_entry_ptr->prefetch_type_id = *p++; ++ if (pf_entry_ptr->prefetch_type_id < H5AC_BT_ID || pf_entry_ptr->prefetch_type_id >= H5AC_NTYPES) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "type id is out of valid range"); + + /* Decode flags */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + flags = *p++; + if (flags & H5C__MDCI_ENTRY_DIRTY_FLAG) + is_dirty = true; +@@ -2620,19 +2630,31 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b + pf_entry_ptr->is_dirty = (is_dirty && file_is_rw); + + /* Decode ring */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + pf_entry_ptr->ring = *p++; +- assert(pf_entry_ptr->ring > (uint8_t)(H5C_RING_UNDEFINED)); +- assert(pf_entry_ptr->ring < (uint8_t)(H5C_RING_NTYPES)); ++ if (pf_entry_ptr->ring >= (uint8_t)(H5C_RING_NTYPES)) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "ring is out of valid range"); + + /* Decode age */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + pf_entry_ptr->age = *p++; ++ if (pf_entry_ptr->age > H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "entry age is out of policy range"); + + /* Decode dependency child count */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) ++ 
HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, pf_entry_ptr->fd_child_count); +- assert((is_fd_parent && pf_entry_ptr->fd_child_count > 0) || +- (!is_fd_parent && pf_entry_ptr->fd_child_count == 0)); ++ if (is_fd_parent && pf_entry_ptr->fd_child_count <= 0) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "parent entry has no children"); ++ //else if (!is_fd_parent && pf_entry_ptr->fd_child_count != 0) ++ // HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "non-parent entry has children"); + + /* Decode dirty dependency child count */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count); + if (!file_is_rw) + pf_entry_ptr->fd_dirty_child_count = 0; +@@ -2640,20 +2662,32 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count"); + + /* Decode dependency parent count */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, pf_entry_ptr->fd_parent_count); + assert((is_fd_child && pf_entry_ptr->fd_parent_count > 0) || + (!is_fd_child && pf_entry_ptr->fd_parent_count == 0)); + + /* Decode index in LRU */ ++ if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + INT32DECODE(p, pf_entry_ptr->lru_rank); + assert((in_lru && pf_entry_ptr->lru_rank >= 0) || (!in_lru && pf_entry_ptr->lru_rank == -1)); + + /* Decode entry offset */ ++ if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_addr_decode(f, &p, &pf_entry_ptr->addr); +- if (!H5_addr_defined(pf_entry_ptr->addr)) +- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry offset"); ++ ++ /* Validate address range */ ++ eoa = H5F_get_eoa(f, H5FD_MEM_DEFAULT); ++ if (!H5_addr_defined(pf_entry_ptr->addr) || H5_addr_overflow(pf_entry_ptr->addr, pf_entry_ptr->size) || ++ H5_addr_ge(pf_entry_ptr->addr + pf_entry_ptr->size, eoa)) ++ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry address range"); + + /* Decode entry length */ ++ if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_SIZE(f), p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_DECODE_LENGTH(f, p, pf_entry_ptr->size); + if (pf_entry_ptr->size == 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry size"); +@@ -2674,6 +2708,9 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b + "memory allocation failed for fd parent addrs buffer"); + + for (u = 0; u < pf_entry_ptr->fd_parent_count; u++) { ++ ++ if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) ++ HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_addr_decode(f, &p, &(pf_entry_ptr->fd_parent_addrs[u])); + if (!H5_addr_defined(pf_entry_ptr->fd_parent_addrs[u])) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid flush dependency parent offset"); +@@ -2689,6 +2726,8 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b + #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + /* Copy the entry image from the cache image block */ ++ if (H5_IS_BUFFER_OVERFLOW(p, pf_entry_ptr->size, p_end)) ++ 
HGOTO_ERROR(H5E_CACHE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5MM_memcpy(pf_entry_ptr->image_ptr, p, pf_entry_ptr->size); + p += pf_entry_ptr->size; + +@@ -2704,13 +2743,20 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b + assert(pf_entry_ptr->size > 0 && pf_entry_ptr->size < H5C_MAX_ENTRY_SIZE); + + /* Update buffer pointer */ ++ /* Update buffer pointer and buffer len */ ++ *buf_size -= (hsize_t)(p - *buf); + *buf = p; + + ret_value = pf_entry_ptr; + + done: +- if (NULL == ret_value && pf_entry_ptr) ++ if (NULL == ret_value && pf_entry_ptr) { ++ if (pf_entry_ptr->image_ptr) ++ H5MM_xfree(pf_entry_ptr->image_ptr); ++ if (pf_entry_ptr->fd_parent_count > 0 && pf_entry_ptr->fd_parent_addrs) ++ H5MM_xfree(pf_entry_ptr->fd_parent_addrs); + pf_entry_ptr = H5FL_FREE(H5C_cache_entry_t, pf_entry_ptr); ++ } + + FUNC_LEAVE_NOAPI(ret_value) + } /* H5C__reconstruct_cache_entry() */ +diff --git a/src/H5Ocont.c b/src/H5Ocont.c +index 621095a..180b115 100644 +--- a/src/H5Ocont.c ++++ b/src/H5Ocont.c +@@ -93,6 +93,9 @@ H5O__cont_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "memory allocation failed"); + + /* Decode */ ++ ++ cont->chunkno = 0; ++ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_addr_decode(f, &p, &(cont->addr)); +@@ -101,8 +104,6 @@ H5O__cont_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_DECODE_LENGTH(f, p, cont->size); + +- cont->chunkno = 0; +- + /* Set return value */ + ret_value = cont; + +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-6750.patch b/SPECS/hdf5/CVE-2025-6750.patch new file mode 100644 index 00000000000..1eeb0286164 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-6750.patch @@ -0,0 +1,84 @@ +From aaaf32bb035f8b1ad8f634d799688587590d8544 Mon Sep 17 00:00:00 2001 +From: Binh-Minh +Date: Sun, 21 Sep 2025 22:29:27 -0400 +Subject: [PATCH 1/4] Fixes CVE-2025-6750 + +A heap buffer overflow occurred because an mtime message was not properly decoded, resulting in a buffer of size 0 being passed into the encoder. + +This PR added decoding for both old and new mtime messages which will allow invalid message size to be detected. + +Fixes #5549 +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5856.patch +--- + src/H5Ocache.c | 41 +++++++++++++++++++++++++++++++++++------ + 1 file changed, 35 insertions(+), 6 deletions(-) + +diff --git a/src/H5Ocache.c b/src/H5Ocache.c +index 4337d6e..d9a138f 100644 +--- a/src/H5Ocache.c ++++ b/src/H5Ocache.c +@@ -1266,6 +1266,9 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t + if (mesg_size != H5O_ALIGN_OH(oh, mesg_size)) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "message not aligned"); + ++ if (H5_IS_BUFFER_OVERFLOW(chunk_image, mesg_size, p_end)) ++ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "message size exceeds buffer end"); ++ + /* Message flags */ + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); +@@ -1298,12 +1301,6 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t + } + } + +- /* Try to detect invalidly formatted object header message that +- * extends past end of chunk. 
+- */ +- if (chunk_image + mesg_size > eom_ptr) +- HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "corrupt object header"); +- + /* Increment count of null messages */ + if (H5O_NULL_ID == id) + nullcnt++; +@@ -1450,6 +1447,38 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't decode refcount"); + oh->nlink = *refcount; + } ++ /* Check if message is an old mtime message */ ++ else if (H5O_MTIME_ID == id) { ++ time_t *mtime = NULL; ++ ++ /* Decode mtime message */ ++ mtime = ++ (time_t *)(H5O_MSG_MTIME->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, mesg->raw); ++ ++ /* Save the decoded old format mtime */ ++ if (!mtime) ++ HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, FAIL, "can't decode old format mtime"); ++ ++ /* Save 'native' form of mtime message and its value */ ++ mesg->native = mtime; ++ oh->ctime = *mtime; ++ } ++ /* Check if message is an new mtime message */ ++ else if (H5O_MTIME_NEW_ID == id) { ++ time_t *mtime = NULL; ++ ++ /* Decode mtime message */ ++ mtime = (time_t *)(H5O_MSG_MTIME_NEW->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, ++ mesg->raw); ++ ++ /* Save the decoded new format mtime */ ++ if (!mtime) ++ HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, FAIL, "can't decode new format mtime"); ++ ++ /* Save 'native' form of mtime message and its value */ ++ mesg->native = mtime; ++ oh->ctime = *mtime; ++ } + /* Check if message is a link message */ + else if (H5O_LINK_ID == id) { + /* Increment the count of link messages */ +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-6816.patch b/SPECS/hdf5/CVE-2025-6816.patch new file mode 100644 index 00000000000..80d5838000e --- /dev/null +++ b/SPECS/hdf5/CVE-2025-6816.patch @@ -0,0 +1,65 @@ +From b6d4a76c7a9309eba6e70fde0e1ecf0dd09d3d23 Mon Sep 17 00:00:00 2001 +From: Jordan Henderson +Date: Mon, 15 Sep 2025 12:26:10 -0500 +Subject: [PATCH] Fix issue with handling of corrupted object header + continuation messages + +An HDF5 file could be specifically constructed such that an object +header contained a corrupted continuation message which pointed +back to itself. This eventually resulted in an internal buffer being +allocated with too small of a size, leading to a heap buffer overflow +when encoding an object header message into it. This has been fixed +by checking the expected number of deserialized object header chunks +against the actual value as chunks are being deserialized. 
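
/*
 * Illustrative sketch (not HDF5 code) of the check the fix below introduces:
 * after each continuation chunk is loaded, verify at runtime that exactly one
 * new chunk appeared, instead of assuming it via assert(). header_t and
 * load_chunk() are hypothetical names.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct {
    size_t nchunks;   /* chunks deserialized so far */
} header_t;

/* A corrupt continuation that points back at an existing chunk leaves
 * nchunks unchanged; a well-formed one adds exactly one chunk. */
static int load_chunk(header_t *oh, int corrupt)
{
    if (!corrupt)
        oh->nchunks++;
    return 0;
}

static int read_continuations(header_t *oh, size_t nmsgs, int corrupt)
{
    for (size_t i = 0; i < nmsgs; i++) {
        size_t before = oh->nchunks;

        if (load_chunk(oh, corrupt) < 0)
            return -1;
        if (oh->nchunks != before + 1)   /* runtime check, not an assert */
            return -1;
    }
    return 0;
}

int main(void)
{
    header_t oh = { 1 };   /* chunk 0 already loaded */

    if (read_continuations(&oh, 2, /*corrupt=*/1) < 0)
        fprintf(stderr, "incorrect number of chunks after deserializing\n");
    return 0;
}
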
+ +Fixes CVE-2025-6816, CVE-2025-6856, CVE-2025-2923, CVE-2025-6818 + +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5829.patch +--- + src/H5Oint.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/src/H5Oint.c b/src/H5Oint.c +index 460ccb6..555e6a1 100644 +--- a/src/H5Oint.c ++++ b/src/H5Oint.c +@@ -1013,10 +1013,9 @@ H5O_protect(const H5O_loc_t *loc, unsigned prot_flags, bool pin_all_chunks) + */ + curr_msg = 0; + while (curr_msg < cont_msg_info.nmsgs) { +- H5O_chunk_proxy_t *chk_proxy; /* Proxy for chunk, to bring it into memory */ +-#ifndef NDEBUG +- size_t chkcnt = oh->nchunks; /* Count of chunks (for sanity checking) */ +-#endif /* NDEBUG */ ++ H5O_chunk_proxy_t *chk_proxy; /* Proxy for chunk, to bring it into memory */ ++ unsigned chunkno; /* Chunk number for chunk proxy */ ++ size_t chkcnt = oh->nchunks; /* Count of chunks (for sanity checking) */ + + /* Bring the chunk into the cache */ + /* (which adds to the object header) */ +@@ -1029,14 +1028,20 @@ H5O_protect(const H5O_loc_t *loc, unsigned prot_flags, bool pin_all_chunks) + + /* Sanity check */ + assert(chk_proxy->oh == oh); +- assert(chk_proxy->chunkno == chkcnt); +- assert(oh->nchunks == (chkcnt + 1)); ++ ++ chunkno = chk_proxy->chunkno; + + /* Release the chunk from the cache */ + if (H5AC_unprotect(loc->file, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, chk_proxy, + H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to release object header chunk"); + ++ if (chunkno != chkcnt) ++ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "incorrect chunk number for object header chunk"); ++ if (oh->nchunks != (chkcnt + 1)) ++ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, ++ "incorrect number of chunks after deserializing object header chunk"); ++ + /* Advance to next continuation message */ + curr_msg++; + } /* end while */ +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-6857.patch b/SPECS/hdf5/CVE-2025-6857.patch new file mode 100644 index 00000000000..80af89cdb6d --- /dev/null +++ b/SPECS/hdf5/CVE-2025-6857.patch @@ -0,0 +1,168 @@ +From 852a9f87849833074683038ea47ddf3cb5e10311 Mon Sep 17 00:00:00 2001 +From: Jordan Henderson +Date: Wed, 10 Sep 2025 16:41:49 -0500 +Subject: [PATCH] Fix CVE-2025-6857 + +Add additional checks for v1 B-tree corruption +Upstream Patch Reference :https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5799.patch +--- + src/H5B.c | 82 +++++++++++++++++++++++++++++++++++++++++++--------- + src/H5Bpkg.h | 6 ++++ + 2 files changed, 74 insertions(+), 14 deletions(-) + +diff --git a/src/H5B.c b/src/H5B.c +index 30e39ef..082ad98 100644 +--- a/src/H5B.c ++++ b/src/H5B.c +@@ -140,6 +140,8 @@ typedef struct H5B_ins_ud_t { + /********************/ + /* Local Prototypes */ + /********************/ ++static herr_t H5B_find_helper(H5F_t *f, const H5B_class_t *type, haddr_t addr, int exp_level, bool *found, ++ void *udata); + static H5B_ins_t H5B__insert_helper(H5F_t *f, H5B_ins_ud_t *bt_ud, const H5B_class_t *type, uint8_t *lt_key, + bool *lt_key_changed, uint8_t *md_key, void *udata, uint8_t *rt_key, + bool *rt_key_changed, H5B_ins_ud_t *split_bt_ud /*out*/); +@@ -252,26 +254,67 @@ done: + } /* end H5B_create() */ + + /*------------------------------------------------------------------------- +- * Function: H5B_find ++ * Function: H5B_find + * +- * Purpose: Locate the specified information in a B-tree and return +- * that information by filling in fields of the caller-supplied +- * UDATA pointer 
depending on the type of leaf node +- * requested. The UDATA can point to additional data passed +- * to the key comparison function. ++ * Purpose: Locate the specified information in a B-tree and return ++ * that information by filling in fields of the ++ * caller-supplied UDATA pointer depending on the type of leaf ++ * node requested. The UDATA can point to additional data ++ * passed to the key comparison function. + * +- * Note: This function does not follow the left/right sibling +- * pointers since it assumes that all nodes can be reached +- * from the parent node. ++ * Note: This function does not follow the left/right sibling ++ * pointers since it assumes that all nodes can be reached ++ * from the parent node. + * +- * Return: Non-negative (true/false) on success (if found, values returned +- * through the UDATA argument). Negative on failure (if not found, +- * UDATA is undefined). ++ * Return: Non-negative (true/false) on success (if found, values ++ * returned through the UDATA argument). Negative on failure ++ * (if not found, UDATA is undefined). + * + *------------------------------------------------------------------------- + */ + herr_t + H5B_find(H5F_t *f, const H5B_class_t *type, haddr_t addr, bool *found, void *udata) ++{ ++ herr_t ret_value = SUCCEED; ++ ++ FUNC_ENTER_NOAPI(FAIL) ++ ++ /* ++ * Check arguments. ++ */ ++ assert(f); ++ assert(type); ++ assert(type->decode); ++ assert(type->cmp3); ++ assert(type->found); ++ assert(H5_addr_defined(addr)); ++ ++ if ((ret_value = H5B_find_helper(f, type, addr, H5B_UNKNOWN_NODELEVEL, found, udata)) < 0) ++ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key"); ++ ++done: ++ FUNC_LEAVE_NOAPI(ret_value) ++} /* end H5B_find() */ ++ ++/*------------------------------------------------------------------------- ++ * Function: H5B_find_helper ++ * ++ * Purpose: Recursive helper routine for H5B_find used to track node ++ * levels and attempt to detect B-tree corruption during ++ * lookups. ++ * ++ * Note: This function does not follow the left/right sibling ++ * pointers since it assumes that all nodes can be reached ++ * from the parent node. ++ * ++ * Return: Non-negative on success (if found, values returned through ++ * the UDATA argument). Negative on failure (if not found, ++ * UDATA is undefined). ++ * ++ *------------------------------------------------------------------------- ++ */ ++static herr_t ++H5B_find_helper(H5F_t *f, const H5B_class_t *type, haddr_t addr, int exp_level, bool *found, void *udata) + { + H5B_t *bt = NULL; + H5UC_t *rc_shared; /* Ref-counted shared info */ +@@ -306,6 +349,7 @@ H5B_find(H5F_t *f, const H5B_class_t *type, haddr_t addr, bool *found, void *uda + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; ++ cache_udata.exp_level = exp_level; + if (NULL == (bt = (H5B_t *)H5AC_protect(f, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node"); + +@@ -329,7 +373,17 @@ H5B_find(H5F_t *f, const H5B_class_t *type, haddr_t addr, bool *found, void *uda + assert(idx < bt->nchildren); + + if (bt->level > 0) { +- if ((ret_value = H5B_find(f, type, bt->child[idx], found, udata)) < 0) ++ /* Sanity check to catch the case where the current node points to ++ * itself and the current node was loaded with an expected node level ++ * of H5B_UNKNOWN_NODELEVEL, thus bypassing the expected node level ++ * check during deserialization and in the future if the node was ++ * cached. 
++ */ ++ if (bt->child[idx] == addr) ++ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "cyclic B-tree detected"); ++ ++ if ((ret_value = H5B_find_helper(f, type, bt->child[idx], (int)(bt->level - 1), found, udata)) < ++ 0) + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key in subtree"); + } /* end if */ + else { +@@ -343,7 +397,7 @@ done: + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release node"); + + FUNC_LEAVE_NOAPI(ret_value) +-} /* end H5B_find() */ ++} /* end H5B_find_helper() */ + + /*------------------------------------------------------------------------- + * Function: H5B__split +diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h +index d1ad647..f75e857 100644 +--- a/src/H5Bpkg.h ++++ b/src/H5Bpkg.h +@@ -39,6 +39,11 @@ + /* # of bits for node level: 1 byte */ + #define LEVEL_BITS 8 + ++/* Indicates that the level of the current node is unknown. When the level ++ * is known, it can be used to detect corrupted level during decoding ++ */ ++#define H5B_UNKNOWN_NODELEVEL -1 ++ + /****************************/ + /* Package Private Typedefs */ + /****************************/ +@@ -60,6 +65,7 @@ typedef struct H5B_t { + typedef struct H5B_cache_ud_t { + H5F_t *f; /* File that B-tree node is within */ + const struct H5B_class_t *type; /* Type of tree */ ++ int exp_level; /* Expected level of the current node */ + H5UC_t *rc_shared; /* Ref-counted shared info */ + } H5B_cache_ud_t; + +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-6858.patch b/SPECS/hdf5/CVE-2025-6858.patch new file mode 100644 index 00000000000..4266062f94f --- /dev/null +++ b/SPECS/hdf5/CVE-2025-6858.patch @@ -0,0 +1,32 @@ +From ff3d6722a91587daaaac82e78b25d26ad3f50172 Mon Sep 17 00:00:00 2001 +From: Binh-Minh +Date: Mon, 4 Aug 2025 03:10:29 -0400 +Subject: [PATCH] Fix reading bad size in the raw header continuation message + +This issue was reported in GH-5376 as a heap-use-after-free vulnerability in +one of the free lists. It appeared that the library came to this vulnerability +after it encountered an undetected reading of a bad value. The fuzzer now failed +with an appropriate error message. + +This considers addressing what GH-5376 reported. 
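
/*
 * Illustrative sketch (not HDF5 code) of the shape of the fix below: catch
 * the bad decoded value (a zero-sized continuation chunk) at decode time and
 * bail out through the cleanup path, so no later code acts on the stale
 * state. continuation_t and decode_continuation() are hypothetical names.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    uint64_t addr;
    uint64_t size;
} continuation_t;

static continuation_t *decode_continuation(const uint8_t *buf, size_t len)
{
    continuation_t *cont = NULL;

    if (len < 16)
        goto error;                       /* truncated input */
    if ((cont = calloc(1, sizeof(*cont))) == NULL)
        goto error;

    memcpy(&cont->addr, buf, 8);
    memcpy(&cont->size, buf + 8, 8);
    if (cont->size == 0)                  /* the bad value that used to slip through */
        goto error;

    return cont;

error:
    free(cont);                           /* release partial state on failure */
    return NULL;
}

int main(void)
{
    uint8_t corrupt[16] = { 0 };          /* encodes size == 0 */

    if (decode_continuation(corrupt, sizeof(corrupt)) == NULL)
        fprintf(stderr, "invalid continuation chunk size (0)\n");
    return 0;
}
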
+Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5710.patch +--- + src/H5Ocont.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/src/H5Ocont.c b/src/H5Ocont.c +index 180b115..4b18404 100644 +--- a/src/H5Ocont.c ++++ b/src/H5Ocont.c +@@ -103,6 +103,8 @@ H5O__cont_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_DECODE_LENGTH(f, p, cont->size); ++ if (cont->size == 0) ++ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid continuation chunk size (0)"); + + /* Set return value */ + ret_value = cont; +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-7067.patch b/SPECS/hdf5/CVE-2025-7067.patch new file mode 100644 index 00000000000..d4f8039bbd8 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-7067.patch @@ -0,0 +1,980 @@ +From 1bc11ba2952dd942cf637f5a2c748e8e9c4031a4 Mon Sep 17 00:00:00 2001 +From: Jordan Henderson +Date: Fri, 12 Sep 2025 21:58:17 -0500 +Subject: [PATCH] Unlink file free space section on failure to update data + structures + +When linking a file free space section into a free space manager's +internal data structures, the library previously wouldn't unlink +the free space section when it failed to update the free space +manager's internal data structures. This could eventually result +in a use-after-free issue due to the stale reference kept around. + +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5815.patch +https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5938.patch +--- + src/H5FScache.c | 2 +- + src/H5FSprivate.h | 2 +- + src/H5FSsection.c | 89 +++++++++++++++++++++++++++++++++++++++++----- + src/H5HFspace.c | 2 +- + src/H5MF.c | 58 ++++++++++++++++++++++-------- + src/H5MFpkg.h | 3 +- + src/H5MFsection.c | 24 +++++++++++-- + test/freespace.c | 90 ++++++++++++++++++++++++++--------------------- + test/mf.c | 32 ++++++++--------- + 9 files changed, 215 insertions(+), 87 deletions(-) + +diff --git a/src/H5FScache.c b/src/H5FScache.c +index 7f8edf6..2db500b 100644 +--- a/src/H5FScache.c ++++ b/src/H5FScache.c +@@ -1024,7 +1024,7 @@ H5FS__cache_sinfo_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED l + + /* Insert section in free space manager, unless requested not to */ + if (!(des_flags & H5FS_DESERIALIZE_NO_ADD)) +- if (H5FS_sect_add(udata->f, fspace, new_sect, H5FS_ADD_DESERIALIZING, udata) < 0) ++ if (H5FS_sect_add(udata->f, fspace, new_sect, H5FS_ADD_DESERIALIZING, udata, NULL) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, NULL, + "can't add section to free space manager"); + } /* end for */ +diff --git a/src/H5FSprivate.h b/src/H5FSprivate.h +index f917a25..954fd9e 100644 +--- a/src/H5FSprivate.h ++++ b/src/H5FSprivate.h +@@ -198,7 +198,7 @@ H5_DLL herr_t H5FS_free(H5F_t *f, H5FS_t *fspace, bool free_file_space); + + /* Free space section routines */ + H5_DLL herr_t H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *node, unsigned flags, +- void *op_data); ++ void *op_data, bool *merged_or_shrunk); + H5_DLL htri_t H5FS_sect_try_merge(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flags, + void *op_data); + H5_DLL htri_t H5FS_sect_try_extend(H5F_t *f, H5FS_t *fspace, haddr_t addr, hsize_t size, +diff --git a/src/H5FSsection.c b/src/H5FSsection.c +index 57022a2..6d9bca7 100644 +--- a/src/H5FSsection.c ++++ b/src/H5FSsection.c +@@ -1057,8 +1057,9 @@ done: + static 
herr_t + H5FS__sect_link(H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flags) + { +- const H5FS_section_class_t *cls; /* Class of section */ +- herr_t ret_value = SUCCEED; /* Return value */ ++ const H5FS_section_class_t *cls; /* Class of section */ ++ bool linked_sect = false; /* Was the section linked in? */ ++ herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + +@@ -1073,6 +1074,7 @@ H5FS__sect_link(H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flags) + /* Add section to size tracked data structures */ + if (H5FS__sect_link_size(fspace->sinfo, cls, sect) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't add section to size tracking data structures"); ++ linked_sect = true; + + /* Update rest of free space manager data structures for section addition */ + if (H5FS__sect_link_rest(fspace, cls, sect, flags) < 0) +@@ -1080,6 +1082,12 @@ H5FS__sect_link(H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flags) + "can't add section to non-size tracking data structures"); + + done: ++ if (ret_value < 0) { ++ if (linked_sect && H5FS__sect_unlink_size(fspace->sinfo, cls, sect) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, ++ "can't remove section from size tracking data structures"); ++ } ++ + FUNC_LEAVE_NOAPI(ret_value) + } /* H5FS__sect_link() */ + +@@ -1289,7 +1297,8 @@ done: + *------------------------------------------------------------------------- + */ + herr_t +-H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flags, void *op_data) ++H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flags, void *op_data, ++ bool *merged_or_shrunk) + { + H5FS_section_class_t *cls; /* Section's class */ + bool sinfo_valid = false; /* Whether the section info is valid */ +@@ -1310,6 +1319,9 @@ H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flag + assert(H5_addr_defined(sect->addr)); + assert(sect->size); + ++ if (merged_or_shrunk) ++ *merged_or_shrunk = false; ++ + /* Get a pointer to the section info */ + if (H5FS__sinfo_lock(f, fspace, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info"); +@@ -1336,9 +1348,12 @@ H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flag + /* (If section has been completely merged or shrunk away, 'sect' will + * be NULL at this point - QAK) + */ +- if (sect) ++ if (sect) { + if (H5FS__sect_link(fspace, sect, flags) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't insert free space section into skip list"); ++ } ++ else if (merged_or_shrunk) ++ *merged_or_shrunk = true; + + #ifdef H5FS_SINFO_DEBUG + fprintf(stderr, "%s: fspace->tot_space = %" PRIuHSIZE "\n", __func__, fspace->tot_space); +@@ -2306,11 +2321,15 @@ done: + herr_t + H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, haddr_t *fs_addr_ptr) + { +- hsize_t hdr_alloc_size; +- hsize_t sinfo_alloc_size; +- haddr_t sect_addr = HADDR_UNDEF; /* address of sinfo */ +- haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */ +- herr_t ret_value = SUCCEED; /* Return value */ ++ hsize_t hdr_alloc_size = 0; ++ hsize_t sinfo_alloc_size = 0; ++ haddr_t sect_addr = HADDR_UNDEF; /* address of sinfo */ ++ haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */ ++ bool allocated_header = false; /* Whether a free space header was allocated */ ++ bool inserted_header = false; /* Whether a free space header was inserted into the metadata cache */ ++ bool allocated_section = false; /* Whether a free space 
section was allocated */ ++ bool inserted_section = false; /* Whether a free space section was inserted into the metadata cache */ ++ herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT + +@@ -2359,10 +2378,12 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, haddr_t + /* Allocate space for the free space header */ + if (HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, hdr_alloc_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for free space header"); ++ allocated_header = true; + + /* Cache the new free space header (pinned) */ + if (H5AC_insert_entry(f, H5AC_FSPACE_HDR, fspace->addr, fspace, H5AC__PIN_ENTRY_FLAG) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space header to cache"); ++ inserted_header = true; + + *fs_addr_ptr = fspace->addr; + } +@@ -2388,6 +2409,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, haddr_t + /* allocate space for the section info */ + if (HADDR_UNDEF == (sect_addr = H5MF_alloc(f, H5FD_MEM_FSPACE_SINFO, sinfo_alloc_size))) + HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for section info"); ++ allocated_section = true; + + /* update fspace->alloc_sect_size and fspace->sect_addr to reflect + * the allocation +@@ -2429,6 +2451,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, haddr_t + */ + if (H5AC_insert_entry(f, H5AC_FSPACE_SINFO, sect_addr, fspace->sinfo, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sinfo to cache"); ++ inserted_section = true; + + /* We have changed the sinfo address -- Mark free space header dirty */ + if (H5AC_mark_entry_dirty(fspace) < 0) +@@ -2445,5 +2468,53 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, haddr_t + } /* end if */ + + done: ++ if (ret_value < 0) { ++ /* Remove the free space section that was inserted into the metadata cache, ++ * making sure to free the file space that was allocated for it as well. ++ * Avoid expunging the entry, as the information needs to be kept around ++ * until we finish trying to settle the metadata free space manager(s). ++ */ ++ if (allocated_section && (sect_addr == fspace->sect_addr)) { ++ assert(H5_addr_defined(fspace->sect_addr)); ++ ++ if (H5MF_xfree(f, H5FD_MEM_FSPACE_SINFO, sect_addr, sinfo_alloc_size) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to free free space sections"); ++ fspace->sect_addr = HADDR_UNDEF; ++ } ++ ++ if (inserted_section) { ++ if (H5AC_remove_entry(fspace->sinfo) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTEXPUNGE, FAIL, ++ "can't remove file free space section from cache"); ++ } ++ ++ /* Remove the free space header that was inserted into the metadata cache, ++ * making sure to free the file space that was allocated for it as well. ++ * Avoid expunging the entry, as the information needs to be kept around ++ * until we finish trying to settle the metadata free space manager(s). ++ */ ++ if (allocated_header) { ++ assert(H5_addr_defined(fspace->addr)); ++ ++ /* Free file space before removing entry from cache, as freeing the ++ * file space may depend on a valid cache pointer. 
++ */ ++ if (H5MF_xfree(f, H5FD_MEM_FSPACE_HDR, fspace->addr, hdr_alloc_size) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to free file free space header"); ++ fspace->addr = HADDR_UNDEF; ++ } ++ ++ if (inserted_header) { ++ if (H5AC_mark_entry_clean(fspace) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTMARKCLEAN, FAIL, ++ "can't mark file free space header as clean"); ++ if (H5AC_unpin_entry(fspace) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTUNPIN, FAIL, "can't unpin file free space header"); ++ if (H5AC_remove_entry(fspace) < 0) ++ HDONE_ERROR(H5E_FSPACE, H5E_CANTEXPUNGE, FAIL, ++ "can't remove file free space header from cache"); ++ } ++ } ++ + FUNC_LEAVE_NOAPI(ret_value) + } /* H5FS_vfd_alloc_hdr_and_section_info_if_needed() */ +diff --git a/src/H5HFspace.c b/src/H5HFspace.c +index 8c9f0c0..ea1b63f 100644 +--- a/src/H5HFspace.c ++++ b/src/H5HFspace.c +@@ -159,7 +159,7 @@ H5HF__space_add(H5HF_hdr_t *hdr, H5HF_free_section_t *node, unsigned flags) + udata.hdr = hdr; + + /* Add to the free space for the heap */ +- if (H5FS_sect_add(hdr->f, hdr->fspace, (H5FS_section_info_t *)node, flags, &udata) < 0) ++ if (H5FS_sect_add(hdr->f, hdr->fspace, (H5FS_section_info_t *)node, flags, &udata, NULL) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "can't add section to heap free space"); + + done: +diff --git a/src/H5MF.c b/src/H5MF.c +index 2de3e7a..a9c1cf5 100644 +--- a/src/H5MF.c ++++ b/src/H5MF.c +@@ -596,7 +596,8 @@ done: + *------------------------------------------------------------------------- + */ + herr_t +-H5MF__add_sect(H5F_t *f, H5FD_mem_t alloc_type, H5FS_t *fspace, H5MF_free_section_t *node) ++H5MF__add_sect(H5F_t *f, H5FD_mem_t alloc_type, H5FS_t *fspace, H5MF_free_section_t *node, ++ bool *merged_or_shrunk) + { + H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */ + H5AC_ring_t fsm_ring = H5AC_RING_INV; /* Ring of FSM */ +@@ -631,7 +632,8 @@ H5MF__add_sect(H5F_t *f, H5FD_mem_t alloc_type, H5FS_t *fspace, H5MF_free_sectio + __func__, node->sect_info.addr, node->sect_info.size); + #endif /* H5MF_ALLOC_DEBUG_MORE */ + /* Add the section */ +- if (H5FS_sect_add(f, fspace, (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata) < 0) ++ if (H5FS_sect_add(f, fspace, (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata, ++ merged_or_shrunk) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't re-add section to file free space"); + + done: +@@ -711,8 +713,11 @@ H5MF__find_sect(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5FS_t *fspace, h + #endif /* H5MF_ALLOC_DEBUG_MORE */ + + /* Re-add the section to the free-space manager */ +- if (H5MF__add_sect(f, alloc_type, fspace, node) < 0) ++ if (H5MF__add_sect(f, alloc_type, fspace, node, NULL) < 0) { ++ node->sect_info.addr -= size; ++ node->sect_info.size += size; + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't re-add section to file free space"); ++ } + } /* end else */ + } /* end if */ + +@@ -852,9 +857,10 @@ done: + static haddr_t + H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size) + { +- H5F_mem_page_t ptype; /* Free-space manager type */ +- H5MF_free_section_t *node = NULL; /* Free space section pointer */ +- haddr_t ret_value = HADDR_UNDEF; /* Return value */ ++ H5F_mem_page_t ptype; /* Free-space manager type */ ++ H5MF_free_section_t *node = NULL; /* Free space section pointer */ ++ bool section_merged_or_shrunk = false; /* Whether free space section was merged or shrunk away */ ++ haddr_t ret_value = HADDR_UNDEF; /* Return value */ + + FUNC_ENTER_PACKAGE + +@@ 
-900,9 +906,13 @@ H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size) + "can't initialize free space section"); + + /* Add the fragment to the large free-space manager */ +- if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[ptype], node) < 0) ++ if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[ptype], node, §ion_merged_or_shrunk) < ++ 0) { ++ if (section_merged_or_shrunk) ++ node = NULL; + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, + "can't re-add section to file free space"); ++ } + + node = NULL; + } /* end if */ +@@ -931,9 +941,13 @@ H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, HADDR_UNDEF, "can't initialize free space section"); + + /* Add the remaining space in the page to the manager */ +- if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[ptype], node) < 0) ++ if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[ptype], node, §ion_merged_or_shrunk) < ++ 0) { ++ if (section_merged_or_shrunk) ++ node = NULL; + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, + "can't re-add section to file free space"); ++ } + + node = NULL; + +@@ -1154,6 +1168,8 @@ H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size) + + /* If size of the freed section is larger than threshold, add it to the free space manager */ + if (size >= f->shared->fs_threshold) { ++ bool section_merged_or_shrunk = false; /* Whether free space section was merged or shrunk away */ ++ + assert(f->shared->fs_man[fs_type]); + + #ifdef H5MF_ALLOC_DEBUG_MORE +@@ -1161,8 +1177,12 @@ H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size) + #endif /* H5MF_ALLOC_DEBUG_MORE */ + + /* Add to the free space for the file */ +- if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[fs_type], node) < 0) ++ if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[fs_type], node, §ion_merged_or_shrunk) < 0) { ++ if (section_merged_or_shrunk) ++ node = NULL; + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't add section to file free space"); ++ } ++ + node = NULL; + + #ifdef H5MF_ALLOC_DEBUG_MORE +@@ -1316,7 +1336,7 @@ H5MF_try_extend(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size, hsi + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space section"); + + /* Add the fragment to the large-sized free-space manager */ +- if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[fs_type], node) < 0) ++ if (H5MF__add_sect(f, alloc_type, f->shared->fs_man[fs_type], node, NULL) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't re-add section to file free space"); + + node = NULL; +@@ -3059,8 +3079,13 @@ H5MF_settle_meta_data_fsm(H5F_t *f, bool *fsm_settled) + assert(sm_fssinfo_fs_type > H5F_MEM_PAGE_DEFAULT); + assert(sm_fssinfo_fs_type < H5F_MEM_PAGE_LARGE_SUPER); + +- assert(!H5_addr_defined(f->shared->fs_addr[sm_fshdr_fs_type])); +- assert(!H5_addr_defined(f->shared->fs_addr[sm_fssinfo_fs_type])); ++ if (H5_addr_defined(f->shared->fs_addr[sm_fshdr_fs_type])) ++ HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, FAIL, ++ "small free space header block manager should not have had file space allocated"); ++ if (H5_addr_defined(f->shared->fs_addr[sm_fssinfo_fs_type])) ++ HGOTO_ERROR( ++ H5E_FSPACE, H5E_BADVALUE, FAIL, ++ "small free space serialized section manager should not have had file space allocated"); + + /* Note that in most cases, sm_hdr_fspace will equal sm_sinfo_fspace. 
*/ + sm_hdr_fspace = f->shared->fs_man[sm_fshdr_fs_type]; +@@ -3078,8 +3103,13 @@ H5MF_settle_meta_data_fsm(H5F_t *f, bool *fsm_settled) + assert(lg_fssinfo_fs_type >= H5F_MEM_PAGE_LARGE_SUPER); + assert(lg_fssinfo_fs_type < H5F_MEM_PAGE_NTYPES); + +- assert(!H5_addr_defined(f->shared->fs_addr[lg_fshdr_fs_type])); +- assert(!H5_addr_defined(f->shared->fs_addr[lg_fssinfo_fs_type])); ++ if (H5_addr_defined(f->shared->fs_addr[lg_fshdr_fs_type])) ++ HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, FAIL, ++ "large free space header block manager should not have had file space allocated"); ++ if (H5_addr_defined(f->shared->fs_addr[lg_fssinfo_fs_type])) ++ HGOTO_ERROR( ++ H5E_FSPACE, H5E_BADVALUE, FAIL, ++ "large free space serialized section manager should not have had file space allocated"); + + /* Note that in most cases, lg_hdr_fspace will equal lg_sinfo_fspace. */ + lg_hdr_fspace = f->shared->fs_man[lg_fshdr_fs_type]; +diff --git a/src/H5MFpkg.h b/src/H5MFpkg.h +index 32d04d3..e6fd7b8 100644 +--- a/src/H5MFpkg.h ++++ b/src/H5MFpkg.h +@@ -176,7 +176,8 @@ H5_DLLVAR const H5FS_section_class_t H5MF_FSPACE_SECT_CLS_LARGE[1]; + H5_DLL herr_t H5MF__open_fstype(H5F_t *f, H5F_mem_page_t type); + H5_DLL herr_t H5MF__start_fstype(H5F_t *f, H5F_mem_page_t type); + H5_DLL htri_t H5MF__find_sect(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5FS_t *fspace, haddr_t *addr); +-H5_DLL herr_t H5MF__add_sect(H5F_t *f, H5FD_mem_t alloc_type, H5FS_t *fspace, H5MF_free_section_t *node); ++H5_DLL herr_t H5MF__add_sect(H5F_t *f, H5FD_mem_t alloc_type, H5FS_t *fspace, H5MF_free_section_t *node, ++ bool *merged_or_shrunk); + H5_DLL void H5MF__alloc_to_fs_type(H5F_shared_t *f_sh, H5FD_mem_t alloc_type, hsize_t size, + H5F_mem_page_t *fs_type); + +diff --git a/src/H5MFsection.c b/src/H5MFsection.c +index a3b3988..04fcdce 100644 +--- a/src/H5MFsection.c ++++ b/src/H5MFsection.c +@@ -360,7 +360,13 @@ H5MF__sect_simple_can_merge(const H5FS_section_info_t *_sect1, const H5FS_sectio + assert(sect1); + assert(sect2); + assert(sect1->sect_info.type == sect2->sect_info.type); /* Checks "MERGE_SYM" flag */ +- assert(H5_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr)); ++ ++ /* ++ * The library currently doesn't attempt to detect duplicate or overlapping ++ * free space sections being inserted into the free space managers. Until it ++ * does so, this assertion must be left out. ++ */ ++ /* assert(H5_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr)); */ + + /* Check if second section adjoins first section */ + ret_value = H5_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr); +@@ -663,7 +669,13 @@ H5MF__sect_small_can_merge(const H5FS_section_info_t *_sect1, const H5FS_section + assert(sect1); + assert(sect2); + assert(sect1->sect_info.type == sect2->sect_info.type); /* Checks "MERGE_SYM" flag */ +- assert(H5_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr)); ++ ++ /* ++ * The library currently doesn't attempt to detect duplicate or overlapping ++ * free space sections being inserted into the free space managers. Until it ++ * does so, this assertion must be left out. 
++ */ ++ /* assert(H5_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr)); */ + + /* Check if second section adjoins first section */ + ret_value = H5_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr); +@@ -769,7 +781,13 @@ H5MF__sect_large_can_merge(const H5FS_section_info_t *_sect1, const H5FS_section + assert(sect1); + assert(sect2); + assert(sect1->sect_info.type == sect2->sect_info.type); /* Checks "MERGE_SYM" flag */ +- assert(H5_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr)); ++ ++ /* ++ * The library currently doesn't attempt to detect duplicate or overlapping ++ * free space sections being inserted into the free space managers. Until it ++ * does so, this assertion must be left out. ++ */ ++ /* assert(H5_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr)); */ + + ret_value = H5_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr); + +diff --git a/test/freespace.c b/test/freespace.c +index 2d81217..074f201 100644 +--- a/test/freespace.c ++++ b/test/freespace.c +@@ -638,7 +638,7 @@ test_fs_sect_add(hid_t fapl) + init_sect_node(sect_node, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -704,7 +704,7 @@ test_fs_sect_add(hid_t fapl) + init_sect_node(sect_node, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, 0, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, 0, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -781,7 +781,8 @@ test_fs_sect_add(hid_t fapl) + init_sect_node(sect_node, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &can_shrink, NULL) < ++ 0) + FAIL_STACK_ERROR; + + /* nothing in free-space */ +@@ -851,7 +852,8 @@ test_fs_sect_add(hid_t fapl) + init_sect_node(sect_node, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_DESERIALIZING, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_DESERIALIZING, &can_shrink, NULL) < ++ 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1007,7 +1009,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR60, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1027,7 +1029,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node3, (haddr_t)(TEST_SECT_ADDR200), (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, 
NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node3->sect_info.size; +@@ -1046,7 +1048,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node2->sect_info.size; +@@ -1065,7 +1067,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node4, (haddr_t)TEST_SECT_ADDR300, (hsize_t)TEST_SECT_SIZE80, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node4, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node4, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node4->sect_info.size; +@@ -1134,7 +1136,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR60, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1154,7 +1156,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR200, (hsize_t)TEST_SECT_SIZE80, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node2->sect_info.size; +@@ -1213,7 +1215,7 @@ test_fs_sect_find(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR60, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1361,7 +1363,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1381,7 +1383,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* section B & C are merged */ +@@ -1399,7 +1401,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node3, (haddr_t)TEST_SECT_ADDR60, (hsize_t)TEST_SECT_SIZE10, TEST_FSPACE_SECT_TYPE, + 
H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* section A is merged with the merged section of B & C */ +@@ -1417,7 +1419,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node4, (haddr_t)TEST_SECT_ADDR150, (hsize_t)TEST_SECT_SIZE80, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node4, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node4, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* section D is merged with the merged section of A & B & C */ +@@ -1490,7 +1492,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1510,7 +1512,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* section A & B are not merged because H5FS_CLS_SEPAR_OBJ is set */ +@@ -1593,7 +1595,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR60, (hsize_t)TEST_SECT_SIZE10, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1613,7 +1615,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE30, + TEST_FSPACE_SECT_TYPE_NEW, H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* sections A & B are not merged because H5FS_CLS_MERGE_SYM is set & section class type is different */ +@@ -1633,7 +1635,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node3, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, + TEST_FSPACE_SECT_TYPE_NEW, H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* sections B & C are merged because H5FS_CLS_MERGE_SYM is set & section class type is the same */ +@@ -1651,7 +1653,7 @@ test_fs_sect_merge(hid_t fapl) + init_sect_node(sect_node4, (haddr_t)TEST_SECT_ADDR150, (hsize_t)TEST_SECT_SIZE80, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node4, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, 
(H5FS_section_info_t *)sect_node4, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* +@@ -1840,7 +1842,8 @@ test_fs_sect_shrink(hid_t fapl) + TEST_FSPACE_SECT_TYPE_NEW, H5FS_SECT_LIVE); + + can_shrink = false; +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink, ++ NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1875,7 +1878,8 @@ test_fs_sect_shrink(hid_t fapl) + H5FS_SECT_LIVE); + + can_shrink = false; +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink, ++ NULL) < 0) + FAIL_STACK_ERROR; + + /* should have nothing in free-space */ +@@ -1941,7 +1945,8 @@ test_fs_sect_shrink(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink, ++ NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -1961,7 +1966,8 @@ test_fs_sect_shrink(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &can_shrink, ++ NULL) < 0) + FAIL_STACK_ERROR; + + /* free-space should be the same since section B is shrunk */ +@@ -2041,7 +2047,8 @@ test_fs_sect_shrink(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &can_shrink, ++ NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -2061,7 +2068,8 @@ test_fs_sect_shrink(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &can_shrink) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &can_shrink, ++ NULL) < 0) + FAIL_STACK_ERROR; + + /* section A & B are merged and then strunk, so there is nothing in free-space */ +@@ -2183,7 +2191,7 @@ test_fs_sect_change_class(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR60, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -2203,7 +2211,7 @@ test_fs_sect_change_class(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, + TEST_FSPACE_SECT_TYPE_NONE, H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, 
(H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += TEST_SECT_SIZE50; +@@ -2287,7 +2295,7 @@ test_fs_sect_change_class(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE30, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* +@@ -2299,7 +2307,7 @@ test_fs_sect_change_class(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE50, + TEST_FSPACE_SECT_TYPE_NONE, H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* +@@ -2311,7 +2319,7 @@ test_fs_sect_change_class(hid_t fapl) + init_sect_node(sect_node3, (haddr_t)TEST_SECT_ADDR200, (hsize_t)TEST_SECT_SIZE80, + TEST_FSPACE_SECT_TYPE_NONE, H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node3, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* change the class of B to A's class */ +@@ -2471,7 +2479,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE5, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -2491,7 +2499,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE40, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node2->sect_info.size; +@@ -2548,7 +2556,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE5, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -2568,7 +2576,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE40, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node2->sect_info.size; +@@ -2622,7 +2630,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE5, 
TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -2642,7 +2650,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE40, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node2->sect_info.size; +@@ -2697,7 +2705,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node1, (haddr_t)TEST_SECT_ADDR70, (hsize_t)TEST_SECT_SIZE5, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(frspace_state_t)); +@@ -2717,7 +2725,7 @@ test_fs_sect_extend(hid_t fapl) + init_sect_node(sect_node2, (haddr_t)TEST_SECT_ADDR100, (hsize_t)TEST_SECT_SIZE40, TEST_FSPACE_SECT_TYPE, + H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + + state.tot_space += sect_node2->sect_info.size; +@@ -2828,7 +2836,7 @@ test_fs_sect_iterate(hid_t fapl) + sect_size = (unsigned)((i - 1) % 9) + 1; + init_sect_node(sect_node, (haddr_t)i * 10, (hsize_t)sect_size, TEST_FSPACE_SECT_TYPE, H5FS_SECT_LIVE); + +- if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, NULL) < 0) ++ if (H5FS_sect_add(f, frsp, (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, NULL, NULL) < 0) + FAIL_STACK_ERROR; + } /* end for */ + +diff --git a/test/mf.c b/test/mf.c +index e400348..8ed6871 100644 +--- a/test/mf.c ++++ b/test/mf.c +@@ -1222,7 +1222,7 @@ test_mf_fs_alloc_free(hid_t fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1300,7 +1300,7 @@ test_mf_fs_alloc_free(hid_t fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1376,7 +1376,7 @@ test_mf_fs_alloc_free(hid_t fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + 
FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1552,7 +1552,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node1 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1581,7 +1581,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node2 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50); + + /* Add section B to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2, NULL)) + FAIL_STACK_ERROR; + + state.tot_space += TBLOCK_SIZE50; +@@ -1662,7 +1662,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node1 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1691,7 +1691,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node2 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50); + + /* Add section B to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2, NULL)) + FAIL_STACK_ERROR; + + state.tot_space += TBLOCK_SIZE50; +@@ -1767,7 +1767,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node1 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1796,7 +1796,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node2 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50); + + /* Add section B to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2, NULL)) + FAIL_STACK_ERROR; + + state.tot_space += TBLOCK_SIZE50; +@@ -1873,7 +1873,7 @@ test_mf_fs_extend(hid_t fapl) + H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)(TBLOCK_SIZE30 - 10)); + + /* Add section A of size=20 to free-space */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -1902,7 +1902,7 @@ test_mf_fs_extend(hid_t fapl) + sect_node2 = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50); + + /* Add section B to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, 
f->shared->fs_man[H5FD_MEM_SUPER], sect_node2, NULL)) + FAIL_STACK_ERROR; + + state.tot_space += TBLOCK_SIZE50; +@@ -2057,7 +2057,7 @@ test_mf_fs_absorb(const char *driver_name, hid_t fapl) + H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)(ma_addr + ma_size), (hsize_t)TBLOCK_SIZE2048); + + /* Add a section to free-space that adjoins end of the aggregator */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + /* Verify that the section did absorb the aggregator */ +@@ -2117,7 +2117,7 @@ test_mf_fs_absorb(const char *driver_name, hid_t fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)addr, (hsize_t)TBLOCK_SIZE30); + + /* When adding, meta_aggr is absorbed onto the end of the section */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + /* Verify that the section did absorb the aggregator */ +@@ -4183,7 +4183,7 @@ test_mf_align_fs(const char *driver_name, hid_t fapl, hid_t new_fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)alignment, (hsize_t)TBLOCK_SIZE50); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -4247,7 +4247,7 @@ test_mf_align_fs(const char *driver_name, hid_t fapl, hid_t new_fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE8000); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +@@ -4334,7 +4334,7 @@ test_mf_align_fs(const char *driver_name, hid_t fapl, hid_t new_fapl) + sect_node = H5MF__sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE700); + + /* Add section A to free-space manager */ +- if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node)) ++ if (H5MF__add_sect(f, H5FD_MEM_SUPER, f->shared->fs_man[H5FD_MEM_SUPER], sect_node, NULL)) + FAIL_STACK_ERROR; + + memset(&state, 0, sizeof(H5FS_stat_t)); +-- +2.45.4 + diff --git a/SPECS/hdf5/CVE-2025-7068.patch b/SPECS/hdf5/CVE-2025-7068.patch new file mode 100644 index 00000000000..4be2ae96ff2 --- /dev/null +++ b/SPECS/hdf5/CVE-2025-7068.patch @@ -0,0 +1,309 @@ +From 58f5a6bc06bd3ad0a60bc12551e2da4f5c0aa65d Mon Sep 17 00:00:00 2001 +From: Jordan Henderson +Date: Sat, 13 Sep 2025 19:51:59 -0500 +Subject: [PATCH] Fix issue with handling of failure during discard of metadata + cache entries + +When discarding a metadata cache entry after flushing it, errors +during the discard process could cause the library to skip calling +the 'free_icr' callback for the entry. This could result in resource +leaks and the inability of the cache to be fully flushed and closed +due to issues such as pinned entries remaining in the cache. This +has been fixed by noting errors during the discard process, but +attempting to fully free a cache entry before signalling that an +error has occurred. 
+ +Fixes CVE-2025-7068 +Upstream Patch Reference: https://patch-diff.githubusercontent.com/raw/HDFGroup/hdf5/pull/5817.patch +--- + src/H5Centry.c | 259 ++++++++++++++++++++++++++++--------------------- + 1 file changed, 148 insertions(+), 111 deletions(-) + +diff --git a/src/H5Centry.c b/src/H5Centry.c +index 13a0b8d..8542396 100644 +--- a/src/H5Centry.c ++++ b/src/H5Centry.c +@@ -63,6 +63,9 @@ static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *en + static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, bool update_rp); + static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, bool update_rp); + static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); ++static herr_t H5C__discard_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, ++ bool destroy_entry, bool free_file_space, ++ bool suppress_image_entry_frees); + static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, bool actual); + static void *H5C__load_entry(H5F_t *f, + #ifdef H5_HAVE_PARALLEL +@@ -753,118 +756,13 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) + * Now discard the entry if appropriate. + */ + if (destroy) { +- /* Sanity check */ +- assert(0 == entry_ptr->flush_dep_nparents); +- +- /* if both suppress_image_entry_frees and entry_ptr->include_in_image +- * are true, simply set entry_ptr->image_ptr to NULL, as we have +- * another pointer to the buffer in an instance of H5C_image_entry_t +- * in cache_ptr->image_entries. +- * +- * Otherwise, free the buffer if it exists. +- */ +- if (suppress_image_entry_frees && entry_ptr->include_in_image) +- entry_ptr->image_ptr = NULL; +- else if (entry_ptr->image_ptr != NULL) +- entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); +- +- /* If the entry is not a prefetched entry, verify that the flush +- * dependency parents addresses array has been transferred. +- * +- * If the entry is prefetched, the free_isr routine will dispose of +- * the flush dependency parents addresses array if necessary. +- */ +- if (!entry_ptr->prefetched) { +- assert(0 == entry_ptr->fd_parent_count); +- assert(NULL == entry_ptr->fd_parent_addrs); +- } /* end if */ +- +- /* Check whether we should free the space in the file that +- * the entry occupies +- */ +- if (free_file_space) { +- hsize_t fsf_size; +- +- /* Sanity checks */ +- assert(H5_addr_defined(entry_ptr->addr)); +- assert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr)); +-#ifndef NDEBUG +- { +- size_t curr_len; +- +- /* Get the actual image size for the thing again */ +- entry_ptr->type->image_len((void *)entry_ptr, &curr_len); +- assert(curr_len == entry_ptr->size); +- } +-#endif +- +- /* If the file space free size callback is defined, use +- * it to get the size of the block of file space to free. +- * Otherwise use entry_ptr->size. 
+- */ +- if (entry_ptr->type->fsf_size) { +- if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0) +- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size"); +- } /* end if */ +- else /* no file space free size callback -- use entry size */ +- fsf_size = entry_ptr->size; +- +- /* Release the space on disk */ +- if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0) +- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry"); +- } /* end if ( free_file_space ) */ +- +- /* Reset the pointer to the cache the entry is within. -QAK */ +- entry_ptr->cache_ptr = NULL; +- +- /* increment entries_removed_counter and set +- * last_entry_removed_ptr. As we are likely abuut to +- * free the entry, recall that last_entry_removed_ptr +- * must NEVER be dereferenced. +- * +- * Recall that these fields are maintained to allow functions +- * that perform scans of lists of entries to detect the +- * unexpected removal of entries (via expunge, eviction, +- * or take ownership at present), so that they can re-start +- * their scans if necessary. +- * +- * Also check if the entry we are watching for removal is being +- * removed (usually the 'next' entry for an iteration) and reset +- * it to indicate that it was removed. +- */ +- cache_ptr->entries_removed_counter++; +- cache_ptr->last_entry_removed_ptr = entry_ptr; +- +- if (entry_ptr == cache_ptr->entry_watched_for_removal) +- cache_ptr->entry_watched_for_removal = NULL; +- +- /* Check for actually destroying the entry in memory */ +- /* (As opposed to taking ownership of it) */ +- if (destroy_entry) { +- if (entry_ptr->is_dirty) { +- /* Reset dirty flag */ +- entry_ptr->is_dirty = false; +- +- /* If the entry's type has a 'notify' callback send a +- * 'entry cleaned' notice now that the entry is fully +- * integrated into the cache. +- */ +- if (entry_ptr->type->notify && +- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) +- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, +- "can't notify client about entry dirty flag cleared"); +- } /* end if */ +- +- /* verify that the image has been freed */ +- assert(entry_ptr->image_ptr == NULL); ++ /* Make sure one of either `destroy_entry` or `take_ownership` are true */ ++ assert(destroy_entry != take_ownership); + +- if (entry_ptr->type->free_icr((void *)entry_ptr) < 0) +- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed"); +- } /* end if */ +- else { +- assert(take_ownership); +- } /* end else */ +- } /* if (destroy) */ ++ if (H5C__discard_single_entry(f, cache_ptr, entry_ptr, destroy_entry, free_file_space, ++ suppress_image_entry_frees) < 0) ++ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't discard cache entry"); ++ } + + /* Check if we have to update the page buffer with cleared entries + * so it doesn't go out of date +@@ -891,6 +789,145 @@ done: + FUNC_LEAVE_NOAPI(ret_value) + } /* H5C__flush_single_entry() */ + ++/*------------------------------------------------------------------------- ++ * Function: H5C__discard_single_entry ++ * ++ * Purpose: Helper routine to discard a cache entry, freeing as much ++ * of the relevant file space and data structures as possible ++ * along the way. ++ * ++ * Return: FAIL if error is detected, SUCCEED otherwise. 
++ * ++ *------------------------------------------------------------------------- ++ */ ++static herr_t ++H5C__discard_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, bool destroy_entry, ++ bool free_file_space, bool suppress_image_entry_frees) ++{ ++ herr_t ret_value = SUCCEED; ++ ++ FUNC_ENTER_PACKAGE ++ ++ assert(f); ++ assert(cache_ptr); ++ assert(entry_ptr); ++ ++ /* Sanity check */ ++ assert(0 == entry_ptr->flush_dep_nparents); ++ ++ /* if both suppress_image_entry_frees and entry_ptr->include_in_image ++ * are true, simply set entry_ptr->image_ptr to NULL, as we have ++ * another pointer to the buffer in an instance of H5C_image_entry_t ++ * in cache_ptr->image_entries. ++ * ++ * Otherwise, free the buffer if it exists. ++ */ ++ if (suppress_image_entry_frees && entry_ptr->include_in_image) ++ entry_ptr->image_ptr = NULL; ++ else if (entry_ptr->image_ptr != NULL) ++ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); ++ ++ /* If the entry is not a prefetched entry, verify that the flush ++ * dependency parents addresses array has been transferred. ++ * ++ * If the entry is prefetched, the free_icr routine will dispose of ++ * the flush dependency parents addresses array if necessary. ++ */ ++ if (!entry_ptr->prefetched) { ++ assert(0 == entry_ptr->fd_parent_count); ++ assert(NULL == entry_ptr->fd_parent_addrs); ++ } /* end if */ ++ ++ /* Check whether we should free the space in the file that ++ * the entry occupies ++ */ ++ if (free_file_space) { ++ hsize_t fsf_size; ++ ++ /* Sanity checks */ ++ assert(H5_addr_defined(entry_ptr->addr)); ++ assert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr)); ++#ifndef NDEBUG ++ { ++ size_t curr_len; ++ ++ /* Get the actual image size for the thing again */ ++ entry_ptr->type->image_len((void *)entry_ptr, &curr_len); ++ assert(curr_len == entry_ptr->size); ++ } ++#endif ++ ++ /* If the file space free size callback is defined, use ++ * it to get the size of the block of file space to free. ++ * Otherwise use entry_ptr->size. ++ */ ++ if (entry_ptr->type->fsf_size) { ++ if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0) ++ /* Note error but keep going */ ++ HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size"); ++ } /* end if */ ++ else /* no file space free size callback -- use entry size */ ++ fsf_size = entry_ptr->size; ++ ++ /* Release the space on disk */ ++ if ((ret_value >= 0) && H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0) ++ /* Note error but keep going */ ++ HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry"); ++ } /* end if ( free_file_space ) */ ++ ++ /* Reset the pointer to the cache the entry is within. -QAK */ ++ entry_ptr->cache_ptr = NULL; ++ ++ /* increment entries_removed_counter and set ++ * last_entry_removed_ptr. As we are likely about to ++ * free the entry, recall that last_entry_removed_ptr ++ * must NEVER be dereferenced. ++ * ++ * Recall that these fields are maintained to allow functions ++ * that perform scans of lists of entries to detect the ++ * unexpected removal of entries (via expunge, eviction, ++ * or take ownership at present), so that they can re-start ++ * their scans if necessary. ++ * ++ * Also check if the entry we are watching for removal is being ++ * removed (usually the 'next' entry for an iteration) and reset ++ * it to indicate that it was removed. 
++ */ ++ cache_ptr->entries_removed_counter++; ++ cache_ptr->last_entry_removed_ptr = entry_ptr; ++ ++ if (entry_ptr == cache_ptr->entry_watched_for_removal) ++ cache_ptr->entry_watched_for_removal = NULL; ++ ++ /* Check for actually destroying the entry in memory */ ++ /* (As opposed to taking ownership of it) */ ++ if (destroy_entry) { ++ if (entry_ptr->is_dirty) { ++ /* Reset dirty flag */ ++ entry_ptr->is_dirty = false; ++ ++ /* If the entry's type has a 'notify' callback send a ++ * 'entry cleaned' notice now that the entry is fully ++ * integrated into the cache. ++ */ ++ if (entry_ptr->type->notify && ++ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) ++ /* Note error but keep going */ ++ HDONE_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, ++ "can't notify client about entry dirty flag cleared"); ++ } /* end if */ ++ ++ /* verify that the image has been freed */ ++ assert(entry_ptr->image_ptr == NULL); ++ ++ if (entry_ptr->type->free_icr((void *)entry_ptr) < 0) ++ /* Note error but keep going */ ++ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed"); ++ } /* end if */ ++ ++ FUNC_LEAVE_NOAPI(ret_value) ++} /* end H5C__discard_single_entry() */ ++ + /*------------------------------------------------------------------------- + * Function: H5C__verify_len_eoa + * +-- +2.45.4 + diff --git a/SPECS/hdf5/hdf5.signatures.json b/SPECS/hdf5/hdf5.signatures.json index c8b6c02e293..4b011a64eed 100644 --- a/SPECS/hdf5/hdf5.signatures.json +++ b/SPECS/hdf5/hdf5.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { "h5comp": "d0d40ba5b894f9fa1e230cbf123120243cb3aa58c85fa563eb88742d97744c2b", - "hdf5-1.14.4.3.tar.gz": "019ac451d9e1cf89c0482ba2a06f07a46166caf23f60fea5ef3c37724a318e03" + "hdf5-1.14.6.tar.gz": "e4defbac30f50d64e1556374aa49e574417c9e72c6b1de7a4ff88c4b1bea6e9b" } } diff --git a/SPECS/hdf5/hdf5.spec b/SPECS/hdf5/hdf5.spec index 1d66ea9d277..d3a531b9743 100644 --- a/SPECS/hdf5/hdf5.spec +++ b/SPECS/hdf5/hdf5.spec @@ -11,21 +11,34 @@ %endif Summary: A general purpose library and file format for storing scientific data Name: hdf5 -Version: 1.14.4.3 +Version: 1.14.6 Release: 1%{?dist} License: BSD Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://portal.hdfgroup.org/display/HDF5/HDF5 -Source0: https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.14/hdf5-1.14.4/src/hdf5-1.14.4-3.tar.gz#/%{name}-%{version}.tar.gz +Source0: https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_6/downloads/%{name}-%{version}.tar.gz Source1: h5comp -Patch3: hdf5-build.patch +Patch0: hdf5-build.patch # Remove Fedora build flags from h5cc/h5c++/h5fc # https://bugzilla.redhat.com/show_bug.cgi?id=1794625 -Patch5: hdf5-wrappers.patch +Patch1: hdf5-wrappers.patch +Patch2: CVE-2025-2153.patch +Patch3: CVE-2025-2310.patch +Patch4: CVE-2025-2914.patch +Patch5: CVE-2025-2924.patch +Patch6: CVE-2025-2925.patch +Patch7: CVE-2025-2926.patch +Patch8: CVE-2025-44905.patch +Patch9: CVE-2025-6269.patch +Patch10: CVE-2025-6750.patch +Patch11: CVE-2025-6816.patch +Patch12: CVE-2025-6857.patch +Patch13: CVE-2025-6858.patch +Patch14: CVE-2025-7067.patch +Patch15: CVE-2025-7068.patch -# For patches/rpath # For patches/rpath BuildRequires: automake # Needed for mpi tests @@ -130,7 +143,7 @@ HDF5 parallel openmpi static libraries %prep -%autosetup -n %{name}-1.14.4-3 -p1 +%autosetup -p1 # Force shared by default for compiler wrappers (bug #1266645) sed -i -e '/^STATIC_AVAILABLE=/s/=.*/=no/' */*/h5[cf]*.in @@ -221,8 +234,6 @@ find %{buildroot} -type f -name "*.la" -delete -print 
mkdir -p %{buildroot}%{_libdir}/$mpi/hdf5/plugin module purge done -#Fixup example permissions -find %{buildroot}%{_datadir} \( -name '*.[ch]*' -o -name '*.f90' \) -exec chmod -x {} + #Fixup headers and scripts for multiarch %ifarch x86_64 ppc64 ia64 s390x sparc64 alpha @@ -254,7 +265,8 @@ cat > %{buildroot}%{macrosdir}/macros.hdf5 < - 1.14.6-1 +- Upgrade to 1.14.6 +- Patch hdf5 for CVE-2025-2153, CVE-2025-2310, CVE-2025-2914, CVE-2025-2926, CVE-2025-6816, + CVE-2025-2925, CVE-2025-2924, CVE-2025-44905, CVE-2025-6269, CVE-2025-6750, CVE-2025-6857, + CVE-2025-7067, CVE-2025-7068, CVE-2025-6858, CVE-2025-2923, CVE-2025-2913, CVE-2025-6516, + CVE-2025-6818, CVE-2025-6817, CVE-2025-6856, CVE-2025-7069 + * Tue Jun 04 2024 Neha Agarwal - 1.14.4.3-1 - Upgrade to v1.14.4.3 to fix CVEs 2024-29157, 2024-29158, 2024-29159, 2024-29160, 2024-29161, 2024-29162, 2024-29163, 2024-29164, 2024-29165, 2024-29166, 2024-32605, diff --git a/SPECS/httpd/httpd.signatures.json b/SPECS/httpd/httpd.signatures.json index 211c5279bac..2c528af0f15 100644 --- a/SPECS/httpd/httpd.signatures.json +++ b/SPECS/httpd/httpd.signatures.json @@ -1,15 +1,15 @@ { - "Signatures": { - "00-proxyhtml.conf": "a2211995b7e55b781f68666664f0bcd84550ed9a16edee07121f63477dfaaffa", - "00-ssl.conf": "88f04c415dbd1bf0d074965d37261e056d073b675a047a02e55222818640c6e8", - "01-ldap.conf": "cbbbdd396fe056e8ab167abd7b2cb5145b42210bfea38452968ff02a03493fc8", - "01-session.conf": "51df0ceeb7dae9922817f4af0554f83fe01d6268025ee08260aeed69be3953d1", - "10-listen443.conf": "fc7484790ec6328b9082e04083137551a5ae2e8f4d4696d9846b052915b6a0cb", - "httpd-2.4.65.tar.bz2": "58b8be97d9940ec17f7656c0c6b9f41b618aac468b894b534148e3296c53b8b3", - "httpd-init.service": "2501b44bdb02f583d98cc5296accbf0af36957b93ed5b871358aeb10a0512a7c", - "httpd-ssl-gencerts": "ae96a94eeb0be8731c0bb976e5b878e0e5a196442a001c9e809bed3873f4755d", - "httpd-ssl-pass-dialog": "b9bd4816dda673ad9294a0fbd2904fac9b96eabddb4d72080ae58b498bcd1db9", - "macros.httpd": "6dbf9313a5d085cb705fa5ef393372ec940008f08bf1c9350f8f49d58df75dff", - "ssl.conf": "6690cb873d2312d0ecffcda3822562cd1b1b11ac44b1fcb7bd1b720a9e53c333" - } -} \ No newline at end of file + "Signatures": { + "00-proxyhtml.conf": "a2211995b7e55b781f68666664f0bcd84550ed9a16edee07121f63477dfaaffa", + "00-ssl.conf": "88f04c415dbd1bf0d074965d37261e056d073b675a047a02e55222818640c6e8", + "01-ldap.conf": "cbbbdd396fe056e8ab167abd7b2cb5145b42210bfea38452968ff02a03493fc8", + "01-session.conf": "51df0ceeb7dae9922817f4af0554f83fe01d6268025ee08260aeed69be3953d1", + "10-listen443.conf": "fc7484790ec6328b9082e04083137551a5ae2e8f4d4696d9846b052915b6a0cb", + "httpd-init.service": "2501b44bdb02f583d98cc5296accbf0af36957b93ed5b871358aeb10a0512a7c", + "httpd-ssl-gencerts": "ae96a94eeb0be8731c0bb976e5b878e0e5a196442a001c9e809bed3873f4755d", + "httpd-ssl-pass-dialog": "b9bd4816dda673ad9294a0fbd2904fac9b96eabddb4d72080ae58b498bcd1db9", + "macros.httpd": "6dbf9313a5d085cb705fa5ef393372ec940008f08bf1c9350f8f49d58df75dff", + "ssl.conf": "6690cb873d2312d0ecffcda3822562cd1b1b11ac44b1fcb7bd1b720a9e53c333", + "httpd-2.4.66.tar.bz2": "94d7ff2b42acbb828e870ba29e4cbad48e558a79c623ad3596e4116efcfea25a" + } +} diff --git a/SPECS/httpd/httpd.spec b/SPECS/httpd/httpd.spec index aa4257b7ec4..b89f6ac9451 100644 --- a/SPECS/httpd/httpd.spec +++ b/SPECS/httpd/httpd.spec @@ -2,7 +2,7 @@ %define _confdir %{_sysconfdir} Summary: The Apache HTTP Server Name: httpd -Version: 2.4.65 +Version: 2.4.66 Release: 1%{?dist} License: Apache-2.0 Vendor: Microsoft Corporation @@ -345,6 +345,9 @@ 
fi %{_libexecdir}/httpd-ssl-pass-dialog %changelog +* Sun Dec 07 2025 CBL-Mariner Servicing Account - 2.4.66-1 +- Auto-upgrade to 2.4.66 - for CVE-2025-55753, CVE-2025-58098, CVE-2025-59775, CVE-2025-65082, CVE-2025-66200 + * Mon Jul 28 2025 Kshitiz Godara - 2.4.65-1 - Upgrade to 2.4.65 to fix CVE-2025-54090 diff --git a/SPECS/hyperv-daemons/hyperv-daemons.signatures.json b/SPECS/hyperv-daemons/hyperv-daemons.signatures.json index 5defa2a862a..10b31258413 100644 --- a/SPECS/hyperv-daemons/hyperv-daemons.signatures.json +++ b/SPECS/hyperv-daemons/hyperv-daemons.signatures.json @@ -7,6 +7,6 @@ "hypervkvpd.service": "c1bb207cf9f388f8f3cf5b649abbf8cfe4c4fcf74538612946e68f350d1f265f", "hypervvss.rules": "94cead44245ef6553ab79c0bbac8419e3ff4b241f01bcec66e6f508098cbedd1", "hypervvssd.service": "22270d9f0f23af4ea7905f19c1d5d5495e40c1f782cbb87a99f8aec5a011078d", - "kernel-6.6.117.1.tar.gz": "bfbbeba626396e2bab9bd520a46943e68d228a91e8f11cd662bf4fb3996443d3" + "kernel-6.6.121.1.tar.gz": "aa5721db931ce7b5a7a2c9a554c78e399dbe76e823356d36f860308cfa9c5e12" } } diff --git a/SPECS/hyperv-daemons/hyperv-daemons.spec b/SPECS/hyperv-daemons/hyperv-daemons.spec index 7f43ccbd750..a8d57d9a6ad 100644 --- a/SPECS/hyperv-daemons/hyperv-daemons.spec +++ b/SPECS/hyperv-daemons/hyperv-daemons.spec @@ -10,7 +10,7 @@ Summary: Hyper-V daemons suite Name: hyperv-daemons -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2+ Vendor: Microsoft Corporation @@ -221,6 +221,12 @@ fi %{_sbindir}/lsvmbus %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS/ibarr/ibarr.signatures.json b/SPECS/ibarr/ibarr.signatures.json index 741df92c21e..ff5c71512ab 100644 --- a/SPECS/ibarr/ibarr.signatures.json +++ b/SPECS/ibarr/ibarr.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ibarr-0.1.3.tar.gz": "db24745abfd49af9ed2b3f1990b9b00e0bd51258c282caa6fa3cf420f90b8b25" + "ibarr-0.1.5.tar.gz": "785b7587b15e9778aa3b443beef90f2a5b678751c0866fa7547cbcdafd37fe6a" } } \ No newline at end of file diff --git a/SPECS/ibarr/ibarr.spec b/SPECS/ibarr/ibarr.spec index a881fa40212..674672c44c7 100644 --- a/SPECS/ibarr/ibarr.spec +++ b/SPECS/ibarr/ibarr.spec @@ -1,13 +1,15 @@ -Name: ibarr -Version: 0.1.3 -Release: 3%{?dist} -Summary: Nvidia address and route userspace resolution services for Infiniband +Name: ibarr +Version: 0.1.5 +Release: 1%{?dist} +Summary: Nvidia address and route userspace resolution services for Infiniband Vendor: Microsoft Corporation Distribution: Azure Linux -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/ibarr-0.1.3.tar.gz#/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz Group: Applications/System License: (GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause - BuildRequires: cmake BuildRequires: gcc BuildRequires: libnl3-devel @@ -54,6 +56,9 @@ rm -rf $RPM_BUILD_ROOT /lib/systemd/system/%{name}.service %changelog +* Tue Oct 04 2025 Suresh Babu Chalamalasetty - 0.1.5-1 +- Upgrade version to 0.1.5. 
+- Update source path * Mon Sep 15 2025 Elaheh Dehghani - 0.1.3-3 - Enable ARM64 build by removing ExclusiveArch * Tue Dec 17 2024 Binu Jose Philip - 0.1.3-2 diff --git a/SPECS/ibsim/ibsim.signatures.json b/SPECS/ibsim/ibsim.signatures.json index bd36bdca1e6..b67118660be 100644 --- a/SPECS/ibsim/ibsim.signatures.json +++ b/SPECS/ibsim/ibsim.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ibsim-0.12.tar.gz": "f137872bf1ec1ca56c9f301ddef237a5f9c4111d6b83b4be853b58c054e454a3" + "ibsim-0.12.1.tar.gz": "25d1f5dbfcbd83d0119681151eb5d53e252daa47416b1348d567a7bc92d490fc" } } \ No newline at end of file diff --git a/SPECS/ibsim/ibsim.spec b/SPECS/ibsim/ibsim.spec index c805d1f1ca4..6093493bd31 100644 --- a/SPECS/ibsim/ibsim.spec +++ b/SPECS/ibsim/ibsim.spec @@ -1,15 +1,18 @@ -%define RELEASE 2 +%define RELEASE 1 %define rel %{?CUSTOM_RELEASE}%{!?CUSTOM_RELEASE:%RELEASE} Summary: InfiniBand fabric simulator for management Name: ibsim -Version: 0.12 -Release: 2%{?dist} +Version: 0.12.1 +Release: 1%{?dist} License: GPLv2 or BSD Group: System Environment/Libraries BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/ibsim-0.12.tar.gz#/ibsim-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/ibsim-%{version}.tar.gz Url: https://github.com/linux-rdma/ibsim Vendor: Microsoft Corporation Distribution: Azure Linux @@ -47,6 +50,9 @@ rm -rf $RPM_BUILD_ROOT %license COPYING %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 0.12.1-1 +- Upgrade version to 0.12.1. +- Update source path * Mon Sep 15 2025 Elaheh Dehghani - 0.12-2 - Enable ARM64 build by removing ExclusiveArch * Tue Dec 17 2024 Binu Jose Philip - 0.12-1 diff --git a/SPECS/influxdb/CVE-2025-10543.patch b/SPECS/influxdb/CVE-2025-10543.patch new file mode 100644 index 00000000000..1c398665646 --- /dev/null +++ b/SPECS/influxdb/CVE-2025-10543.patch @@ -0,0 +1,33 @@ +From 99e87e5303de418336060151f8718453b6ece75a Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Wed, 17 Dec 2025 05:03:27 +0000 +Subject: [PATCH] packets: truncate encodeBytes to 65535 bytes to avoid + overflow of 16-bit length; add comments; backport test and fvt changes + skipped due to repository structure + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://patch-diff.githubusercontent.com/raw/eclipse-paho/paho.mqtt.golang/pull/714.patch +--- + .../github.com/eclipse/paho.mqtt.golang/packets/packets.go | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go +index 42eeb46..a221a3d 100644 +--- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go ++++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go +@@ -304,6 +304,12 @@ func decodeBytes(b io.Reader) ([]byte, error) { + } + + func encodeBytes(field []byte) []byte { ++ // Attempting to encode more than 65,535 bytes would lead to an unexpected 16-bit length and extra data written ++ // (which would be parsed as later parts of the message). The safest option is to truncate. 
++ if len(field) > 65535 { ++ field = field[0:65535] ++ } ++ + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, field...) +-- +2.45.4 + diff --git a/SPECS/influxdb/CVE-2025-65637.patch b/SPECS/influxdb/CVE-2025-65637.patch new file mode 100644 index 00000000000..d299bbcfd4b --- /dev/null +++ b/SPECS/influxdb/CVE-2025-65637.patch @@ -0,0 +1,136 @@ +From 41548ebae4b6e3a9706658428fa6f6784a9f39d0 Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 1/2] This commit fixes a potential denial of service + vulnerability in logrus.Writer() that could be triggered by logging text + longer than 64kb without newlines. Previously, the bufio.Scanner used by + Writer() would hang indefinitely when reading such text without newlines, + causing the application to become unresponsive. + +--- + vendor/github.com/sirupsen/logrus/writer.go | 33 ++++++++++++++++++++- + 1 file changed, 32 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 72e8e3a..36032d0 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -4,6 +4,7 @@ import ( + "bufio" + "io" + "runtime" ++ "strings" + ) + + // Writer at INFO level. See WriterLevel for details. +@@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) + } + ++// Writer returns an io.Writer that writes to the logger at the info log level + func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) + } + ++// WriterLevel returns an io.Writer that writes to the logger at the given log level + func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + ++ // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace +@@ -48,23 +52,50 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + printFunc = entry.Print + } + ++ // Start a new goroutine to scan the input and write it to the logger using the specified print function. ++ // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
+ go entry.writerScanner(reader, printFunc) ++ ++ // Set a finalizer function to close the writer when it is garbage collected + runtime.SetFinalizer(writer, writerFinalizer) + + return writer + } + ++// writerScanner scans the input from the reader and writes it to the logger + func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) ++ ++ // Set the buffer size to the maximum token size to avoid buffer overflows ++ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) ++ ++ // Define a split function to split the input into chunks of up to 64KB ++ chunkSize := 64 * 1024 // 64KB ++ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { ++ if len(data) > chunkSize { ++ return chunkSize, data[:chunkSize], nil ++ } ++ return 0, nil, nil ++ } ++ ++ //Use the custom split function to split the input ++ scanner.Split(splitFunc) ++ ++ // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { +- printFunc(scanner.Text()) ++ printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } ++ ++ // If there was an error while scanning the input, log an error + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } ++ ++ // Close the reader when we are done + reader.Close() + } + ++// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected + func writerFinalizer(writer *io.PipeWriter) { + writer.Close() + } +-- +2.45.4 + + +From a1351838d31739b7a5e8c11f102cf7f733f29a3a Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 2/2] Scan text in 64KB chunks + +This commit fixes a potential denial of service +vulnerability in logrus.Writer() that could be +triggered by logging text longer than 64KB +without newlines. Previously, the bufio.Scanner +used by Writer() would hang indefinitely when +reading such text without newlines, causing the +application to become unresponsive. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/sirupsen/logrus/pull/1376.patch +--- + vendor/github.com/sirupsen/logrus/writer.go | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 36032d0..7e7703c 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -75,7 +75,8 @@ func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ... 
+ if len(data) > chunkSize { + return chunkSize, data[:chunkSize], nil + } +- return 0, nil, nil ++ ++ return len(data), data, nil + } + + //Use the custom split function to split the input +-- +2.45.4 + diff --git a/SPECS/influxdb/influxdb.spec b/SPECS/influxdb/influxdb.spec index f4754745812..eb001f35c2a 100644 --- a/SPECS/influxdb/influxdb.spec +++ b/SPECS/influxdb/influxdb.spec @@ -18,7 +18,7 @@ Summary: Scalable datastore for metrics, events, and real-time analytics Name: influxdb Version: 2.7.5 -Release: 8%{?dist} +Release: 10%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -67,6 +67,8 @@ Patch8: CVE-2025-22868.patch Patch9: CVE-2025-22870.patch Patch10: CVE-2024-51744.patch Patch11: CVE-2025-22872.patch +Patch12: CVE-2025-65637.patch +Patch13: CVE-2025-10543.patch BuildRequires: clang BuildRequires: golang BuildRequires: kernel-headers @@ -156,6 +158,12 @@ go test ./... %{_tmpfilesdir}/influxdb.conf %changelog +* Wed Dec 17 2025 Azure Linux Security Servicing Account - 2.7.5-10 +- Patch for CVE-2025-10543 + +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 2.7.5-9 +- Patch for CVE-2025-65637 + * Mon Jul 21 2025 Jyoti Kanase - 2.7.5-8 - Bump release to rebuild with rust diff --git a/SPECS/iser-hwe/iser-hwe.signatures.json b/SPECS/iser-hwe/iser-hwe.signatures.json index 7091fd70f69..1ddd7258bb5 100644 --- a/SPECS/iser-hwe/iser-hwe.signatures.json +++ b/SPECS/iser-hwe/iser-hwe.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "iser-24.10.tgz": "d9c1344146697664a32d47e933b1ab26d4ba1899ea5936c2b6216316a1d974a5" + "iser-25.07.tgz": "a3f5502cb490caeb19a17658be1a97b61ebd0d465e9afe83ebcbce8e48f17ee7" } } \ No newline at end of file diff --git a/SPECS/iser-hwe/iser-hwe.spec b/SPECS/iser-hwe/iser-hwe.spec index f44a3ec3dde..9cdf5fb318b 100644 --- a/SPECS/iser-hwe/iser-hwe.spec +++ b/SPECS/iser-hwe/iser-hwe.spec @@ -29,7 +29,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -40,9 +40,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name iser-hwe} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-24%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-2%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -66,16 +66,18 @@ Summary: %{_name} Driver Name: iser-hwe -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/iser-24.10.tgz#/iser-%{_version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. 
+# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/iser-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 BuildRequires: gcc BuildRequires: make @@ -249,6 +251,14 @@ fi # 1 : closed %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. +- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/iser/iser.signatures.json b/SPECS/iser/iser.signatures.json index 7091fd70f69..1ddd7258bb5 100644 --- a/SPECS/iser/iser.signatures.json +++ b/SPECS/iser/iser.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "iser-24.10.tgz": "d9c1344146697664a32d47e933b1ab26d4ba1899ea5936c2b6216316a1d974a5" + "iser-25.07.tgz": "a3f5502cb490caeb19a17658be1a97b61ebd0d465e9afe83ebcbce8e48f17ee7" } } \ No newline at end of file diff --git a/SPECS/iser/iser.spec b/SPECS/iser/iser.spec index 8749dd80008..e67720745a2 100644 --- a/SPECS/iser/iser.spec +++ b/SPECS/iser/iser.spec @@ -39,9 +39,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name iser} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-21%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-1%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -65,12 +65,15 @@ Summary: %{_name} Driver Name: iser -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/iser-24.10.tgz#/iser-%{_version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/iser-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux @@ -249,6 +252,10 @@ fi # 1 : closed %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. 
+- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS/isert-hwe/isert-hwe.signatures.json b/SPECS/isert-hwe/isert-hwe.signatures.json index 616d6a2865f..6c27296af33 100644 --- a/SPECS/isert-hwe/isert-hwe.signatures.json +++ b/SPECS/isert-hwe/isert-hwe.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "isert-24.10.tgz": "ee91338b063800563b7a1cc1adfb29ff8913b17b0bc941a24995211f3268d33a" + "isert-25.07.tgz": "f1b1da264f4c42998098e2c48e5bbf0c5dd5dcebc5ae231c64a85fe299a1ba87" } } \ No newline at end of file diff --git a/SPECS/isert-hwe/isert-hwe.spec b/SPECS/isert-hwe/isert-hwe.spec index c93159042d5..26a87663d24 100644 --- a/SPECS/isert-hwe/isert-hwe.spec +++ b/SPECS/isert-hwe/isert-hwe.spec @@ -29,7 +29,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -40,9 +40,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name isert-hwe} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-24%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-2%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -66,16 +66,18 @@ Summary: %{_name}-hwe Driver Name: isert-hwe -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/isert-24.10.tgz#/isert-%{_version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/isert-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 BuildRequires: gcc BuildRequires: make @@ -249,6 +251,14 @@ fi # 1 : closed %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. 
+- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/isert/isert.signatures.json b/SPECS/isert/isert.signatures.json index 616d6a2865f..6c27296af33 100644 --- a/SPECS/isert/isert.signatures.json +++ b/SPECS/isert/isert.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "isert-24.10.tgz": "ee91338b063800563b7a1cc1adfb29ff8913b17b0bc941a24995211f3268d33a" + "isert-25.07.tgz": "f1b1da264f4c42998098e2c48e5bbf0c5dd5dcebc5ae231c64a85fe299a1ba87" } } \ No newline at end of file diff --git a/SPECS/isert/isert.spec b/SPECS/isert/isert.spec index 51e587ffda9..271e942653e 100644 --- a/SPECS/isert/isert.spec +++ b/SPECS/isert/isert.spec @@ -39,9 +39,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name isert} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-21%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-1%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -65,12 +65,15 @@ Summary: %{_name} Driver Name: isert -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/isert-24.10.tgz#/isert-%{_version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/isert-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux @@ -249,6 +252,10 @@ fi # 1 : closed %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. 
+- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS/kata-containers-cc/kata-containers-cc.spec b/SPECS/kata-containers-cc/kata-containers-cc.spec index 9878d82e498..c769da8047b 100644 --- a/SPECS/kata-containers-cc/kata-containers-cc.spec +++ b/SPECS/kata-containers-cc/kata-containers-cc.spec @@ -3,7 +3,7 @@ Name: kata-containers-cc Version: 3.15.0.aks0 -Release: 5%{?dist} +Release: 6%{?dist} Summary: Kata Confidential Containers package developed for Confidential Containers on AKS License: ASL 2.0 URL: https://github.com/microsoft/kata-containers @@ -11,7 +11,7 @@ Vendor: Microsoft Corporation Distribution: Azure Linux Source0: https://github.com/microsoft/kata-containers/archive/refs/tags/%{version}.tar.gz#/%{sourceName}-%{version}.tar.gz Source1: %{sourceName}-%{version}-cargo.tar.gz - +Patch0: rust-1.90-fixes.patch ExclusiveArch: x86_64 BuildRequires: azurelinux-release @@ -150,6 +150,10 @@ fi %{tools_pkg}/tools/osbuilder/node-builder/azure-linux/agent-install/usr/lib/systemd/system/kata-agent.service %changelog +* Wed Oct 15 2025 Kavya Sree Kaitepalli - 3.15.0-aks0-6 +- Bump release to rebuild with rust +- Add patch to suppress dead_code warnings and add explicit lifetime for U32Set iterator + * Fri Aug 08 2025 Azure Linux Security Servicing Account - 3.15.0-aks0-5 - Bump release to rebuild with rust diff --git a/SPECS/kata-containers-cc/rust-1.90-fixes.patch b/SPECS/kata-containers-cc/rust-1.90-fixes.patch new file mode 100644 index 00000000000..ccc55438ab4 --- /dev/null +++ b/SPECS/kata-containers-cc/rust-1.90-fixes.patch @@ -0,0 +1,51 @@ +From 40df9e0f016e4ce67e90e3e7f5b0ec87c5cb0a32 Mon Sep 17 00:00:00 2001 +From: Kavya Sree Kaitepalli +Date: Tue, 28 Oct 2025 17:39:43 +0000 +Subject: [PATCH] Suppress dead_code warnings and add explicit lifetime for U32Set iterator for Rust 1.90 + +--- + src/agent/src/device/block_device_handler.rs | 1 + + src/agent/src/storage/block_handler.rs | 1 + + src/libs/kata-types/src/utils/u32_set.rs | 2 +- + 3 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/agent/src/device/block_device_handler.rs b/src/agent/src/device/block_device_handler.rs +index d518f9d..8607751 100644 +--- a/src/agent/src/device/block_device_handler.rs ++++ b/src/agent/src/device/block_device_handler.rs +@@ -29,6 +29,7 @@ use tracing::instrument; + pub struct VirtioBlkPciDeviceHandler {} + + #[derive(Debug)] ++#[allow(dead_code)] + pub struct VirtioBlkCcwDeviceHandler {} + + #[derive(Debug)] +diff --git a/src/agent/src/storage/block_handler.rs b/src/agent/src/storage/block_handler.rs +index 251a4df..074efc3 100644 +--- a/src/agent/src/storage/block_handler.rs ++++ b/src/agent/src/storage/block_handler.rs +@@ -94,6 +94,7 @@ impl StorageHandler for VirtioBlkPciHandler { + } + + #[derive(Debug)] ++#[allow(dead_code)] + pub struct VirtioBlkCcwHandler {} + + #[async_trait::async_trait] +diff --git a/src/libs/kata-types/src/utils/u32_set.rs b/src/libs/kata-types/src/utils/u32_set.rs +index 44c55a1..837e7a0 100644 +--- a/src/libs/kata-types/src/utils/u32_set.rs ++++ b/src/libs/kata-types/src/utils/u32_set.rs +@@ -47,7 +47,7 @@ impl U32Set { + } + + /// Get an iterator over the CPU set. 
+- pub fn iter(&self) -> Iter { ++ pub fn iter(&self) -> Iter<'_, u32> { + self.0.iter() + } + } +-- +2.45.4 + diff --git a/SPECS/kata-containers/CVE-2026-24054.patch b/SPECS/kata-containers/CVE-2026-24054.patch new file mode 100644 index 00000000000..64fd275b9de --- /dev/null +++ b/SPECS/kata-containers/CVE-2026-24054.patch @@ -0,0 +1,429 @@ +From 14ef41ac93b4f0b4cad1e28f94f974238d0ee936 Mon Sep 17 00:00:00 2001 +From: Dan Mihai +Date: Wed, 7 Jan 2026 00:09:08 +0000 +Subject: [PATCH] runtime: DEFDISABLEBLOCK := true +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1. Add disable_block_device_use to CLH settings file, for parity with + the already existing QEMU settings. + +2. Set DEFDISABLEBLOCK := true by default for both QEMU and CLH. After + this change, Kata Guests will use by default virtio-fs to access + container rootfs directories from their Hosts. Hosts that were + designed to use Host block devices attached to the Guests can + re-enable these rootfs block devices by changing the value of + disable_block_device_use back to false in their settings files. + +3. Add test using container image without any rootfs layers. Depending + on the container runtime and image snapshotter being used, the empty + container rootfs image might get stored on a host block device that + cannot be safely hotplugged to a guest VM, because the host is using + the same block device. + +4. Add block device hotplug safety warning into the Kata Shim + configuration files. + +Signed-off-by: Dan Mihai +Signed-off-by: Fabiano Fidêncio +Signed-off-by: Cameron McDermott +--- + docs/design/architecture/storage.md | 7 ++- + docs/how-to/how-to-set-sandbox-config-kata.md | 2 +- + src/runtime/Makefile | 2 +- + src/runtime/config/configuration-clh.toml.in | 14 +++++ + .../configuration-qemu-coco-dev.toml.in | 8 ++- + .../configuration-qemu-nvidia-gpu-snp.toml.in | 8 ++- + .../configuration-qemu-nvidia-gpu-tdx.toml.in | 8 ++- + .../configuration-qemu-nvidia-gpu.toml.in | 8 ++- + .../config/configuration-qemu-se.toml.in | 8 ++- + .../config/configuration-qemu-snp.toml.in | 8 ++- + .../config/configuration-qemu-tdx.toml.in | 8 ++- + src/runtime/config/configuration-qemu.toml.in | 8 ++- + .../config/configuration-stratovirt.toml.in | 8 ++- + .../kubernetes/k8s-empty-image.bats | 59 +++++++++++++++++++ + .../kubernetes/run_kubernetes_tests.sh | 1 + + .../no-layer-image.yaml | 13 ++++ + 16 files changed, 155 insertions(+), 14 deletions(-) + create mode 100644 tests/integration/kubernetes/k8s-empty-image.bats + create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/no-layer-image.yaml + +diff --git a/docs/design/architecture/storage.md b/docs/design/architecture/storage.md +index 3aefc7ecf9..d3cb71ad85 100644 +--- a/docs/design/architecture/storage.md ++++ b/docs/design/architecture/storage.md +@@ -51,6 +51,7 @@ containers started after the VM has been launched. + Users can check to see if the container uses the `devicemapper` block + device as its rootfs by calling `mount(8)` within the container. If + the `devicemapper` block device is used, the root filesystem (`/`) +-will be mounted from `/dev/vda`. Users can disable direct mounting of +-the underlying block device through the runtime +-[configuration](README.md#configuration). ++will be mounted from `/dev/vda`. Users can enable direct mounting of ++the underlying block device by setting the runtime ++[configuration](README.md#configuration) flag `disable_block_device_use` to ++`false`. 
+diff --git a/docs/how-to/how-to-set-sandbox-config-kata.md b/docs/how-to/how-to-set-sandbox-config-kata.md +index aa044367b3..37c454cba4 100644 +--- a/docs/how-to/how-to-set-sandbox-config-kata.md ++++ b/docs/how-to/how-to-set-sandbox-config-kata.md +@@ -50,7 +50,7 @@ There are several kinds of Kata configurations and they are listed below. + | `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor | + | `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` | + | `io.katacontainers.config.hypervisor.default_vcpus` | float32| the default vCPUs assigned for a VM by the hypervisor | +-| `io.katacontainers.config.hypervisor.disable_block_device_use` | `boolean` | disallow a block device from being used | ++| `io.katacontainers.config.hypervisor.disable_block_device_use` | `boolean` | disable hotplugging host block devices to guest VMs for container rootfs | + | `io.katacontainers.config.hypervisor.disable_image_nvdimm` | `boolean` | specify if a `nvdimm` device should be used as rootfs for the guest (QEMU) | + | `io.katacontainers.config.hypervisor.disable_vhost_net` | `boolean` | specify if `vhost-net` is not available on the host | + | `io.katacontainers.config.hypervisor.enable_hugepages` | `boolean` | if the memory should be `pre-allocated` from huge pages | +diff --git a/src/runtime/Makefile b/src/runtime/Makefile +index 36550295ce..fac37fc82d 100644 +--- a/src/runtime/Makefile ++++ b/src/runtime/Makefile +@@ -250,7 +250,7 @@ DEFSECCOMPSANDBOXPARAM := + DEFENTROPYSOURCE := /dev/urandom + DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"] + +-DEFDISABLEBLOCK := false ++DEFDISABLEBLOCK := true + DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs + DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs + # Please keep DEFSHAREDFS_QEMU_COCO_DEV_VIRTIOFS in sync with TDX/SNP +diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in +index fe5fca4883..937d25d6ba 100644 +--- a/src/runtime/config/configuration-clh.toml.in ++++ b/src/runtime/config/configuration-clh.toml.in +@@ -109,6 +109,20 @@ memory_slots = @DEFMEMSLOTS@ + # > amount of physical RAM --> will be set to the actual amount of physical RAM + default_maxmemory = @DEFMAXMEMSZ@ + ++# Disable hotplugging host block devices to guest VMs for container rootfs. ++# In case of a storage driver like devicemapper where a container's ++# root file system is backed by a block device, the block device is passed ++# directly to the hypervisor for performance reasons. ++# This flag prevents the block device from being passed to the hypervisor, ++# virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. 
++disable_block_device_use = @DEFDISABLEBLOCK@ ++ + # Shared file system type: + # - virtio-fs (default) + # - virtio-fs-nydus +diff --git a/src/runtime/config/configuration-qemu-coco-dev.toml.in b/src/runtime/config/configuration-qemu-coco-dev.toml.in +index 64b3917124..1a792d9bf8 100644 +--- a/src/runtime/config/configuration-qemu-coco-dev.toml.in ++++ b/src/runtime/config/configuration-qemu-coco-dev.toml.in +@@ -167,12 +167,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu-nvidia-gpu-snp.toml.in b/src/runtime/config/configuration-qemu-nvidia-gpu-snp.toml.in +index c0d531b5ee..7923ef2acd 100644 +--- a/src/runtime/config/configuration-qemu-nvidia-gpu-snp.toml.in ++++ b/src/runtime/config/configuration-qemu-nvidia-gpu-snp.toml.in +@@ -176,12 +176,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu-nvidia-gpu-tdx.toml.in b/src/runtime/config/configuration-qemu-nvidia-gpu-tdx.toml.in +index bc6ffc2231..e5b8bb7b09 100644 +--- a/src/runtime/config/configuration-qemu-nvidia-gpu-tdx.toml.in ++++ b/src/runtime/config/configuration-qemu-nvidia-gpu-tdx.toml.in +@@ -160,12 +160,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. 
++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu-nvidia-gpu.toml.in b/src/runtime/config/configuration-qemu-nvidia-gpu.toml.in +index ddab7e60be..888b430a7b 100644 +--- a/src/runtime/config/configuration-qemu-nvidia-gpu.toml.in ++++ b/src/runtime/config/configuration-qemu-nvidia-gpu.toml.in +@@ -165,12 +165,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu-se.toml.in b/src/runtime/config/configuration-qemu-se.toml.in +index a7732bd1f9..483fb348b3 100644 +--- a/src/runtime/config/configuration-qemu-se.toml.in ++++ b/src/runtime/config/configuration-qemu-se.toml.in +@@ -152,12 +152,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu-snp.toml.in b/src/runtime/config/configuration-qemu-snp.toml.in +index e79051fec6..13a364de9a 100644 +--- a/src/runtime/config/configuration-qemu-snp.toml.in ++++ b/src/runtime/config/configuration-qemu-snp.toml.in +@@ -176,12 +176,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. 
+ # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu-tdx.toml.in b/src/runtime/config/configuration-qemu-tdx.toml.in +index 287d356a55..5029c3ec92 100644 +--- a/src/runtime/config/configuration-qemu-tdx.toml.in ++++ b/src/runtime/config/configuration-qemu-tdx.toml.in +@@ -161,12 +161,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-qemu.toml.in b/src/runtime/config/configuration-qemu.toml.in +index c31d17f489..af971558ca 100644 +--- a/src/runtime/config/configuration-qemu.toml.in ++++ b/src/runtime/config/configuration-qemu.toml.in +@@ -166,12 +166,18 @@ + # Default false + #enable_virtio_mem = true + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. 
+ disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/src/runtime/config/configuration-stratovirt.toml.in b/src/runtime/config/configuration-stratovirt.toml.in +index a86a584a52..b9f28a74a6 100644 +--- a/src/runtime/config/configuration-stratovirt.toml.in ++++ b/src/runtime/config/configuration-stratovirt.toml.in +@@ -104,12 +104,18 @@ + # Default 0 + #memory_offset = 0 + +-# Disable block device from being used for a container's rootfs. ++# Disable hotplugging host block devices to guest VMs for container rootfs. + # In case of a storage driver like devicemapper where a container's + # root file system is backed by a block device, the block device is passed + # directly to the hypervisor for performance reasons. + # This flag prevents the block device from being passed to the hypervisor, + # virtio-fs is used instead to pass the rootfs. ++# WARNING: ++# Don't set this flag to false if you don't understand well the behavior of ++# your container runtime and image snapshotter. Some snapshotters might use ++# container image storage devices that are not meant to be hotplugged into a ++# guest VM - e.g., because they contain files used by the host or by other ++# guests. + disable_block_device_use = @DEFDISABLEBLOCK@ + + # Shared file system type: +diff --git a/tests/integration/kubernetes/k8s-empty-image.bats b/tests/integration/kubernetes/k8s-empty-image.bats +new file mode 100644 +index 0000000000..6d003b2aad +--- /dev/null ++++ b/tests/integration/kubernetes/k8s-empty-image.bats +@@ -0,0 +1,59 @@ ++#!/usr/bin/env bats ++# ++# Copyright (c) 2025 NVIDIA Corporation ++# ++# SPDX-License-Identifier: Apache-2.0 ++# ++ ++load "${BATS_TEST_DIRNAME}/../../common.bash" ++load "${BATS_TEST_DIRNAME}/lib.sh" ++load "${BATS_TEST_DIRNAME}/tests_common.sh" ++ ++setup() { ++ setup_common || die "setup_common failed" ++ pod_name="no-layer-image" ++ get_pod_config_dir ++ ++ yaml_file="${pod_config_dir}/${pod_name}.yaml" ++ ++ # genpolicy fails for this unusual container image, so use the allow_all policy. 
++ add_allow_all_policy_to_yaml "${yaml_file}" ++} ++ ++@test "Test image with no layers cannot run" { ++ # Error from run-k8s-tests (ubuntu, qemu, small): ++ # ++ # failed to create containerd task: failed to create shim task: the file sleep was not found ++ # ++ # Error from run-k8s-tests-on-tee (sev-snp, qemu-snp): ++ # ++ # failed to create containerd task: failed to create shim task: rpc status: ++ # Status { code: INTERNAL, message: "[CDH] [ERROR]: Image Pull error: Failed to pull image ++ # ghcr.io/kata-containers/no-layer-image:latest from all mirror/mapping locations or original location: image: ++ # ghcr.io/kata-containers/no-layer-image:latest, error: Internal error", details: [], special_fields: ++ # SpecialFields { unknown_fields: UnknownFields { fields: None }, cached_size: CachedSize { size: 0 } } } ++ # ++ # Error from run-k8s-tests-coco-nontee-with-erofs-snapshotter (qemu-coco-dev, erofs, default): ++ # ++ # failed to create containerd task: failed to create shim task: failed to mount ++ # /run/kata-containers/shared/containers/fadd1af7ea2a7bfc6caf26471f70e9a913a2989fd4a1be9d001b59e48c0781aa/rootfs ++ # to /run/kata-containers/fadd1af7ea2a7bfc6caf26471f70e9a913a2989fd4a1be9d001b59e48c0781aa/rootfs, with error: ++ # ENOENT: No such file or directory ++ ++ kubectl create -f "${yaml_file}" ++ ++ local -r command="kubectl describe "pod/${pod_name}" | grep -E \ ++ 'the file sleep was not found|\[CDH\] \[ERROR\]: Image Pull error|ENOENT: No such file or directory'" ++ info "Waiting ${wait_time} seconds for: ${command}" ++ waitForProcess "${wait_time}" "${sleep_time}" "${command}" >/dev/null 2>/dev/null ++} ++ ++teardown() { ++ # Debugging information ++ kubectl describe "pod/${pod_name}" ++ kubectl get "pod/${pod_name}" -o yaml ++ ++ kubectl delete pod "${pod_name}" ++ ++ teardown_common "${node}" "${node_start_time:-}" ++} +diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh +index 92097baeb8..7ca20dae1a 100755 +--- a/tests/integration/kubernetes/run_kubernetes_tests.sh ++++ b/tests/integration/kubernetes/run_kubernetes_tests.sh +@@ -42,6 +42,7 @@ else + ) + + K8S_TEST_SMALL_HOST_UNION=( \ ++ "k8s-empty-image.bats" \ + "k8s-guest-pull-image.bats" \ + "k8s-confidential.bats" \ + "k8s-sealed-secret.bats" \ +diff --git a/tests/integration/kubernetes/runtimeclass_workloads/no-layer-image.yaml b/tests/integration/kubernetes/runtimeclass_workloads/no-layer-image.yaml +new file mode 100644 +index 0000000000..0e552eb5d3 +--- /dev/null ++++ b/tests/integration/kubernetes/runtimeclass_workloads/no-layer-image.yaml +@@ -0,0 +1,13 @@ ++apiVersion: v1 ++kind: Pod ++metadata: ++ name: no-layer-image ++spec: ++ runtimeClassName: kata ++ containers: ++ - name: no-layer-image ++ image: ghcr.io/kata-containers/no-layer-image:latest ++ resources: {} ++ command: ++ - sleep ++ - infinity diff --git a/SPECS/kata-containers/kata-containers.spec b/SPECS/kata-containers/kata-containers.spec index def885dcdb8..c80d60a8306 100644 --- a/SPECS/kata-containers/kata-containers.spec +++ b/SPECS/kata-containers/kata-containers.spec @@ -2,7 +2,7 @@ Name: kata-containers Version: 3.19.1.kata2 -Release: 2%{?dist} +Release: 4%{?dist} Summary: Kata Containers package developed for Pod Sandboxing on AKS License: ASL 2.0 URL: https://github.com/microsoft/kata-containers @@ -10,6 +10,8 @@ Vendor: Microsoft Corporation Distribution: Azure Linux Source0: 
https://github.com/microsoft/kata-containers/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz Source1: %{name}-%{version}-cargo.tar.gz +Patch0: CVE-2026-24054.patch +Patch1: rust-1.90-fixes.patch BuildRequires: azurelinux-release BuildRequires: golang @@ -113,6 +115,13 @@ popd %{tools_pkg}/tools/osbuilder/node-builder/azure-linux/agent-install/usr/lib/systemd/system/kata-agent.service %changelog +* Fri Jan 29 2026 Kavya Sree Kaitepalli - 3.19.1.kata2-4 +- Bump release to rebuild with rust +- Add patch to suppress dead_code warnings and add explicit lifetime for U32Set iterator + +* Thu Jan 22 2026 Aurelien Bombo - 3.19.1.kata2-3 +- Patch CVE-2026-24054 + * Thu Oct 09 2025 Saul Paredes - 3.19.1.kata2-2 - Enable build on aarch64 diff --git a/SPECS/kata-containers/rust-1.90-fixes.patch b/SPECS/kata-containers/rust-1.90-fixes.patch new file mode 100644 index 00000000000..ccc55438ab4 --- /dev/null +++ b/SPECS/kata-containers/rust-1.90-fixes.patch @@ -0,0 +1,51 @@ +From 40df9e0f016e4ce67e90e3e7f5b0ec87c5cb0a32 Mon Sep 17 00:00:00 2001 +From: Kavya Sree Kaitepalli +Date: Tue, 28 Oct 2025 17:39:43 +0000 +Subject: [PATCH] Suppress dead_code warnings and add explicit lifetime for U32Set iterator for Rust 1.90 + +--- + src/agent/src/device/block_device_handler.rs | 1 + + src/agent/src/storage/block_handler.rs | 1 + + src/libs/kata-types/src/utils/u32_set.rs | 2 +- + 3 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/agent/src/device/block_device_handler.rs b/src/agent/src/device/block_device_handler.rs +index d518f9d..8607751 100644 +--- a/src/agent/src/device/block_device_handler.rs ++++ b/src/agent/src/device/block_device_handler.rs +@@ -29,6 +29,7 @@ use tracing::instrument; + pub struct VirtioBlkPciDeviceHandler {} + + #[derive(Debug)] ++#[allow(dead_code)] + pub struct VirtioBlkCcwDeviceHandler {} + + #[derive(Debug)] +diff --git a/src/agent/src/storage/block_handler.rs b/src/agent/src/storage/block_handler.rs +index 251a4df..074efc3 100644 +--- a/src/agent/src/storage/block_handler.rs ++++ b/src/agent/src/storage/block_handler.rs +@@ -94,6 +94,7 @@ impl StorageHandler for VirtioBlkPciHandler { + } + + #[derive(Debug)] ++#[allow(dead_code)] + pub struct VirtioBlkCcwHandler {} + + #[async_trait::async_trait] +diff --git a/src/libs/kata-types/src/utils/u32_set.rs b/src/libs/kata-types/src/utils/u32_set.rs +index 44c55a1..837e7a0 100644 +--- a/src/libs/kata-types/src/utils/u32_set.rs ++++ b/src/libs/kata-types/src/utils/u32_set.rs +@@ -47,7 +47,7 @@ impl U32Set { + } + + /// Get an iterator over the CPU set. +- pub fn iter(&self) -> Iter { ++ pub fn iter(&self) -> Iter<'_, u32> { + self.0.iter() + } + } +-- +2.45.4 + diff --git a/SPECS/keda/CVE-2025-68156.patch b/SPECS/keda/CVE-2025-68156.patch new file mode 100644 index 00000000000..ca259e4a656 --- /dev/null +++ b/SPECS/keda/CVE-2025-68156.patch @@ -0,0 +1,150 @@ +From 6f26d5402b43cb7d71044ab0d7d444fbfb148b47 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Fri, 19 Dec 2025 10:48:43 +0000 +Subject: [PATCH] fix(builtin): limit recursion depth + +Add builtin.MaxDepth (default 10k) to prevent stack overflows when +processing deeply nested or cyclic structures in builtin functions. +The functions flatten, min, max, mean, and median now return a +"recursion depth exceeded" error instead of crashing the runtime. 
+ +Signed-off-by: Ville Vesilehto +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/expr-lang/expr/pull/870.patch +--- + .../expr-lang/expr/builtin/builtin.go | 13 ++++++---- + .../github.com/expr-lang/expr/builtin/lib.go | 24 ++++++++++++++----- + 2 files changed, 27 insertions(+), 10 deletions(-) + +diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go +index efc01fc2..3fcf4118 100644 +--- a/vendor/github.com/expr-lang/expr/builtin/builtin.go ++++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go +@@ -3,6 +3,7 @@ package builtin + import ( + "encoding/base64" + "encoding/json" ++ "errors" + "fmt" + "reflect" + "sort" +@@ -16,6 +17,10 @@ import ( + var ( + Index map[string]int + Names []string ++ ++ // MaxDepth limits the recursion depth for nested structures. ++ MaxDepth = 10000 ++ ErrorMaxDepth = errors.New("recursion depth exceeded") + ) + + func init() { +@@ -377,7 +382,7 @@ var Builtins = []*Function{ + { + Name: "max", + Func: func(args ...any) (any, error) { +- return minMax("max", runtime.Less, args...) ++ return minMax("max", runtime.Less, 0, args...) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + return validateAggregateFunc("max", args) +@@ -386,7 +391,7 @@ var Builtins = []*Function{ + { + Name: "min", + Func: func(args ...any) (any, error) { +- return minMax("min", runtime.More, args...) ++ return minMax("min", runtime.More, 0, args...) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + return validateAggregateFunc("min", args) +@@ -395,7 +400,7 @@ var Builtins = []*Function{ + { + Name: "mean", + Func: func(args ...any) (any, error) { +- count, sum, err := mean(args...) ++ count, sum, err := mean(0, args...) + if err != nil { + return nil, err + } +@@ -411,7 +416,7 @@ var Builtins = []*Function{ + { + Name: "median", + Func: func(args ...any) (any, error) { +- values, err := median(args...) ++ values, err := median(0, args...) 
+ if err != nil { + return nil, err + } +diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go +index e3cd61b9..d9b9a3d8 100644 +--- a/vendor/github.com/expr-lang/expr/builtin/lib.go ++++ b/vendor/github.com/expr-lang/expr/builtin/lib.go +@@ -258,7 +258,11 @@ func String(arg any) any { + return fmt.Sprintf("%v", arg) + } + +-func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { ++func minMax(name string, fn func(any, any) bool, depth int, args ...any) (any, error) { ++ if depth > MaxDepth { ++ return nil, ErrorMaxDepth ++ } ++ + var val any + for _, arg := range args { + rv := reflect.ValueOf(deref.Deref(arg)) +@@ -266,7 +270,7 @@ func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { + case reflect.Array, reflect.Slice: + size := rv.Len() + for i := 0; i < size; i++ { +- elemVal, err := minMax(name, fn, rv.Index(i).Interface()) ++ elemVal, err := minMax(name, fn, depth+1, rv.Index(i).Interface()) + if err != nil { + return nil, err + } +@@ -299,7 +303,11 @@ func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { + return val, nil + } + +-func mean(args ...any) (int, float64, error) { ++func mean(depth int, args ...any) (int, float64, error) { ++ if depth > MaxDepth { ++ return 0, 0, ErrorMaxDepth ++ } ++ + var total float64 + var count int + +@@ -309,7 +317,7 @@ func mean(args ...any) (int, float64, error) { + case reflect.Array, reflect.Slice: + size := rv.Len() + for i := 0; i < size; i++ { +- elemCount, elemSum, err := mean(rv.Index(i).Interface()) ++ elemCount, elemSum, err := mean(depth+1, rv.Index(i).Interface()) + if err != nil { + return 0, 0, err + } +@@ -332,7 +340,11 @@ func mean(args ...any) (int, float64, error) { + return count, total, nil + } + +-func median(args ...any) ([]float64, error) { ++func median(depth int, args ...any) ([]float64, error) { ++ if depth > MaxDepth { ++ return nil, ErrorMaxDepth ++ } ++ + var values []float64 + + for _, arg := range args { +@@ -341,7 +353,7 @@ func median(args ...any) ([]float64, error) { + case reflect.Array, reflect.Slice: + size := rv.Len() + for i := 0; i < size; i++ { +- elems, err := median(rv.Index(i).Interface()) ++ elems, err := median(depth+1, rv.Index(i).Interface()) + if err != nil { + return nil, err + } +-- +2.45.4 + diff --git a/SPECS/keda/CVE-2025-68476.patch b/SPECS/keda/CVE-2025-68476.patch new file mode 100644 index 00000000000..85558a3fb35 --- /dev/null +++ b/SPECS/keda/CVE-2025-68476.patch @@ -0,0 +1,296 @@ +From f92653ffd49b5021da59d3650aac7373e83462ad Mon Sep 17 00:00:00 2001 +From: Jorge Turrado Ferrero +Date: Mon, 22 Dec 2025 11:20:55 +0100 +Subject: [PATCH 1/3] Merge commit from fork + +* fix: projected service accounts are validated to prevent arbitrary path reads + +Signed-off-by: Jorge Turrado + +* validate signature + +Signed-off-by: Jorge Turrado + +--------- + +Signed-off-by: Jorge Turrado +--- + .../resolver/hashicorpvault_handler.go | 4 +- + pkg/scaling/resolver/k8s_validator.go | 39 ++++ + pkg/scaling/resolver/k8s_validator_test.go | 176 ++++++++++++++++++ + 3 files changed, 216 insertions(+), 3 deletions(-) + create mode 100644 pkg/scaling/resolver/k8s_validator.go + create mode 100644 pkg/scaling/resolver/k8s_validator_test.go + +diff --git a/pkg/scaling/resolver/hashicorpvault_handler.go b/pkg/scaling/resolver/hashicorpvault_handler.go +index 1781a204..15b2846e 100644 +--- a/pkg/scaling/resolver/hashicorpvault_handler.go ++++ b/pkg/scaling/resolver/hashicorpvault_handler.go 
+@@ -20,7 +20,6 @@ import ( + "encoding/json" + "errors" + "fmt" +- "os" + "strings" + + "github.com/go-logr/logr" +@@ -120,8 +119,7 @@ func (vh *HashicorpVaultHandler) token(client *vaultapi.Client) (string, error) + return token, errors.New("k8s SA file not in config") + } + +- // Get the JWT from POD +- jwt, err := os.ReadFile(vh.vault.Credential.ServiceAccount) ++ jwt, err := readKubernetesServiceAccountProjectedToken(vh.vault.Credential.ServiceAccount) + if err != nil { + return token, err + } +diff --git a/pkg/scaling/resolver/k8s_validator.go b/pkg/scaling/resolver/k8s_validator.go +new file mode 100644 +index 00000000..f32a0c36 +--- /dev/null ++++ b/pkg/scaling/resolver/k8s_validator.go +@@ -0,0 +1,39 @@ ++package resolver ++ ++import ( ++ "fmt" ++ "os" ++ "strings" ++ ++ "github.com/golang-jwt/jwt/v5" ++) ++ ++var parser = jwt.NewParser() ++ ++func readKubernetesServiceAccountProjectedToken(path string) ([]byte, error) { ++ jwt, err := os.ReadFile(path) ++ if err != nil { ++ return []byte{}, err ++ } ++ if err = validateK8sSAToken(jwt); err != nil { ++ return []byte{}, err ++ } ++ return jwt, nil ++} ++ ++func validateK8sSAToken(saToken []byte) error { ++ claims := jwt.MapClaims{} ++ _, _, err := parser.ParseUnverified(string(saToken), &claims) ++ if err != nil { ++ return fmt.Errorf("error validating token: %w", err) ++ } ++ sub, err := claims.GetSubject() ++ if err != nil { ++ return fmt.Errorf("error getting token sub: %w", err) ++ } ++ if !strings.HasPrefix(sub, "system:serviceaccount:") { ++ return fmt.Errorf("error validating token: subject isn't a service account") ++ } ++ ++ return nil ++} +diff --git a/pkg/scaling/resolver/k8s_validator_test.go b/pkg/scaling/resolver/k8s_validator_test.go +new file mode 100644 +index 00000000..0e2b2cf8 +--- /dev/null ++++ b/pkg/scaling/resolver/k8s_validator_test.go +@@ -0,0 +1,176 @@ ++/* ++Copyright 2025 The KEDA Authors ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. 
++*/ ++ ++package resolver ++ ++import ( ++ "crypto/rand" ++ "crypto/rsa" ++ "crypto/x509" ++ "encoding/pem" ++ "os" ++ "testing" ++ "time" ++ ++ "github.com/golang-jwt/jwt/v5" ++) ++ ++func TestReadKubernetesServiceAccountProjectedToken(t *testing.T) { ++ tests := []struct { ++ name string ++ setupToken func() string ++ expectError bool ++ validate func([]byte) bool ++ }{ ++ { ++ name: "valid token", ++ setupToken: func() string { ++ privateKey, _, err := generateTestRSAKeyPair() ++ if err != nil { ++ t.Fatalf("failed to generate RSA keys: %v", err) ++ } ++ ++ // Create valid JWT token ++ claims := jwt.MapClaims{ ++ "iss": "kubernetes/serviceaccount", ++ "sub": "system:serviceaccount:default:default", ++ "exp": time.Now().Add(time.Hour).Unix(), ++ "iat": time.Now().Unix(), ++ } ++ tokenBytes, err := createJWTToken(privateKey, claims) ++ if err != nil { ++ t.Fatalf("failed to create JWT token: %v", err) ++ } ++ tokenPath := createTempFile(t, tokenBytes) ++ ++ return tokenPath ++ }, ++ expectError: false, ++ validate: func(token []byte) bool { ++ return len(token) > 0 ++ }, ++ }, ++ { ++ name: "token file does not exist", ++ setupToken: func() string { ++ return "/nonexistent/token/path" ++ }, ++ expectError: true, ++ }, ++ { ++ name: "arbitrary file content is not a valid token", ++ setupToken: func() string { ++ // Create an arbitrary file with random content that is not a JWT ++ arbitraryContent := []byte("This is just arbitrary file content, not a JWT token at all") ++ tokenPath := createTempFile(t, arbitraryContent) ++ ++ return tokenPath ++ }, ++ expectError: true, ++ }, ++ { ++ name: "not sa token", ++ setupToken: func() string { ++ privateKey, _, err := generateTestRSAKeyPair() ++ if err != nil { ++ t.Fatalf("failed to generate RSA keys: %v", err) ++ } ++ ++ // Create valid JWT token but not from k8s ++ claims := jwt.MapClaims{ ++ "iss": "random-issuer", ++ "sub": "1234-3212", ++ "exp": time.Now().Add(time.Hour).Unix(), ++ "iat": time.Now().Unix(), ++ } ++ tokenBytes, err := createJWTToken(privateKey, claims) ++ tokenPath := createTempFile(t, tokenBytes) ++ ++ return tokenPath ++ }, ++ expectError: true, ++ }, ++ } ++ ++ for _, tt := range tests { ++ t.Run(tt.name, func(t *testing.T) { ++ tokenPath := tt.setupToken() ++ defer os.Remove(tokenPath) ++ ++ result, err := readKubernetesServiceAccountProjectedToken(tokenPath) ++ ++ if (err != nil) != tt.expectError { ++ t.Errorf("readKubernetesServiceAccountProjectedToken() error = %v, expectError = %v", err, tt.expectError) ++ return ++ } ++ ++ if !tt.expectError && tt.validate != nil { ++ if !tt.validate(result) { ++ t.Errorf("readKubernetesServiceAccountProjectedToken() returned invalid result") ++ } ++ } ++ }) ++ } ++} ++ ++// Helper function to generate RSA key pair for testing ++func generateTestRSAKeyPair() (*rsa.PrivateKey, *rsa.PublicKey, error) { ++ privateKey, err := rsa.GenerateKey(rand.Reader, 2048) ++ if err != nil { ++ return nil, nil, err ++ } ++ return privateKey, &privateKey.PublicKey, nil ++} ++ ++// Helper function to create a valid JWT token for testing ++func createJWTToken(privateKey *rsa.PrivateKey, claims jwt.MapClaims) ([]byte, error) { ++ token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) ++ tokenString, err := token.SignedString(privateKey) ++ if err != nil { ++ return nil, err ++ } ++ return []byte(tokenString), nil ++} ++ ++// Helper function to write RSA public key to PEM format ++func writePublicKeyPEM(publicKey *rsa.PublicKey) ([]byte, error) { ++ publicKeyBytes, err := 
x509.MarshalPKIXPublicKey(publicKey) ++ if err != nil { ++ return nil, err ++ } ++ ++ publicKeyPEM := pem.EncodeToMemory(&pem.Block{ ++ Type: "PUBLIC KEY", ++ Bytes: publicKeyBytes, ++ }) ++ ++ return publicKeyPEM, nil ++} ++ ++// Helper function to create temporary files for testing ++func createTempFile(t *testing.T, content []byte) string { ++ tmpFile, err := os.CreateTemp("", "k8s_test_*") ++ if err != nil { ++ t.Fatalf("failed to create temp file: %v", err) ++ } ++ defer tmpFile.Close() ++ ++ if _, err := tmpFile.Write(content); err != nil { ++ t.Fatalf("failed to write to temp file: %v", err) ++ } ++ ++ return tmpFile.Name() ++} +-- +2.45.4 + + +From e3d2ff04e2adb26420a9676a315b059d3a337e97 Mon Sep 17 00:00:00 2001 +From: Jorge Turrado +Date: Mon, 22 Dec 2025 11:28:27 +0100 +Subject: [PATCH 2/3] update changelog + +Signed-off-by: Jorge Turrado +-- +2.45.4 + + +From 9ca1c4e17f9260531f9ae622dc18a7d5eea1c8fd Mon Sep 17 00:00:00 2001 +From: Jorge Turrado +Date: Mon, 22 Dec 2025 11:54:29 +0100 +Subject: [PATCH 3/3] Update releases + +Signed-off-by: Jorge Turrado +-- +2.45.4 + diff --git a/SPECS/keda/keda.spec b/SPECS/keda/keda.spec index db522517213..b4bc280b5df 100644 --- a/SPECS/keda/keda.spec +++ b/SPECS/keda/keda.spec @@ -1,7 +1,7 @@ Summary: Kubernetes-based Event Driven Autoscaling Name: keda Version: 2.14.1 -Release: 7%{?dist} +Release: 9%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -32,6 +32,8 @@ Patch6: CVE-2025-29923.patch Patch7: CVE-2025-22870.patch Patch8: CVE-2024-51744.patch Patch9: CVE-2025-22872.patch +Patch10: CVE-2025-68156.patch +Patch11: CVE-2025-68476.patch BuildRequires: golang >= 1.15 %description @@ -67,6 +69,12 @@ cp ./bin/keda-admission-webhooks %{buildroot}%{_bindir} %{_bindir}/%{name}-admission-webhooks %changelog +* Fri Jan 02 2026 Azure Linux Security Servicing Account - 2.14.1-9 +- Patch for CVE-2025-68476 + +* Fri Dec 19 2025 Azure Linux Security Servicing Account - 2.14.1-8 +- Patch for CVE-2025-68156 + * Fri Apr 25 2025 Kanishk Bansal - 2.14.1-7 - Patch CVE-2025-22872 diff --git a/SPECS/keepalived/keepalived.spec b/SPECS/keepalived/keepalived.spec index 752004a7b5d..ef418b5ea7c 100644 --- a/SPECS/keepalived/keepalived.spec +++ b/SPECS/keepalived/keepalived.spec @@ -1,7 +1,7 @@ Summary: HA monitor built upon LVS, VRRP and services poller Name: keepalived Version: 2.3.1 -Release: 1%{?dist} +Release: 2%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -110,6 +110,9 @@ fi %{_mandir}/man8/keepalived.8* %changelog +* Tue Jan 06 2026 Pawel Winogrodzki - 2.3.1-2 +- Bumping release to rebuild with new 'net-snmp' libs. + * Thu Sep 19 2024 Suresh Thelkar - 2.3.1-1 - Add patch for CVE-2024-41184.patch. - Use autosetup. 
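The keda CVE-2025-68156 patch above guards the expr-lang builtins (flatten, min, max, mean, median) with a builtin.MaxDepth counter so that deeply nested or cyclic input returns a "recursion depth exceeded" error instead of overflowing the stack. The sketch below shows the same depth-limited recursion pattern in isolation; it is not the vendored expr-lang code, and the names maxDepth, errMaxDepth, and sumNested are illustrative only.

```go
package main

import (
	"errors"
	"fmt"
	"reflect"
)

const maxDepth = 10000 // mirrors the builtin.MaxDepth default in the patch above

var errMaxDepth = errors.New("recursion depth exceeded")

// sumNested adds up the numeric leaves of an arbitrarily nested []any,
// refusing to recurse past maxDepth instead of risking a stack overflow.
func sumNested(v any, depth int) (float64, error) {
	if depth > maxDepth {
		return 0, errMaxDepth
	}
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Slice, reflect.Array:
		var total float64
		for i := 0; i < rv.Len(); i++ {
			s, err := sumNested(rv.Index(i).Interface(), depth+1)
			if err != nil {
				return 0, err
			}
			total += s
		}
		return total, nil
	case reflect.Int, reflect.Int32, reflect.Int64:
		return float64(rv.Int()), nil
	case reflect.Float32, reflect.Float64:
		return rv.Float(), nil
	default:
		return 0, fmt.Errorf("unsupported element kind %s", rv.Kind())
	}
}

func main() {
	// Normal nested input is handled as before.
	total, err := sumNested([]any{1, []any{2, []any{3.5}}}, 0)
	fmt.Println(total, err) // 6.5 <nil>

	// Pathologically deep input now fails cleanly instead of crashing.
	deep := any(1)
	for i := 0; i < maxDepth+10; i++ {
		deep = []any{deep}
	}
	_, err = sumNested(deep, 0)
	fmt.Println(err) // recursion depth exceeded
}
```

Threading the depth through an explicit parameter, as the upstream fix does, keeps the helpers re-entrant and makes the limit straightforward to unit-test.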
diff --git a/SPECS/keras/CVE-2026-0897.patch b/SPECS/keras/CVE-2026-0897.patch new file mode 100644 index 00000000000..bd6084f5587 --- /dev/null +++ b/SPECS/keras/CVE-2026-0897.patch @@ -0,0 +1,94 @@ +From 557b7c38f2797aca0059deb3fafbfea550a093d2 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Fri, 16 Jan 2026 17:42:25 +0000 +Subject: [PATCH] Fix DoS via malicious HDF5 dataset metadata in + H5IOStore.__getitem__; add MAX_BYTES limit; harden dataset shape/dtype + validation + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/keras-team/keras/commit/7360d4f0d764fbb1fa9c6408fe53da41974dd4f6.patch +--- + keras/src/saving/saving_lib.py | 59 ++++++++++++++++++++++++++++++++-- + 1 file changed, 57 insertions(+), 2 deletions(-) + +diff --git a/keras/src/saving/saving_lib.py b/keras/src/saving/saving_lib.py +index 1668489..0bcce01 100644 +--- a/keras/src/saving/saving_lib.py ++++ b/keras/src/saving/saving_lib.py +@@ -24,6 +24,10 @@ try: + except ImportError: + h5py = None + ++ ++# Maximum allowed HDF5 dataset size in bytes (4 GiB) ++MAX_BYTES = 1 << 32 # 4 GiB ++ + _CONFIG_FILENAME = "config.json" + _METADATA_FILENAME = "metadata.json" + _VARS_FNAME = "model.weights" # Will become e.g. "model.weights.h5" +@@ -696,9 +700,60 @@ class H5Entry: + + def __getitem__(self, name): + value = self.group[name] ++ ++ # ------------------------------------------------------ ++ # CASE 2 — HDF5 DATASET → SAFE LOADING ++ # ------------------------------------------------------ ++ ++ # Skip any objects that are not proper datasets ++ if not hasattr(value, "shape") or not hasattr(value, "dtype"): ++ # Fallback: attempt read if possible, else return as-is ++ try: ++ return value[()] ++ except Exception: ++ return value ++ ++ shape = value.shape ++ dtype = value.dtype ++ ++ # ------------------------------------------------------ ++ # Validate SHAPE (avoid malformed / malicious metadata) ++ # ------------------------------------------------------ ++ ++ # No negative dimensions ++ if any(dim < 0 for dim in shape): ++ raise ValueError( ++ "Malformed HDF5 dataset shape encountered in .keras file; " ++ "negative dimension detected." ++ ) ++ ++ # Prevent absurdly high-rank tensors ++ if len(shape) > 64: ++ raise ValueError( ++ "Malformed HDF5 dataset shape encountered in .keras file; " ++ "tensor rank exceeds safety limit." ++ ) ++ ++ # Safe product computation (Python int is unbounded) ++ num_elems = int(np.prod(shape)) ++ ++ # ------------------------------------------------------ ++ # Validate TOTAL memory size ++ # ------------------------------------------------------ ++ size_bytes = num_elems * dtype.itemsize ++ if size_bytes > MAX_BYTES: ++ raise ValueError( ++ f"HDF5 dataset too large to load safely " ++ f"({size_bytes} bytes; limit is {MAX_BYTES})." ++ ) ++ ++ # ------------------------------------------------------ ++ # SAFE — load dataset (guaranteed ≤ 4 GiB) ++ # ------------------------------------------------------ ++ arr = value[()] + if "dtype" in value.attrs and value.attrs["dtype"] == "bfloat16": +- value = np.array(value, dtype=ml_dtypes.bfloat16) +- return value ++ arr = np.array(arr, dtype=ml_dtypes.bfloat16) ++ return arr + + + class NpzIOStore: +-- +2.45.4 + diff --git a/SPECS/keras/keras.spec b/SPECS/keras/keras.spec index 24ede2bafb0..77011e1ee11 100644 --- a/SPECS/keras/keras.spec +++ b/SPECS/keras/keras.spec @@ -3,7 +3,7 @@ Summary: Keras is a high-level neural networks API. 
Name: keras Version: 3.3.3 -Release: 5%{?dist} +Release: 6%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -16,6 +16,7 @@ Patch01: CVE-2025-1550.patch Patch02: CVE-2025-8747.patch Patch03: CVE-2025-9905.patch Patch4: CVE-2025-12060.patch +Patch5: CVE-2026-0897.patch # Fix for CVE-2025-9906 included as part of CVE-2025-8747 and kept here as nopatch # and commented out, because from patch command perspective, these files @@ -80,6 +81,9 @@ python3 pip_build.py --install %changelog +* Fri Jan 16 2026 Azure Linux Security Servicing Account - 3.3.3-6 +- Patch for CVE-2026-0897 + * Fri Oct 31 2025 Azure Linux Security Servicing Account - 3.3.3-5 - Patch for CVE-2025-12060 diff --git a/SPECS/kernel-64k/config_aarch64 b/SPECS/kernel-64k/config_aarch64 index f059d47405e..9ae72e89a52 100644 --- a/SPECS/kernel-64k/config_aarch64 +++ b/SPECS/kernel-64k/config_aarch64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 6.6.117.1 Kernel Configuration +# Linux/arm64 6.6.121.1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.0" CONFIG_CC_IS_GCC=y @@ -1232,7 +1232,7 @@ CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m # CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y @@ -4728,6 +4728,7 @@ CONFIG_SPI_SUN6I=m CONFIG_SPI_SYNQUACER=m CONFIG_SPI_MXIC=m # CONFIG_SPI_TEGRA210_QUAD is not set +# CONFIG_SPI_TEGRA114 is not set # CONFIG_SPI_TEGRA20_SFLASH is not set CONFIG_SPI_THUNDERX=m CONFIG_SPI_XCOMM=m @@ -8544,7 +8545,6 @@ CONFIG_STAGING=y # CONFIG_RTL8192U is not set # CONFIG_RTLLIB is not set # CONFIG_RTL8723BS is not set -# CONFIG_R8712U is not set # CONFIG_RTS5208 is not set # CONFIG_VT6655 is not set # CONFIG_VT6656 is not set diff --git a/SPECS/kernel-64k/kernel-64k.signatures.json b/SPECS/kernel-64k/kernel-64k.signatures.json index e32e6266779..8f799980880 100644 --- a/SPECS/kernel-64k/kernel-64k.signatures.json +++ b/SPECS/kernel-64k/kernel-64k.signatures.json @@ -1,10 +1,10 @@ { "Signatures": { "azurelinux-ca-20230216.pem": "d545401163c75878319f01470455e6bc18a5968e39dd964323225e3fe308849b", - "config_aarch64": "8bd37170ab5799efa800cf3207492f1d413b1fa148f5a6e8bd1baa8a979d7fc5", + "config_aarch64": "d91ca78ac79434988a8317c42549fd453a25f883c0cd4dba4269c5b0a9203ab9", "cpupower": "d7518767bf2b1110d146a49c7d42e76b803f45eb8bd14d931aa6d0d346fae985", "cpupower.service": "b057fe9e5d0e8c36f485818286b80e3eba8ff66ff44797940e99b1fd5361bb98", "sha512hmac-openssl.sh": "02ab91329c4be09ee66d759e4d23ac875037c3b56e5a598e32fd1206da06a27f", - "kernel-6.6.117.1.tar.gz": "bfbbeba626396e2bab9bd520a46943e68d228a91e8f11cd662bf4fb3996443d3" + "kernel-6.6.121.1.tar.gz": "aa5721db931ce7b5a7a2c9a554c78e399dbe76e823356d36f860308cfa9c5e12" } } diff --git a/SPECS/kernel-64k/kernel-64k.spec b/SPECS/kernel-64k/kernel-64k.spec index 0fab00ce160..ba2436f0e93 100644 --- a/SPECS/kernel-64k/kernel-64k.spec +++ b/SPECS/kernel-64k/kernel-64k.spec @@ -26,7 +26,7 @@ Summary: Linux Kernel Name: kernel-64k -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -380,6 +380,21 @@ echo "initrd of kernel %{uname_r} removed" >&2 %{_sysconfdir}/bash_completion.d/bpftool %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Bump release to match kernel + +* Fri Jan 16 2026 Rachel Menge - 
6.6.119.3-3 +- Bump release to match kernel,kernel-ipe + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Enable CONFIG_INET_DIAG_DESTROY + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS/kernel-headers/kernel-headers.signatures.json b/SPECS/kernel-headers/kernel-headers.signatures.json index 69b3cd22497..45e49557c7e 100644 --- a/SPECS/kernel-headers/kernel-headers.signatures.json +++ b/SPECS/kernel-headers/kernel-headers.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "kernel-6.6.117.1.tar.gz": "bfbbeba626396e2bab9bd520a46943e68d228a91e8f11cd662bf4fb3996443d3" + "kernel-6.6.121.1.tar.gz": "aa5721db931ce7b5a7a2c9a554c78e399dbe76e823356d36f860308cfa9c5e12" } } diff --git a/SPECS/kernel-headers/kernel-headers.spec b/SPECS/kernel-headers/kernel-headers.spec index 6dccedd0b47..8a71ed1a628 100644 --- a/SPECS/kernel-headers/kernel-headers.spec +++ b/SPECS/kernel-headers/kernel-headers.spec @@ -13,7 +13,7 @@ Summary: Linux API header files Name: kernel-headers -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -75,6 +75,21 @@ done %endif %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Bump release to match kernel + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Bump release to match kernel,kernel-ipe + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Bump release to match kernel,kernel-ipe,kernel-64k + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS/kernel-hwe-headers/kernel-hwe-headers.spec b/SPECS/kernel-hwe-headers/kernel-hwe-headers.spec index 4a259fed5dc..d56a3de2e5b 100644 --- a/SPECS/kernel-hwe-headers/kernel-hwe-headers.spec +++ b/SPECS/kernel-hwe-headers/kernel-hwe-headers.spec @@ -4,7 +4,7 @@ Summary: Linux API header files Name: kernel-hwe-headers Version: 6.12.57.1 -Release: 1%{?dist} +Release: 2%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -35,6 +35,9 @@ cp -rv usr/include/* /%{buildroot}%{_includedir} %{_includedir}/* %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 6.12.57.1-2 +- Bump to match kernel-hwe. 
+ * Wed Nov 05 2025 Siddharth Chintamaneni - 6.12.57.1-1 - Bump to match kernel-hwe diff --git a/SPECS/kernel-hwe/config_aarch64 b/SPECS/kernel-hwe/config_aarch64 index a2b3c266773..19bd9b913ce 100644 --- a/SPECS/kernel-hwe/config_aarch64 +++ b/SPECS/kernel-hwe/config_aarch64 @@ -1196,12 +1196,15 @@ CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_MAPPING_DIRTY_HELPERS=y CONFIG_MEMFD_CREATE=y CONFIG_SECRETMEM=y -# CONFIG_ANON_VMA_NAME is not set +CONFIG_ANON_VMA_NAME=y CONFIG_HAVE_ARCH_USERFAULTFD_WP=y CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y CONFIG_USERFAULTFD=y CONFIG_PTE_MARKER_UFFD_WP=y -# CONFIG_LRU_GEN is not set +CONFIG_LRU_GEN=y +CONFIG_LRU_GEN_ENABLED=y +# CONFIG_LRU_GEN_STATS is not set +CONFIG_LRU_GEN_WALKS_MMU=y CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y CONFIG_PER_VMA_LOCK=y CONFIG_LOCK_MM_AND_FIND_VMA=y @@ -9321,8 +9324,8 @@ CONFIG_IOMMU_IO_PGTABLE_DART=y # end of Generic IOMMU Pagetable Support # CONFIG_IOMMU_DEBUGFS is not set -CONFIG_IOMMU_DEFAULT_DMA_STRICT=y -# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set +CONFIG_IOMMU_DEFAULT_DMA_LAZY=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y CONFIG_IOMMU_DMA=y @@ -9594,11 +9597,11 @@ CONFIG_PM_DEVFREQ=y # # DEVFREQ Governors # -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m -# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set -# CONFIG_DEVFREQ_GOV_POWERSAVE is not set -# CONFIG_DEVFREQ_GOV_USERSPACE is not set -# CONFIG_DEVFREQ_GOV_PASSIVE is not set +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_DEVFREQ_GOV_PASSIVE=y # # DEVFREQ Drivers diff --git a/SPECS/kernel-hwe/kernel-hwe.signatures.json b/SPECS/kernel-hwe/kernel-hwe.signatures.json index 2fb08922d5b..4f66174d44d 100644 --- a/SPECS/kernel-hwe/kernel-hwe.signatures.json +++ b/SPECS/kernel-hwe/kernel-hwe.signatures.json @@ -2,7 +2,7 @@ "Signatures": { "azurelinux-ca-20230216.pem": "d545401163c75878319f01470455e6bc18a5968e39dd964323225e3fe308849b", "config":"01a3849a80da55f79d3e17f5e9fab0b28313947687c6555408d13da2c0cacb69", - "config_aarch64": "4281645e8b008e9756e04e97961573f7f54430dbfb1bb9b495e33657c0819cee", + "config_aarch64": "2e32dbcbb22e1ab7ff22cef75459cd14468d99f8568d44044a1df0340fe42352", "cpupower": "d7518767bf2b1110d146a49c7d42e76b803f45eb8bd14d931aa6d0d346fae985", "cpupower.service": "b057fe9e5d0e8c36f485818286b80e3eba8ff66ff44797940e99b1fd5361bb98", "kernel-hwe-6.12.57.1.tar.gz": "859cc1f4becef5aae11fd0fa0d81a4431c1a80ce77a6c617a42caa3c02e2f780" diff --git a/SPECS/kernel-hwe/kernel-hwe.spec b/SPECS/kernel-hwe/kernel-hwe.spec index 76f2fa62ae5..e77cd94a897 100644 --- a/SPECS/kernel-hwe/kernel-hwe.spec +++ b/SPECS/kernel-hwe/kernel-hwe.spec @@ -30,7 +30,7 @@ Summary: Linux Kernel Name: kernel-hwe Version: 6.12.57.1 -Release: 1%{?dist} +Release: 2%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -423,6 +423,9 @@ echo "initrd of kernel %{uname_r} removed" >&2 %{_sysconfdir}/bash_completion.d/bpftool %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 6.12.57.1-2 +- Enable aarch64 kernel configs for performance improvements. 
+ * Wed Nov 05 2025 Siddharth Chintamaneni - 6.12.57.1-1 - Kernel upgrade diff --git a/SPECS/kernel-mshv/config b/SPECS/kernel-mshv/config index 8e61f3743df..8b0c5b0d8a5 100644 --- a/SPECS/kernel-mshv/config +++ b/SPECS/kernel-mshv/config @@ -7379,6 +7379,7 @@ CONFIG_RING_BUFFER=y CONFIG_EVENT_TRACING=y CONFIG_CONTEXT_SWITCH_TRACER=y CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_BOOTTIME_TRACING is not set @@ -7390,8 +7391,7 @@ CONFIG_FTRACE=y # CONFIG_OSNOISE_TRACER is not set # CONFIG_TIMERLAT_TRACER is not set # CONFIG_MMIOTRACE is not set -# CONFIG_ENABLE_DEFAULT_TRACERS is not set -# CONFIG_FTRACE_SYSCALLS is not set +CONFIG_FTRACE_SYSCALLS=y # CONFIG_TRACER_SNAPSHOT is not set CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set @@ -7410,6 +7410,7 @@ CONFIG_PROBE_EVENTS=y # CONFIG_TRACEPOINT_BENCHMARK is not set # CONFIG_RING_BUFFER_BENCHMARK is not set # CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set # CONFIG_RING_BUFFER_STARTUP_TEST is not set # CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set # CONFIG_PREEMPTIRQ_DELAY_TEST is not set diff --git a/SPECS/kernel-mshv/kernel-mshv.signatures.json b/SPECS/kernel-mshv/kernel-mshv.signatures.json index b13fe569715..66cc111c09a 100644 --- a/SPECS/kernel-mshv/kernel-mshv.signatures.json +++ b/SPECS/kernel-mshv/kernel-mshv.signatures.json @@ -3,7 +3,7 @@ "50_mariner_mshv.cfg": "12bf23f8857f893549933a20062ad2a69c3c654b87893e8dd3ce42f9329801c7", "50_mariner_mshv_menuentry": "5a46bb273be0e829fec4e1f498b1a11b59ded6e14d42a19be7da860c3f4c35be", "cbl-mariner-ca-20211013.pem": "5ef124b0924cb1047c111a0ecff1ae11e6ad7cac8d1d9b40f98f99334121f0b0", - "config": "544925b5fdc925b8abcc44f497ac65c89381dfac8ee0a1ef6a178beab94cf879", + "config": "8ce3731fe06abb3fcf51942bb680ee425fac59de76818ac5491d53553550e2f5", "kernel-mshv-6.6.100.mshv1.tar.gz": "4ea7cd612712401f2537fdeb12ebb28373532c11f649ebcd1b7c5c39af839893", "config_aarch64": "a1ce617173428aa4c5b89ccfbe35705ed6dc53521b26590328a7a4f81aefb404" } diff --git a/SPECS/kernel-mshv/kernel-mshv.spec b/SPECS/kernel-mshv/kernel-mshv.spec index 480c9b9d23b..39617adf3cf 100644 --- a/SPECS/kernel-mshv/kernel-mshv.spec +++ b/SPECS/kernel-mshv/kernel-mshv.spec @@ -18,7 +18,7 @@ Summary: Mariner kernel that has MSHV Host support Name: kernel-mshv Version: 6.6.100.mshv1 -Release: 2%{?dist} +Release: 3%{?dist} License: GPLv2 Group: Development/Tools Vendor: Microsoft Corporation @@ -267,6 +267,9 @@ echo "initrd of kernel %{uname_r} removed" >&2 %{_includedir}/perf/perf_dlfilter.h %changelog +* Mon Jan 06 2026 Roaa Sakr - 6.6.100.mshv1-3 +- Enable ftrace syscalls tracing support in kernel config + * Wed Oct 08 2025 Saul Paredes - 6.6.100.mshv1-2 - Enable build on aarch64 diff --git a/SPECS/kernel-uvm/config b/SPECS/kernel-uvm/config index 2a2f7085665..f235bcf7ebb 100644 --- a/SPECS/kernel-uvm/config +++ b/SPECS/kernel-uvm/config @@ -975,6 +975,7 @@ CONFIG_XFRM_SUB_POLICY=y # CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set # CONFIG_XDP_SOCKETS is not set +CONFIG_NET_HANDSHAKE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -2776,16 +2777,44 @@ CONFIG_EROFS_FS_ZIP=y # CONFIG_EROFS_FS_ZIP_DEFLATE is not set # CONFIG_EROFS_FS_PCPU_KTHREAD is not set CONFIG_NETWORK_FILESYSTEMS=y -# CONFIG_NFS_FS is not set +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y 
+CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=y +CONFIG_PNFS_FLEXFILE_LAYOUT=y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_ROOT_NFS is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set # CONFIG_NFSD is not set +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_BACKCHANNEL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SUNRPC_DEBUG is not set # CONFIG_CEPH_FS is not set CONFIG_CIFS=y CONFIG_CIFS_STATS2=y # CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set -# CONFIG_CIFS_UPCALL is not set -# CONFIG_CIFS_XATTR is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y # CONFIG_CIFS_DEBUG is not set -# CONFIG_CIFS_DFS_UPCALL is not set +CONFIG_CIFS_DFS_UPCALL=y # CONFIG_CIFS_SWN_UPCALL is not set # CONFIG_CIFS_ROOT is not set # CONFIG_SMB_SERVER is not set diff --git a/SPECS/kernel-uvm/kernel-uvm.signatures.json b/SPECS/kernel-uvm/kernel-uvm.signatures.json index 2319ac5acdf..b3ee4b2217e 100644 --- a/SPECS/kernel-uvm/kernel-uvm.signatures.json +++ b/SPECS/kernel-uvm/kernel-uvm.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { - "config": "11d7dcd506392b4933011b7144fe1a27c9b1b1d8610fc808d1e4606c1c19bf31", + "config": "d91f986d31573e0c3e46d9904f3c3ae6a3afc270311e2eb2e86b0fe01d5014a1", "kernel-uvm-6.6.96.mshv1.tar.gz": "cb98adbb01b3c0aba96e9e80b1d8a8531028b1b7e33e583c931cb83f1e7df713", "config_aarch64": "38352aeef618347c3b5b14316126fdcb12107fc1431ef9c9b1b779a74eecee2b" } diff --git a/SPECS/kernel-uvm/kernel-uvm.spec b/SPECS/kernel-uvm/kernel-uvm.spec index 26e1229eba9..c6e547ed306 100644 --- a/SPECS/kernel-uvm/kernel-uvm.spec +++ b/SPECS/kernel-uvm/kernel-uvm.spec @@ -17,7 +17,7 @@ Summary: Linux Kernel for Kata UVM Name: kernel-uvm Version: 6.6.96.mshv1 -Release: 2%{?dist} +Release: 3%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -165,6 +165,9 @@ find %{buildroot}/lib/modules -name '*.ko' -exec chmod u+x {} + %{_prefix}/src/linux-headers-%{uname_r} %changelog +* Tue Jan 13 2026 Cameron Baird - 6.6.96.mshv1-3 +- Enable xattr and other feature modules for CIFS and NFS + * Wed Oct 08 2025 Saul Paredes - 6.6.96.mshv1-2 - Enable build on aarch64 diff --git a/SPECS/kernel/config b/SPECS/kernel/config index ac2579594bf..036939da340 100644 --- a/SPECS/kernel/config +++ b/SPECS/kernel/config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/x86_64 6.6.117.1 Kernel Configuration +# Linux/x86_64 6.6.121.1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.0" CONFIG_CC_IS_GCC=y @@ -1170,7 +1170,7 @@ CONFIG_XFRM_OFFLOAD=y CONFIG_XFRM_ALGO=m CONFIG_XFRM_USER=m # CONFIG_XFRM_USER_COMPAT is not set -# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_INTERFACE=m CONFIG_XFRM_SUB_POLICY=y CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y @@ -1220,7 +1220,7 @@ CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m # CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y @@ -2062,7 +2062,8 @@ CONFIG_DMIID=y CONFIG_DMI_SYSFS=m CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y # CONFIG_ISCSI_IBFT is not set -# CONFIG_FW_CFG_SYSFS is not set +CONFIG_FW_CFG_SYSFS=m +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set CONFIG_SYSFB=y # CONFIG_SYSFB_SIMPLEFB is not set # CONFIG_GOOGLE_FIRMWARE is not set @@ -7169,7 +7170,7 @@ CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set +CONFIG_SQUASHFS_ZSTD=y # CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set # CONFIG_SQUASHFS_EMBEDDED is not set CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 diff --git a/SPECS/kernel/config_aarch64 b/SPECS/kernel/config_aarch64 index 57521d161aa..600b9693681 100644 --- a/SPECS/kernel/config_aarch64 +++ b/SPECS/kernel/config_aarch64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 6.6.117.1 Kernel Configuration +# Linux/arm64 6.6.121.1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.0" CONFIG_CC_IS_GCC=y @@ -1231,7 +1231,7 @@ CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m # CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y @@ -4715,6 +4715,7 @@ CONFIG_SPI_SUN6I=m CONFIG_SPI_SYNQUACER=m CONFIG_SPI_MXIC=m # CONFIG_SPI_TEGRA210_QUAD is not set +# CONFIG_SPI_TEGRA114 is not set # CONFIG_SPI_TEGRA20_SFLASH is not set CONFIG_SPI_THUNDERX=m CONFIG_SPI_XCOMM=m @@ -8523,7 +8524,6 @@ CONFIG_STAGING=y # CONFIG_RTL8192U is not set # CONFIG_RTLLIB is not set # CONFIG_RTL8723BS is not set -# CONFIG_R8712U is not set # CONFIG_RTS5208 is not set # CONFIG_VT6655 is not set # CONFIG_VT6656 is not set diff --git a/SPECS/kernel/kernel-uki.spec b/SPECS/kernel/kernel-uki.spec index 533d14f4780..162dd22008e 100644 --- a/SPECS/kernel/kernel-uki.spec +++ b/SPECS/kernel/kernel-uki.spec @@ -12,7 +12,7 @@ Summary: Unified Kernel Image Name: kernel-uki -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -70,6 +70,21 @@ cp %{buildroot}/boot/vmlinuz-uki-%{kernelver}.efi %{buildroot}/boot/efi/EFI/Linu /boot/efi/EFI/Linux/vmlinuz-uki-%{kernelver}.efi %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Bump release to match kernel + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Bump release to match kernel,kernel-ipe + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Bump release to match kernel,kernel-ipe,kernel-64k + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS/kernel/kernel.signatures.json b/SPECS/kernel/kernel.signatures.json index 
3ba93ebd51e..764dc14b99f 100644 --- a/SPECS/kernel/kernel.signatures.json +++ b/SPECS/kernel/kernel.signatures.json @@ -1,11 +1,11 @@ { "Signatures": { "azurelinux-ca-20230216.pem": "d545401163c75878319f01470455e6bc18a5968e39dd964323225e3fe308849b", - "config": "f9b4a11f5f16da83111766e1af913b77103a6dc872b848bb25d41a5be68cb032", - "config_aarch64": "d4207f14d92b0b873856ed4d26ee7c340ed8fe77e0beea9e5047d632c48189b3", + "config": "45568c4b391b581400145626bd7ca1712028bfcef6b1f3ab4691c27786a91c3a", + "config_aarch64": "77ba2d0761f07f9d1182fd3ab469106e99c38e839665e5af699379a1a204a844", "cpupower": "d7518767bf2b1110d146a49c7d42e76b803f45eb8bd14d931aa6d0d346fae985", "cpupower.service": "b057fe9e5d0e8c36f485818286b80e3eba8ff66ff44797940e99b1fd5361bb98", "sha512hmac-openssl.sh": "02ab91329c4be09ee66d759e4d23ac875037c3b56e5a598e32fd1206da06a27f", - "kernel-6.6.117.1.tar.gz": "bfbbeba626396e2bab9bd520a46943e68d228a91e8f11cd662bf4fb3996443d3" + "kernel-6.6.121.1.tar.gz": "aa5721db931ce7b5a7a2c9a554c78e399dbe76e823356d36f860308cfa9c5e12" } } diff --git a/SPECS/kernel/kernel.spec b/SPECS/kernel/kernel.spec index 81e138427c3..280c9285dad 100644 --- a/SPECS/kernel/kernel.spec +++ b/SPECS/kernel/kernel.spec @@ -31,7 +31,7 @@ Summary: Linux Kernel Name: kernel -Version: 6.6.117.1 +Version: 6.6.121.1 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation @@ -440,6 +440,21 @@ echo "initrd of kernel %{uname_r} removed" >&2 %{_sysconfdir}/bash_completion.d/bpftool %changelog +* Mon Feb 02 2026 CBL-Mariner Servicing Account - 6.6.121.1-1 +- Auto-upgrade to 6.6.121.1 + +* Tue Jan 28 2026 Sean Dougherty - 6.6.119.3-4 +- Enable CONFIG_FW_CFG_SYSFS and CONFIG_SQUASHFS_ZSTD + +* Fri Jan 16 2026 Rachel Menge - 6.6.119.3-3 +- Enable XFRM_INTERFACE + +* Thu Jan 08 2026 Rachel Menge - 6.6.119.3-2 +- Enable CONFIG_INET_DIAG_DESTROY + +* Tue Jan 06 2026 CBL-Mariner Servicing Account - 6.6.119.3-1 +- Auto-upgrade to 6.6.119.3 + * Wed Nov 26 2025 CBL-Mariner Servicing Account - 6.6.117.1-1 - Auto-upgrade to 6.6.117.1 diff --git a/SPECS/kexec-tools/kexec-tools.spec b/SPECS/kexec-tools/kexec-tools.spec index 8557c16b316..0594f600895 100644 --- a/SPECS/kexec-tools/kexec-tools.spec +++ b/SPECS/kexec-tools/kexec-tools.spec @@ -6,7 +6,7 @@ Summary: The kexec/kdump userspace component Name: kexec-tools Version: 2.0.27 -Release: 7%{?dist} +Release: 8%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -59,7 +59,8 @@ Requires(preun): systemd Requires(postun): systemd Requires(pre): coreutils sed zlib Requires: dracut -Requires: ethtool +Requires: (ethtool or mlnx-ethtool) +Recommends: ethtool Requires: awk Requires: squashfs-tools %{?grub2_configuration_requires} @@ -251,7 +252,6 @@ then mv /etc/sysconfig/kdump.new /etc/sysconfig/kdump fi - %postun %systemd_postun_with_restart kdump.service %grub2_postun @@ -330,6 +330,13 @@ done /usr/share/makedumpfile/ %changelog +* Tue Nov 11 2025 Mayank Singh - 2.0.27-8 +- Updated dependency handling for kexec-tools: + Changed from hard dependency on a single package. + Allows installation to satisfy dependency with either `ethtool` or `mlnx-ethtool`. + Ensures flexibility for image builds and user choice at install time. + Added mutual exclusivity between providers to prevent file conflicts. 
+ * Tue Jul 09 2024 Chris Co - 2.0.27-7 - Remove requires on dhcp-client diff --git a/SPECS/knem-hwe/knem-hwe.spec b/SPECS/knem-hwe/knem-hwe.spec index f00f304ae74..3151b887434 100644 --- a/SPECS/knem-hwe/knem-hwe.spec +++ b/SPECS/knem-hwe/knem-hwe.spec @@ -29,7 +29,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -39,7 +39,7 @@ %global KVERSION %{target_kernel_version_full} %global K_SRC /lib/modules/%{target_kernel_version_full}/build -%{!?_release: %global _release OFED.23.10.0.2.1.1} +%{!?_release: %global _release OFED.25.07.0.9.7.1} # %{!?KVERSION: %global KVERSION %(uname -r)} %global kernel_version %{KVERSION} %global krelver %(echo -n %{KVERSION} | sed -e 's/-/_/g') @@ -55,14 +55,17 @@ Summary: KNEM: High-Performance Intra-Node MPI Communication Name: knem-hwe Version: 1.1.4.90mlnx3 -Release: 24%{release_suffix}%{?dist} +Release: 26%{release_suffix}%{?dist} Provides: knem-hwe-mlnx = %{version}-%{release} Obsoletes: knem-hwe-mlnx < %{version}-%{release} License: BSD and GPLv2 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/knem-1.1.4.90mlnx3.tar.gz#/knem-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/knem-%{version}.tar.gz BuildRoot: /var/tmp/%{name}-%{version}-build BuildRequires: gcc @@ -117,7 +120,6 @@ Group: System Environment/Libraries Requires: kernel-hwe = %{target_kernel_version_full} Requires: kmod Conflicts: knem-modules -ExclusiveArch: aarch64 %description -n %{non_kmp_pname} KNEM is a Linux kernel module enabling high-performance intra-node MPI communication for large messages. KNEM offers support for asynchronous and vectorial data transfers as well as loading memory copies on to Intel I/OAT hardware. @@ -239,6 +241,14 @@ fi %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 1.1.4.90mlnx3-26_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 1.1.4.90mlnx3-25_6.12.57.1.1 +- Build with OFED 25.07.0.9.7.1. +- Enable build on x86_64 kernel hwe. 
+- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 1.1.4.90mlnx3-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/knem/knem.spec b/SPECS/knem/knem.spec index 9478444c099..64b69713e83 100644 --- a/SPECS/knem/knem.spec +++ b/SPECS/knem/knem.spec @@ -38,7 +38,7 @@ %global KVERSION %{target_kernel_version_full} %global K_SRC /lib/modules/%{target_kernel_version_full}/build -%{!?_release: %global _release OFED.23.10.0.2.1.1} +%{!?_release: %global _release OFED.25.07.0.9.7.1} # %{!?KVERSION: %global KVERSION %(uname -r)} %global kernel_version %{KVERSION} %global krelver %(echo -n %{KVERSION} | sed -e 's/-/_/g') @@ -54,14 +54,17 @@ Summary: KNEM: High-Performance Intra-Node MPI Communication Name: knem Version: 1.1.4.90mlnx3 -Release: 21%{release_suffix}%{?dist} +Release: 22%{release_suffix}%{?dist} Provides: knem-mlnx = %{version}-%{release} Obsoletes: knem-mlnx < %{version}-%{release} License: BSD and GPLv2 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/knem-1.1.4.90mlnx3.tar.gz#/knem-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/knem-%{version}.tar.gz BuildRoot: /var/tmp/%{name}-%{version}-build BuildRequires: gcc @@ -301,6 +304,10 @@ fi %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 1.1.4.90mlnx3-22 +- Build with OFED 25.07.0.9.7.1. +- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 1.1.4.90mlnx3-21 - Adjusted package dependencies on user space components. diff --git a/SPECS/krb5/CVE-2025-24528.patch b/SPECS/krb5/CVE-2025-24528.patch new file mode 100644 index 00000000000..c2e9fdaaf5c --- /dev/null +++ b/SPECS/krb5/CVE-2025-24528.patch @@ -0,0 +1,65 @@ +From d53fe0c6e1c6ca432365f9f194428936532ea491 Mon Sep 17 00:00:00 2001 +From: Zoltan Borbely +Date: Tue, 28 Jan 2025 16:39:25 -0500 +Subject: [PATCH] Prevent overflow when calculating ulog block size + +In kdb_log.c:resize(), log an error and fail if the update size is +larger than the largest possible block size (2^16-1). + +CVE-2025-24528: + +In MIT krb5 release 1.7 and later with incremental propagation +enabled, an authenticated attacker can cause kadmind to write beyond +the end of the mapped region for the iprop log file, likely causing a +process crash. 
+ +[ghudson@mit.edu: edited commit message and added CVE description] + +ticket: 9159 (new) +tags: pullup +target_version: 1.21-next + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/krb5/krb5/commit/78ceba024b64d49612375be4a12d1c066b0bfbd0.patch +--- + src/lib/kdb/kdb_log.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/src/lib/kdb/kdb_log.c b/src/lib/kdb/kdb_log.c +index 2659a25..68fae91 100644 +--- a/src/lib/kdb/kdb_log.c ++++ b/src/lib/kdb/kdb_log.c +@@ -183,7 +183,7 @@ extend_file_to(int fd, unsigned int new_size) + */ + static krb5_error_code + resize(kdb_hlog_t *ulog, uint32_t ulogentries, int ulogfd, +- unsigned int recsize) ++ unsigned int recsize, const kdb_incr_update_t *upd) + { + unsigned int new_block, new_size; + +@@ -195,6 +195,12 @@ resize(kdb_hlog_t *ulog, uint32_t ulogentries, int ulogfd, + new_block *= ULOG_BLOCK; + new_size += ulogentries * new_block; + ++ if (new_block > UINT16_MAX) { ++ syslog(LOG_ERR, _("ulog overflow caused by principal %.*s"), ++ upd->kdb_princ_name.utf8str_t_len, ++ upd->kdb_princ_name.utf8str_t_val); ++ return KRB5_LOG_ERROR; ++ } + if (new_size > MAXLOGLEN) + return KRB5_LOG_ERROR; + +@@ -291,7 +297,7 @@ store_update(kdb_log_context *log_ctx, kdb_incr_update_t *upd) + recsize = sizeof(kdb_ent_header_t) + upd_size; + + if (recsize > ulog->kdb_block) { +- retval = resize(ulog, ulogentries, log_ctx->ulogfd, recsize); ++ retval = resize(ulog, ulogentries, log_ctx->ulogfd, recsize, upd); + if (retval) + return retval; + } +-- +2.45.4 + diff --git a/SPECS/krb5/krb5.spec b/SPECS/krb5/krb5.spec index 1ad51fc41e2..03f67d5b890 100644 --- a/SPECS/krb5/krb5.spec +++ b/SPECS/krb5/krb5.spec @@ -4,7 +4,7 @@ Summary: The Kerberos newtork authentication system Name: krb5 Version: 1.21.3 -Release: 2%{?dist} +Release: 3%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -13,6 +13,7 @@ URL: https://web.mit.edu/kerberos/ Source0: https://kerberos.org/dist/%{name}/%{maj_version}/%{name}-%{version}.tar.gz Source1: krb5.conf Patch0: CVE-2024-26461.patch +Patch1: CVE-2025-24528.patch BuildRequires: e2fsprogs-devel BuildRequires: openssl-devel Requires: e2fsprogs-libs @@ -126,6 +127,9 @@ make check %{_datarootdir}/locale/* %changelog +* Wed Jan 21 2026 Azure Linux Security Servicing Account - 1.21.3-3 +- Patch for CVE-2025-24528 + * Mon Sep 2 2024 Ankita Pareek - 1.21.3-2 - Add patch for CVE-2024-26458 and CVE-2024-26461 diff --git a/SPECS/kubernetes/CVE-2025-13281.patch b/SPECS/kubernetes/CVE-2025-13281.patch new file mode 100644 index 00000000000..13509061081 --- /dev/null +++ b/SPECS/kubernetes/CVE-2025-13281.patch @@ -0,0 +1,97 @@ +From bf4d44806d709585073e613415146cb288f08279 Mon Sep 17 00:00:00 2001 +From: Ankit Gohil +Date: Mon, 3 Nov 2025 22:38:58 +0000 +Subject: [PATCH] Clean up event messages for errors in Portworx in-tree driver + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/kubernetes/kubernetes/commit/7506ce804c20696ba32cdb72126270ceaed06e24.patch +--- + pkg/volume/portworx/portworx.go | 33 +++++++++++++++++++++++++-------- + 1 file changed, 25 insertions(+), 8 deletions(-) + +diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go +index 6b9243f5..4866739b 100644 +--- a/pkg/volume/portworx/portworx.go ++++ b/pkg/volume/portworx/portworx.go +@@ -311,8 +311,9 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterAr + notMnt, err := 
b.mounter.IsLikelyNotMountPoint(dir) + klog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err) + if err != nil && !os.IsNotExist(err) { +- klog.Errorf("Cannot validate mountpoint: %s", dir) +- return err ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Cannot validate mountpoint %s: %v", dir, err) ++ return fmt.Errorf("failed to validate mountpoint: see kube-controller-manager.log for details") + } + if !notMnt { + return nil +@@ -322,7 +323,9 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterAr + attachOptions[attachContextKey] = dir + attachOptions[attachHostKey] = b.plugin.host.GetHostName() + if _, err := b.manager.AttachVolume(b, attachOptions); err != nil { +- return err ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Failed to attach volume %s: %v", b.volumeID, err) ++ return fmt.Errorf("failed to attach volume: see kube-controller-manager.log for details") + } + + klog.V(4).Infof("Portworx Volume %s attached", b.volumeID) +@@ -332,7 +335,9 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterAr + } + + if err := b.manager.MountVolume(b, dir); err != nil { +- return err ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Failed to mount volume %s: %v", b.volumeID, err) ++ return fmt.Errorf("failed to mount volume: see kube-controller-manager.log for details") + } + if !b.readOnly { + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) +@@ -363,12 +368,16 @@ func (c *portworxVolumeUnmounter) TearDownAt(dir string) error { + klog.Infof("Portworx Volume TearDown of %s", dir) + + if err := c.manager.UnmountVolume(c, dir); err != nil { +- return err ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Failed to unmount volume %s: %v", c.volumeID, err) ++ return fmt.Errorf("failed to unmount volume: see kube-controller-manager.log for details") + } + + // Call Portworx Detach Volume. 
+ if err := c.manager.DetachVolume(c); err != nil { +- return err ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Failed to detach volume %s: %v", c.volumeID, err) ++ return fmt.Errorf("failed to detach volume: see kube-controller-manager.log for details") + } + + return nil +@@ -385,7 +394,13 @@ func (d *portworxVolumeDeleter) GetPath() string { + } + + func (d *portworxVolumeDeleter) Delete() error { +- return d.manager.DeleteVolume(d) ++ err := d.manager.DeleteVolume(d) ++ if err != nil { ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Failed to delete volume %s: %v", d.volumeID, err) ++ return fmt.Errorf("failed to delete volume: see kube-controller-manager.log for details") ++ } ++ return nil + } + + type portworxVolumeProvisioner struct { +@@ -406,7 +421,9 @@ func (c *portworxVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopo + + volumeID, sizeGiB, labels, err := c.manager.CreateVolume(c) + if err != nil { +- return nil, err ++ // don't log error details from client calls in events ++ klog.V(4).Infof("Failed to create volume: %v", err) ++ return nil, fmt.Errorf("failed to create volume: see kube-controller-manager.log for details") + } + + pv := &v1.PersistentVolume{ +-- +2.45.4 + diff --git a/SPECS/kubernetes/CVE-2025-52881.patch b/SPECS/kubernetes/CVE-2025-52881.patch new file mode 100644 index 00000000000..c23d7eb4921 --- /dev/null +++ b/SPECS/kubernetes/CVE-2025-52881.patch @@ -0,0 +1,67 @@ +From ff94f9991bd32076c871ef0ad8bc1b763458e480 Mon Sep 17 00:00:00 2001 +From: Aleksa Sarai +Date: Thu, 19 Jun 2025 10:19:41 +1000 +Subject: [PATCH] *: switch to safer securejoin.Reopen + +filepath-securejoin v0.3 gave us a much safer re-open primitive, we +should use it to avoid any theoretical attacks. Rather than using it +direcly, add a small pathrs wrapper to make libpathrs migrations in the +future easier... + +Signed-off-by: Aleksa Sarai + +Upstream Patch Reference: https://github.com/opencontainers/runc/commit/ff94f9991bd32076c871ef0ad8bc1b763458e480.patch +--- + .../runc/libcontainer/standard_init_linux.go | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) + +diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +index 809dad5d..5ec1fe26 100644 +--- a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go ++++ b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +@@ -12,6 +12,7 @@ import ( + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + ++ "github.com/opencontainers/runc/internal/pathrs" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/keys" +@@ -27,6 +28,7 @@ type linuxStandardInit struct { + fifoFd int + logFd int + mountFds []int ++ fifoFile *os.File + config *initConfig + } + +@@ -234,13 +236,13 @@ func (l *linuxStandardInit) Init() error { + // user process. We open it through /proc/self/fd/$fd, because the fd that + // was given to us was an O_PATH fd to the fifo itself. Linux allows us to + // re-open an O_PATH fd through /proc. 
+- fifoPath := "/proc/self/fd/" + strconv.Itoa(l.fifoFd) +- fd, err := unix.Open(fifoPath, unix.O_WRONLY|unix.O_CLOEXEC, 0) ++ fifoFile, err := pathrs.Reopen(l.fifoFile, unix.O_WRONLY|unix.O_CLOEXEC) + if err != nil { +- return &os.PathError{Op: "open exec fifo", Path: fifoPath, Err: err} ++ return fmt.Errorf("reopen exec fifo: %w", err) + } +- if _, err := unix.Write(fd, []byte("0")); err != nil { +- return &os.PathError{Op: "write exec fifo", Path: fifoPath, Err: err} ++ defer fifoFile.Close() ++ if _, err := fifoFile.Write([]byte("0")); err != nil { ++ return &os.PathError{Op: "write exec fifo", Path: fifoFile.Name(), Err: err} + } + + // Close the O_PATH fifofd fd before exec because the kernel resets +@@ -249,6 +251,7 @@ func (l *linuxStandardInit) Init() error { + // N.B. the core issue itself (passing dirfds to the host filesystem) has + // since been resolved. + // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 ++ _ = fifoFile.Close() + _ = unix.Close(l.fifoFd) + + s := l.config.SpecState +-- +2.45.4 + diff --git a/SPECS/kubernetes/CVE-2025-65637.patch b/SPECS/kubernetes/CVE-2025-65637.patch new file mode 100644 index 00000000000..4339f7c14fd --- /dev/null +++ b/SPECS/kubernetes/CVE-2025-65637.patch @@ -0,0 +1,195 @@ +From dbb83382a6088a0e6e966c19840bfef7151838d1 Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 1/2] This commit fixes a potential denial of service + vulnerability in logrus.Writer() that could be triggered by logging text + longer than 64kb without newlines. Previously, the bufio.Scanner used by + Writer() would hang indefinitely when reading such text without newlines, + causing the application to become unresponsive. + +Upstream Reference: https://github.com/sirupsen/logrus/commit/766cfece3701d0b1737681ffb5e6e40b628b664d.patch +--- + vendor/github.com/sirupsen/logrus/writer.go | 33 ++++++++++++++++++++- + 1 file changed, 32 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 72e8e3a1..36032d06 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -4,6 +4,7 @@ import ( + "bufio" + "io" + "runtime" ++ "strings" + ) + + // Writer at INFO level. See WriterLevel for details. +@@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) + } + ++// Writer returns an io.Writer that writes to the logger at the info log level + func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) + } + ++// WriterLevel returns an io.Writer that writes to the logger at the given log level + func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + ++ // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace +@@ -48,23 +52,50 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + printFunc = entry.Print + } + ++ // Start a new goroutine to scan the input and write it to the logger using the specified print function. ++ // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
+ go entry.writerScanner(reader, printFunc) ++ ++ // Set a finalizer function to close the writer when it is garbage collected + runtime.SetFinalizer(writer, writerFinalizer) + + return writer + } + ++// writerScanner scans the input from the reader and writes it to the logger + func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) ++ ++ // Set the buffer size to the maximum token size to avoid buffer overflows ++ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) ++ ++ // Define a split function to split the input into chunks of up to 64KB ++ chunkSize := 64 * 1024 // 64KB ++ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { ++ if len(data) > chunkSize { ++ return chunkSize, data[:chunkSize], nil ++ } ++ return 0, nil, nil ++ } ++ ++ //Use the custom split function to split the input ++ scanner.Split(splitFunc) ++ ++ // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { +- printFunc(scanner.Text()) ++ printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } ++ ++ // If there was an error while scanning the input, log an error + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } ++ ++ // Close the reader when we are done + reader.Close() + } + ++// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected + func writerFinalizer(writer *io.PipeWriter) { + writer.Close() + } +-- +2.45.4 + + +From fcc7a15c7711402f9abb01be25ab50a854f67d26 Mon Sep 17 00:00:00 2001 +From: Chris +Date: Fri, 10 Mar 2023 13:45:41 -0800 +Subject: [PATCH 2/2] Scan text in 64KB chunks + +This commit fixes a potential denial of service +vulnerability in logrus.Writer() that could be +triggered by logging text longer than 64KB +without newlines. Previously, the bufio.Scanner +used by Writer() would hang indefinitely when +reading such text without newlines, causing the +application to become unresponsive. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/sirupsen/logrus/pull/1376.patch +--- + vendor/github.com/sirupsen/logrus/writer.go | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 36032d06..7e7703c7 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -75,7 +75,8 @@ func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ... + if len(data) > chunkSize { + return chunkSize, data[:chunkSize], nil + } +- return 0, nil, nil ++ ++ return len(data), data, nil + } + + //Use the custom split function to split the input +-- +2.45.4 + +From d40e25cd45ed9c6b2b66e6b97573a0413e4c23bd Mon Sep 17 00:00:00 2001 +From: Paul Holzinger +Date: Wed, 17 May 2023 15:39:49 +0200 +Subject: [PATCH] fix panic in Writer + +Commit 766cfece introduced this bug by defining an incorrect split +function. First it breaks the old behavior because it never splits at +newlines now. Second, it causes a panic because it never tells the +scanner to stop. See the bufio.ScanLines function, something like: +``` +if atEOF && len(data) == 0 { + return 0, nil, nil +} +``` +is needed to do that. + +This commit fixes it by restoring the old behavior and calling +bufio.ScanLines but also keep the 64KB check in place to avoid buffering +for to long. 
+ +Two tests are added to ensure it is working as expected. + +Fixes #1383 +Upstream Reference Patch: https://github.com/sirupsen/logrus/commit/d40e25cd45ed9c6b2b66e6b97573a0413e4c23bd.patch + +Signed-off-by: Paul Holzinger +--- + vendor/github.com/sirupsen/logrus/writer.go | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go +index 7e7703c7..074fd4b8 100644 +--- a/vendor/github.com/sirupsen/logrus/writer.go ++++ b/vendor/github.com/sirupsen/logrus/writer.go +@@ -70,16 +70,16 @@ func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ... + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB +- chunkSize := 64 * 1024 // 64KB ++ chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { +- if len(data) > chunkSize { ++ if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + +- return len(data), data, nil ++ return bufio.ScanLines(data, atEOF) + } + +- //Use the custom split function to split the input ++ // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function +-- +2.45.4 + diff --git a/SPECS/kubernetes/kubernetes.spec b/SPECS/kubernetes/kubernetes.spec index 2af25d7cd75..1249d3322a1 100644 --- a/SPECS/kubernetes/kubernetes.spec +++ b/SPECS/kubernetes/kubernetes.spec @@ -10,7 +10,7 @@ Summary: Microsoft Kubernetes Name: kubernetes Version: 1.30.10 -Release: 16%{?dist} +Release: 20%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -29,8 +29,11 @@ Patch7: CVE-2025-22872.patch Patch8: CVE-2025-4563.patch Patch9: CVE-2025-31133.patch Patch10: CVE-2025-52565.patch +Patch11: CVE-2025-13281.patch +Patch12: CVE-2025-65637.patch +Patch13: CVE-2025-52881.patch BuildRequires: flex-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: golang < 1.25 BuildRequires: rsync BuildRequires: systemd-devel @@ -280,6 +283,17 @@ fi %{_exec_prefix}/local/bin/pause %changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.30.10-20 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.30.10-19 +- Bump to rebuild with updated glibc + +* Thu Dec 18 2025 Aditya Singh - 1.30.10-18 +- Address CVE-2025-52881 + +* Tue Dec 16 2025 Azure Linux Security Servicing Account - 1.30.10-17 +- Patch for CVE-2025-13281, CVE-2025-65637 * Mon Dec 1 2025 Andrew Phelps - 1.30.10-16 - Bump to rebuild with updated glibc diff --git a/SPECS/kubevirt/CVE-2025-22872.patch b/SPECS/kubevirt/CVE-2025-22872.patch deleted file mode 100644 index 8e0d348b32b..00000000000 --- a/SPECS/kubevirt/CVE-2025-22872.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 49deeee2f3c9277aa729e4d0698ab9f297b0c38a Mon Sep 17 00:00:00 2001 -From: Sreenivasulu Malavathula -Date: Thu, 24 Apr 2025 18:37:02 -0500 -Subject: [PATCH] Address CVE-2025-22872 -Upstream Patch Reference: https://github.com/golang/net/commit/e1fcd82abba34df74614020343be8eb1fe85f0d9 - ---- - vendor/golang.org/x/net/html/token.go | 18 ++++++++++++++++-- - 1 file changed, 16 insertions(+), 2 deletions(-) - -diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go -index de67f93..9bbdf7d 100644 ---- a/vendor/golang.org/x/net/html/token.go -+++ 
b/vendor/golang.org/x/net/html/token.go
-@@ -839,8 +839,22 @@ func (z *Tokenizer) readStartTag() TokenType {
- if raw {
- z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
- }
-- // Look for a self-closing token like "<br/>".
-+ // Look for a self-closing token (e.g. <br/>).
-+ //
-+ // Originally, we did this by just checking that the last character of the
-+ // tag (ignoring the closing bracket) was a solidus (/) character, but this
-+ // is not always accurate.
-+ //
-+ // We need to be careful that we don't misinterpret a non-self-closing tag
-+ // as self-closing, as can happen if the tag contains unquoted attribute
-+ // values (i.e.

). -+ // -+ // To avoid this, we check that the last non-bracket character of the tag -+ // (z.raw.end-2) isn't the same character as the last non-quote character of -+ // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has -+ // attributes. -+ nAttrs := len(z.attr) -+ if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) { - return SelfClosingTagToken - } - return StartTagToken --- -2.45.2 - diff --git a/SPECS/kubevirt/CVE-2025-64435.patch b/SPECS/kubevirt/CVE-2025-64435.patch new file mode 100644 index 00000000000..ab713df20ba --- /dev/null +++ b/SPECS/kubevirt/CVE-2025-64435.patch @@ -0,0 +1,129 @@ +From 0e11a68b243d5f0b2f09def23cb7c67cbdf038cd Mon Sep 17 00:00:00 2001 +From: fossedihelm +Date: Mon, 15 Sep 2025 08:26:00 +0200 +Subject: ctrl: Do not fallback using labels for getting ownerRef + +Currently, in case the pods don't have the ownerRef, we +try to rely on pod labels to get the vmi owner ref. +This fallback should not be allowed because nowadays, +every pod created by KV have the ownerRef. + +Signed-off-by: fossedihelm + +Upstream Patch Reference: +1. https://github.com/kubevirt/kubevirt/commit/0e11a68b243d5f0b2f09def23cb7c67cbdf038cd.patch +2. https://github.com/kubevirt/kubevirt/commit/2d6346ee45900dec7ae6178b599d12fd5f5c6a59.patch +--- + pkg/controller/controller_ref.go | 15 +-------------- + .../watch/drain/evacuation/evacuation_test.go | 1 + + .../watch/migration/migration_test.go | 2 ++ + pkg/virt-controller/watch/node/node_test.go | 1 + + pkg/virt-controller/watch/vmi/vmi_test.go | 3 +++ + .../workload-updater/workload-updater_test.go | 1 + + 6 files changed, 9 insertions(+), 14 deletions(-) + +diff --git a/pkg/controller/controller_ref.go b/pkg/controller/controller_ref.go +index 9a247de..8a672d2 100644 +--- a/pkg/controller/controller_ref.go ++++ b/pkg/controller/controller_ref.go +@@ -20,26 +20,13 @@ package controller + import ( + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +- "k8s.io/apimachinery/pkg/types" +- + virtv1 "kubevirt.io/api/core/v1" + ) + + // GetControllerOf returns the controllerRef if controllee has a controller, + // otherwise returns nil. 
+ func GetControllerOf(pod *k8sv1.Pod) *metav1.OwnerReference { +- controllerRef := metav1.GetControllerOf(pod) +- if controllerRef != nil { +- return controllerRef +- } +- // We may find pods that are only using CreatedByLabel and not set with an OwnerReference +- if createdBy := pod.Labels[virtv1.CreatedByLabel]; len(createdBy) > 0 { +- name := pod.Annotations[virtv1.DomainAnnotation] +- uid := types.UID(createdBy) +- vmi := virtv1.NewVMI(name, uid) +- return metav1.NewControllerRef(vmi, virtv1.VirtualMachineInstanceGroupVersionKind) +- } +- return nil ++ return metav1.GetControllerOf(pod) + } + + func IsControlledBy(pod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance) bool { +diff --git a/pkg/virt-controller/watch/drain/evacuation/evacuation_test.go b/pkg/virt-controller/watch/drain/evacuation/evacuation_test.go +index a5dbfdd..4e6d131 100644 +--- a/pkg/virt-controller/watch/drain/evacuation/evacuation_test.go ++++ b/pkg/virt-controller/watch/drain/evacuation/evacuation_test.go +@@ -601,6 +601,7 @@ func newPod(vmi *v1.VirtualMachineInstance, name string, phase k8sv1.PodPhase, o + pod.Annotations = map[string]string{ + v1.DomainAnnotation: vmi.Name, + } ++ pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(vmi, v1.VirtualMachineInstanceGroupVersionKind)} + } + + return pod +diff --git a/pkg/virt-controller/watch/migration/migration_test.go b/pkg/virt-controller/watch/migration/migration_test.go +index 66c870e..9c5deb9 100644 +--- a/pkg/virt-controller/watch/migration/migration_test.go ++++ b/pkg/virt-controller/watch/migration/migration_test.go +@@ -2412,6 +2412,7 @@ func newSourcePodForVirtualMachine(vmi *virtv1.VirtualMachineInstance) *k8sv1.Po + Annotations: map[string]string{ + virtv1.DomainAnnotation: vmi.Name, + }, ++ OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(vmi, virtv1.VirtualMachineInstanceGroupVersionKind)}, + }, + Status: k8sv1.PodStatus{ + Phase: k8sv1.PodRunning, +@@ -2440,6 +2441,7 @@ func newTargetPodForVirtualMachine(vmi *virtv1.VirtualMachineInstance, migration + virtv1.DomainAnnotation: vmi.Name, + virtv1.MigrationJobNameAnnotation: migration.Name, + }, ++ OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(vmi, virtv1.VirtualMachineInstanceGroupVersionKind)}, + }, + Status: k8sv1.PodStatus{ + Phase: phase, +diff --git a/pkg/virt-controller/watch/node/node_test.go b/pkg/virt-controller/watch/node/node_test.go +index ea1b930..cf2ef23 100644 +--- a/pkg/virt-controller/watch/node/node_test.go ++++ b/pkg/virt-controller/watch/node/node_test.go +@@ -510,6 +510,7 @@ func NewHealthyPodForVirtualMachine(podName string, vmi *v1.VirtualMachineInstan + v1.CreatedByLabel: string(vmi.UID), + v1.AppLabel: "virt-launcher", + }, ++ OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(vmi, v1.VirtualMachineInstanceGroupVersionKind)}, + }, + Spec: k8sv1.PodSpec{NodeName: vmi.Status.NodeName}, + Status: k8sv1.PodStatus{ +diff --git a/pkg/virt-controller/watch/vmi/vmi_test.go b/pkg/virt-controller/watch/vmi/vmi_test.go +index 8a05cfd..35ee4e4 100644 +--- a/pkg/virt-controller/watch/vmi/vmi_test.go ++++ b/pkg/virt-controller/watch/vmi/vmi_test.go +@@ -3909,6 +3909,9 @@ func newPodForVirtualMachine(vmi *virtv1.VirtualMachineInstance, phase k8sv1.Pod + virtv1.CreatedByLabel: string(vmi.UID), + }, + Annotations: podAnnotations, ++ OwnerReferences: []metav1.OwnerReference{ ++ *metav1.NewControllerRef(vmi, virtv1.VirtualMachineInstanceGroupVersionKind), ++ }, + }, + Status: k8sv1.PodStatus{ + Phase: phase, +diff --git 
a/pkg/virt-controller/watch/workload-updater/workload-updater_test.go b/pkg/virt-controller/watch/workload-updater/workload-updater_test.go +index e320140..a7ae0df 100644 +--- a/pkg/virt-controller/watch/workload-updater/workload-updater_test.go ++++ b/pkg/virt-controller/watch/workload-updater/workload-updater_test.go +@@ -715,6 +715,7 @@ func newLauncherPodForVMI(vmi *v1.VirtualMachineInstance) *k8sv1.Pod { + Annotations: map[string]string{ + v1.DomainAnnotation: vmi.Name, + }, ++ OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(vmi, v1.VirtualMachineInstanceGroupVersionKind)}, + }, + Status: k8sv1.PodStatus{ + Phase: k8sv1.PodRunning, +-- +2.45.4 + diff --git a/SPECS/kubevirt/kubevirt.signatures.json b/SPECS/kubevirt/kubevirt.signatures.json index fccac2ca21c..36fe0943a56 100644 --- a/SPECS/kubevirt/kubevirt.signatures.json +++ b/SPECS/kubevirt/kubevirt.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "kubevirt-1.5.3.tar.gz": "93518543f92fa6a9a16e7b6653745d6a2562c52b21af81769bf85ac6e67df5fa" + "kubevirt-1.6.3.tar.gz": "9d41ac421d7af1c25a2b5c370dc597b67e6fc2762ab03ddf4745f71cad5fb4e2" } } diff --git a/SPECS/kubevirt/kubevirt.spec b/SPECS/kubevirt/kubevirt.spec index b553e356be3..d39dc47c217 100644 --- a/SPECS/kubevirt/kubevirt.spec +++ b/SPECS/kubevirt/kubevirt.spec @@ -19,8 +19,8 @@ Summary: Container native virtualization Name: kubevirt -Version: 1.5.3 -Release: 2%{?dist} +Version: 1.6.3 +Release: 3%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -31,11 +31,12 @@ Source0: https://github.com/kubevirt/kubevirt/archive/refs/tags/v%{versio # Nexus team needs these to-be-upstreamed patches for the operator Edge to work # correctly. Patch0: CVE-2025-47913.patch +Patch1: CVE-2025-64435.patch %global debug_package %{nil} BuildRequires: swtpm-tools BuildRequires: glibc-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: golang >= 1.21 BuildRequires: golang-packaging BuildRequires: pkgconfig @@ -268,6 +269,22 @@ install -p -m 0644 cmd/virt-launcher/qemu.conf %{buildroot}%{_datadir}/kube-virt %{_bindir}/virt-tests %changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.6.3-3 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.6.3-2 +- Bump to rebuild with updated glibc + +* Tue Dec 30 2025 Harshit Gupta - 1.6.3-1 +- Upgrade to 1.6.3 +- Remove CVE-2025-64324.patch + +* Wed Dec 17 2025 Aditya Singh - 1.5.3-4 +- Added patch for CVE-2025-64435 + +* Tue Dec 16 2025 Azure Linux Security Servicing Account - 1.5.3-3 +- Patch for CVE-2025-64324 + * Mon Nov 24 2025 Andrew Phelps - 1.5.3-2 - Bump to rebuild with updated glibc @@ -289,7 +306,7 @@ install -p -m 0644 cmd/virt-launcher/qemu.conf %{buildroot}%{_datadir}/kube-virt * Mon Aug 25 2025 Andrew Phelps - 1.5.0-2 - Bump to rebuild with updated glibc -* Thu Jul 03 2025 Harshit Gupta - 1.5.0-1 +* Fri Jul 11 2025 Harshit Gupta - 1.5.0-1 - Upgrade to 1.5.0 - Removed old patches - Remove virt_launcher.cil SELinux policy @@ -309,7 +326,7 @@ install -p -m 0644 cmd/virt-launcher/qemu.conf %{buildroot}%{_datadir}/kube-virt * Mon Mar 03 2025 corvus-callidus <108946721+corvus-callidus@users.noreply.github.com> - 1.2.0-15 - Address CVE-2023-44487 -* Sun March 02 2025 Kanishk Bansal - 1.2.0-14 +* Sun Mar 02 2025 Kanishk Bansal - 1.2.0-14 - Address CVE-2025-22869 * Tue Feb 25 2025 Chris Co - 1.2.0-14 diff --git a/SPECS/libarchive/CVE-2025-60753.patch b/SPECS/libarchive/CVE-2025-60753.patch new file mode 100644 index 
00000000000..258c495711d --- /dev/null +++ b/SPECS/libarchive/CVE-2025-60753.patch @@ -0,0 +1,124 @@ +From 362d934368309d74829572eab884364f3e6ab475 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?ARJANEN=20Lo=C3=AFc=20Jean=20David?= +Date: Fri, 14 Nov 2025 20:34:48 +0100 +Subject: [PATCH 1/2] Fix bsdtar zero-length pattern issue. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Uses the sed-like way (and Java-like, and .Net-like, and Javascript-like…) to fix this issue of advancing the string to be processed by one if the match is zero-length. + +Fixes libarchive/libarchive#2725 and solves libarchive/libarchive#2438. +--- + tar/subst.c | 19 ++++++++++++------- + tar/test/test_option_s.c | 8 +++++++- + 2 files changed, 19 insertions(+), 8 deletions(-) + +diff --git a/tar/subst.c b/tar/subst.c +index 0194cc8..25a15d6 100644 +--- a/tar/subst.c ++++ b/tar/subst.c +@@ -234,7 +234,9 @@ apply_substitution(struct bsdtar *bsdtar, const char *name, char **result, + (*result)[0] = 0; + } + +- while (1) { ++ char isEnd = 0; ++ do { ++ isEnd = *name == '\0'; + if (regexec(&rule->re, name, 10, matches, 0)) + break; + +@@ -289,12 +291,15 @@ apply_substitution(struct bsdtar *bsdtar, const char *name, char **result, + } + + realloc_strcat(result, rule->result + j); +- +- name += matches[0].rm_eo; +- +- if (!rule->global) +- break; +- } ++ if (matches[0].rm_eo > 0) { ++ name += matches[0].rm_eo; ++ } else { ++ // We skip a character because the match is 0-length ++ // so we need to add it to the output ++ realloc_strncat(result, name, 1); ++ name += 1; ++ } ++ } while (rule->global && !isEnd); // Testing one step after because sed et al. run 0-length patterns a last time on the empty string at the end + } + + if (got_match) +diff --git a/tar/test/test_option_s.c b/tar/test/test_option_s.c +index 1a36280..c5e75ff 100644 +--- a/tar/test/test_option_s.c ++++ b/tar/test/test_option_s.c +@@ -42,7 +42,13 @@ DEFINE_TEST(test_option_s) + systemf("%s -cf test1_2.tar -s /d1/d2/ in/d1/foo", testprog); + systemf("%s -xf test1_2.tar -C test1", testprog); + assertFileContents("foo", 3, "test1/in/d2/foo"); +- ++ systemf("%s -cf test1_3.tar -s /o/#/g in/d1/foo", testprog); ++ systemf("%s -xf test1_3.tar -C test1", testprog); ++ assertFileContents("foo", 3, "test1/in/d1/f##"); ++ // For the 0-length pattern check, remember that "test1/" isn't part of the string affected by the regexp ++ systemf("%s -cf test1_4.tar -s /f*/\\<~\\>/g in/d1/foo", testprog); ++ systemf("%s -xf test1_4.tar -C test1", testprog); ++ assertFileContents("foo", 3, "test1/<>i<>n<>/<>d<>1<>/<>o<>o<>"); + /* + * Test 2: Basic substitution when extracting archive. 
+ */ +-- +2.45.4 + + +From 0086b1c784724f5e341991973807d8cc6fb9ba76 Mon Sep 17 00:00:00 2001 +From: Martin Matuska +Date: Mon, 8 Dec 2025 21:40:46 +0100 +Subject: [PATCH 2/2] tar: fix off-bounds read resulting from #2787 (3150539ed) + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/libarchive/libarchive/pull/2787.patch https://patch-diff.githubusercontent.com/raw/libarchive/libarchive/pull/2809.patch +--- + tar/subst.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/tar/subst.c b/tar/subst.c +index 25a15d6..df83ca2 100644 +--- a/tar/subst.c ++++ b/tar/subst.c +@@ -236,7 +236,7 @@ apply_substitution(struct bsdtar *bsdtar, const char *name, char **result, + + char isEnd = 0; + do { +- isEnd = *name == '\0'; ++ isEnd = *name == '\0'; + if (regexec(&rule->re, name, 10, matches, 0)) + break; + +@@ -292,13 +292,13 @@ apply_substitution(struct bsdtar *bsdtar, const char *name, char **result, + + realloc_strcat(result, rule->result + j); + if (matches[0].rm_eo > 0) { +- name += matches[0].rm_eo; +- } else { +- // We skip a character because the match is 0-length +- // so we need to add it to the output +- realloc_strncat(result, name, 1); +- name += 1; +- } ++ name += matches[0].rm_eo; ++ } else if (!isEnd) { ++ // We skip a character because the match is 0-length ++ // so we need to add it to the output ++ realloc_strncat(result, name, 1); ++ name += 1; ++ } + } while (rule->global && !isEnd); // Testing one step after because sed et al. run 0-length patterns a last time on the empty string at the end + } + +-- +2.45.4 + diff --git a/SPECS/libarchive/libarchive.spec b/SPECS/libarchive/libarchive.spec index 8e4becd3c4b..32bab7149f8 100644 --- a/SPECS/libarchive/libarchive.spec +++ b/SPECS/libarchive/libarchive.spec @@ -1,7 +1,7 @@ Summary: Multi-format archive and compression library Name: libarchive Version: 3.7.7 -Release: 3%{?dist} +Release: 4%{?dist} # Certain files have individual licenses. For more details see contents of "COPYING". License: BSD AND Public Domain AND (ASL 2.0 OR CC0 1.0 OR OpenSSL) Vendor: Microsoft Corporation @@ -15,6 +15,7 @@ Patch3: CVE-2025-5915.patch Patch4: CVE-2025-5916.patch Patch5: CVE-2025-5917.patch Patch6: CVE-2025-5918.patch +Patch7: CVE-2025-60753.patch Provides: bsdtar = %{version}-%{release} BuildRequires: xz-libs @@ -45,6 +46,7 @@ make %{?_smp_mflags} rm -rf %{buildroot}%{_infodir} make DESTDIR=%{buildroot} install find %{buildroot} -type f -name "*.la" -delete -print +mv %{buildroot}%{_mandir}/man1/* . 
%check make %{?_smp_mflags} check @@ -55,6 +57,10 @@ make %{?_smp_mflags} check %files %defattr(-,root,root) %license COPYING +%license bsdcat.1 +%license bsdtar.1 +%license bsdcpio.1 +%license bsdunzip.1 %{_libdir}/*.so.* %{_bindir} %exclude %{_libdir}/debug/ @@ -67,6 +73,9 @@ make %{?_smp_mflags} check %{_libdir}/pkgconfig/*.pc %changelog +* Mon Jan 19 2026 Azure Linux Security Servicing Account - 3.7.7-4 +- Patch for CVE-2025-60753 + * Thu Jun 26 2025 Sumit Jena - 3.7.7-3 - Patch CVE-2025-5914, CVE-2025-5915, CVE-2025-5916, CVE-2025-5917, CVE-2025-5918 diff --git a/SPECS/libcap/libcap.spec b/SPECS/libcap/libcap.spec index e0a775b22d3..a73c1d9789c 100644 --- a/SPECS/libcap/libcap.spec +++ b/SPECS/libcap/libcap.spec @@ -1,7 +1,7 @@ Summary: Libcap Name: libcap Version: 2.69 -Release: 10%{?dist} +Release: 12%{?dist} License: GPLv2+ Group: System Environment/Security URL: https://www.gnu.org/software/hurd/community/gsoc/project_ideas/libcap.html @@ -9,7 +9,7 @@ Source0: https://www.kernel.org/pub/linux/libs/security/linux-privs/libca Patch0: CVE-2025-1390.patch Vendor: Microsoft Corporation Distribution: Azure Linux -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} %description The libcap package implements the user-space interfaces to the POSIX 1003.1e capabilities available @@ -62,6 +62,12 @@ sed -i '/echo "attempt to exploit kernel bug"/,/^fi$/d' quicktest.sh %{_mandir}/man3/* %changelog +* Thu Jan 22 2026 Kanishk Bansal - 2.69-12 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 2.69-11 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 2.69-10 - Bump to rebuild with updated glibc diff --git a/SPECS/libguestfs/libguestfs.spec b/SPECS/libguestfs/libguestfs.spec index 850d176f723..ad3bb21ea9a 100644 --- a/SPECS/libguestfs/libguestfs.spec +++ b/SPECS/libguestfs/libguestfs.spec @@ -25,7 +25,7 @@ Summary: Access and modify virtual machine disk images Name: libguestfs Version: 1.52.0 -Release: 18%{?dist} +Release: 20%{?dist} License: LGPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -82,7 +82,7 @@ BuildRequires: gcc-c++ BuildRequires: gdisk BuildRequires: genisoimage BuildRequires: gfs2-utils -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: gobject-introspection-devel BuildRequires: gperf BuildRequires: grep @@ -1147,6 +1147,12 @@ rm ocaml/html/.gitignore %endif %changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.52.0-20 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.52.0-19 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 1.52.0-18 - Bump to rebuild with updated glibc diff --git a/SPECS/libpcap/libpcap.signatures.json b/SPECS/libpcap/libpcap.signatures.json index 339ed71c7c3..f594ea9c0ca 100644 --- a/SPECS/libpcap/libpcap.signatures.json +++ b/SPECS/libpcap/libpcap.signatures.json @@ -1,5 +1,5 @@ { - "Signatures": { - "libpcap-libpcap-1.10.5.tar.gz": "6cd9835338ca334b699b1217e2aee2b873463c76aafd19b8b9d4710554f025ac" - } -} \ No newline at end of file + "Signatures": { + "libpcap-libpcap-1.10.6.tar.gz": "fcaeefbbca8d99249c8f271f3218f77cdbfd7718cc141f5edd16e90575e6bbed" + } +} diff --git a/SPECS/libpcap/libpcap.spec b/SPECS/libpcap/libpcap.spec index 9cdd4e3c6ae..75a98d9927b 100644 --- a/SPECS/libpcap/libpcap.spec +++ b/SPECS/libpcap/libpcap.spec @@ -1,6 +1,6 @@ Summary: C/C++ library for network traffic capture Name: libpcap -Version: 1.10.5 +Version: 1.10.6 Release: 
1%{?dist} License: BSD Vendor: Microsoft Corporation @@ -69,6 +69,9 @@ make DESTDIR=%{buildroot} install %{_mandir}/man7/* %changelog +* Mon Jan 05 2026 CBL-Mariner Servicing Account - 1.10.6-1 +- Auto-upgrade to 1.10.6 - for CVE-2025-11961 + * Mon Oct 21 2024 Sudipta Pandit - 1.10.5-1 - Upgrade to version 1.10.5 (fixes CVE-2024-8006) diff --git a/SPECS/libpng/libpng.signatures.json b/SPECS/libpng/libpng.signatures.json index 3cb58477556..87294dfbb87 100644 --- a/SPECS/libpng/libpng.signatures.json +++ b/SPECS/libpng/libpng.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "libpng-1.6.52.tar.xz": "36bd726228ec93a3b6c22fdb49e94a67b16f2fe9b39b78b7cb65772966661ccc" + "libpng-1.6.54.tar.xz": "01c9d8a303c941ec2c511c14312a3b1d36cedb41e2f5168ccdaa85d53b887805" } } diff --git a/SPECS/libpng/libpng.spec b/SPECS/libpng/libpng.spec index 28c64327569..cd66e767488 100644 --- a/SPECS/libpng/libpng.spec +++ b/SPECS/libpng/libpng.spec @@ -1,6 +1,6 @@ Summary: contains libraries for reading and writing PNG files. Name: libpng -Version: 1.6.52 +Version: 1.6.54 Release: 1%{?dist} License: zlib Vendor: Microsoft Corporation @@ -57,6 +57,9 @@ make %{?_smp_mflags} -k check %{_mandir}/man3/* %changelog +* Tue Jan 13 2026 CBL-Mariner Servicing Account - 1.6.54-1 +- Auto-upgrade to 1.6.54 - for CVE-2026-22695, CVE-2026-22801 + * Thu Dec 04 2025 CBL-Mariner Servicing Account - 1.6.52-1 - Auto-upgrade to 1.6.52 - for CVE-2025-66293 diff --git a/SPECS/libsndfile/CVE-2025-56226.patch b/SPECS/libsndfile/CVE-2025-56226.patch new file mode 100644 index 00000000000..d8babe47b22 --- /dev/null +++ b/SPECS/libsndfile/CVE-2025-56226.patch @@ -0,0 +1,38 @@ +From 2a2283c82465326dafeb5b5440614bc3532e3936 Mon Sep 17 00:00:00 2001 +From: Sisyphus-wang <43361974+Sisyphus-wang@users.noreply.github.com> +Date: Fri, 11 Jul 2025 15:14:48 +0800 +Subject: [PATCH] Update mpeg_l3_encode.c + +fix memoryLeak bug + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/libsndfile/libsndfile/commit/d9a35ea0d5c64c19dd635ae578e0028df8f66d6a.patch +--- + src/mpeg_l3_encode.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/mpeg_l3_encode.c b/src/mpeg_l3_encode.c +index 97324f7..04b1d50 100644 +--- a/src/mpeg_l3_encode.c ++++ b/src/mpeg_l3_encode.c +@@ -87,7 +87,8 @@ mpeg_l3_encoder_init (SF_PRIVATE *psf, int info_tag) + if (! 
(pmpeg->lamef = lame_init ())) + return SFE_MALLOC_FAILED ; + +- pmpeg->compression = -1.0 ; /* Unset */ ++ psf->codec_close = mpeg_l3_encoder_close ; /* Set psf->codec_close early*/ ++ pmpeg->compression = -1.0 ; /* Unset */ + + lame_set_in_samplerate (pmpeg->lamef, psf->sf.samplerate) ; + lame_set_num_channels (pmpeg->lamef, psf->sf.channels) ; +@@ -115,7 +116,6 @@ mpeg_l3_encoder_init (SF_PRIVATE *psf, int info_tag) + } + + psf->sf.seekable = 0 ; +- psf->codec_close = mpeg_l3_encoder_close ; + psf->byterate = mpeg_l3_encoder_byterate ; + psf->datalength = 0 ; + +-- +2.45.4 + diff --git a/SPECS/libsndfile/libsndfile.spec b/SPECS/libsndfile/libsndfile.spec index a0f95ac1d88..78ae172f2e2 100644 --- a/SPECS/libsndfile/libsndfile.spec +++ b/SPECS/libsndfile/libsndfile.spec @@ -1,7 +1,7 @@ Summary: Library for reading and writing sound files Name: libsndfile Version: 1.2.2 -Release: 3%{?dist} +Release: 4%{?dist} License: BSD AND GPLv2+ AND LGPLv2+ AND MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -16,6 +16,7 @@ Patch1: revert.patch Patch100: CVE-2018-13419.nopatch Patch101: CVE-2022-33065.patch Patch102: CVE-2024-50612.patch +Patch103: CVE-2025-56226.patch BuildRequires: alsa-lib-devel BuildRequires: autogen @@ -140,6 +141,9 @@ LD_LIBRARY_PATH=$PWD/src/.libs make check %{_libdir}/pkgconfig/sndfile.pc %changelog +* Sat Jan 24 2026 Azure Linux Security Servicing Account - 1.2.2-4 +- Patch for CVE-2025-56226 + * Tue Jan 07 2025 Muhammad Falak - 1.2.2-3 - Patch CVE-2024-50612 diff --git a/SPECS/libsodium/libsodium.signatures.json b/SPECS/libsodium/libsodium.signatures.json index 2b8574ad7d9..2b7f78bf9bc 100644 --- a/SPECS/libsodium/libsodium.signatures.json +++ b/SPECS/libsodium/libsodium.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "libsodium-1.0.19.tar.gz": "018d79fe0a045cca07331d37bd0cb57b2e838c51bc48fd837a1472e50068bbea" + "libsodium-1.0.19-final.tar.gz": "a8ee12ac57f8357dc9976ea76a4a32051fda2577494f10c20067fb5586f12e3b" } } \ No newline at end of file diff --git a/SPECS/libsodium/libsodium.spec b/SPECS/libsodium/libsodium.spec index 9c4d597ae42..cac95efca38 100644 --- a/SPECS/libsodium/libsodium.spec +++ b/SPECS/libsodium/libsodium.spec @@ -3,12 +3,12 @@ Summary: The Sodium crypto library Name: libsodium Version: 1.0.19 -Release: 1%{?dist} +Release: 2%{?dist} License: ISC Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://libsodium.org/ -Source0: https://download.libsodium.org/%{name}/releases/%{name}-%{version}.tar.gz +Source0: https://github.com/jedisct1/%{name}/archive/refs/tags/%{version}-FINAL.tar.gz#/%{name}-%{version}-final.tar.gz BuildRequires: gcc BuildRequires: make @@ -34,7 +34,7 @@ This package contains libraries and header files for developing applications that use %{name} libraries. 
%prep -%autosetup -n %{name}-stable +%autosetup -p1 -n %{name}-%{version}-FINAL %build %configure \ @@ -68,6 +68,9 @@ find %{buildroot} -type f -name "*.a" -delete -print %changelog +* Tue Jan 06 2026 Kanishk Bansal - 1.0.19-2 +- Update to version 1.0.19-FINAL for CVE-2025-69277 + * Fri Feb 02 2024 Thien Trung Vuong - 1.0.19-1 - Update to version 1.0.19 - Update soname to 26 diff --git a/SPECS/libsoup/CVE-2025-12105.patch b/SPECS/libsoup/CVE-2025-12105.patch new file mode 100644 index 00000000000..e9c6eb040d3 --- /dev/null +++ b/SPECS/libsoup/CVE-2025-12105.patch @@ -0,0 +1,32 @@ +From 79aa14c1681114fe3defef61629e1266b6ede411 Mon Sep 17 00:00:00 2001 +From: Eugene Mutavchi +Date: Fri, 10 Oct 2025 16:24:27 +0000 +Subject: [PATCH] fix 'heap-use-after-free' caused by 'finishing' queue item + twice + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://gitlab.gnome.org/GNOME/libsoup/-/merge_requests/481.patch +--- + libsoup/soup-session.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/libsoup/soup-session.c b/libsoup/soup-session.c +index 9f00b05..649902f 100644 +--- a/libsoup/soup-session.c ++++ b/libsoup/soup-session.c +@@ -2822,8 +2822,10 @@ run_until_read_done (SoupMessage *msg, + if (soup_message_io_in_progress (msg)) + soup_message_io_finished (msg); + item->paused = FALSE; +- item->state = SOUP_MESSAGE_FINISHING; +- soup_session_process_queue_item (item->session, item, FALSE); ++ if (item->state != SOUP_MESSAGE_FINISHED) { ++ item->state = SOUP_MESSAGE_FINISHING; ++ soup_session_process_queue_item (item->session, item, FALSE); ++ } + } + async_send_request_return_result (item, NULL, error); + } +-- +2.45.4 + diff --git a/SPECS/libsoup/CVE-2025-32049.patch b/SPECS/libsoup/CVE-2025-32049.patch new file mode 100644 index 00000000000..7613c86bfc8 --- /dev/null +++ b/SPECS/libsoup/CVE-2025-32049.patch @@ -0,0 +1,380 @@ +From c82f1acdd0855a7844cb4dc7b6537ab3246debae Mon Sep 17 00:00:00 2001 +From: Ignacio Casal Quinteiro +Date: Wed, 24 Jul 2024 15:20:35 +0200 +Subject: [PATCH 1/4] websocket: add a way to restrict the total message size + +Otherwise a client could send small packages smaller than +total-incoming-payload-size but still to break the server +with a big allocation + +Fixes: #390 +--- + libsoup/websocket/soup-websocket-connection.c | 106 +++++++++++++++++- + libsoup/websocket/soup-websocket-connection.h | 7 ++ + 2 files changed, 110 insertions(+), 3 deletions(-) + +diff --git a/libsoup/websocket/soup-websocket-connection.c b/libsoup/websocket/soup-websocket-connection.c +index df8f67d..51971db 100644 +--- a/libsoup/websocket/soup-websocket-connection.c ++++ b/libsoup/websocket/soup-websocket-connection.c +@@ -76,6 +76,7 @@ enum { + PROP_MAX_INCOMING_PAYLOAD_SIZE, + PROP_KEEPALIVE_INTERVAL, + PROP_EXTENSIONS, ++ PROP_MAX_TOTAL_MESSAGE_SIZE, + + LAST_PROPERTY + }; +@@ -118,6 +119,7 @@ typedef struct { + char *origin; + char *protocol; + guint64 max_incoming_payload_size; ++ guint64 max_total_message_size; + guint keepalive_interval; + + gushort peer_close_code; +@@ -148,6 +150,7 @@ typedef struct { + } SoupWebsocketConnectionPrivate; + + #define MAX_INCOMING_PAYLOAD_SIZE_DEFAULT 128 * 1024 ++#define MAX_TOTAL_MESSAGE_SIZE_DEFAULT 128 * 1024 + #define READ_BUFFER_SIZE 1024 + #define MASK_LENGTH 4 + +@@ -677,8 +680,8 @@ bad_data_error_and_close (SoupWebsocketConnection *self) + } + + static void +-too_big_error_and_close (SoupWebsocketConnection *self, +- guint64 payload_len) ++too_big_incoming_payload_error_and_close 
(SoupWebsocketConnection *self, ++ guint64 payload_len) + { + SoupWebsocketConnectionPrivate *priv = soup_websocket_connection_get_instance_private (self); + GError *error; +@@ -694,6 +697,24 @@ too_big_error_and_close (SoupWebsocketConnection *self, + emit_error_and_close (self, error, TRUE); + } + ++static void ++too_big_message_error_and_close (SoupWebsocketConnection *self, ++ guint64 len) ++{ ++ SoupWebsocketConnectionPrivate *priv = soup_websocket_connection_get_instance_private (self); ++ GError *error; ++ ++ error = g_error_new_literal (SOUP_WEBSOCKET_ERROR, ++ SOUP_WEBSOCKET_CLOSE_TOO_BIG, ++ priv->connection_type == SOUP_WEBSOCKET_CONNECTION_SERVER ? ++ "Received WebSocket payload from the client larger than configured max-total-message-size" : ++ "Received WebSocket payload from the server larger than configured max-total-message-size"); ++ g_debug ("%s received message of size %" G_GUINT64_FORMAT " or greater, but max supported size is %" G_GUINT64_FORMAT, ++ priv->connection_type == SOUP_WEBSOCKET_CONNECTION_SERVER ? "server" : "client", ++ len, priv->max_total_message_size); ++ emit_error_and_close (self, error, TRUE); ++} ++ + static void + close_connection (SoupWebsocketConnection *self, + gushort code, +@@ -937,6 +958,12 @@ process_contents (SoupWebsocketConnection *self, + switch (priv->message_opcode) { + case 0x01: + case 0x02: ++ /* Safety valve */ ++ if (priv->max_total_message_size > 0 && ++ (priv->message_data->len + payload_len) > priv->max_total_message_size) { ++ too_big_message_error_and_close (self, (priv->message_data->len + payload_len)); ++ return; ++ } + g_byte_array_append (priv->message_data, payload, payload_len); + break; + default: +@@ -1075,7 +1102,7 @@ process_frame (SoupWebsocketConnection *self) + /* Safety valve */ + if (priv->max_incoming_payload_size > 0 && + payload_len > priv->max_incoming_payload_size) { +- too_big_error_and_close (self, payload_len); ++ too_big_incoming_payload_error_and_close (self, payload_len); + return FALSE; + } + +@@ -1382,6 +1409,10 @@ soup_websocket_connection_get_property (GObject *object, + g_value_set_pointer (value, priv->extensions); + break; + ++ case PROP_MAX_TOTAL_MESSAGE_SIZE: ++ g_value_set_uint64 (value, priv->max_total_message_size); ++ break; ++ + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; +@@ -1435,6 +1466,10 @@ soup_websocket_connection_set_property (GObject *object, + priv->extensions = g_value_get_pointer (value); + break; + ++ case PROP_MAX_TOTAL_MESSAGE_SIZE: ++ priv->max_total_message_size = g_value_get_uint64 (value); ++ break; ++ + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); + break; +@@ -1640,6 +1675,26 @@ soup_websocket_connection_class_init (SoupWebsocketConnectionClass *klass) + G_PARAM_CONSTRUCT_ONLY | + G_PARAM_STATIC_STRINGS); + ++ /** ++ * SoupWebsocketConnection:max-total-message-size: ++ * ++ * The total message size for incoming packets. ++ * ++ * The protocol expects or 0 to not limit it. 
++ * ++ * Since: 3.8 ++ */ ++ properties[PROP_MAX_TOTAL_MESSAGE_SIZE] = ++ g_param_spec_uint64 ("max-total-message-size", ++ "Max total message size", ++ "Max total message size ", ++ 0, ++ G_MAXUINT64, ++ MAX_TOTAL_MESSAGE_SIZE_DEFAULT, ++ G_PARAM_READWRITE | ++ G_PARAM_CONSTRUCT | ++ G_PARAM_STATIC_STRINGS); ++ + g_object_class_install_properties (gobject_class, LAST_PROPERTY, properties); + + /** +@@ -2110,6 +2165,51 @@ soup_websocket_connection_set_max_incoming_payload_size (SoupWebsocketConnection + } + } + ++/** ++ * soup_websocket_connection_get_max_total_message_size: ++ * @self: the WebSocket ++ * ++ * Gets the maximum total message size allowed for packets. ++ * ++ * Returns: the maximum total message size. ++ * ++ * Since: 3.8 ++ */ ++guint64 ++soup_websocket_connection_get_max_total_message_size (SoupWebsocketConnection *self) ++{ ++ SoupWebsocketConnectionPrivate *priv = soup_websocket_connection_get_instance_private (self); ++ ++ g_return_val_if_fail (SOUP_IS_WEBSOCKET_CONNECTION (self), MAX_TOTAL_MESSAGE_SIZE_DEFAULT); ++ ++ return priv->max_total_message_size; ++} ++ ++/** ++ * soup_websocket_connection_set_max_total_message_size: ++ * @self: the WebSocket ++ * @max_total_message_size: the maximum total message size ++ * ++ * Sets the maximum total message size allowed for packets. ++ * ++ * It does not limit the outgoing packet size. ++ * ++ * Since: 3.8 ++ */ ++void ++soup_websocket_connection_set_max_total_message_size (SoupWebsocketConnection *self, ++ guint64 max_total_message_size) ++{ ++ SoupWebsocketConnectionPrivate *priv = soup_websocket_connection_get_instance_private (self); ++ ++ g_return_if_fail (SOUP_IS_WEBSOCKET_CONNECTION (self)); ++ ++ if (priv->max_total_message_size != max_total_message_size) { ++ priv->max_total_message_size = max_total_message_size; ++ g_object_notify_by_pspec (G_OBJECT (self), properties[PROP_MAX_TOTAL_MESSAGE_SIZE]); ++ } ++} ++ + /** + * soup_websocket_connection_get_keepalive_interval: + * @self: the WebSocket +diff --git a/libsoup/websocket/soup-websocket-connection.h b/libsoup/websocket/soup-websocket-connection.h +index eeb093d..922de56 100644 +--- a/libsoup/websocket/soup-websocket-connection.h ++++ b/libsoup/websocket/soup-websocket-connection.h +@@ -88,6 +88,13 @@ SOUP_AVAILABLE_IN_ALL + void soup_websocket_connection_set_max_incoming_payload_size (SoupWebsocketConnection *self, + guint64 max_incoming_payload_size); + ++SOUP_AVAILABLE_IN_3_8 ++guint64 soup_websocket_connection_get_max_total_message_size (SoupWebsocketConnection *self); ++ ++SOUP_AVAILABLE_IN_3_8 ++void soup_websocket_connection_set_max_total_message_size (SoupWebsocketConnection *self, ++ guint64 max_total_message_size); ++ + SOUP_AVAILABLE_IN_ALL + guint soup_websocket_connection_get_keepalive_interval (SoupWebsocketConnection *self); + +-- +2.45.4 + + +From 108ddaf9be3b4f33e7703d715e7c88fb06012067 Mon Sep 17 00:00:00 2001 +From: Ignacio Casal Quinteiro +Date: Fri, 20 Sep 2024 12:12:38 +0200 +Subject: [PATCH 2/4] websocket-test: set the total message size + +This is required when sending a big amount of data +-- +2.45.4 + + +From f2bde4dfa2879933a3e90e9be7a0e92a0121b64f Mon Sep 17 00:00:00 2001 +From: Michael Catanzaro +Date: Thu, 8 May 2025 16:16:25 -0500 +Subject: [PATCH 3/4] Set message size limit in SoupServer rather than + SoupWebsocketConnection + +We're not sure about the compatibility implications of having a default +size limit for clients. 
+ +Also not sure whether the server limit is actually set appropriately, +but there is probably very little server usage of +SoupWebsocketConnection in the wild, so it's not so likely to break +things. +--- + libsoup/server/soup-server.c | 24 +++++++++++++++---- + libsoup/websocket/soup-websocket-connection.c | 24 +++++++++++++------ + 2 files changed, 36 insertions(+), 12 deletions(-) + +diff --git a/libsoup/server/soup-server.c b/libsoup/server/soup-server.c +index 2d3dec9..5b4f89b 100644 +--- a/libsoup/server/soup-server.c ++++ b/libsoup/server/soup-server.c +@@ -186,6 +186,16 @@ static GParamSpec *properties[LAST_PROPERTY] = { NULL, }; + + G_DEFINE_TYPE_WITH_PRIVATE (SoupServer, soup_server, G_TYPE_OBJECT) + ++/* SoupWebsocketConnection by default limits only maximum packet size. But a ++ * message may consist of multiple packets, so SoupServer additionally restricts ++ * total message size to mitigate denial of service attacks on the server. ++ * SoupWebsocketConnection does not do this by default because I don't know ++ * whether that would or would not cause compatibility problems for websites. ++ * ++ * This size is in bytes and it is arbitrary. ++ */ ++#define MAX_TOTAL_MESSAGE_SIZE_DEFAULT 128 * 1024 ++ + static void request_finished (SoupServerMessage *msg, + SoupMessageIOCompletion completion, + SoupServer *server); +@@ -945,11 +955,15 @@ complete_websocket_upgrade (SoupServer *server, + + g_object_ref (msg); + stream = soup_server_message_steal_connection (msg); +- conn = soup_websocket_connection_new (stream, uri, +- SOUP_WEBSOCKET_CONNECTION_SERVER, +- soup_message_headers_get_one_common (soup_server_message_get_request_headers (msg), SOUP_HEADER_ORIGIN), +- soup_message_headers_get_one_common (soup_server_message_get_response_headers (msg), SOUP_HEADER_SEC_WEBSOCKET_PROTOCOL), +- handler->websocket_extensions); ++ conn = SOUP_WEBSOCKET_CONNECTION (g_object_new (SOUP_TYPE_WEBSOCKET_CONNECTION, ++ "io-stream", stream, ++ "uri", uri, ++ "connection-type", SOUP_WEBSOCKET_CONNECTION_SERVER, ++ "origin", soup_message_headers_get_one_common (soup_server_message_get_request_headers (msg), SOUP_HEADER_ORIGIN), ++ "protocol", soup_message_headers_get_one_common (soup_server_message_get_response_headers (msg), SOUP_HEADER_SEC_WEBSOCKET_PROTOCOL), ++ "extensions", handler->websocket_extensions, ++ "max-total-message-size", (guint64)MAX_TOTAL_MESSAGE_SIZE_DEFAULT, ++ NULL)); + handler->websocket_extensions = NULL; + g_object_unref (stream); + +diff --git a/libsoup/websocket/soup-websocket-connection.c b/libsoup/websocket/soup-websocket-connection.c +index 51971db..5896f38 100644 +--- a/libsoup/websocket/soup-websocket-connection.c ++++ b/libsoup/websocket/soup-websocket-connection.c +@@ -150,7 +150,6 @@ typedef struct { + } SoupWebsocketConnectionPrivate; + + #define MAX_INCOMING_PAYLOAD_SIZE_DEFAULT 128 * 1024 +-#define MAX_TOTAL_MESSAGE_SIZE_DEFAULT 128 * 1024 + #define READ_BUFFER_SIZE 1024 + #define MASK_LENGTH 4 + +@@ -1628,9 +1627,10 @@ soup_websocket_connection_class_init (SoupWebsocketConnectionClass *klass) + /** + * SoupWebsocketConnection:max-incoming-payload-size: + * +- * The maximum payload size for incoming packets. ++ * The maximum payload size for incoming packets, or 0 to not limit it. + * +- * The protocol expects or 0 to not limit it. ++ * Each message may consist of multiple packets, so also refer to ++ * [property@WebSocketConnection:max-total-message-size]. 
+ */ + properties[PROP_MAX_INCOMING_PAYLOAD_SIZE] = + g_param_spec_uint64 ("max-incoming-payload-size", +@@ -1678,9 +1678,19 @@ soup_websocket_connection_class_init (SoupWebsocketConnectionClass *klass) + /** + * SoupWebsocketConnection:max-total-message-size: + * +- * The total message size for incoming packets. ++ * The maximum size for incoming messages. + * +- * The protocol expects or 0 to not limit it. ++ * Set to a value to limit the total message size, or 0 to not ++ * limit it. ++ * ++ * [method@Server.add_websocket_handler] will set this to a nonzero ++ * default value to mitigate denial of service attacks. Clients must ++ * choose their own default if they need to mitigate denial of service ++ * attacks. You also need to set your own default if creating your own ++ * server SoupWebsocketConnection without using SoupServer. ++ * ++ * Each message may consist of multiple packets, so also refer to ++ * [property@WebSocketConnection:max-incoming-payload-size]. + * + * Since: 3.8 + */ +@@ -1690,7 +1700,7 @@ soup_websocket_connection_class_init (SoupWebsocketConnectionClass *klass) + "Max total message size ", + 0, + G_MAXUINT64, +- MAX_TOTAL_MESSAGE_SIZE_DEFAULT, ++ 0, + G_PARAM_READWRITE | + G_PARAM_CONSTRUCT | + G_PARAM_STATIC_STRINGS); +@@ -2180,7 +2190,7 @@ soup_websocket_connection_get_max_total_message_size (SoupWebsocketConnection *s + { + SoupWebsocketConnectionPrivate *priv = soup_websocket_connection_get_instance_private (self); + +- g_return_val_if_fail (SOUP_IS_WEBSOCKET_CONNECTION (self), MAX_TOTAL_MESSAGE_SIZE_DEFAULT); ++ g_return_val_if_fail (SOUP_IS_WEBSOCKET_CONNECTION (self), 0); + + return priv->max_total_message_size; + } +-- +2.45.4 + + +From 77f0d95bb7a3a3f553111f80378ae4d6ea297aad Mon Sep 17 00:00:00 2001 +From: Michael Catanzaro +Date: Fri, 16 May 2025 16:55:40 -0500 +Subject: [PATCH 4/4] Add tests for max-incoming-packet-size and + max-total-message-size + +An even better test would verify that it's possible to send big messages +containing small packets, but libsoup doesn't offer control over packet +size, and I don't want to take the time to learn how WebSockets work to +figure out how to do that manually. Instead, I just check that both +limits work, for both client and server. + +I didn't add deflate variants of these tests because I doubt that would +add valuable coverage. 
+-- +2.45.4 + diff --git a/SPECS/libsoup/CVE-2026-1536.patch b/SPECS/libsoup/CVE-2026-1536.patch new file mode 100644 index 00000000000..0658420a469 --- /dev/null +++ b/SPECS/libsoup/CVE-2026-1536.patch @@ -0,0 +1,383 @@ +From 5a3096f183c00ee699b6dfcd166d7b3b417275ab Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Wed, 4 Feb 2026 05:32:24 +0000 +Subject: [PATCH] Always validate the headers value when coming from untrusted + source + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport from existing Build 1042652 of https://gitlab.gnome.org/GNOME/libsoup/-/commit/5c1a2e9c06a834eb715f60265a877f5b882cc1b1.patch +--- + libsoup/auth/soup-auth-manager.c | 2 +- + libsoup/auth/soup-auth-ntlm.c | 2 +- + libsoup/cache/soup-cache.c | 4 +- + .../content-decoder/soup-content-decoder.c | 2 +- + .../http1/soup-server-message-io-http1.c | 2 +- + libsoup/server/soup-server.c | 2 +- + libsoup/soup-message-headers-private.h | 8 ++-- + libsoup/soup-message-headers.c | 45 ++++++++++++++----- + libsoup/soup-message.c | 8 ++-- + libsoup/soup-session.c | 4 +- + libsoup/websocket/soup-websocket.c | 18 ++++---- + tests/http2-test.c | 2 +- + 12 files changed, 62 insertions(+), 37 deletions(-) + +diff --git a/libsoup/auth/soup-auth-manager.c b/libsoup/auth/soup-auth-manager.c +index 402967d..1800190 100644 +--- a/libsoup/auth/soup-auth-manager.c ++++ b/libsoup/auth/soup-auth-manager.c +@@ -441,7 +441,7 @@ update_authorization_header (SoupMessage *msg, SoupAuth *auth, gboolean is_proxy + if (!token) + return; + +- soup_message_headers_replace_common (soup_message_get_request_headers (msg), authorization_header, token); ++ soup_message_headers_replace_common (soup_message_get_request_headers (msg), authorization_header, token, TRUE); + g_free (token); + } + +diff --git a/libsoup/auth/soup-auth-ntlm.c b/libsoup/auth/soup-auth-ntlm.c +index 7108a32..b4fc1d1 100644 +--- a/libsoup/auth/soup-auth-ntlm.c ++++ b/libsoup/auth/soup-auth-ntlm.c +@@ -328,7 +328,7 @@ soup_auth_ntlm_update_connection (SoupConnectionAuth *auth, SoupMessage *msg, + conn->state = SOUP_NTLM_FAILED; + if (soup_message_is_keepalive (msg)) { + soup_message_headers_append_common (soup_message_get_response_headers (msg), +- SOUP_HEADER_CONNECTION, "close"); ++ SOUP_HEADER_CONNECTION, "close", TRUE); + } + return TRUE; + } +diff --git a/libsoup/cache/soup-cache.c b/libsoup/cache/soup-cache.c +index bc733a6..8e86682 100644 +--- a/libsoup/cache/soup-cache.c ++++ b/libsoup/cache/soup-cache.c +@@ -1471,11 +1471,11 @@ soup_cache_generate_conditional_request (SoupCache *cache, SoupMessage *original + if (last_modified) + soup_message_headers_append_common (soup_message_get_request_headers (msg), + SOUP_HEADER_IF_MODIFIED_SINCE, +- last_modified); ++ last_modified, TRUE); + if (etag) + soup_message_headers_append_common (soup_message_get_request_headers (msg), + SOUP_HEADER_IF_NONE_MATCH, +- etag); ++ etag, TRUE); + + return msg; + } +diff --git a/libsoup/content-decoder/soup-content-decoder.c b/libsoup/content-decoder/soup-content-decoder.c +index f75ebce..2fdad66 100644 +--- a/libsoup/content-decoder/soup-content-decoder.c ++++ b/libsoup/content-decoder/soup-content-decoder.c +@@ -249,7 +249,7 @@ soup_content_decoder_request_queued (SoupSessionFeature *feature, + #endif + + soup_message_headers_append_common (soup_message_get_request_headers (msg), +- SOUP_HEADER_ACCEPT_ENCODING, header); ++ SOUP_HEADER_ACCEPT_ENCODING, header, TRUE); + } + } + +diff --git a/libsoup/server/http1/soup-server-message-io-http1.c 
b/libsoup/server/http1/soup-server-message-io-http1.c +index 3803cab..e8de2c2 100644 +--- a/libsoup/server/http1/soup-server-message-io-http1.c ++++ b/libsoup/server/http1/soup-server-message-io-http1.c +@@ -762,7 +762,7 @@ io_read (SoupServerMessageIOHTTP1 *server_io, + * closed when we're done. + */ + soup_server_message_set_status (msg, status, NULL); +- soup_message_headers_append_common (request_headers, SOUP_HEADER_CONNECTION, "close"); ++ soup_message_headers_append_common (request_headers, SOUP_HEADER_CONNECTION, "close", TRUE); + io->read_state = SOUP_MESSAGE_IO_STATE_FINISHING; + break; + } +diff --git a/libsoup/server/soup-server.c b/libsoup/server/soup-server.c +index 1ea81fc..2d3dec9 100644 +--- a/libsoup/server/soup-server.c ++++ b/libsoup/server/soup-server.c +@@ -850,7 +850,7 @@ got_headers (SoupServer *server, + + date = g_date_time_new_now_utc (); + date_string = soup_date_time_to_string (date, SOUP_DATE_HTTP); +- soup_message_headers_replace_common (headers, SOUP_HEADER_DATE, date_string); ++ soup_message_headers_replace_common (headers, SOUP_HEADER_DATE, date_string, TRUE); + g_free (date_string); + g_date_time_unref (date); + +diff --git a/libsoup/soup-message-headers-private.h b/libsoup/soup-message-headers-private.h +index 9815464..ae7932a 100644 +--- a/libsoup/soup-message-headers-private.h ++++ b/libsoup/soup-message-headers-private.h +@@ -13,9 +13,10 @@ G_BEGIN_DECLS + void soup_message_headers_append_untrusted_data (SoupMessageHeaders *hdrs, + const char *name, + const char *value); +-void soup_message_headers_append_common (SoupMessageHeaders *hdrs, ++gboolean soup_message_headers_append_common (SoupMessageHeaders *hdrs, + SoupHeaderName name, +- const char *value); ++ const char *value, ++ gboolean trusted_value); + const char *soup_message_headers_get_one_common (SoupMessageHeaders *hdrs, + SoupHeaderName name); + const char *soup_message_headers_get_list_common (SoupMessageHeaders *hdrs, +@@ -24,7 +25,8 @@ void soup_message_headers_remove_common (SoupMessageHeaders *hdr + SoupHeaderName name); + void soup_message_headers_replace_common (SoupMessageHeaders *hdrs, + SoupHeaderName name, +- const char *value); ++ const char *value, ++ gboolean trusted_value); + gboolean soup_message_headers_header_contains_common (SoupMessageHeaders *hdrs, + SoupHeaderName name, + const char *token); +diff --git a/libsoup/soup-message-headers.c b/libsoup/soup-message-headers.c +index c1b7454..e5877aa 100644 +--- a/libsoup/soup-message-headers.c ++++ b/libsoup/soup-message-headers.c +@@ -267,13 +267,34 @@ soup_message_headers_clean_connection_headers (SoupMessageHeaders *hdrs) + soup_header_free_list (tokens); + } + +-void ++static inline gboolean is_valid_header_name (const char *name) ++{ ++ return name && *name && strpbrk (name, " \r\n:") == NULL; ++} ++ ++static inline gboolean is_valid_header_value (const char *value) ++{ ++ return value && strpbrk (value, "\r\n") == NULL; ++} ++ ++gboolean + soup_message_headers_append_common (SoupMessageHeaders *hdrs, + SoupHeaderName name, +- const char *value) ++ const char *value, ++ gboolean trusted_value) + { + SoupCommonHeader header; + ++ if (name == SOUP_HEADER_HOST && soup_message_headers_get_one (hdrs, "Host")) { ++ /* Prevent duplicate Host header */ ++ return FALSE; ++ } ++ ++ if (!trusted_value && !is_valid_header_value (value)) { ++ g_warning ("soup_message_headers_append: Ignoring bad value '%s'", value); ++ return FALSE; ++ } ++ + if (!hdrs->common_headers) + hdrs->common_headers = g_array_sized_new (FALSE, FALSE, sizeof 
(SoupCommonHeader), 6); + +@@ -284,6 +305,7 @@ soup_message_headers_append_common (SoupMessageHeaders *hdrs, + g_hash_table_remove (hdrs->common_concat, GUINT_TO_POINTER (header.name)); + + soup_message_headers_set (hdrs, name, value); ++ return TRUE; + } + + /** +@@ -332,7 +354,7 @@ soup_message_headers_append (SoupMessageHeaders *hdrs, + + header_name = soup_header_name_from_string (name); + if (header_name != SOUP_HEADER_UNKNOWN) { +- soup_message_headers_append_common (hdrs, header_name, value); ++ soup_message_headers_append_common (hdrs, header_name, value, FALSE); + return; + } + +@@ -364,10 +386,11 @@ soup_message_headers_append_untrusted_data (SoupMessageHeaders *hdrs, + void + soup_message_headers_replace_common (SoupMessageHeaders *hdrs, + SoupHeaderName name, +- const char *value) ++ const char *value, ++ gboolean trusted_value) + { + soup_message_headers_remove_common (hdrs, name); +- soup_message_headers_append_common (hdrs, name, value); ++ soup_message_headers_append_common (hdrs, name, value, trusted_value); + } + + /** +@@ -980,7 +1003,7 @@ soup_message_headers_set_encoding (SoupMessageHeaders *hdrs, + + case SOUP_ENCODING_CHUNKED: + soup_message_headers_remove_common (hdrs, SOUP_HEADER_CONTENT_LENGTH); +- soup_message_headers_replace_common (hdrs, SOUP_HEADER_TRANSFER_ENCODING, "chunked"); ++ soup_message_headers_replace_common (hdrs, SOUP_HEADER_TRANSFER_ENCODING, "chunked", TRUE); + break; + + default: +@@ -1039,7 +1062,7 @@ soup_message_headers_set_content_length (SoupMessageHeaders *hdrs, + g_snprintf (length, sizeof (length), "%" G_GUINT64_FORMAT, + content_length); + soup_message_headers_remove_common (hdrs, SOUP_HEADER_TRANSFER_ENCODING); +- soup_message_headers_replace_common (hdrs, SOUP_HEADER_CONTENT_LENGTH, length); ++ soup_message_headers_replace_common (hdrs, SOUP_HEADER_CONTENT_LENGTH, length, TRUE); + } + + /** +@@ -1090,7 +1113,7 @@ soup_message_headers_set_expectations (SoupMessageHeaders *hdrs, + g_return_if_fail ((expectations & ~SOUP_EXPECTATION_CONTINUE) == 0); + + if (expectations & SOUP_EXPECTATION_CONTINUE) +- soup_message_headers_replace_common (hdrs, SOUP_HEADER_EXPECT, "100-continue"); ++ soup_message_headers_replace_common (hdrs, SOUP_HEADER_EXPECT, "100-continue", TRUE); + else + soup_message_headers_remove_common (hdrs, SOUP_HEADER_EXPECT); + } +@@ -1324,7 +1347,7 @@ soup_message_headers_set_ranges (SoupMessageHeaders *hdrs, + } + } + +- soup_message_headers_replace_common (hdrs, SOUP_HEADER_RANGE, header->str); ++ soup_message_headers_replace_common (hdrs, SOUP_HEADER_RANGE, header->str, TRUE); + g_string_free (header, TRUE); + } + +@@ -1437,7 +1460,7 @@ soup_message_headers_set_content_range (SoupMessageHeaders *hdrs, + header = g_strdup_printf ("bytes %" G_GINT64_FORMAT "-%" + G_GINT64_FORMAT "/*", start, end); + } +- soup_message_headers_replace_common (hdrs, SOUP_HEADER_CONTENT_RANGE, header); ++ soup_message_headers_replace_common (hdrs, SOUP_HEADER_CONTENT_RANGE, header, TRUE); + g_free (header); + } + +@@ -1512,7 +1535,7 @@ set_content_foo (SoupMessageHeaders *hdrs, + } + } + +- soup_message_headers_replace_common (hdrs, header_name, str->str); ++ soup_message_headers_replace_common (hdrs, header_name, str->str, FALSE); + g_string_free (str, TRUE); + } + +diff --git a/libsoup/soup-message.c b/libsoup/soup-message.c +index ebe9e9f..fbd7af0 100644 +--- a/libsoup/soup-message.c ++++ b/libsoup/soup-message.c +@@ -1157,7 +1157,7 @@ soup_message_set_request_body (SoupMessage *msg, + g_warn_if_fail (strchr (content_type, '/') != NULL); + + 
if (soup_message_headers_get_content_type (priv->request_headers, NULL) != content_type) +- soup_message_headers_replace_common (priv->request_headers, SOUP_HEADER_CONTENT_TYPE, content_type); ++ soup_message_headers_replace_common (priv->request_headers, SOUP_HEADER_CONTENT_TYPE, content_type, FALSE); + } + + if (content_length == -1) +@@ -3234,12 +3234,12 @@ soup_message_set_request_host_from_uri (SoupMessage *msg, + + host = soup_uri_get_host_for_headers (uri); + if (soup_uri_uses_default_port (uri)) +- soup_message_headers_replace_common (priv->request_headers, SOUP_HEADER_HOST, host); ++ soup_message_headers_replace_common (priv->request_headers, SOUP_HEADER_HOST, host, FALSE); + else { + char *value; + + value = g_strdup_printf ("%s:%d", host, g_uri_get_port (uri)); +- soup_message_headers_replace_common (priv->request_headers, SOUP_HEADER_HOST, value); ++ soup_message_headers_replace_common (priv->request_headers, SOUP_HEADER_HOST, value, FALSE); + g_free (value); + } + g_free (host); +@@ -3279,7 +3279,7 @@ soup_message_force_keep_alive_if_needed (SoupMessage *msg) + if (!soup_message_headers_header_contains_common (priv->request_headers, SOUP_HEADER_CONNECTION, "Keep-Alive") && + !soup_message_headers_header_contains_common (priv->request_headers, SOUP_HEADER_CONNECTION, "close") && + !soup_message_headers_header_contains_common (priv->request_headers, SOUP_HEADER_CONNECTION, "Upgrade")) { +- soup_message_headers_append_common (priv->request_headers, SOUP_HEADER_CONNECTION, "Keep-Alive"); ++ soup_message_headers_append_common (priv->request_headers, SOUP_HEADER_CONNECTION, "Keep-Alive", TRUE); + } + } + +diff --git a/libsoup/soup-session.c b/libsoup/soup-session.c +index 649902f..4bca2a4 100644 +--- a/libsoup/soup-session.c ++++ b/libsoup/soup-session.c +@@ -1386,10 +1386,10 @@ soup_session_send_queue_item (SoupSession *session, + + request_headers = soup_message_get_request_headers (item->msg); + if (priv->user_agent) +- soup_message_headers_replace_common (request_headers, SOUP_HEADER_USER_AGENT, priv->user_agent); ++ soup_message_headers_replace_common (request_headers, SOUP_HEADER_USER_AGENT, priv->user_agent, FALSE); + + if (priv->accept_language && !soup_message_headers_get_list_common (request_headers, SOUP_HEADER_ACCEPT_LANGUAGE)) +- soup_message_headers_append_common (request_headers, SOUP_HEADER_ACCEPT_LANGUAGE, priv->accept_language); ++ soup_message_headers_append_common (request_headers, SOUP_HEADER_ACCEPT_LANGUAGE, priv->accept_language, FALSE); + + conn = soup_message_get_connection (item->msg); + soup_message_set_http_version (item->msg, soup_connection_get_negotiated_protocol (conn)); +diff --git a/libsoup/websocket/soup-websocket.c b/libsoup/websocket/soup-websocket.c +index 64e66fd..a9fd0fc 100644 +--- a/libsoup/websocket/soup-websocket.c ++++ b/libsoup/websocket/soup-websocket.c +@@ -257,21 +257,21 @@ soup_websocket_client_prepare_handshake (SoupMessage *msg, + + g_return_if_fail (SOUP_IS_MESSAGE (msg)); + +- soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_UPGRADE, "websocket"); +- soup_message_headers_append_common (soup_message_get_request_headers (msg), SOUP_HEADER_CONNECTION, "Upgrade"); ++ soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_UPGRADE, "websocket", TRUE); ++ soup_message_headers_append_common (soup_message_get_request_headers (msg), SOUP_HEADER_CONNECTION, "Upgrade", TRUE); + + raw[0] = g_random_int (); + raw[1] = g_random_int (); + raw[2] = g_random_int (); + raw[3] 
= g_random_int (); + key = g_base64_encode ((const guchar *)raw, sizeof (raw)); +- soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_SEC_WEBSOCKET_KEY, key); ++ soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_SEC_WEBSOCKET_KEY, key, TRUE); + g_free (key); + +- soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_SEC_WEBSOCKET_VERSION, "13"); ++ soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_SEC_WEBSOCKET_VERSION, "13", TRUE); + + if (origin) +- soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_ORIGIN, origin); ++ soup_message_headers_replace_common (soup_message_get_request_headers (msg), SOUP_HEADER_ORIGIN, origin, FALSE); + + if (protocols && *protocols) { + char *protocols_str; +@@ -716,18 +716,18 @@ soup_websocket_server_process_handshake (SoupServerMessage *msg, + + soup_server_message_set_status (msg, SOUP_STATUS_SWITCHING_PROTOCOLS, NULL); + response_headers = soup_server_message_get_response_headers (msg); +- soup_message_headers_replace_common (response_headers, SOUP_HEADER_UPGRADE, "websocket"); +- soup_message_headers_append_common (response_headers, SOUP_HEADER_CONNECTION, "Upgrade"); ++ soup_message_headers_replace_common (response_headers, SOUP_HEADER_UPGRADE, "websocket", TRUE); ++ soup_message_headers_append_common (response_headers, SOUP_HEADER_CONNECTION, "Upgrade", TRUE); + + request_headers = soup_server_message_get_request_headers (msg); + key = soup_message_headers_get_one_common (request_headers, SOUP_HEADER_SEC_WEBSOCKET_KEY); + accept_key = compute_accept_key (key); +- soup_message_headers_append_common (response_headers, SOUP_HEADER_SEC_WEBSOCKET_ACCEPT, accept_key); ++ soup_message_headers_append_common (response_headers, SOUP_HEADER_SEC_WEBSOCKET_ACCEPT, accept_key, TRUE); + g_free (accept_key); + + choose_subprotocol (msg, (const char **) protocols, &chosen_protocol); + if (chosen_protocol) +- soup_message_headers_append_common (response_headers, SOUP_HEADER_SEC_WEBSOCKET_PROTOCOL, chosen_protocol); ++ soup_message_headers_append_common (response_headers, SOUP_HEADER_SEC_WEBSOCKET_PROTOCOL, chosen_protocol, TRUE); + + extensions = soup_message_headers_get_list_common (request_headers, SOUP_HEADER_SEC_WEBSOCKET_EXTENSIONS); + if (extensions && *extensions) { +diff --git a/tests/http2-test.c b/tests/http2-test.c +index 92944d6..9693c31 100644 +--- a/tests/http2-test.c ++++ b/tests/http2-test.c +@@ -1351,7 +1351,7 @@ server_handler (SoupServer *server, + SoupMessageHeaders *response_headers; + + response_headers = soup_server_message_get_response_headers (msg); +- /* Use soup_message_headers_append_common to skip the validation check. */ ++ /* Use soup_message_headers_append_common with trusted_value=TRUE to skip the validation check. 
*/ + soup_message_headers_append_common (response_headers, SOUP_HEADER_CONTENT_TYPE, "\r"); + soup_server_message_set_status (msg, SOUP_STATUS_OK, NULL); + } else if (strcmp (path, "/invalid-header-rfc9113") == 0) { +-- +2.45.4 + diff --git a/SPECS/libsoup/libsoup.spec b/SPECS/libsoup/libsoup.spec index 505d33c13df..3a1b5f24196 100644 --- a/SPECS/libsoup/libsoup.spec +++ b/SPECS/libsoup/libsoup.spec @@ -4,7 +4,7 @@ Summary: libsoup HTTP client/server library Name: libsoup Version: 3.4.4 -Release: 10%{?dist} +Release: 12%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -68,6 +68,9 @@ Patch18: CVE-2025-32907.patch Patch19: CVE-2025-4948.patch Patch20: CVE-2025-4969.patch Patch21: CVE-2025-11021.patch +Patch22: CVE-2025-12105.patch +Patch23: CVE-2025-32049.patch +Patch24: CVE-2026-1536.patch %description libsoup is HTTP client/server library for GNOME @@ -135,6 +138,12 @@ find %{buildroot} -type f -name "*.la" -delete -print %defattr(-,root,root) %changelog +* Wed Feb 04 2026 Azure Linux Security Servicing Account - 3.4.4-12 +- Patch for CVE-2026-1536, CVE-2025-32049 + +* Tue Dec 23 2025 Azure Linux Security Servicing Account - 3.4.4-11 +- Patch for CVE-2025-12105 + * Wed Oct 29 2025 Azure Linux Security Servicing Account - 3.4.4-10 - Patch for CVE-2025-11021 diff --git a/SPECS/libtasn1/CVE-2025-13151.patch b/SPECS/libtasn1/CVE-2025-13151.patch new file mode 100644 index 00000000000..53b69269634 --- /dev/null +++ b/SPECS/libtasn1/CVE-2025-13151.patch @@ -0,0 +1,41 @@ +From 19c67d35287ca30929e0f4353cdc1b89de1ae75e Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Mon, 12 Jan 2026 16:41:34 +0000 +Subject: [PATCH] Fix for CVE-2025-13151: prevent stack-based buffer overflow + in asn1_expand_octet_string; update NEWS + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://gitlab.com/gnutls/libtasn1/-/commit/d276cc495a2a32b182c3c39851f1ba58f2d9f9b8.patch +--- + NEWS | 1 + + lib/decoding.c | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/NEWS b/NEWS +index cbd09eb..2100e6f 100644 +--- a/NEWS ++++ b/NEWS +@@ -4,6 +4,7 @@ GNU Libtasn1 NEWS -*- outline -*- + - Clarify libtasn1.map license. Closes: #38. + - Fix ETYPE_OK out of bounds read. Closes: #32. + - Update gnulib files and various maintenance fixes. ++- Fix for vulnerbaility CVE-2025-13151 Stack-based buffer overflow + + * Noteworthy changes in release 4.18.0 (2021-11-09) [stable] + - Improve GTK-DOC manual. Closes: #35. 
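Editorial note on the hunk below, not part of the patch itself: the fix widens the scratch buffer in asn1_expand_octet_string() from 2 * ASN1_MAX_NAME_SIZE + 1 to 2 * ASN1_MAX_NAME_SIZE + 2 bytes. The following is a minimal sketch of the size arithmetic, assuming the buffer must hold two names of up to ASN1_MAX_NAME_SIZE characters joined by a separator plus the terminating NUL; the concrete ASN1_MAX_NAME_SIZE value and the joining scheme are illustrative assumptions, not taken from the library headers.

#include <stdio.h>
#include <string.h>

#define ASN1_MAX_NAME_SIZE 64           /* illustrative value */

/* Worst case: strlen(a) + 1 ('.') + strlen(b) + 1 (NUL)
 * = 2 * ASN1_MAX_NAME_SIZE + 2 when both names are maximal,
 * one byte more than the pre-patch buffer held. */
static int join_names(char *dst, size_t dstlen, const char *a, const char *b)
{
    int n = snprintf(dst, dstlen, "%s.%s", a, b);
    return (n < 0 || (size_t)n >= dstlen) ? -1 : 0;
}

int main(void)
{
    char name[2 * ASN1_MAX_NAME_SIZE + 2];
    char a[ASN1_MAX_NAME_SIZE + 1], b[ASN1_MAX_NAME_SIZE + 1];

    memset(a, 'A', ASN1_MAX_NAME_SIZE); a[ASN1_MAX_NAME_SIZE] = '\0';
    memset(b, 'B', ASN1_MAX_NAME_SIZE); b[ASN1_MAX_NAME_SIZE] = '\0';

    /* With the old "+ 1" size this maximal case would not fit. */
    return join_names(name, sizeof(name), a, b) == 0 ? 0 : 1;
}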
+diff --git a/lib/decoding.c b/lib/decoding.c +index b9245c4..bc45138 100644 +--- a/lib/decoding.c ++++ b/lib/decoding.c +@@ -1976,7 +1976,7 @@ int + asn1_expand_octet_string (asn1_node_const definitions, asn1_node * element, + const char *octetName, const char *objectName) + { +- char name[2 * ASN1_MAX_NAME_SIZE + 1], value[ASN1_MAX_NAME_SIZE]; ++ char name[2 * ASN1_MAX_NAME_SIZE + 2], value[ASN1_MAX_NAME_SIZE]; + int retCode = ASN1_SUCCESS, result; + int len, len2, len3; + asn1_node_const p2; +-- +2.45.4 + diff --git a/SPECS/libtasn1/libtasn1.spec b/SPECS/libtasn1/libtasn1.spec index 9f924d84c2e..f73a3e1afae 100644 --- a/SPECS/libtasn1/libtasn1.spec +++ b/SPECS/libtasn1/libtasn1.spec @@ -1,7 +1,7 @@ Summary: ASN.1 library Name: libtasn1 Version: 4.19.0 -Release: 2%{?dist} +Release: 3%{?dist} License: GPLv3+ AND LGPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -9,6 +9,7 @@ Group: System Environment/Libraries URL: https://www.gnu.org/software/libtasn1/ Source0: https://ftp.gnu.org/gnu/libtasn1/%{name}-%{version}.tar.gz Patch0: CVE-2024-12133.patch +Patch1: CVE-2025-13151.patch Provides: libtasn1-tools = %{version}-%{release} %description @@ -58,7 +59,10 @@ make %{?_smp_mflags} check %{_mandir}/man3/* %changelog -* Fri Feb 21 2024 Ankita Pareek - 4.19.0-2 +* Mon Jan 12 2026 Azure Linux Security Servicing Account - 4.19.0-3 +- Patch for CVE-2025-13151 + +* Fri Feb 21 2025 Ankita Pareek - 4.19.0-2 - Address CVE-2024-12133 * Tue Oct 25 2022 Pawel Winogrodzki - 4.19.0-1 diff --git a/SPECS/libvirt/CVE-2025-12748.patch b/SPECS/libvirt/CVE-2025-12748.patch new file mode 100644 index 00000000000..16cbc3875fa --- /dev/null +++ b/SPECS/libvirt/CVE-2025-12748.patch @@ -0,0 +1,1327 @@ +From 42813e33416a8b4c052829bea5e8a938a7ba9eca Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 + +Subject: [PATCH] Pre-requisite for CVE-2025-12748 + +Upstream Patch reference: +1. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=42813e33416a8b4c052829bea5e8a938a7ba9eca +2. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=d7d1c76f23d767698ce0838ec4eddf1b214c4bbf +3. 
https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=1991519ee70ae620312e808c5bfd1bc296208431 +--- + src/qemu/qemu_driver.c | 46 ++++--- + src/qemu/qemu_saveimage.c | 268 +++++++++++++++++++++++--------------- + src/qemu/qemu_saveimage.h | 21 ++- + src/qemu/qemu_snapshot.c | 9 +- + 4 files changed, 207 insertions(+), 137 deletions(-) + +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index 448e6b1..c4b0128 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -5757,9 +5757,12 @@ qemuDomainRestoreInternal(virConnectPtr conn, + if (flags & VIR_DOMAIN_SAVE_RESET_NVRAM) + reset_nvram = true; + +- fd = qemuSaveImageOpen(driver, NULL, path, &def, &data, ++ if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) ++ goto cleanup; ++ ++ fd = qemuSaveImageOpen(driver, path, + (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) != 0, +- &wrapperFd, false, false); ++ &wrapperFd, false); + if (fd < 0) + goto cleanup; + +@@ -5888,15 +5891,11 @@ qemuDomainSaveImageGetXMLDesc(virConnectPtr conn, const char *path, + virQEMUDriver *driver = conn->privateData; + char *ret = NULL; + g_autoptr(virDomainDef) def = NULL; +- int fd = -1; + virQEMUSaveData *data = NULL; + + virCheckFlags(VIR_DOMAIN_SAVE_IMAGE_XML_SECURE, NULL); + +- fd = qemuSaveImageOpen(driver, NULL, path, &def, &data, +- false, NULL, false, false); +- +- if (fd < 0) ++ if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) + goto cleanup; + + if (virDomainSaveImageGetXMLDescEnsureACL(conn, def) < 0) +@@ -5906,7 +5905,6 @@ qemuDomainSaveImageGetXMLDesc(virConnectPtr conn, const char *path, + + cleanup: + virQEMUSaveDataFree(data); +- VIR_FORCE_CLOSE(fd); + return ret; + } + +@@ -5930,9 +5928,10 @@ qemuDomainSaveImageDefineXML(virConnectPtr conn, const char *path, + else if (flags & VIR_DOMAIN_SAVE_PAUSED) + state = 0; + +- fd = qemuSaveImageOpen(driver, NULL, path, &def, &data, +- false, NULL, true, false); ++ if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) ++ goto cleanup; + ++ fd = qemuSaveImageOpen(driver, path, 0, NULL, false); + if (fd < 0) + goto cleanup; + +@@ -5989,7 +5988,6 @@ qemuDomainManagedSaveGetXMLDesc(virDomainPtr dom, unsigned int flags) + g_autofree char *path = NULL; + char *ret = NULL; + g_autoptr(virDomainDef) def = NULL; +- int fd = -1; + virQEMUSaveData *data = NULL; + qemuDomainObjPrivate *priv; + +@@ -6011,15 +6009,13 @@ qemuDomainManagedSaveGetXMLDesc(virDomainPtr dom, unsigned int flags) + goto cleanup; + } + +- if ((fd = qemuSaveImageOpen(driver, priv->qemuCaps, path, &def, &data, +- false, NULL, false, false)) < 0) ++ if (qemuSaveImageGetMetadata(driver, priv->qemuCaps, path, &def, &data) < 0) + goto cleanup; + + ret = qemuDomainDefFormatXML(driver, priv->qemuCaps, def, flags); + + cleanup: + virQEMUSaveDataFree(data); +- VIR_FORCE_CLOSE(fd); + virDomainObjEndAPI(&vm); + return ret; + } +@@ -6075,14 +6071,26 @@ qemuDomainObjRestore(virConnectPtr conn, + virQEMUSaveData *data = NULL; + virFileWrapperFd *wrapperFd = NULL; + +- fd = qemuSaveImageOpen(driver, NULL, path, &def, &data, +- bypass_cache, &wrapperFd, false, true); +- if (fd < 0) { +- if (fd == -3) +- ret = 1; ++ ret = qemuSaveImageGetMetadata(driver, NULL, path, &def, &data); ++ if (ret < 0) { ++ if (qemuSaveImageIsCorrupt(driver, path)) { ++ if (unlink(path) < 0) { ++ virReportSystemError(errno, ++ _("cannot remove corrupt file: %1$s"), ++ path); ++ ret = -1; ++ } else { ++ virResetLastError(); ++ ret = 1; ++ } ++ } + goto cleanup; + } + ++ fd = qemuSaveImageOpen(driver, path, bypass_cache, 
&wrapperFd, false); ++ if (fd < 0) ++ goto cleanup; ++ + if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) { + int hookret; + +diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c +index 89112e3..db442a5 100644 +--- a/src/qemu/qemu_saveimage.c ++++ b/src/qemu/qemu_saveimage.c +@@ -247,6 +247,90 @@ qemuSaveImageGetCompressionCommand(virQEMUSaveFormat compression) + } + + ++static int ++qemuSaveImageReadHeader(int fd, virQEMUSaveData **ret_data) ++{ ++ g_autoptr(virQEMUSaveData) data = NULL; ++ virQEMUSaveHeader *header; ++ size_t xml_len; ++ size_t cookie_len; ++ ++ data = g_new0(virQEMUSaveData, 1); ++ header = &data->header; ++ if (saferead(fd, header, sizeof(*header)) != sizeof(*header)) { ++ virReportError(VIR_ERR_OPERATION_FAILED, ++ "%s", _("failed to read qemu header")); ++ return -1; ++ } ++ ++ if (memcmp(header->magic, QEMU_SAVE_MAGIC, sizeof(header->magic)) != 0) { ++ if (memcmp(header->magic, QEMU_SAVE_PARTIAL, sizeof(header->magic)) == 0) { ++ virReportError(VIR_ERR_OPERATION_FAILED, "%s", ++ _("save image is incomplete")); ++ return -1; ++ } ++ ++ virReportError(VIR_ERR_OPERATION_FAILED, "%s", ++ _("image magic is incorrect")); ++ return -1; ++ } ++ ++ if (header->version > QEMU_SAVE_VERSION) { ++ /* convert endianness and try again */ ++ qemuSaveImageBswapHeader(header); ++ } ++ ++ if (header->version > QEMU_SAVE_VERSION) { ++ virReportError(VIR_ERR_OPERATION_FAILED, ++ _("image version is not supported (%1$d > %2$d)"), ++ header->version, QEMU_SAVE_VERSION); ++ return -1; ++ } ++ ++ if (header->compressed >= QEMU_SAVE_FORMAT_LAST) { ++ virReportError(VIR_ERR_OPERATION_FAILED, ++ _("unsupported save image format: %1$d"), header->compressed); ++ return -1; ++ } ++ ++ if (header->data_len <= 0) { ++ virReportError(VIR_ERR_OPERATION_FAILED, ++ _("invalid header data length: %1$d"), header->data_len); ++ return -1; ++ } ++ ++ if (header->cookieOffset) ++ xml_len = header->cookieOffset; ++ else ++ xml_len = header->data_len; ++ ++ cookie_len = header->data_len - xml_len; ++ ++ data->xml = g_new0(char, xml_len); ++ ++ if (saferead(fd, data->xml, xml_len) != xml_len) { ++ virReportError(VIR_ERR_OPERATION_FAILED, ++ "%s", _("failed to read domain XML")); ++ return -1; ++ } ++ ++ if (cookie_len > 0) { ++ data->cookie = g_new0(char, cookie_len); ++ ++ if (saferead(fd, data->cookie, cookie_len) != cookie_len) { ++ virReportError(VIR_ERR_OPERATION_FAILED, "%s", ++ _("failed to read cookie")); ++ return -1; ++ } ++ } ++ ++ if (ret_data) ++ *ret_data = g_steal_pointer(&data); ++ ++ return 0; ++} ++ ++ + /** + * qemuSaveImageDecompressionStart: + * @data: data from memory state file +@@ -520,41 +604,99 @@ qemuSaveImageGetCompressionProgram(const char *imageFormat, + + + /** +- * qemuSaveImageOpen: ++ * qemuSaveImageIsCorrupt: ++ * @driver: qemu driver data ++ * @path: path of the save image ++ * ++ * Returns true if the save image file identified by @path does not exist or ++ * has a corrupt header. Returns false otherwise. 
++ */ ++ ++bool ++qemuSaveImageIsCorrupt(virQEMUDriver *driver, const char *path) ++{ ++ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); ++ VIR_AUTOCLOSE fd = -1; ++ virQEMUSaveHeader header; ++ ++ if ((fd = qemuDomainOpenFile(cfg, NULL, path, O_RDONLY, NULL)) < 0) ++ return true; ++ ++ if (saferead(fd, &header, sizeof(header)) != sizeof(header)) ++ return true; ++ ++ if (memcmp(header.magic, QEMU_SAVE_MAGIC, sizeof(header.magic)) != 0 || ++ memcmp(header.magic, QEMU_SAVE_PARTIAL, sizeof(header.magic)) == 0) ++ return true; ++ ++ return false; ++} ++ ++ ++/** ++ * qemuSaveImageGetMetadata: + * @driver: qemu driver data + * @qemuCaps: pointer to qemuCaps if the domain is running or NULL + * @path: path of the save image + * @ret_def: returns domain definition created from the XML stored in the image + * @ret_data: returns structure filled with data from the image header ++ * ++ * Open the save image file, read libvirt's save image metadata, and populate ++ * the @ret_def and @ret_data structures. Returns 0 on success and -1 on failure. ++ */ ++int ++qemuSaveImageGetMetadata(virQEMUDriver *driver, ++ virQEMUCaps *qemuCaps, ++ const char *path, ++ virDomainDef **ret_def, ++ virQEMUSaveData **ret_data) ++{ ++ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); ++ VIR_AUTOCLOSE fd = -1; ++ virQEMUSaveData *data; ++ g_autoptr(virDomainDef) def = NULL; ++ int rc; ++ ++ if ((fd = qemuDomainOpenFile(cfg, NULL, path, O_RDONLY, NULL)) < 0) ++ return -1; ++ ++ if ((rc = qemuSaveImageReadHeader(fd, ret_data)) < 0) ++ return rc; ++ ++ data = *ret_data; ++ /* Create a domain from this XML */ ++ if (!(def = virDomainDefParseString(data->xml, driver->xmlopt, qemuCaps, ++ VIR_DOMAIN_DEF_PARSE_INACTIVE | ++ VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE))) ++ return -1; ++ ++ *ret_def = g_steal_pointer(&def); ++ ++ return 0; ++} ++ ++ ++/** ++ * qemuSaveImageOpen: ++ * @driver: qemu driver data ++ * @path: path of the save image + * @bypass_cache: bypass cache when opening the file + * @wrapperFd: returns the file wrapper structure + * @open_write: open the file for writing (for updates) +- * @unlink_corrupt: remove the image file if it is corrupted + * +- * Returns the opened fd of the save image file and fills the appropriate fields +- * on success. On error returns -1 on most failures, -3 if corrupt image was +- * unlinked (no error raised). ++ * Returns the opened fd of the save image file on success, -1 on failure. + */ + int + qemuSaveImageOpen(virQEMUDriver *driver, +- virQEMUCaps *qemuCaps, + const char *path, +- virDomainDef **ret_def, +- virQEMUSaveData **ret_data, + bool bypass_cache, + virFileWrapperFd **wrapperFd, +- bool open_write, +- bool unlink_corrupt) ++ bool open_write) + { + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + VIR_AUTOCLOSE fd = -1; + int ret = -1; +- g_autoptr(virQEMUSaveData) data = NULL; +- virQEMUSaveHeader *header; +- g_autoptr(virDomainDef) def = NULL; + int oflags = open_write ? 
O_RDWR : O_RDONLY; +- size_t xml_len; +- size_t cookie_len; + + if (bypass_cache) { + int directFlag = virFileDirectFdFlag(); +@@ -574,100 +716,10 @@ qemuSaveImageOpen(virQEMUDriver *driver, + VIR_FILE_WRAPPER_BYPASS_CACHE))) + return -1; + +- data = g_new0(virQEMUSaveData, 1); +- +- header = &data->header; +- if (saferead(fd, header, sizeof(*header)) != sizeof(*header)) { +- if (unlink_corrupt) { +- if (unlink(path) < 0) { +- virReportSystemError(errno, +- _("cannot remove corrupt file: %1$s"), +- path); +- return -1; +- } else { +- return -3; +- } +- } +- +- virReportError(VIR_ERR_OPERATION_FAILED, +- "%s", _("failed to read qemu header")); ++ /* Read the header to position the file pointer for QEMU. Unfortunately we ++ * can't use lseek with virFileWrapperFD. */ ++ if (qemuSaveImageReadHeader(fd, NULL) < 0) + return -1; +- } +- +- if (memcmp(header->magic, QEMU_SAVE_MAGIC, sizeof(header->magic)) != 0) { +- if (memcmp(header->magic, QEMU_SAVE_PARTIAL, sizeof(header->magic)) == 0) { +- if (unlink_corrupt) { +- if (unlink(path) < 0) { +- virReportSystemError(errno, +- _("cannot remove corrupt file: %1$s"), +- path); +- return -1; +- } else { +- return -3; +- } +- } +- +- virReportError(VIR_ERR_OPERATION_FAILED, "%s", +- _("save image is incomplete")); +- return -1; +- } +- +- virReportError(VIR_ERR_OPERATION_FAILED, "%s", +- _("image magic is incorrect")); +- return -1; +- } +- +- if (header->version > QEMU_SAVE_VERSION) { +- /* convert endianness and try again */ +- qemuSaveImageBswapHeader(header); +- } +- +- if (header->version > QEMU_SAVE_VERSION) { +- virReportError(VIR_ERR_OPERATION_FAILED, +- _("image version is not supported (%1$d > %2$d)"), +- header->version, QEMU_SAVE_VERSION); +- return -1; +- } +- +- if (header->data_len <= 0) { +- virReportError(VIR_ERR_OPERATION_FAILED, +- _("invalid header data length: %1$d"), header->data_len); +- return -1; +- } +- +- if (header->cookieOffset) +- xml_len = header->cookieOffset; +- else +- xml_len = header->data_len; +- +- cookie_len = header->data_len - xml_len; +- +- data->xml = g_new0(char, xml_len); +- +- if (saferead(fd, data->xml, xml_len) != xml_len) { +- virReportError(VIR_ERR_OPERATION_FAILED, +- "%s", _("failed to read domain XML")); +- return -1; +- } +- +- if (cookie_len > 0) { +- data->cookie = g_new0(char, cookie_len); +- +- if (saferead(fd, data->cookie, cookie_len) != cookie_len) { +- virReportError(VIR_ERR_OPERATION_FAILED, "%s", +- _("failed to read cookie")); +- return -1; +- } +- } +- +- /* Create a domain from this XML */ +- if (!(def = virDomainDefParseString(data->xml, driver->xmlopt, qemuCaps, +- VIR_DOMAIN_DEF_PARSE_INACTIVE | +- VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE))) +- return -1; +- +- *ret_def = g_steal_pointer(&def); +- *ret_data = g_steal_pointer(&data); + + ret = fd; + fd = -1; +diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h +index e541792..8859fab 100644 +--- a/src/qemu/qemu_saveimage.h ++++ b/src/qemu/qemu_saveimage.h +@@ -69,17 +69,26 @@ qemuSaveImageStartVM(virConnectPtr conn, + virDomainAsyncJob asyncJob) + ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6); + ++bool ++qemuSaveImageIsCorrupt(virQEMUDriver *driver, ++ const char *path) ++ ATTRIBUTE_NONNULL(2); ++ ++int ++qemuSaveImageGetMetadata(virQEMUDriver *driver, ++ virQEMUCaps *qemuCaps, ++ const char *path, ++ virDomainDef **ret_def, ++ virQEMUSaveData **ret_data) ++ ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5); ++ + int + qemuSaveImageOpen(virQEMUDriver *driver, +- virQEMUCaps *qemuCaps, + const char *path, +- 
virDomainDef **ret_def, +- virQEMUSaveData **ret_data, + bool bypass_cache, + virFileWrapperFd **wrapperFd, +- bool open_write, +- bool unlink_corrupt) +- ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4); ++ bool open_write) ++ ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(4); + + int + qemuSaveImageGetCompressionProgram(const char *imageFormat, +diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c +index 9819448..a1f1f32 100644 +--- a/src/qemu/qemu_snapshot.c ++++ b/src/qemu/qemu_snapshot.c +@@ -2083,11 +2083,12 @@ qemuSnapshotRevertExternalPrepare(virDomainObj *vm, + g_autoptr(virDomainDef) savedef = NULL; + + memdata->path = snapdef->memorysnapshotfile; +- memdata->fd = qemuSaveImageOpen(driver, NULL, memdata->path, +- &savedef, &memdata->data, +- false, NULL, +- false, false); ++ if (qemuSaveImageGetMetadata(driver, NULL, memdata->path, &savedef, ++ &memdata->data) < 0) ++ return -1; + ++ memdata->fd = qemuSaveImageOpen(driver, memdata->path, ++ false, NULL, false); + if (memdata->fd < 0) + return -1; + +-- +2.43.0 + +From 4ceddd072ab794135b59364cb95386ef70b51b92 Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 1/8] conf: Add virDomainDefIDsParseString + +Upstream Patch reference: +1. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=4ceddd072ab794135b59364cb95386ef70b51b92 +2. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=97a53db9756cbc880ed3f4dc054e9a15690a2176 +3. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=1b8d1bd4b10e8eaa9a247487d709440ebba014ed +4. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=7a62099efb32ca270e1808fead3263569b81eeea +5. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=b4b37eb7ae3296c8b8520b5ad5e4cbce4dbcb8ca +6. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=8928cf500e1a94b6f78bc5b1840fa300f18406dd +7. https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=e8e0d3ce28a0856c14d1fe3d2b5977f164938b9a +8. 
https://git.launchpad.net/ubuntu/+source/libvirt/patch/?id=6ab6e8705128808aa90b9281bcf046a8cf61dbba + +--- + src/conf/domain_conf.c | 29 +++++++++++++++++++++++++++++ + src/conf/domain_conf.h | 3 +++ + src/libvirt_private.syms | 1 + + 3 files changed, 33 insertions(+) + +diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c +index 5d55d2a..8726161 100644 +--- a/src/conf/domain_conf.c ++++ b/src/conf/domain_conf.c +@@ -19626,6 +19626,35 @@ virDomainDefParse(const char *xmlStr, + return virDomainDefParseNode(ctxt, xmlopt, parseOpaque, flags); + } + ++virDomainDef * ++virDomainDefIDsParseString(const char *xmlStr, ++ virDomainXMLOption *xmlopt, ++ unsigned int flags) ++{ ++ g_autoptr(virDomainDef) def = NULL; ++ g_autoptr(xmlDoc) xml = NULL; ++ g_autoptr(xmlXPathContext) ctxt = NULL; ++ bool uuid_generated = false; ++ ++ xml = virXMLParseWithIndent(NULL, xmlStr, _("(domain_definition)"), ++ "domain", &ctxt, "domain.rng", false); ++ ++ if (!xml) ++ return NULL; ++ ++ def = virDomainDefNew(xmlopt); ++ if (!def) ++ return NULL; ++ ++ if (virDomainDefParseIDs(def, ctxt, flags, &uuid_generated) < 0) ++ return NULL; ++ ++ if (uuid_generated) ++ memset(def->uuid, 0, VIR_UUID_BUFLEN); ++ ++ return g_steal_pointer(&def); ++} ++ + virDomainDef * + virDomainDefParseString(const char *xmlStr, + virDomainXMLOption *xmlopt, +diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h +index d176bda..4073012 100644 +--- a/src/conf/domain_conf.h ++++ b/src/conf/domain_conf.h +@@ -3754,6 +3754,9 @@ virDomainDiskDef *virDomainDiskDefParse(const char *xmlStr, + virStorageSource *virDomainDiskDefParseSource(const char *xmlStr, + virDomainXMLOption *xmlopt, + unsigned int flags); ++virDomainDef * virDomainDefIDsParseString(const char *xmlStr, ++ virDomainXMLOption *xmlopt, ++ unsigned int flags); + virDomainDef *virDomainDefParseString(const char *xmlStr, + virDomainXMLOption *xmlopt, + void *parseOpaque, +diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms +index fc26109..a1ec464 100644 +--- a/src/libvirt_private.syms ++++ b/src/libvirt_private.syms +@@ -345,6 +345,7 @@ virDomainDefHasSpiceGraphics; + virDomainDefHasUSB; + virDomainDefHasVcpusOffline; + virDomainDefHasVDPANet; ++virDomainDefIDsParseString; + virDomainDefLifecycleActionAllowed; + virDomainDefMaybeAddController; + virDomainDefMaybeAddInput; +-- +2.43.0 + +From 97a53db9756cbc880ed3f4dc054e9a15690a2176 Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 2/8] bhyve: Check ACLs before parsing the whole domain XML + +--- + src/bhyve/bhyve_driver.c | 24 ++++++++++++++++++------ + 1 file changed, 18 insertions(+), 6 deletions(-) + +diff --git a/src/bhyve/bhyve_driver.c b/src/bhyve/bhyve_driver.c +index 4203b13..c48bca3 100644 +--- a/src/bhyve/bhyve_driver.c ++++ b/src/bhyve/bhyve_driver.c +@@ -505,6 +505,15 @@ bhyveDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flag + if (!caps) + return NULL; + ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, provconn->xmlopt, parse_flags))) ++ return NULL; ++ ++ if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) ++ return NULL; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ + if ((def = virDomainDefParseString(xml, privconn->xmlopt, + NULL, parse_flags)) == NULL) + goto cleanup; +@@ -512,9 +521,6 @@ bhyveDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flag + if (virXMLCheckIllegalChars("name", def->name, "\n") < 0) + goto cleanup; + 
+- if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) +- goto cleanup; +- + if (bhyveDomainAssignAddresses(def, NULL) < 0) + goto cleanup; + +@@ -878,11 +884,17 @@ bhyveDomainCreateXML(virConnectPtr conn, + if (flags & VIR_DOMAIN_START_AUTODESTROY) + start_flags |= VIR_BHYVE_PROCESS_START_AUTODESTROY; + +- if ((def = virDomainDefParseString(xml, privconn->xmlopt, +- NULL, parse_flags)) == NULL) +- goto cleanup; ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, provconn->xmlopt, parse_flags))) ++ return NULL; + + if (virDomainCreateXMLEnsureACL(conn, def) < 0) ++ return NULL; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ ++ if ((def = virDomainDefParseString(xml, privconn->xmlopt, ++ NULL, parse_flags)) == NULL) + goto cleanup; + + if (bhyveDomainAssignAddresses(def, NULL) < 0) +-- +2.43.0 + +From 1b8d1bd4b10e8eaa9a247487d709440ebba014ed Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 3/8] libxl: Check ACLs before parsing the whole domain XML + +--- + src/libxl/libxl_driver.c | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c +index 6c843b9..19f5090 100644 +--- a/src/libxl/libxl_driver.c ++++ b/src/libxl/libxl_driver.c +@@ -1036,13 +1036,18 @@ libxlDomainCreateXML(virConnectPtr conn, const char *xml, + if (flags & VIR_DOMAIN_START_VALIDATE) + parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; + +- if (!(def = virDomainDefParseString(xml, driver->xmlopt, +- NULL, parse_flags))) ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) + goto cleanup; + + if (virDomainCreateXMLEnsureACL(conn, def) < 0) + goto cleanup; + ++ g_clear_pointer(&def, virDomainDefFree); ++ ++ if (!(def = virDomainDefParseString(xml, driver->xmlopt, ++ NULL, parse_flags))) ++ goto cleanup; ++ + if (!(vm = virDomainObjListAdd(driver->domains, &def, + driver->xmlopt, + VIR_DOMAIN_OBJ_LIST_ADD_LIVE | +@@ -2822,6 +2827,14 @@ libxlDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flag + if (flags & VIR_DOMAIN_DEFINE_VALIDATE) + parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; + ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ goto cleanup; ++ ++ if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) ++ goto cleanup; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ + if (!(def = virDomainDefParseString(xml, driver->xmlopt, + NULL, parse_flags))) + goto cleanup; +@@ -2829,9 +2842,6 @@ libxlDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flag + if (virXMLCheckIllegalChars("name", def->name, "\n") < 0) + goto cleanup; + +- if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) +- goto cleanup; +- + if (!(vm = virDomainObjListAdd(driver->domains, &def, + driver->xmlopt, + 0, +-- +2.43.0 + +From 7a62099efb32ca270e1808fead3263569b81eeea Mon Sep 17 00:00:00 2001 +From: akhila-guruju +Date: Thu, 15 Jan 2026 09:00:31 +0000 +Subject: [PATCH 4/8] lxc: Check ACLs before parsing the whole domain XML + +--- + src/lxc/lxc_driver.c | 22 +++++++++++++++++----- + 1 file changed, 17 insertions(+), 5 deletions(-) + +diff --git a/src/lxc/lxc_driver.c b/src/lxc/lxc_driver.c +index c281998..e338624 100644 +--- a/src/lxc/lxc_driver.c ++++ b/src/lxc/lxc_driver.c +@@ -408,6 +408,15 @@ lxcDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags) + if (!(caps = virLXCDriverGetCapabilities(driver, false))) + goto 
cleanup; + ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ goto cleanup; ++ ++ if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) ++ goto cleanup; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ + if (!(def = virDomainDefParseString(xml, driver->xmlopt, + NULL, parse_flags))) + goto cleanup; +@@ -415,9 +424,6 @@ lxcDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags) + if (virXMLCheckIllegalChars("name", def->name, "\n") < 0) + goto cleanup; + +- if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) +- goto cleanup; +- + if (virSecurityManagerVerify(driver->securityManager, def) < 0) + goto cleanup; + +@@ -1077,13 +1083,19 @@ lxcDomainCreateXMLWithFiles(virConnectPtr conn, + if (!(caps = virLXCDriverGetCapabilities(driver, false))) + goto cleanup; + +- if (!(def = virDomainDefParseString(xml, driver->xmlopt, +- NULL, parse_flags))) ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) + goto cleanup; + + if (virDomainCreateXMLWithFilesEnsureACL(conn, def) < 0) + goto cleanup; + ++ g_clear_pointer(&def, virDomainDefFree); ++ ++ if (!(def = virDomainDefParseString(xml, driver->xmlopt, ++ NULL, parse_flags))) ++ goto cleanup; ++ + if (virSecurityManagerVerify(driver->securityManager, def) < 0) + goto cleanup; + +-- +2.43.0 + +From b4b37eb7ae3296c8b8520b5ad5e4cbce4dbcb8ca Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 5/8] vz: Check ACLs before parsing the whole domain XML + +--- + src/vz/vz_driver.c | 18 ++++++++++++------ + 1 file changed, 12 insertions(+), 6 deletions(-) + +diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c +index c7ceec2..1f8cad4 100644 +--- a/src/vz/vz_driver.c ++++ b/src/vz/vz_driver.c +@@ -795,6 +795,15 @@ vzDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags) + if (flags & VIR_DOMAIN_DEFINE_VALIDATE) + parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; + ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ return NULL; ++ ++ if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) ++ return NULL; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ + if ((def = virDomainDefParseString(xml, driver->xmlopt, + NULL, parse_flags)) == NULL) + goto cleanup; +@@ -802,9 +811,6 @@ vzDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags) + if (virXMLCheckIllegalChars("name", def->name, "\n") < 0) + goto cleanup; + +- if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) +- goto cleanup; +- + dom = virDomainObjListFindByUUID(driver->domains, def->uuid); + if (dom == NULL) { + virResetLastError(); +@@ -2972,9 +2978,9 @@ vzDomainMigratePrepare3Params(virConnectPtr conn, + | VZ_MIGRATION_COOKIE_DOMAIN_NAME) < 0) + return -1; + +- if (!(def = virDomainDefParseString(dom_xml, driver->xmlopt, +- NULL, +- VIR_DOMAIN_DEF_PARSE_INACTIVE))) ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(dom_xml, driver->xmlopt, ++ VIR_DOMAIN_DEF_PARSE_INACTIVE))) + return -1; + + if (dname) { +-- +2.43.0 + +From 8928cf500e1a94b6f78bc5b1840fa300f18406dd Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 6/8] ch: Check ACLs before parsing the whole domain XML + +--- + src/ch/ch_driver.c | 23 
+++++++++++++++++------ + 1 file changed, 17 insertions(+), 6 deletions(-) + +diff --git a/src/ch/ch_driver.c b/src/ch/ch_driver.c +index 96de504..722b74c 100644 +--- a/src/ch/ch_driver.c ++++ b/src/ch/ch_driver.c +@@ -202,14 +202,19 @@ chDomainCreateXML(virConnectPtr conn, + if (flags & VIR_DOMAIN_START_VALIDATE) + parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; + ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(vmdef = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ return NULL; ++ ++ if (virDomainCreateXMLEnsureACL(conn, vmdef) < 0) ++ return NULL; ++ ++ g_clear_pointer(&vmdef, virDomainDefFree); + + if ((vmdef = virDomainDefParseString(xml, driver->xmlopt, + NULL, parse_flags)) == NULL) + goto cleanup; + +- if (virDomainCreateXMLEnsureACL(conn, vmdef) < 0) +- goto cleanup; +- + if (!(vm = virDomainObjListAdd(driver->domains, + &vmdef, + driver->xmlopt, +@@ -284,6 +289,15 @@ chDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags) + if (flags & VIR_DOMAIN_START_VALIDATE) + parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; + ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(vmdef = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ return NULL; ++ ++ if (virDomainDefineXMLFlagsEnsureACL(conn, vmdef) < 0) ++ return NULL; ++ ++ g_clear_pointer(&vmdef, virDomainDefFree); ++ + if ((vmdef = virDomainDefParseString(xml, driver->xmlopt, + NULL, parse_flags)) == NULL) + goto cleanup; +@@ -291,9 +305,6 @@ chDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags) + if (virXMLCheckIllegalChars("name", vmdef->name, "\n") < 0) + goto cleanup; + +- if (virDomainDefineXMLFlagsEnsureACL(conn, vmdef) < 0) +- goto cleanup; +- + if (!(vm = virDomainObjListAdd(driver->domains, &vmdef, + driver->xmlopt, + 0, NULL))) +-- +2.43.0 + +From e8e0d3ce28a0856c14d1fe3d2b5977f164938b9a Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 7/8] qemu: Check ACLs before parsing the whole domain XML + +--- + src/qemu/qemu_driver.c | 90 ++++++++++++++++++++------------------- + src/qemu/qemu_migration.c | 24 ++++++++++- + src/qemu/qemu_migration.h | 4 +- + src/qemu/qemu_saveimage.c | 25 +++++++++-- + src/qemu/qemu_saveimage.h | 4 +- + src/qemu/qemu_snapshot.c | 4 +- + 6 files changed, 98 insertions(+), 53 deletions(-) + +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index c4b0128..ba83992 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -1604,11 +1604,17 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, + if (flags & VIR_DOMAIN_START_RESET_NVRAM) + start_flags |= VIR_QEMU_PROCESS_START_RESET_NVRAM; + +- if (!(def = virDomainDefParseString(xml, driver->xmlopt, +- NULL, parse_flags))) +- goto cleanup; ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ return NULL; + + if (virDomainCreateXMLEnsureACL(conn, def) < 0) ++ return NULL; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ ++ if (!(def = virDomainDefParseString(xml, driver->xmlopt, ++ NULL, parse_flags))) + goto cleanup; + + if (!(vm = virDomainObjListAdd(driver->domains, &def, +@@ -5757,7 +5763,7 @@ qemuDomainRestoreInternal(virConnectPtr conn, + if (flags & VIR_DOMAIN_SAVE_RESET_NVRAM) + reset_nvram = true; + +- if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) ++ if (qemuSaveImageGetMetadata(driver, NULL, path, 
ensureACL, conn, &def, &data) < 0) + goto cleanup; + + fd = qemuSaveImageOpen(driver, path, +@@ -5766,9 +5772,6 @@ qemuDomainRestoreInternal(virConnectPtr conn, + if (fd < 0) + goto cleanup; + +- if (ensureACL(conn, def) < 0) +- goto cleanup; +- + if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) { + int hookret; + +@@ -5895,10 +5898,9 @@ qemuDomainSaveImageGetXMLDesc(virConnectPtr conn, const char *path, + + virCheckFlags(VIR_DOMAIN_SAVE_IMAGE_XML_SECURE, NULL); + +- if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) +- goto cleanup; +- +- if (virDomainSaveImageGetXMLDescEnsureACL(conn, def) < 0) ++ if (qemuSaveImageGetMetadata(driver, NULL, path, ++ virDomainSaveImageGetXMLDescEnsureACL, ++ conn, &def, &data) < 0) + goto cleanup; + + ret = qemuDomainDefFormatXML(driver, NULL, def, flags); +@@ -5928,16 +5930,15 @@ qemuDomainSaveImageDefineXML(virConnectPtr conn, const char *path, + else if (flags & VIR_DOMAIN_SAVE_PAUSED) + state = 0; + +- if (qemuSaveImageGetMetadata(driver, NULL, path, &def, &data) < 0) ++ if (qemuSaveImageGetMetadata(driver, NULL, path, ++ virDomainSaveImageDefineXMLEnsureACL, ++ conn, &def, &data) < 0) + goto cleanup; + + fd = qemuSaveImageOpen(driver, path, 0, NULL, false); + if (fd < 0) + goto cleanup; + +- if (virDomainSaveImageDefineXMLEnsureACL(conn, def) < 0) +- goto cleanup; +- + if (STREQ(data->xml, dxml) && + (state < 0 || state == data->header.was_running)) { + /* no change to the XML */ +@@ -6009,7 +6010,8 @@ qemuDomainManagedSaveGetXMLDesc(virDomainPtr dom, unsigned int flags) + goto cleanup; + } + +- if (qemuSaveImageGetMetadata(driver, priv->qemuCaps, path, &def, &data) < 0) ++ if (qemuSaveImageGetMetadata(driver, priv->qemuCaps, path, ++ NULL, NULL, &def, &data) < 0) + goto cleanup; + + ret = qemuDomainDefFormatXML(driver, priv->qemuCaps, def, flags); +@@ -6071,7 +6073,7 @@ qemuDomainObjRestore(virConnectPtr conn, + virQEMUSaveData *data = NULL; + virFileWrapperFd *wrapperFd = NULL; + +- ret = qemuSaveImageGetMetadata(driver, NULL, path, &def, &data); ++ ret = qemuSaveImageGetMetadata(driver, NULL, path, NULL, NULL, &def, &data); + if (ret < 0) { + if (qemuSaveImageIsCorrupt(driver, path)) { + if (unlink(path) < 0) { +@@ -6419,6 +6421,15 @@ qemuDomainDefineXMLFlags(virConnectPtr conn, + if (flags & VIR_DOMAIN_DEFINE_VALIDATE) + parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA; + ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(def = virDomainDefIDsParseString(xml, driver->xmlopt, parse_flags))) ++ return NULL; ++ ++ if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) ++ return NULL; ++ ++ g_clear_pointer(&def, virDomainDefFree); ++ + if (!(def = virDomainDefParseString(xml, driver->xmlopt, + NULL, parse_flags))) + return NULL; +@@ -6426,9 +6437,6 @@ qemuDomainDefineXMLFlags(virConnectPtr conn, + if (virXMLCheckIllegalChars("name", def->name, "\n") < 0) + goto cleanup; + +- if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) +- goto cleanup; +- + if (!(vm = virDomainObjListAdd(driver->domains, &def, + driver->xmlopt, + 0, &oldDef))) +@@ -10637,10 +10645,9 @@ qemuDomainMigratePrepareTunnel(virConnectPtr dconn, + return -1; + } + +- if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname))) +- return -1; +- +- if (virDomainMigratePrepareTunnelEnsureACL(dconn, def) < 0) ++ if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname, ++ dconn, ++ virDomainMigratePrepareTunnelEnsureACL))) + return -1; + + return qemuMigrationDstPrepareTunnel(driver, dconn, +@@ -10690,10 +10697,9 
@@ qemuDomainMigratePrepare2(virConnectPtr dconn, + return -1; + } + +- if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname))) +- return -1; +- +- if (virDomainMigratePrepare2EnsureACL(dconn, def) < 0) ++ if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname, ++ dconn, ++ virDomainMigratePrepare2EnsureACL))) + return -1; + + /* Do not use cookies in v2 protocol, since the cookie +@@ -10912,10 +10918,9 @@ qemuDomainMigratePrepare3(virConnectPtr dconn, + QEMU_MIGRATION_DESTINATION))) + return -1; + +- if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname))) +- return -1; +- +- if (virDomainMigratePrepare3EnsureACL(dconn, def) < 0) ++ if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname, ++ dconn, ++ virDomainMigratePrepare3EnsureACL))) + return -1; + + return qemuMigrationDstPrepareDirect(driver, dconn, +@@ -11020,10 +11025,9 @@ qemuDomainMigratePrepare3Params(virConnectPtr dconn, + return -1; + } + +- if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname))) +- return -1; +- +- if (virDomainMigratePrepare3ParamsEnsureACL(dconn, def) < 0) ++ if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname, ++ dconn, ++ virDomainMigratePrepare3ParamsEnsureACL))) + return -1; + + return qemuMigrationDstPrepareDirect(driver, dconn, +@@ -11065,10 +11069,9 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn, + QEMU_MIGRATION_DESTINATION))) + return -1; + +- if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname))) +- return -1; +- +- if (virDomainMigratePrepareTunnel3EnsureACL(dconn, def) < 0) ++ if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname, ++ dconn, ++ virDomainMigratePrepareTunnel3EnsureACL))) + return -1; + + return qemuMigrationDstPrepareTunnel(driver, dconn, +@@ -11117,10 +11120,9 @@ qemuDomainMigratePrepareTunnel3Params(virConnectPtr dconn, + QEMU_MIGRATION_DESTINATION))) + return -1; + +- if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname))) +- return -1; +- +- if (virDomainMigratePrepareTunnel3ParamsEnsureACL(dconn, def) < 0) ++ if (!(def = qemuMigrationAnyPrepareDef(driver, NULL, dom_xml, dname, &origname, ++ dconn, ++ virDomainMigratePrepareTunnel3ParamsEnsureACL))) + return -1; + + return qemuMigrationDstPrepareTunnel(driver, dconn, +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 25dc16a..e2bfb8a 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -3850,7 +3850,9 @@ qemuMigrationAnyPrepareDef(virQEMUDriver *driver, + virQEMUCaps *qemuCaps, + const char *dom_xml, + const char *dname, +- char **origname) ++ char **origname, ++ virConnectPtr sconn, ++ int (*ensureACL)(virConnectPtr, virDomainDef *)) + { + virDomainDef *def; + char *name = NULL; +@@ -3861,6 +3863,24 @@ qemuMigrationAnyPrepareDef(virQEMUDriver *driver, + return NULL; + } + ++ if (ensureACL) { ++ g_autoptr(virDomainDef) aclDef = NULL; ++ ++ /* Avoid parsing the whole domain definition for ACL checks */ ++ if (!(aclDef = virDomainDefIDsParseString(dom_xml, driver->xmlopt, ++ VIR_DOMAIN_DEF_PARSE_INACTIVE))) ++ return NULL; ++ ++ if (dname) { ++ VIR_FREE(aclDef->name); ++ aclDef->name = g_strdup(dname); ++ } ++ ++ if (ensureACL(sconn, aclDef) < 0) { ++ return NULL; ++ } ++ } ++ + if (!(def = virDomainDefParseString(dom_xml, driver->xmlopt, + qemuCaps, + VIR_DOMAIN_DEF_PARSE_INACTIVE))) +@@ -4794,6 +4814,7 @@ qemuMigrationSrcRun(virQEMUDriver 
*driver, + if (!(persistDef = qemuMigrationAnyPrepareDef(driver, + priv->qemuCaps, + persist_xml, ++ NULL, NULL, + NULL, NULL))) + goto error; + } else if (xmlin) { +@@ -4803,6 +4824,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver, + if (!(persistDef = qemuMigrationAnyPrepareDef(driver, + priv->qemuCaps, + xmlin, ++ NULL, NULL, + NULL, NULL))) + goto error; + } else { +diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h +index ed62fd4..f43a96f 100644 +--- a/src/qemu/qemu_migration.h ++++ b/src/qemu/qemu_migration.h +@@ -131,7 +131,9 @@ qemuMigrationAnyPrepareDef(virQEMUDriver *driver, + virQEMUCaps *qemuCaps, + const char *dom_xml, + const char *dname, +- char **origname); ++ char **origname, ++ virConnectPtr sconn, ++ int (*ensureACL)(virConnectPtr, virDomainDef *)); + + int + qemuMigrationDstPrepareTunnel(virQEMUDriver *driver, +diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c +index db442a5..469a210 100644 +--- a/src/qemu/qemu_saveimage.c ++++ b/src/qemu/qemu_saveimage.c +@@ -638,16 +638,21 @@ qemuSaveImageIsCorrupt(virQEMUDriver *driver, const char *path) + * @driver: qemu driver data + * @qemuCaps: pointer to qemuCaps if the domain is running or NULL + * @path: path of the save image ++ * @ensureACL: ACL callback to check against the definition or NULL ++ * @conn: parameter for the @ensureACL callback + * @ret_def: returns domain definition created from the XML stored in the image + * @ret_data: returns structure filled with data from the image header + * +- * Open the save image file, read libvirt's save image metadata, and populate +- * the @ret_def and @ret_data structures. Returns 0 on success and -1 on failure. ++ * Open the save image file, read libvirt's save image metadata, optionally ++ * check ACLs before parsing the whole domain definition and populate the ++ * @ret_def and @ret_data structures. Returns 0 on success and -1 on failure. 
+ */ + int + qemuSaveImageGetMetadata(virQEMUDriver *driver, + virQEMUCaps *qemuCaps, + const char *path, ++ int (*ensureACL)(virConnectPtr, virDomainDef *), ++ virConnectPtr conn, + virDomainDef **ret_def, + virQEMUSaveData **ret_data) + { +@@ -655,6 +660,8 @@ qemuSaveImageGetMetadata(virQEMUDriver *driver, + VIR_AUTOCLOSE fd = -1; + virQEMUSaveData *data; + g_autoptr(virDomainDef) def = NULL; ++ unsigned int parse_flags = VIR_DOMAIN_DEF_PARSE_INACTIVE | ++ VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE; + int rc; + + if ((fd = qemuDomainOpenFile(cfg, NULL, path, O_RDONLY, NULL)) < 0) +@@ -664,10 +671,20 @@ qemuSaveImageGetMetadata(virQEMUDriver *driver, + return rc; + + data = *ret_data; ++ ++ if (ensureACL) { ++ /* Parse only the IDs for ACL checks */ ++ g_autoptr(virDomainDef) aclDef = virDomainDefIDsParseString(data->xml, ++ driver->xmlopt, ++ parse_flags); ++ ++ if (!aclDef || ensureACL(conn, aclDef) < 0) ++ return -1; ++ } ++ + /* Create a domain from this XML */ + if (!(def = virDomainDefParseString(data->xml, driver->xmlopt, qemuCaps, +- VIR_DOMAIN_DEF_PARSE_INACTIVE | +- VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE))) ++ parse_flags))) + return -1; + + *ret_def = g_steal_pointer(&def); +diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h +index 8859fab..d02f5ea 100644 +--- a/src/qemu/qemu_saveimage.h ++++ b/src/qemu/qemu_saveimage.h +@@ -78,9 +78,11 @@ int + qemuSaveImageGetMetadata(virQEMUDriver *driver, + virQEMUCaps *qemuCaps, + const char *path, ++ int (*ensureACL)(virConnectPtr, virDomainDef *), ++ virConnectPtr conn, + virDomainDef **ret_def, + virQEMUSaveData **ret_data) +- ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5); ++ ATTRIBUTE_NONNULL(6) ATTRIBUTE_NONNULL(7); + + int + qemuSaveImageOpen(virQEMUDriver *driver, +diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c +index a1f1f32..eab22b7 100644 +--- a/src/qemu/qemu_snapshot.c ++++ b/src/qemu/qemu_snapshot.c +@@ -2083,8 +2083,8 @@ qemuSnapshotRevertExternalPrepare(virDomainObj *vm, + g_autoptr(virDomainDef) savedef = NULL; + + memdata->path = snapdef->memorysnapshotfile; +- if (qemuSaveImageGetMetadata(driver, NULL, memdata->path, &savedef, +- &memdata->data) < 0) ++ if (qemuSaveImageGetMetadata(driver, NULL, memdata->path, NULL, NULL, ++ &savedef, &memdata->data) < 0) + return -1; + + memdata->fd = qemuSaveImageOpen(driver, memdata->path, +-- +2.43.0 + +From 6ab6e8705128808aa90b9281bcf046a8cf61dbba Mon Sep 17 00:00:00 2001 +From: Marc Deslauriers +Date: Mon, 8 Dec 2025 10:52:22 -0500 +Subject: [PATCH 8/8] bhyve: s/provconn/privcon/ + +--- + src/bhyve/bhyve_driver.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/bhyve/bhyve_driver.c b/src/bhyve/bhyve_driver.c +index c48bca3..2a7e3eb 100644 +--- a/src/bhyve/bhyve_driver.c ++++ b/src/bhyve/bhyve_driver.c +@@ -506,7 +506,7 @@ bhyveDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flag + return NULL; + + /* Avoid parsing the whole domain definition for ACL checks */ +- if (!(def = virDomainDefIDsParseString(xml, provconn->xmlopt, parse_flags))) ++ if (!(def = virDomainDefIDsParseString(xml, privconn->xmlopt, parse_flags))) + return NULL; + + if (virDomainDefineXMLFlagsEnsureACL(conn, def) < 0) +@@ -885,7 +885,7 @@ bhyveDomainCreateXML(virConnectPtr conn, + start_flags |= VIR_BHYVE_PROCESS_START_AUTODESTROY; + + /* Avoid parsing the whole domain definition for ACL checks */ +- if (!(def = virDomainDefIDsParseString(xml, provconn->xmlopt, parse_flags))) ++ if (!(def = virDomainDefIDsParseString(xml, privconn->xmlopt, 
parse_flags))) + return NULL; + + if (virDomainCreateXMLEnsureACL(conn, def) < 0) +-- +2.43.0 + + diff --git a/SPECS/libvirt/libvirt.spec b/SPECS/libvirt/libvirt.spec index a943ce7e76d..f6a1c5b845d 100644 --- a/SPECS/libvirt/libvirt.spec +++ b/SPECS/libvirt/libvirt.spec @@ -185,7 +185,7 @@ Summary: Library providing a simple virtualization API Name: libvirt Version: 10.0.0 -Release: 6%{?dist} +Release: 7%{?dist} License: GPL-2.0-or-later AND LGPL-2.1-only AND LGPL-2.1-or-later AND OFL-1.1 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -200,6 +200,7 @@ Patch1: CVE-2024-1441.patch Patch2: CVE-2024-2494.patch Patch3: CVE-2024-4418.patch Patch4: CVE-2025-13193.patch +Patch5: CVE-2025-12748.patch Requires: libvirt-daemon = %{version}-%{release} Requires: libvirt-daemon-config-network = %{version}-%{release} @@ -2190,6 +2191,9 @@ exit 0 %endif %changelog +* Thu Jan 15 2026 Akhila Guruju - 10.0.0-7 +- Patch CVE-2025-12748 + * Fri Nov 21 2025 Azure Linux Security Servicing Account - 10.0.0-6 - Patch for CVE-2025-13193 diff --git a/SPECS/libvma/libvma.signatures.json b/SPECS/libvma/libvma.signatures.json new file mode 100644 index 00000000000..fa2b6a68270 --- /dev/null +++ b/SPECS/libvma/libvma.signatures.json @@ -0,0 +1,5 @@ +{ + "Signatures": { + "libvma-9.8.72.tar.gz": "fb47f472892a224b04260b636c165e04f6979b43c5c80086e58c32fe4b6be928" + } +} \ No newline at end of file diff --git a/SPECS/libvma/libvma.spec b/SPECS/libvma/libvma.spec new file mode 100644 index 00000000000..984e22b739b --- /dev/null +++ b/SPECS/libvma/libvma.spec @@ -0,0 +1,142 @@ +%{!?configure_options: %global configure_options %{nil}} +%{!?use_rel: %global use_rel 1} + +%{!?make_build: %global make_build %{__make} %{?_smp_mflags} %{?mflags} V=1} +%{!?run_ldconfig: %global run_ldconfig %{?ldconfig}} +%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}} +# Azure Linux build with use_systemd +%global use_systemd 1 + +Name: libvma +Version: 9.8.72 +Release: 1%{?dist} +Summary: A library for boosting TCP and UDP traffic (over RDMA hardware) +Group: System Environment/Libraries +License: GPLv2 or BSD +Url: https://github.com/Mellanox/%{name} +Vendor: Microsoft Corporation +Distribution: Azure Linux +Source0: %{url}/archive/%{version}/%{name}-%{version}.tar.gz + +# libvma currently supports only the following architectures +ExclusiveArch: x86_64 ppc64le ppc64 aarch64 + +BuildRequires: pkgconfig +BuildRequires: automake +BuildRequires: autoconf +BuildRequires: libtool +BuildRequires: gcc-c++ +BuildRequires: rdma-core-devel +BuildRequires: libnl3-devel +%if "%{use_systemd}" == "1" +BuildRequires: systemd +%endif +BuildRequires: make + +%description +libvma is a LD_PRELOAD-able library that boosts performance of TCP and +UDP traffic. It allows application written over standard socket API to +handle fast path data traffic from user space over Ethernet and/or +Infiniband with full network stack bypass and get better throughput, +latency and packets/sec rate. + +No application binary change is required for that. +libvma is supported by RDMA capable devices that support "verbs" +IBV_QPT_RAW_PACKET QP for Ethernet and/or IBV_QPT_UD QP for IPoIB. + +%package devel +Summary: Header files required to develop with libvma +Group: System Environment/Libraries +Requires: %{name} = %{version}-%{release} + +%description devel +This package includes headers for building programs with libvma's +interfaces. 
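Editorial note, not part of the spec: the descriptions above rest on libvma being purely LD_PRELOAD based, so an unmodified BSD-socket program is all that is needed to exercise it. The sketch below is a plain UDP sender; the destination address, port, and the exact library path passed to LD_PRELOAD are illustrative assumptions. Built with an ordinary cc invocation it uses the kernel stack; launching the same binary as LD_PRELOAD=libvma.so ./udp_send 192.0.2.10 5001 is expected to route the traffic through libvma instead, with no rebuild.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <ipv4-addr> <port>\n", argv[0]);
        return 1;
    }

    /* Standard socket API only: this is the call surface libvma
     * interposes on when loaded via LD_PRELOAD. */
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0) {
        perror("socket");
        return 1;
    }

    struct sockaddr_in dst;
    memset(&dst, 0, sizeof(dst));
    dst.sin_family = AF_INET;
    dst.sin_port = htons((uint16_t)atoi(argv[2]));
    if (inet_pton(AF_INET, argv[1], &dst.sin_addr) != 1) {
        fprintf(stderr, "bad address: %s\n", argv[1]);
        close(fd);
        return 1;
    }

    const char msg[] = "hello from an unmodified socket application";
    if (sendto(fd, msg, sizeof(msg) - 1, 0,
               (struct sockaddr *)&dst, sizeof(dst)) < 0)
        perror("sendto");

    close(fd);
    return 0;
}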
+ +%package utils +Summary: Utilities used with libvma +Group: System Environment/Libraries +Requires: %{name} = %{version}-%{release} + +%description utils +This package contains the tool for collecting and analyzing libvma statistic. + +%prep +%setup -q + +%build + +export CFLAGS="$CFLAGS -Werror=discarded-qualifiers" + +if [ ! -e configure ] && [ -e autogen.sh ]; then + PRJ_RELEASE=%{use_rel} ./autogen.sh +fi + +%if %{use_rel} > 0 +%configure --enable-opt-log=none --enable-debug \ + %{?configure_options} +%{make_build} +cp -f src/vma/.libs/%{name}.so %{name}-debug.so +%{make_build} clean +%endif + +%configure --docdir=%{_pkgdocdir} \ + %{?configure_options} +%{make_build} + +%install +%{make_build} DESTDIR=${RPM_BUILD_ROOT} install + +find $RPM_BUILD_ROOT%{_libdir} -name '*.la' -delete +%if "%{use_systemd}" == "1" +install -D -m 644 contrib/scripts/vma.service $RPM_BUILD_ROOT/%{_prefix}/lib/systemd/system/vma.service +%endif + +%if %{use_rel} > 0 +install -m 755 ./%{name}-debug.so $RPM_BUILD_ROOT/%{_libdir}/%{name}-debug.so +%endif + +%post +%systemd_post vma.service + +%preun +%systemd_preun vma.service + +%postun +%systemd_postun_with_restart vma.service + +%files +%license LICENSE +%{_libdir}/%{name}.so* +%dir %{_pkgdocdir} +%doc %{_pkgdocdir}/README +%doc %{_pkgdocdir}/CHANGES +%config(noreplace) %{_sysconfdir}/libvma.conf +%{_sbindir}/vmad +%if "%{use_systemd}" == "1" +%{_prefix}/lib/systemd/system/vma.service +%endif +%{_mandir}/man7/vma.* +%{_mandir}/man8/vmad.* + +%files devel +%dir %{_includedir}/mellanox +%{_includedir}/mellanox/vma_extra.h +%if %{use_rel} > 0 +%{_libdir}/%{name}-debug.so +%endif + +%files utils +%{_bindir}/vma_stats +%{_mandir}/man8/vma_stats.* + +%changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 9.8.72-1 +- Initial Azure Linux import from NVIDIA (license: GPLv2) +- License verified +- Update build with use_systemd=1 + +* Wed Feb 28 2024 NVIDIA CORPORATION 9.8.60-1 +- Bump version to 9.8.60 +- Please refer to CHANGES for full changelog. + diff --git a/SPECS/libxml2/CVE-2025-7425.patch b/SPECS/libxml2/CVE-2025-7425.patch new file mode 100644 index 00000000000..4133c5d5adf --- /dev/null +++ b/SPECS/libxml2/CVE-2025-7425.patch @@ -0,0 +1,798 @@ +From b7d2ad6e1b376c10edffcb0973485c861dc89559 Mon Sep 17 00:00:00 2001 +From: David Kilzer +Date: Mon, 23 Jun 2025 14:41:56 -0700 +Subject: [PATCH] libxslt: heap-use-after-free in xmlFreeID caused by `atype` + corruption + +* include/libxml/tree.h: +(XML_ATTR_CLEAR_ATYPE): Add. +(XML_ATTR_GET_ATYPE): Add. +(XML_ATTR_SET_ATYPE): Add. +(XML_NODE_ADD_EXTRA): Add. +(XML_NODE_CLEAR_EXTRA): Add. +(XML_NODE_GET_EXTRA): Add. +(XML_NODE_SET_EXTRA): Add. +(XML_DOC_ADD_PROPERTIES): Add. +(XML_DOC_CLEAR_PROPERTIES): Add. +(XML_DOC_GET_PROPERTIES): Add. +(XML_DOC_SET_PROPERTIES): Add. +- Add macros for accessing fields with upper bits that may be set by + libxslt. 
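Editorial note, not part of the patch: the new accessors reserve the top four bits of each field so that values stashed there by libxslt survive libxml2's own reads and writes; per the patch title, clobbering those bits is what corrupted atype and led to the heap-use-after-free in xmlFreeID. Below is a minimal sketch of the masking pattern, mirroring the XML_ATTR_GET_ATYPE / XML_ATTR_SET_ATYPE macros added to include/libxml/tree.h further down; the stand-in values are assumptions for illustration only.

#include <assert.h>

/* Same shape as the tree.h macros: bits 27..30 are left alone,
 * everything else carries the logical value. */
#define FIELD_GET(v)    ((v) & ~(15U << 27))
#define FIELD_SET(v, t) ((v) = (((v) & (15U << 27)) | ((t) & ~(15U << 27))))

int main(void)
{
    unsigned int v = 0;

    v |= 5U << 27;      /* pretend libxslt tagged the upper bits */
    FIELD_SET(v, 2U);   /* 2 stands in for a real xmlAttributeType value */

    assert(FIELD_GET(v) == 2U);  /* the logical value is what callers see */
    assert((v >> 27) == 5U);     /* and the libxslt bits are untouched */
    return 0;
}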
+ +* HTMLparser.c: +(htmlNewDocNoDtD): +* SAX2.c: +(xmlSAX2StartDocument): +(xmlSAX2EndDocument): +* parser.c: +(xmlParseEntityDecl): +(xmlParseExternalSubset): +(xmlParseReference): +(xmlCtxtParseDtd): +* runxmlconf.c: +(xmlconfTestInvalid): +(xmlconfTestValid): +* tree.c: +(xmlNewDoc): +(xmlFreeProp): +(xmlNodeSetDoc): +(xmlSetNsProp): +(xmlDOMWrapAdoptBranch): +* valid.c: +(xmlFreeID): +(xmlAddIDInternal): +(xmlValidateAttributeValueInternal): +(xmlValidateOneAttribute): +(xmlValidateRef): +* xmlreader.c: +(xmlTextReaderStartElement): +(xmlTextReaderStartElementNs): +(xmlTextReaderValidateEntity): +(xmlTextReaderRead): +(xmlTextReaderNext): +(xmlTextReaderIsEmptyElement): +(xmlTextReaderPreserve): +* xmlschemas.c: +(xmlSchemaPValAttrNodeID): +* xmlschemastypes.c: +(xmlSchemaValAtomicType): +- Adopt macros by renaming the struct fields, recompiling and fixing + compiler failures, then changing the struct field names back. + +Upstream patch reference: https://gitlab.gnome.org/-/project/1762/uploads/302ecfda701895ebd0fa438a66d1a7a4/gnome-libxslt-bug-140-apple-fix.diff +--- + HTMLparser.c | 2 +- + SAX2.c | 6 ++-- + include/libxml/tree.h | 14 ++++++++- + parser.c | 8 ++--- + runxmlconf.c | 4 +-- + tree.c | 20 ++++++------- + valid.c | 68 +++++++++++++++++++++---------------------- + xmlreader.c | 30 +++++++++---------- + xmlschemas.c | 4 +-- + xmlschemastypes.c | 12 ++++---- + 10 files changed, 90 insertions(+), 78 deletions(-) + +diff --git a/HTMLparser.c b/HTMLparser.c +index abcdfe2..6781b43 100644 +--- a/HTMLparser.c ++++ b/HTMLparser.c +@@ -2489,7 +2489,7 @@ htmlNewDocNoDtD(const xmlChar *URI, const xmlChar *ExternalID) { + cur->refs = NULL; + cur->_private = NULL; + cur->charset = XML_CHAR_ENCODING_UTF8; +- cur->properties = XML_DOC_HTML | XML_DOC_USERBUILT; ++ XML_DOC_SET_PROPERTIES(cur, XML_DOC_HTML | XML_DOC_USERBUILT); + if ((ExternalID != NULL) || + (URI != NULL)) + xmlCreateIntSubset(cur, BAD_CAST "html", ExternalID, URI); +diff --git a/SAX2.c b/SAX2.c +index b6be1a6..1a520a6 100644 +--- a/SAX2.c ++++ b/SAX2.c +@@ -973,7 +973,7 @@ xmlSAX2StartDocument(void *ctx) + xmlSAX2ErrMemory(ctxt, "xmlSAX2StartDocument"); + return; + } +- ctxt->myDoc->properties = XML_DOC_HTML; ++ XML_DOC_SET_PROPERTIES(ctxt->myDoc, XML_DOC_HTML); + ctxt->myDoc->parseFlags = ctxt->options; + #else + xmlGenericError(xmlGenericErrorContext, +@@ -986,9 +986,9 @@ xmlSAX2StartDocument(void *ctx) + } else { + doc = ctxt->myDoc = xmlNewDoc(ctxt->version); + if (doc != NULL) { +- doc->properties = 0; ++ XML_DOC_CLEAR_PROPERTIES(doc); + if (ctxt->options & XML_PARSE_OLD10) +- doc->properties |= XML_DOC_OLD10; ++ XML_DOC_ADD_PROPERTIES(doc, XML_DOC_OLD10); + doc->parseFlags = ctxt->options; + if (ctxt->encoding != NULL) + doc->encoding = xmlStrdup(ctxt->encoding); +diff --git a/include/libxml/tree.h b/include/libxml/tree.h +index b498966..93f7e31 100644 +--- a/include/libxml/tree.h ++++ b/include/libxml/tree.h +@@ -367,7 +367,6 @@ struct _xmlElement { + #endif + }; + +- + /** + * XML_LOCAL_NAMESPACE: + * +@@ -448,6 +447,10 @@ struct _xmlAttr { + void *psvi; /* for type/PSVI information */ + }; + ++#define XML_ATTR_CLEAR_ATYPE(attr) (((attr)->atype) = 0) ++#define XML_ATTR_GET_ATYPE(attr) (((attr)->atype) & ~(15U << 27)) ++#define XML_ATTR_SET_ATYPE(attr, type) ((attr)->atype = ((((attr)->atype) & (15U << 27)) | ((type) & ~(15U << 27)))) ++ + /** + * xmlID: + * +@@ -509,6 +512,11 @@ struct _xmlNode { + unsigned short extra; /* extra data for XPath/XSLT */ + }; + ++#define XML_NODE_ADD_EXTRA(node, type) ((node)->extra 
|= ((type) & ~(15U << 12))) ++#define XML_NODE_CLEAR_EXTRA(node) (((node)->extra) = 0) ++#define XML_NODE_GET_EXTRA(node) (((node)->extra) & ~(15U << 12)) ++#define XML_NODE_SET_EXTRA(node, type) ((node)->extra = ((((node)->extra) & (15U << 12)) | ((type) & ~(15U << 12)))) ++ + /** + * XML_GET_CONTENT: + * +@@ -587,6 +595,10 @@ struct _xmlDoc { + set at the end of parsing */ + }; + ++#define XML_DOC_ADD_PROPERTIES(doc, type) ((doc)->properties |= ((type) & ~(15U << 27))) ++#define XML_DOC_CLEAR_PROPERTIES(doc) (((doc)->properties) = 0) ++#define XML_DOC_GET_PROPERTIES(doc) (((doc)->properties) & ~(15U << 27)) ++#define XML_DOC_SET_PROPERTIES(doc, type) ((doc)->properties = ((((doc)->properties) & (15U << 27)) | ((type) & ~(15U << 27)))) + + typedef struct _xmlDOMWrapCtxt xmlDOMWrapCtxt; + typedef xmlDOMWrapCtxt *xmlDOMWrapCtxtPtr; +diff --git a/parser.c b/parser.c +index d3f30b2..9a259a1 100644 +--- a/parser.c ++++ b/parser.c +@@ -5489,7 +5489,7 @@ xmlParseEntityDecl(xmlParserCtxtPtr ctxt) { + xmlErrMemory(ctxt, "New Doc failed"); + goto done; + } +- ctxt->myDoc->properties = XML_DOC_INTERNAL; ++ XML_DOC_SET_PROPERTIES(ctxt->myDoc, XML_DOC_INTERNAL); + } + if (ctxt->myDoc->intSubset == NULL) + ctxt->myDoc->intSubset = xmlNewDtd(ctxt->myDoc, +@@ -5560,7 +5560,7 @@ xmlParseEntityDecl(xmlParserCtxtPtr ctxt) { + xmlErrMemory(ctxt, "New Doc failed"); + goto done; + } +- ctxt->myDoc->properties = XML_DOC_INTERNAL; ++ XML_DOC_SET_PROPERTIES(ctxt->myDoc, XML_DOC_INTERNAL); + } + + if (ctxt->myDoc->intSubset == NULL) +@@ -7022,7 +7022,7 @@ xmlParseExternalSubset(xmlParserCtxtPtr ctxt, const xmlChar *ExternalID, + xmlErrMemory(ctxt, "New Doc failed"); + return; + } +- ctxt->myDoc->properties = XML_DOC_INTERNAL; ++ XML_DOC_SET_PROPERTIES(ctxt->myDoc, XML_DOC_INTERNAL); + } + if ((ctxt->myDoc != NULL) && (ctxt->myDoc->intSubset == NULL)) + xmlCreateIntSubset(ctxt->myDoc, NULL, ExternalID, SystemID); +@@ -7405,7 +7405,7 @@ xmlParseReference(xmlParserCtxtPtr ctxt) { + (nw != NULL) && + (nw->type == XML_ELEMENT_NODE) && + (nw->children == NULL)) +- nw->extra = 1; ++ XML_NODE_SET_EXTRA(nw, 1); + + break; + } +diff --git a/runxmlconf.c b/runxmlconf.c +index 8df9df1..1356c84 100644 +--- a/runxmlconf.c ++++ b/runxmlconf.c +@@ -192,7 +192,7 @@ xmlconfTestInvalid(const char *id, const char *filename, int options) { + id, filename); + } else { + /* invalidity should be reported both in the context and in the document */ +- if ((ctxt->valid != 0) || (doc->properties & XML_DOC_DTDVALID)) { ++ if ((ctxt->valid != 0) || (XML_DOC_GET_PROPERTIES(doc) & XML_DOC_DTDVALID)) { + test_log("test %s : %s failed to detect invalid document\n", + id, filename); + nb_errors++; +@@ -224,7 +224,7 @@ xmlconfTestValid(const char *id, const char *filename, int options) { + ret = 0; + } else { + /* validity should be reported both in the context and in the document */ +- if ((ctxt->valid == 0) || ((doc->properties & XML_DOC_DTDVALID) == 0)) { ++ if ((ctxt->valid == 0) || ((XML_DOC_GET_PROPERTIES(doc) & XML_DOC_DTDVALID) == 0)) { + test_log("test %s : %s failed to validate a valid document\n", + id, filename); + nb_errors++; +diff --git a/tree.c b/tree.c +index eae778d..2fc26cd 100644 +--- a/tree.c ++++ b/tree.c +@@ -1185,7 +1185,7 @@ xmlNewDoc(const xmlChar *version) { + cur->compression = -1; /* not initialized */ + cur->doc = cur; + cur->parseFlags = 0; +- cur->properties = XML_DOC_USERBUILT; ++ XML_DOC_SET_PROPERTIES(cur, XML_DOC_USERBUILT); + /* + * The in memory encoding is always UTF8 + * This field will never change and would 
+@@ -2122,7 +2122,7 @@ xmlFreeProp(xmlAttrPtr cur) { + xmlDeregisterNodeDefaultValue((xmlNodePtr)cur); + + /* Check for ID removal -> leading to invalid references ! */ +- if ((cur->doc != NULL) && (cur->atype == XML_ATTRIBUTE_ID)) { ++ if ((cur->doc != NULL) && (XML_ATTR_GET_ATYPE(cur) == XML_ATTRIBUTE_ID)) { + xmlRemoveID(cur->doc, cur); + } + if (cur->children != NULL) xmlFreeNodeList(cur->children); +@@ -2871,7 +2871,7 @@ xmlSetTreeDoc(xmlNodePtr tree, xmlDocPtr doc) { + if(tree->type == XML_ELEMENT_NODE) { + prop = tree->properties; + while (prop != NULL) { +- if (prop->atype == XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(prop) == XML_ATTRIBUTE_ID) { + xmlRemoveID(tree->doc, prop); + } + +@@ -7019,9 +7019,9 @@ xmlSetNsProp(xmlNodePtr node, xmlNsPtr ns, const xmlChar *name, + /* + * Modify the attribute's value. + */ +- if (prop->atype == XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(prop) == XML_ATTRIBUTE_ID) { + xmlRemoveID(node->doc, prop); +- prop->atype = XML_ATTRIBUTE_ID; ++ XML_ATTR_SET_ATYPE(prop, XML_ATTRIBUTE_ID); + } + if (prop->children != NULL) + xmlFreeNodeList(prop->children); +@@ -7041,7 +7041,7 @@ xmlSetNsProp(xmlNodePtr node, xmlNsPtr ns, const xmlChar *name, + tmp = tmp->next; + } + } +- if (prop->atype == XML_ATTRIBUTE_ID) ++ if (XML_ATTR_GET_ATYPE(prop) == XML_ATTRIBUTE_ID) + xmlAddID(NULL, node->doc, value, prop); + return(prop); + } +@@ -9296,7 +9296,7 @@ ns_end: + if (cur->type == XML_ELEMENT_NODE) { + cur->psvi = NULL; + cur->line = 0; +- cur->extra = 0; ++ XML_NODE_CLEAR_EXTRA(cur); + /* + * Walk attributes. + */ +@@ -9312,11 +9312,11 @@ ns_end: + * Attributes. + */ + if ((sourceDoc != NULL) && +- (((xmlAttrPtr) cur)->atype == XML_ATTRIBUTE_ID)) ++ (XML_ATTR_GET_ATYPE((xmlAttrPtr) cur) == XML_ATTRIBUTE_ID)) + { + xmlRemoveID(sourceDoc, (xmlAttrPtr) cur); + } +- ((xmlAttrPtr) cur)->atype = 0; ++ XML_ATTR_CLEAR_ATYPE((xmlAttrPtr) cur); + ((xmlAttrPtr) cur)->psvi = NULL; + } + break; +@@ -10037,7 +10037,7 @@ xmlDOMWrapAdoptAttr(xmlDOMWrapCtxtPtr ctxt, + } + + XML_TREE_ADOPT_STR(attr->name); +- attr->atype = 0; ++ XML_ATTR_CLEAR_ATYPE(attr); + attr->psvi = NULL; + /* + * Walk content. 
+diff --git a/valid.c b/valid.c +index 67e1b1d..b17b8f7 100644 +--- a/valid.c ++++ b/valid.c +@@ -1887,7 +1887,7 @@ xmlScanIDAttributeDecl(xmlValidCtxtPtr ctxt, xmlElementPtr elem, int err) { + if (elem == NULL) return(0); + cur = elem->attributes; + while (cur != NULL) { +- if (cur->atype == XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(cur) == XML_ATTRIBUTE_ID) { + ret ++; + if ((ret > 1) && (err)) + xmlErrValidNode(ctxt, (xmlNodePtr) elem, XML_DTD_MULTIPLE_ID, +@@ -2260,7 +2260,7 @@ xmlDumpAttributeDecl(xmlBufferPtr buf, xmlAttributePtr attr) { + xmlBufferWriteChar(buf, ":"); + } + xmlBufferWriteCHAR(buf, attr->name); +- switch (attr->atype) { ++ switch (XML_ATTR_GET_ATYPE(attr)) { + case XML_ATTRIBUTE_CDATA: + xmlBufferWriteChar(buf, " CDATA"); + break; +@@ -2733,7 +2733,7 @@ xmlAddID(xmlValidCtxtPtr ctxt, xmlDocPtr doc, const xmlChar *value, + return(NULL); + } + if (attr != NULL) +- attr->atype = XML_ATTRIBUTE_ID; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_ID); + return(ret); + } + +@@ -2812,7 +2812,7 @@ xmlIsID(xmlDocPtr doc, xmlNodePtr elem, xmlAttrPtr attr) { + if ((fullelemname != felem) && (fullelemname != elem->name)) + xmlFree(fullelemname); + +- if ((attrDecl != NULL) && (attrDecl->atype == XML_ATTRIBUTE_ID)) ++ if ((attrDecl != NULL) && (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_ID)) + return(1); + } + return(0); +@@ -2853,7 +2853,7 @@ xmlRemoveID(xmlDocPtr doc, xmlAttrPtr attr) { + + xmlHashRemoveEntry(table, ID, xmlFreeIDTableEntry); + xmlFree(ID); +- attr->atype = 0; ++ XML_ATTR_CLEAR_ATYPE(attr); + return(0); + } + +@@ -3138,8 +3138,8 @@ xmlIsRef(xmlDocPtr doc, xmlNodePtr elem, xmlAttrPtr attr) { + elem->name, attr->name); + + if ((attrDecl != NULL) && +- (attrDecl->atype == XML_ATTRIBUTE_IDREF || +- attrDecl->atype == XML_ATTRIBUTE_IDREFS)) ++ (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_IDREF || ++ XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_IDREFS)) + return(1); + } + return(0); +@@ -3523,7 +3523,7 @@ xmlIsMixedElement(xmlDocPtr doc, const xmlChar *name) { + + static int + xmlIsDocNameStartChar(xmlDocPtr doc, int c) { +- if ((doc == NULL) || (doc->properties & XML_DOC_OLD10) == 0) { ++ if ((doc == NULL) || (XML_DOC_GET_PROPERTIES(doc) & XML_DOC_OLD10) == 0) { + /* + * Use the new checks of production [4] [4a] amd [5] of the + * Update 5 of XML-1.0 +@@ -3553,7 +3553,7 @@ xmlIsDocNameStartChar(xmlDocPtr doc, int c) { + + static int + xmlIsDocNameChar(xmlDocPtr doc, int c) { +- if ((doc == NULL) || (doc->properties & XML_DOC_OLD10) == 0) { ++ if ((doc == NULL) || (XML_DOC_GET_PROPERTIES(doc) & XML_DOC_OLD10) == 0) { + /* + * Use the new checks of production [4] [4a] amd [5] of the + * Update 5 of XML-1.0 +@@ -4103,7 +4103,7 @@ xmlValidCtxtNormalizeAttributeValue(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + + if (attrDecl == NULL) + return(NULL); +- if (attrDecl->atype == XML_ATTRIBUTE_CDATA) ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_CDATA) + return(NULL); + + ret = xmlStrdup(value); +@@ -4165,7 +4165,7 @@ xmlValidNormalizeAttributeValue(xmlDocPtr doc, xmlNodePtr elem, + + if (attrDecl == NULL) + return(NULL); +- if (attrDecl->atype == XML_ATTRIBUTE_CDATA) ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_CDATA) + return(NULL); + + ret = xmlStrdup(value); +@@ -4180,7 +4180,7 @@ xmlValidateAttributeIdCallback(void *payload, void *data, + const xmlChar *name ATTRIBUTE_UNUSED) { + xmlAttributePtr attr = (xmlAttributePtr) payload; + int *count = (int *) data; +- if (attr->atype == XML_ATTRIBUTE_ID) (*count)++; ++ if (XML_ATTR_GET_ATYPE(attr) == 
XML_ATTRIBUTE_ID) (*count)++; + } + + /** +@@ -4212,7 +4212,7 @@ xmlValidateAttributeDecl(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + /* Attribute Default Legal */ + /* Enumeration */ + if (attr->defaultValue != NULL) { +- val = xmlValidateAttributeValueInternal(doc, attr->atype, ++ val = xmlValidateAttributeValueInternal(doc, XML_ATTR_GET_ATYPE(attr), + attr->defaultValue); + if (val == 0) { + xmlErrValidNode(ctxt, (xmlNodePtr) attr, XML_DTD_ATTRIBUTE_DEFAULT, +@@ -4223,7 +4223,7 @@ xmlValidateAttributeDecl(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + } + + /* ID Attribute Default */ +- if ((attr->atype == XML_ATTRIBUTE_ID)&& ++ if ((XML_ATTR_GET_ATYPE(attr) == XML_ATTRIBUTE_ID)&& + (attr->def != XML_ATTRIBUTE_IMPLIED) && + (attr->def != XML_ATTRIBUTE_REQUIRED)) { + xmlErrValidNode(ctxt, (xmlNodePtr) attr, XML_DTD_ID_FIXED, +@@ -4233,7 +4233,7 @@ xmlValidateAttributeDecl(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + } + + /* One ID per Element Type */ +- if (attr->atype == XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(attr) == XML_ATTRIBUTE_ID) { + int nbId; + + /* the trick is that we parse DtD as their own internal subset */ +@@ -4492,9 +4492,9 @@ xmlValidateOneAttribute(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + attr->name, elem->name, NULL); + return(0); + } +- attr->atype = attrDecl->atype; ++ XML_ATTR_SET_ATYPE(attr, attrDecl->atype); + +- val = xmlValidateAttributeValueInternal(doc, attrDecl->atype, value); ++ val = xmlValidateAttributeValueInternal(doc, XML_ATTR_GET_ATYPE(attrDecl), value); + if (val == 0) { + xmlErrValidNode(ctxt, elem, XML_DTD_ATTRIBUTE_VALUE, + "Syntax of value for attribute %s of %s is not valid\n", +@@ -4513,19 +4513,19 @@ xmlValidateOneAttribute(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + } + + /* Validity Constraint: ID uniqueness */ +- if (attrDecl->atype == XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_ID) { + if (xmlAddID(ctxt, doc, value, attr) == NULL) + ret = 0; + } + +- if ((attrDecl->atype == XML_ATTRIBUTE_IDREF) || +- (attrDecl->atype == XML_ATTRIBUTE_IDREFS)) { ++ if ((XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_IDREF) || ++ (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_IDREFS)) { + if (xmlAddRef(ctxt, doc, value, attr) == NULL) + ret = 0; + } + + /* Validity Constraint: Notation Attributes */ +- if (attrDecl->atype == XML_ATTRIBUTE_NOTATION) { ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_NOTATION) { + xmlEnumerationPtr tree = attrDecl->tree; + xmlNotationPtr nota; + +@@ -4555,7 +4555,7 @@ xmlValidateOneAttribute(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + } + + /* Validity Constraint: Enumeration */ +- if (attrDecl->atype == XML_ATTRIBUTE_ENUMERATION) { ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_ENUMERATION) { + xmlEnumerationPtr tree = attrDecl->tree; + while (tree != NULL) { + if (xmlStrEqual(tree->name, value)) break; +@@ -4580,7 +4580,7 @@ xmlValidateOneAttribute(xmlValidCtxtPtr ctxt, xmlDocPtr doc, + + /* Extra check for the attribute value */ + ret &= xmlValidateAttributeValue2(ctxt, doc, attr->name, +- attrDecl->atype, value); ++ XML_ATTR_GET_ATYPE(attrDecl), value); + + return(ret); + } +@@ -4679,7 +4679,7 @@ xmlNodePtr elem, const xmlChar *prefix, xmlNsPtr ns, const xmlChar *value) { + return(0); + } + +- val = xmlValidateAttributeValueInternal(doc, attrDecl->atype, value); ++ val = xmlValidateAttributeValueInternal(doc, XML_ATTR_GET_ATYPE(attrDecl), value); + if (val == 0) { + if (ns->prefix != NULL) { + xmlErrValidNode(ctxt, elem, XML_DTD_INVALID_DEFAULT, +@@ -4729,7 +4729,7 @@ xmlNodePtr elem, const xmlChar *prefix, 
xmlNsPtr ns, const xmlChar *value) { + #endif + + /* Validity Constraint: Notation Attributes */ +- if (attrDecl->atype == XML_ATTRIBUTE_NOTATION) { ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_NOTATION) { + xmlEnumerationPtr tree = attrDecl->tree; + xmlNotationPtr nota; + +@@ -4771,7 +4771,7 @@ xmlNodePtr elem, const xmlChar *prefix, xmlNsPtr ns, const xmlChar *value) { + } + + /* Validity Constraint: Enumeration */ +- if (attrDecl->atype == XML_ATTRIBUTE_ENUMERATION) { ++ if (XML_ATTR_GET_ATYPE(attrDecl) == XML_ATTRIBUTE_ENUMERATION) { + xmlEnumerationPtr tree = attrDecl->tree; + while (tree != NULL) { + if (xmlStrEqual(tree->name, value)) break; +@@ -4809,10 +4809,10 @@ xmlNodePtr elem, const xmlChar *prefix, xmlNsPtr ns, const xmlChar *value) { + /* Extra check for the attribute value */ + if (ns->prefix != NULL) { + ret &= xmlValidateAttributeValue2(ctxt, doc, ns->prefix, +- attrDecl->atype, value); ++ XML_ATTR_GET_ATYPE(attrDecl), value); + } else { + ret &= xmlValidateAttributeValue2(ctxt, doc, BAD_CAST "xmlns", +- attrDecl->atype, value); ++ XML_ATTR_GET_ATYPE(attrDecl), value); + } + + return(ret); +@@ -6570,7 +6570,7 @@ xmlValidateRef(xmlRefPtr ref, xmlValidCtxtPtr ctxt, + while (IS_BLANK_CH(*cur)) cur++; + } + xmlFree(dup); +- } else if (attr->atype == XML_ATTRIBUTE_IDREF) { ++ } else if (XML_ATTR_GET_ATYPE(attr) == XML_ATTRIBUTE_IDREF) { + id = xmlGetID(ctxt->doc, name); + if (id == NULL) { + xmlErrValidNode(ctxt, attr->parent, XML_DTD_UNKNOWN_ID, +@@ -6578,7 +6578,7 @@ xmlValidateRef(xmlRefPtr ref, xmlValidCtxtPtr ctxt, + attr->name, name, NULL); + ctxt->valid = 0; + } +- } else if (attr->atype == XML_ATTRIBUTE_IDREFS) { ++ } else if (XML_ATTR_GET_ATYPE(attr) == XML_ATTRIBUTE_IDREFS) { + xmlChar *dup, *str = NULL, *cur, save; + + dup = xmlStrdup(name); +@@ -6778,7 +6778,7 @@ xmlValidateAttributeCallback(void *payload, void *data, + + if (cur == NULL) + return; +- switch (cur->atype) { ++ switch (XML_ATTR_GET_ATYPE(cur)) { + case XML_ATTRIBUTE_CDATA: + case XML_ATTRIBUTE_ID: + case XML_ATTRIBUTE_IDREF : +@@ -6793,7 +6793,7 @@ xmlValidateAttributeCallback(void *payload, void *data, + if (cur->defaultValue != NULL) { + + ret = xmlValidateAttributeValue2(ctxt, ctxt->doc, cur->name, +- cur->atype, cur->defaultValue); ++ XML_ATTR_GET_ATYPE(cur), cur->defaultValue); + if ((ret == 0) && (ctxt->valid == 1)) + ctxt->valid = 0; + } +@@ -6801,14 +6801,14 @@ xmlValidateAttributeCallback(void *payload, void *data, + xmlEnumerationPtr tree = cur->tree; + while (tree != NULL) { + ret = xmlValidateAttributeValue2(ctxt, ctxt->doc, +- cur->name, cur->atype, tree->name); ++ cur->name, XML_ATTR_GET_ATYPE(cur), tree->name); + if ((ret == 0) && (ctxt->valid == 1)) + ctxt->valid = 0; + tree = tree->next; + } + } + } +- if (cur->atype == XML_ATTRIBUTE_NOTATION) { ++ if (XML_ATTR_GET_ATYPE(cur) == XML_ATTRIBUTE_NOTATION) { + doc = cur->doc; + if (cur->elem == NULL) { + xmlErrValid(ctxt, XML_ERR_INTERNAL_ERROR, +diff --git a/xmlreader.c b/xmlreader.c +index 979385a..38196fa 100644 +--- a/xmlreader.c ++++ b/xmlreader.c +@@ -610,7 +610,7 @@ xmlTextReaderStartElement(void *ctx, const xmlChar *fullname, + if ((ctxt->node != NULL) && (ctxt->input != NULL) && + (ctxt->input->cur != NULL) && (ctxt->input->cur[0] == '/') && + (ctxt->input->cur[1] == '>')) +- ctxt->node->extra = NODE_IS_EMPTY; ++ XML_NODE_SET_EXTRA(ctxt->node, NODE_IS_EMPTY); + } + if (reader != NULL) + reader->state = XML_TEXTREADER_ELEMENT; +@@ -675,7 +675,7 @@ xmlTextReaderStartElementNs(void *ctx, + if ((ctxt->node != NULL) && 
(ctxt->input != NULL) && + (ctxt->input->cur != NULL) && (ctxt->input->cur[0] == '/') && + (ctxt->input->cur[1] == '>')) +- ctxt->node->extra = NODE_IS_EMPTY; ++ XML_NODE_SET_EXTRA(ctxt->node, NODE_IS_EMPTY); + } + if (reader != NULL) + reader->state = XML_TEXTREADER_ELEMENT; +@@ -1075,7 +1075,7 @@ skip_children: + xmlNodePtr tmp; + if (reader->entNr == 0) { + while ((tmp = node->last) != NULL) { +- if ((tmp->extra & NODE_IS_PRESERVED) == 0) { ++ if ((XML_NODE_GET_EXTRA(tmp) & NODE_IS_PRESERVED) == 0) { + xmlUnlinkNode(tmp); + xmlTextReaderFreeNode(reader, tmp); + } else +@@ -1327,7 +1327,7 @@ get_next_node: + if ((oldstate == XML_TEXTREADER_ELEMENT) && + (reader->node->type == XML_ELEMENT_NODE) && + (reader->node->children == NULL) && +- ((reader->node->extra & NODE_IS_EMPTY) == 0) ++ ((XML_NODE_GET_EXTRA(reader->node) & NODE_IS_EMPTY) == 0) + #ifdef LIBXML_XINCLUDE_ENABLED + && (reader->in_xinclude <= 0) + #endif +@@ -1341,7 +1341,7 @@ get_next_node: + xmlTextReaderValidatePop(reader); + #endif /* LIBXML_REGEXP_ENABLED */ + if ((reader->preserves > 0) && +- (reader->node->extra & NODE_IS_SPRESERVED)) ++ (XML_NODE_GET_EXTRA(reader->node) & NODE_IS_SPRESERVED)) + reader->preserves--; + reader->node = reader->node->next; + reader->state = XML_TEXTREADER_ELEMENT; +@@ -1357,7 +1357,7 @@ get_next_node: + (reader->node->prev != NULL) && + (reader->node->prev->type != XML_DTD_NODE)) { + xmlNodePtr tmp = reader->node->prev; +- if ((tmp->extra & NODE_IS_PRESERVED) == 0) { ++ if ((XML_NODE_GET_EXTRA(tmp) & NODE_IS_PRESERVED) == 0) { + if (oldnode == tmp) + oldnode = NULL; + xmlUnlinkNode(tmp); +@@ -1370,7 +1370,7 @@ get_next_node: + if ((oldstate == XML_TEXTREADER_ELEMENT) && + (reader->node->type == XML_ELEMENT_NODE) && + (reader->node->children == NULL) && +- ((reader->node->extra & NODE_IS_EMPTY) == 0)) {; ++ ((XML_NODE_GET_EXTRA(reader->node) & NODE_IS_EMPTY) == 0)) {; + reader->state = XML_TEXTREADER_END; + goto node_found; + } +@@ -1379,7 +1379,7 @@ get_next_node: + xmlTextReaderValidatePop(reader); + #endif /* LIBXML_REGEXP_ENABLED */ + if ((reader->preserves > 0) && +- (reader->node->extra & NODE_IS_SPRESERVED)) ++ (XML_NODE_GET_EXTRA(reader->node) & NODE_IS_SPRESERVED)) + reader->preserves--; + reader->node = reader->node->parent; + if ((reader->node == NULL) || +@@ -1403,7 +1403,7 @@ get_next_node: + #endif + (reader->entNr == 0) && + (oldnode->type != XML_DTD_NODE) && +- ((oldnode->extra & NODE_IS_PRESERVED) == 0)) { ++ ((XML_NODE_GET_EXTRA(oldnode) & NODE_IS_PRESERVED) == 0)) { + xmlUnlinkNode(oldnode); + xmlTextReaderFreeNode(reader, oldnode); + } +@@ -1416,7 +1416,7 @@ get_next_node: + #endif + (reader->entNr == 0) && + (reader->node->last != NULL) && +- ((reader->node->last->extra & NODE_IS_PRESERVED) == 0)) { ++ ((XML_NODE_GET_EXTRA(reader->node->last) & NODE_IS_PRESERVED) == 0)) { + xmlNodePtr tmp = reader->node->last; + xmlUnlinkNode(tmp); + xmlTextReaderFreeNode(reader, tmp); +@@ -1599,7 +1599,7 @@ xmlTextReaderNext(xmlTextReaderPtr reader) { + return(xmlTextReaderRead(reader)); + if (reader->state == XML_TEXTREADER_END || reader->state == XML_TEXTREADER_BACKTRACK) + return(xmlTextReaderRead(reader)); +- if (cur->extra & NODE_IS_EMPTY) ++ if (XML_NODE_GET_EXTRA(cur) & NODE_IS_EMPTY) + return(xmlTextReaderRead(reader)); + do { + ret = xmlTextReaderRead(reader); +@@ -3022,7 +3022,7 @@ xmlTextReaderIsEmptyElement(xmlTextReaderPtr reader) { + if (reader->in_xinclude > 0) + return(1); + #endif +- return((reader->node->extra & NODE_IS_EMPTY) != 0); ++ 
return((XML_NODE_GET_EXTRA(reader->node) & NODE_IS_EMPTY) != 0); + } + + /** +@@ -3884,15 +3884,15 @@ xmlTextReaderPreserve(xmlTextReaderPtr reader) { + return(NULL); + + if ((cur->type != XML_DOCUMENT_NODE) && (cur->type != XML_DTD_NODE)) { +- cur->extra |= NODE_IS_PRESERVED; +- cur->extra |= NODE_IS_SPRESERVED; ++ XML_NODE_ADD_EXTRA(cur, NODE_IS_PRESERVED); ++ XML_NODE_ADD_EXTRA(cur, NODE_IS_SPRESERVED); + } + reader->preserves++; + + parent = cur->parent;; + while (parent != NULL) { + if (parent->type == XML_ELEMENT_NODE) +- parent->extra |= NODE_IS_PRESERVED; ++ XML_NODE_ADD_EXTRA(parent, NODE_IS_PRESERVED); + parent = parent->parent; + } + return(cur); +diff --git a/xmlschemas.c b/xmlschemas.c +index 4662ebb..f205828 100644 +--- a/xmlschemas.c ++++ b/xmlschemas.c +@@ -6032,7 +6032,7 @@ xmlSchemaPValAttrNodeID(xmlSchemaParserCtxtPtr ctxt, xmlAttrPtr attr) + /* + * NOTE: the IDness might have already be declared in the DTD + */ +- if (attr->atype != XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(attr) != XML_ATTRIBUTE_ID) { + xmlIDPtr res; + xmlChar *strip; + +@@ -6055,7 +6055,7 @@ xmlSchemaPValAttrNodeID(xmlSchemaParserCtxtPtr ctxt, xmlAttrPtr attr) + NULL, NULL, "Duplicate value '%s' of simple " + "type 'xs:ID'", value, NULL); + } else +- attr->atype = XML_ATTRIBUTE_ID; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_ID); + } + } else if (ret > 0) { + ret = XML_SCHEMAP_S4S_ATTR_INVALID_VALUE; +diff --git a/xmlschemastypes.c b/xmlschemastypes.c +index 60268e2..d6e131a 100644 +--- a/xmlschemastypes.c ++++ b/xmlschemastypes.c +@@ -2990,7 +2990,7 @@ xmlSchemaValAtomicType(xmlSchemaTypePtr type, const xmlChar * value, + /* + * NOTE: the IDness might have already be declared in the DTD + */ +- if (attr->atype != XML_ATTRIBUTE_ID) { ++ if (XML_ATTR_GET_ATYPE(attr) != XML_ATTRIBUTE_ID) { + xmlIDPtr res; + xmlChar *strip; + +@@ -3003,7 +3003,7 @@ xmlSchemaValAtomicType(xmlSchemaTypePtr type, const xmlChar * value, + if (res == NULL) { + ret = 2; + } else { +- attr->atype = XML_ATTRIBUTE_ID; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_ID); + } + } + } +@@ -3028,7 +3028,7 @@ xmlSchemaValAtomicType(xmlSchemaTypePtr type, const xmlChar * value, + xmlFree(strip); + } else + xmlAddRef(NULL, node->doc, value, attr); +- attr->atype = XML_ATTRIBUTE_IDREF; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_IDREF); + } + goto done; + case XML_SCHEMAS_IDREFS: +@@ -3042,7 +3042,7 @@ xmlSchemaValAtomicType(xmlSchemaTypePtr type, const xmlChar * value, + (node->type == XML_ATTRIBUTE_NODE)) { + xmlAttrPtr attr = (xmlAttrPtr) node; + +- attr->atype = XML_ATTRIBUTE_IDREFS; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_IDREFS); + } + goto done; + case XML_SCHEMAS_ENTITY:{ +@@ -3073,7 +3073,7 @@ xmlSchemaValAtomicType(xmlSchemaTypePtr type, const xmlChar * value, + (node->type == XML_ATTRIBUTE_NODE)) { + xmlAttrPtr attr = (xmlAttrPtr) node; + +- attr->atype = XML_ATTRIBUTE_ENTITY; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_ENTITY); + } + goto done; + } +@@ -3090,7 +3090,7 @@ xmlSchemaValAtomicType(xmlSchemaTypePtr type, const xmlChar * value, + (node->type == XML_ATTRIBUTE_NODE)) { + xmlAttrPtr attr = (xmlAttrPtr) node; + +- attr->atype = XML_ATTRIBUTE_ENTITIES; ++ XML_ATTR_SET_ATYPE(attr, XML_ATTRIBUTE_ENTITIES); + } + goto done; + case XML_SCHEMAS_NOTATION:{ +-- +2.43.0 + diff --git a/SPECS/libxml2/libxml2.spec b/SPECS/libxml2/libxml2.spec index ad8f7c77572..d8ffd218cb2 100644 --- a/SPECS/libxml2/libxml2.spec +++ b/SPECS/libxml2/libxml2.spec @@ -1,7 +1,7 @@ Summary: Libxml2 Name: libxml2 Version: 2.11.5 -Release: 7%{?dist} +Release: 
8%{?dist}
 License: MIT
 Vendor: Microsoft Corporation
 Distribution: Azure Linux
@@ -21,6 +21,7 @@
 Patch9: CVE-2025-6021.patch
 Patch10: CVE-2025-6170.patch
 Patch11: CVE-2025-49794_CVE-2025-49796.patch
 Patch12: CVE-2025-49795.patch
+Patch13: CVE-2025-7425.patch
 BuildRequires: python3-devel
 BuildRequires: python3-xml
@@ -92,6 +93,9 @@ find %{buildroot} -type f -name "*.la" -delete -print
 %{_libdir}/cmake/libxml2/libxml2-config.cmake
 
 %changelog
+* Mon Jan 12 2026 Akhila Guruju - 2.11.5-8
+- Patch CVE-2025-7425
+
 * Wed Oct 29 2025 Azure Linux Security Servicing Account - 2.11.5-7
 - Patch for CVE-2025-49795
 
diff --git a/SPECS/lldpd/lldpd.spec b/SPECS/lldpd/lldpd.spec
index 5eebcf7902d..3c63652c284 100644
--- a/SPECS/lldpd/lldpd.spec
+++ b/SPECS/lldpd/lldpd.spec
@@ -5,7 +5,7 @@ Distribution: Azure Linux
 
 Name: lldpd
 Version: 1.0.17
-Release: 1%{?dist}
+Release: 2%{?dist}
 Summary: ISC-licensed implementation of LLDP
 License: ISC
 
@@ -162,6 +162,9 @@ fi
 
 
 %changelog
+* Tue Jan 06 2026 Pawel Winogrodzki - 1.0.17-2
+- Bumping release to rebuild with new 'net-snmp' libs.
+
 * Mon Nov 06 2023 CBL-Mariner Servicing Account - 1.0.17-1
 - Auto-upgrade to 1.0.17 - Azure Linux 3.0 - package upgrades
 
diff --git a/SPECS/mariadb/CVE-2023-52971.patch b/SPECS/mariadb/CVE-2023-52971.patch
deleted file mode 100644
index 539898527cf..00000000000
--- a/SPECS/mariadb/CVE-2023-52971.patch
+++ /dev/null
@@ -1,157 +0,0 @@
-From a9b6bf9fa83604ac13e921c150a2806a64d23f92 Mon Sep 17 00:00:00 2001
-From: Mayank Singh
-Date: Mon, 5 May 2025 09:20:46 +0000
-Subject: [PATCH] Address CVE-2023-52971
-Upstream Reference Link: https://github.com/MariaDB/server/commit/3b4de4c281cb3e33e6d3ee9537e542bf0a84b83e
-
----
- mysql-test/main/join_nested.result | 12 +++++
- mysql-test/main/join_nested.test | 13 ++++++
- mysql-test/main/join_nested_jcl6.result | 12 +++++
- sql/sql_select.cc | 58 +++++++++++++++++++++++--
- 4 files changed, 91 insertions(+), 4 deletions(-)
-
-diff --git a/mysql-test/main/join_nested.result b/mysql-test/main/join_nested.result
-index cb9dffc8..56468518 100644
---- a/mysql-test/main/join_nested.result
-+++ b/mysql-test/main/join_nested.result
-@@ -2051,3 +2051,15 @@ a b c a a b
- DROP TABLE t1, t2, t3;
- set join_cache_level= @save_join_cache_level;
- # end of 10.3 tests
-+#
-+# MDEV-32084: Assertion in best_extension_by_limited_search(), or crash elsewhere in release
-+#
-+CREATE TABLE t1 (i int);
-+INSERT INTO t1 values (1),(2);
-+SELECT 1 FROM t1 WHERE i IN
-+(SELECT 1 FROM t1 c
-+LEFT JOIN (t1 a LEFT JOIN t1 b ON t1.i = b.i) ON c.i = t1.i);
-+1
-+1
-+DROP TABLE t1;
-+# end of 10.11 tests
-diff --git a/mysql-test/main/join_nested.test b/mysql-test/main/join_nested.test
-index ed1fe4c9..62370b95 100644
---- a/mysql-test/main/join_nested.test
-+++ b/mysql-test/main/join_nested.test
-@@ -1458,3 +1458,16 @@ DROP TABLE t1, t2, t3;
- set join_cache_level= @save_join_cache_level;
-
- --echo # end of 10.3 tests
-+
-+--echo #
-+--echo # MDEV-32084: Assertion in best_extension_by_limited_search(), or crash elsewhere in release
-+--echo #
-+CREATE TABLE t1 (i int);
-+INSERT INTO t1 values (1),(2);
-+
-+SELECT 1 FROM t1 WHERE i IN
-+ (SELECT 1 FROM t1 c
-+ LEFT JOIN (t1 a LEFT JOIN t1 b ON t1.i = b.i) ON c.i = t1.i);
-+
-+DROP TABLE t1;
-+--echo # end of 10.11 tests
-diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result
-index 0bda8d43..50a1e83a 100644
---- a/mysql-test/main/join_nested_jcl6.result
-+++ b/mysql-test/main/join_nested_jcl6.result
-@@ -2060,6 +2060,18 @@ a b c a a b
- DROP 
TABLE t1, t2, t3; - set join_cache_level= @save_join_cache_level; - # end of 10.3 tests -+# -+# MDEV-32084: Assertion in best_extension_by_limited_search(), or crash elsewhere in release -+# -+CREATE TABLE t1 (i int); -+INSERT INTO t1 values (1),(2); -+SELECT 1 FROM t1 WHERE i IN -+(SELECT 1 FROM t1 c -+LEFT JOIN (t1 a LEFT JOIN t1 b ON t1.i = b.i) ON c.i = t1.i); -+1 -+1 -+DROP TABLE t1; -+# end of 10.11 tests - CREATE TABLE t5 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b)); - CREATE TABLE t6 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b)); - CREATE TABLE t7 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b)); -diff --git a/sql/sql_select.cc b/sql/sql_select.cc -index b88e8b4c..b8e15264 100644 ---- a/sql/sql_select.cc -+++ b/sql/sql_select.cc -@@ -18544,6 +18544,8 @@ simplify_joins(JOIN *join, List *join_list, COND *conds, bool top, - prev_table->dep_tables|= used_tables; - if (prev_table->on_expr) - { -+ /* If the ON expression is still there, it's an outer join */ -+ DBUG_ASSERT(prev_table->outer_join); - prev_table->dep_tables|= table->on_expr_dep_tables; - table_map prev_used_tables= prev_table->nested_join ? - prev_table->nested_join->used_tables : -@@ -18558,11 +18560,59 @@ simplify_joins(JOIN *join, List *join_list, COND *conds, bool top, - prevents update of inner table dependences. - For example it might happen if RAND() function - is used in JOIN ON clause. -- */ -- if (!((prev_table->on_expr->used_tables() & -- ~(OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) & -- ~prev_used_tables)) -+ */ -+ table_map prev_on_expr_deps= prev_table->on_expr->used_tables() & -+ ~(OUTER_REF_TABLE_BIT | RAND_TABLE_BIT); -+ prev_on_expr_deps&= ~prev_used_tables; -+ -+ if (!prev_on_expr_deps) - prev_table->dep_tables|= used_tables; -+ else -+ { -+ /* -+ Another possible case is when prev_on_expr_deps!=0 but it depends -+ on a table outside this join nest. SQL name resolution don't allow -+ this but it is possible when LEFT JOIN is inside a subquery which -+ is converted into a semi-join nest, Example: -+ -+ t1 SEMI JOIN ( -+ t2 -+ LEFT JOIN (t3 LEFT JOIN t4 ON t4.col=t1.col) ON expr -+ ) ON ... -+ -+ here, we would have prev_table=t4, table=t3. The condition -+ "ON t4.col=t1.col" depends on tables {t1, t4}. To make sure the -+ optimizer puts t3 before t4 we need to make sure t4.dep_tables -+ includes t3. -+ */ -+ -+ DBUG_ASSERT(table->embedding == prev_table->embedding); -+ if (table->embedding) -+ { -+ /* -+ Find what are the "peers" of "table" in the join nest. Normally, -+ it is table->embedding->nested_join->used_tables, but here we are -+ in the process of recomputing that value. -+ So, we walk the join list and collect the bitmap of peers: -+ */ -+ table_map peers= 0; -+ List_iterator_fast li(*join_list); -+ TABLE_LIST *peer; -+ while ((peer= li++)) -+ { -+ table_map curmap= peer->nested_join -+ ? peer->nested_join->used_tables -+ : peer->get_map(); -+ peers|= curmap; -+ } -+ /* -+ If prev_table doesn't depend on any of its peers, add a -+ dependency on nearest peer, that is, on 'table'. 
-+ */ -+ if (!(prev_on_expr_deps & peers)) -+ prev_table->dep_tables|= used_tables; -+ } -+ } - } - } - prev_table= table; --- -2.45.3 - diff --git a/SPECS/mariadb/mariadb.signatures.json b/SPECS/mariadb/mariadb.signatures.json index 2481d68eed7..0ef27ae00fd 100644 --- a/SPECS/mariadb/mariadb.signatures.json +++ b/SPECS/mariadb/mariadb.signatures.json @@ -4,7 +4,7 @@ "README.mariadb-docs": "c3c6584dbdc35445014ac48023da59cafc5abc6996859cebb4e357c2f380990f", "README.wsrep_sst_rsync_tunnel": "f121b2f6e804a8aaf01e0c835e62b64a0d0bf6cd922cc1a21897f196f8b0714f", "clustercheck.sh": "4be47a46f99b714bc3681fdf11b09d242dae5e3eb81274b3040a73f9d7800d50", - "mariadb-10.11.11.tar.gz": "14cc0d9d9a7a330231d9ed91ac28f29b502d2f1e7021d81c940280db52bac812", + "mariadb-10.11.15.tar.gz": "6190529d9d047163259967a92095b505df15b39195ea55cdf856314eef4546f5", "mariadb-check-socket.sh": "6d04410549275140f07b89a1dcef99f31cd47751ef9142d14e7898e7cbcff023", "mariadb-check-upgrade.sh": "e49c23e79155d416f7bad292d073213c0beafed99c172a06d909ec3e24ee6e75", "mariadb-prepare-db-dir.sh": "ff8d2e719f6db158eda0acb58a9d84b43c959baf0d2a8f4d9ce7a62f13af36d0", @@ -21,4 +21,4 @@ "rh-skipped-tests-s390.list": "5e826f9f3cc920c0fe67434fd32b25a205d6a8530552e998edb376c4661b59f3", "wsrep_sst_rsync_tunnel": "5194ed1971d0afe8d2836c1d143263f6891311c9ac0fae536b866f2a885d056e" } -} +} \ No newline at end of file diff --git a/SPECS/mariadb/mariadb.spec b/SPECS/mariadb/mariadb.spec index 2c115a681f9..ad8e7bcf0b7 100644 --- a/SPECS/mariadb/mariadb.spec +++ b/SPECS/mariadb/mariadb.spec @@ -1,9 +1,8 @@ # Plain package name for cases, where %%{name} differs (e.g. for versioned packages) %global majorname mariadb -%define package_version 10.11.11 +%define package_version 10.11.15 %define majorversion %(echo %{package_version} | cut -d'.' -f1-2 ) - %define _vpath_builddir . # Set if this package will be the default one in distribution @@ -201,7 +200,6 @@ Patch12: rocksdb-6.8-gcc13.patch Patch13: %{majorname}-libfmt.patch # Patch14: make MTR port calculation reasonably predictable Patch14: %{majorname}-mtr.patch -Patch15: CVE-2023-52971.patch %global pkgname %{majorname} @@ -823,7 +821,6 @@ rm -r storage/rocksdb/ %endif %patch -P14 -p1 -%patch -P15 -p1 # generate a list of tests that fail, but are not disabled by upstream cat %{SOURCE50} | tee -a mysql-test/unstable-tests @@ -1772,6 +1769,9 @@ fi %endif %changelog +* Mon Dec 29 2025 BinduSri Adabala - 10.11.15-1 +- Upgrade to 10.11.15 for CVE-2025-13699 + * Fri Apr 04 2025 Mayank Singh - 10.11.11-1 - Initial Azure Linux import from Fedora 42 (license: MIT). - License verified diff --git a/SPECS/memcached/CVE-2026-24809.patch b/SPECS/memcached/CVE-2026-24809.patch new file mode 100644 index 00000000000..eda2322a150 --- /dev/null +++ b/SPECS/memcached/CVE-2026-24809.patch @@ -0,0 +1,57 @@ +From 3bb38d48f5e04d56ff7e08391dd17f902e7828ff Mon Sep 17 00:00:00 2001 +From: npt-1707 +Date: Mon, 21 Apr 2025 23:05:53 +0800 +Subject: [PATCH] Save stack space while handling errors + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/praydog/REFramework/pull/1320.patch +--- + vendor/lua/src/ldebug.c | 5 ++++- + vendor/lua/src/lvm.c | 6 ++++-- + 2 files changed, 8 insertions(+), 3 deletions(-) + +diff --git a/vendor/lua/src/ldebug.c b/vendor/lua/src/ldebug.c +index 1feaab2..5524fae 100644 +--- a/vendor/lua/src/ldebug.c ++++ b/vendor/lua/src/ldebug.c +@@ -783,8 +783,11 @@ l_noret luaG_runerror (lua_State *L, const char *fmt, ...) 
{ + va_start(argp, fmt); + msg = luaO_pushvfstring(L, fmt, argp); /* format message */ + va_end(argp); +- if (isLua(ci)) /* if Lua function, add source:line information */ ++ if (isLua(ci)) { /* if Lua function, add source:line information */ + luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci)); ++ setobjs2s(L, L->top - 2, L->top - 1); /* remove 'msg' from the stack */ ++ L->top--; ++ } + luaG_errormsg(L); + } + +diff --git a/vendor/lua/src/lvm.c b/vendor/lua/src/lvm.c +index c9729bc..51b9614 100644 +--- a/vendor/lua/src/lvm.c ++++ b/vendor/lua/src/lvm.c +@@ -656,8 +656,10 @@ void luaV_concat (lua_State *L, int total) { + /* collect total length and number of strings */ + for (n = 1; n < total && tostring(L, s2v(top - n - 1)); n++) { + size_t l = vslen(s2v(top - n - 1)); +- if (l_unlikely(l >= (MAX_SIZE/sizeof(char)) - tl)) ++ if (l_unlikely(l >= (MAX_SIZE/sizeof(char)) - tl)) { ++ L->top = top - total; /* pop strings to avoid wasting stack */ + luaG_runerror(L, "string length overflow"); ++ } + tl += l; + } + if (tl <= LUAI_MAXSHORTLEN) { /* is result a short string? */ +@@ -672,7 +674,7 @@ void luaV_concat (lua_State *L, int total) { + setsvalue2s(L, top - n, ts); /* create result */ + } + total -= n-1; /* got 'n' strings to create 1 new */ +- L->top -= n-1; /* popped 'n' strings and pushed one */ ++ L->top = top - (n - 1); /* popped 'n' strings and pushed one */ + } while (total > 1); /* repeat until only 1 result left */ + } + +-- +2.45.4 + diff --git a/SPECS/memcached/memcached.spec b/SPECS/memcached/memcached.spec index e5b5d3d6514..daa26801925 100644 --- a/SPECS/memcached/memcached.spec +++ b/SPECS/memcached/memcached.spec @@ -7,7 +7,7 @@ Summary: High Performance, Distributed Memory Object Cache Name: memcached Version: 1.6.27 -Release: 3%{?dist} +Release: 4%{?dist} License: BSD Vendor: Microsoft Corporation Distribution: Azure Linux @@ -17,6 +17,7 @@ Source1: memcached.sysconfig Patch0: memcached-unit.patch Patch1: CVE-2021-43519.patch Patch2: CVE-2021-44647.patch +Patch3: CVE-2026-24809.patch BuildRequires: gcc BuildRequires: libevent-devel BuildRequires: systemd-devel @@ -131,6 +132,9 @@ exit 0 %{_unitdir}/memcached.service %changelog +* Thu Jan 29 2026 Azure Linux Security Servicing Account - 1.6.27-4 +- Patch for CVE-2026-24809 + * Thu Mar 20 2025 Jyoti Kanase - 1.6.27-3 - Fix CVE-2023-6228 diff --git a/SPECS/mft_kernel-hwe/mft_kernel-hwe.signatures.json b/SPECS/mft_kernel-hwe/mft_kernel-hwe.signatures.json index 71b70dfe5ed..53ce0b64e3d 100644 --- a/SPECS/mft_kernel-hwe/mft_kernel-hwe.signatures.json +++ b/SPECS/mft_kernel-hwe/mft_kernel-hwe.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "kernel-mft-4.30.0.tgz": "9f882bd84a3345c1270e0d05233df66bffc5cb506484916bb15c80da50f41fe6" + "kernel-mft-4.33.0.tgz": "7b6ecb25695fed61d3a9443ae73e775b24921dc7e0df5671147ff5871f8c195b" } } \ No newline at end of file diff --git a/SPECS/mft_kernel-hwe/mft_kernel-hwe.spec b/SPECS/mft_kernel-hwe/mft_kernel-hwe.spec index 0130e712c92..670360f045e 100644 --- a/SPECS/mft_kernel-hwe/mft_kernel-hwe.spec +++ b/SPECS/mft_kernel-hwe/mft_kernel-hwe.spec @@ -1,8 +1,7 @@ - %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -29,21 +28,23 @@ # take path to kernel sources if 
provided, otherwise look in default location (for non KMP rpms). %{!?K_SRC: %global K_SRC /lib/modules/%{KVERSION}/build} -%{!?version: %global version 4.30.0} -%{!?_release: %global _release 1} +%{!?version: %global version 4.33.0} +%{!?_release: %global _release 2} %global _kmp_rel %{_release}%{?_kmp_build_num}%{?_dist} Name: mft_kernel-hwe Summary: %{name} Kernel Module for the %{KVERSION} kernel -Version: 4.30.0 -Release: 24%{release_suffix}%{?dist} +Version: 4.33.0 +Release: 2%{release_suffix}%{?dist} License: Dual BSD/GPLv2 Group: System Environment/Kernel BuildRoot: /var/tmp/%{name}-%{version}-build -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/kernel-mft-4.30.0.tgz#/kernel-mft-%{version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/kernel-mft-%{version}.tgz Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 BuildRequires: gcc BuildRequires: make @@ -212,6 +213,14 @@ find %{buildroot} -type f -name \*.ko -exec %{__strip} -p --strip-debug --discar %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 4.33.0-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 4.33.0-1_6.12.57.1.1 +- Upgrade version to 4.33.0. +- Enable build on x86_64 kernel hwe. +- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 4.30.0-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/mft_kernel/mft_kernel.signatures.json b/SPECS/mft_kernel/mft_kernel.signatures.json index 71b70dfe5ed..53ce0b64e3d 100644 --- a/SPECS/mft_kernel/mft_kernel.signatures.json +++ b/SPECS/mft_kernel/mft_kernel.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "kernel-mft-4.30.0.tgz": "9f882bd84a3345c1270e0d05233df66bffc5cb506484916bb15c80da50f41fe6" + "kernel-mft-4.33.0.tgz": "7b6ecb25695fed61d3a9443ae73e775b24921dc7e0df5671147ff5871f8c195b" } } \ No newline at end of file diff --git a/SPECS/mft_kernel/mft_kernel.spec b/SPECS/mft_kernel/mft_kernel.spec index 0b02d74a1ed..b15ed380b55 100644 --- a/SPECS/mft_kernel/mft_kernel.spec +++ b/SPECS/mft_kernel/mft_kernel.spec @@ -28,18 +28,21 @@ # take path to kernel sources if provided, otherwise look in default location (for non KMP rpms). %{!?K_SRC: %global K_SRC /lib/modules/%{KVERSION}/build} -%{!?version: %global version 4.30.0} +%{!?version: %global version 4.33.0} %{!?_release: %global _release 1} %global _kmp_rel %{_release}%{?_kmp_build_num}%{?_dist} Name: mft_kernel Summary: %{name} Kernel Module for the %{KVERSION} kernel -Version: 4.30.0 -Release: 20%{release_suffix}%{?dist} +Version: 4.33.0 +Release: 1%{release_suffix}%{?dist} License: Dual BSD/GPLv2 Group: System Environment/Kernel BuildRoot: /var/tmp/%{name}-%{version}-build -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/kernel-mft-4.30.0.tgz#/kernel-mft-%{version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. 
+# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/kernel-mft-%{version}.tgz Vendor: Microsoft Corporation Distribution: Azure Linux ExclusiveArch: x86_64 @@ -227,6 +230,10 @@ find %{buildroot} -type f -name \*.ko -exec %{__strip} -p --strip-debug --discar %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 4.33.0-1 +- Upgrade version to 4.33.0. +- Update source path + * Thu May 29 2025 Nicolas Guibourge - 4.30.0-20 - Add kernel version and release nb into release nb diff --git a/SPECS/mlnx-ethtool/mlnx-ethtool.signatures.json b/SPECS/mlnx-ethtool/mlnx-ethtool.signatures.json index eda2cb8014a..687ee71cc11 100644 --- a/SPECS/mlnx-ethtool/mlnx-ethtool.signatures.json +++ b/SPECS/mlnx-ethtool/mlnx-ethtool.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-ethtool-6.9.tar.gz": "927834fe390dda259484cebfa033c962eadcc293422817aa442847ce167b919a" + "mlnx-ethtool-6.14.tar.gz": "44a729f1ce2cf42cc84b46d9579facd522e72b48f1e946a633fc9dfe80f5a26b" } } \ No newline at end of file diff --git a/SPECS/mlnx-ethtool/mlnx-ethtool.spec b/SPECS/mlnx-ethtool/mlnx-ethtool.spec index acb954a58f9..4a1a3828435 100644 --- a/SPECS/mlnx-ethtool/mlnx-ethtool.spec +++ b/SPECS/mlnx-ethtool/mlnx-ethtool.spec @@ -1,6 +1,6 @@ Name: mlnx-ethtool -Version: 6.9 -Release: 3%{?dist} +Version: 6.14 +Release: 1%{?dist} Group: Utilities Summary: Settings tool for Ethernet and other network devices License: GPLv2 @@ -8,10 +8,17 @@ Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://ftp.kernel.org/pub/software/network/ethtool/ Buildroot: /var/tmp/%{name}-%{version}-build -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-ethtool-6.9.tar.gz#/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz BuildRequires: libmnl-devel +Provides: ethtool +# To avoid file conflicts +Conflicts: ethtool + %description This utility allows querying and changing settings such as speed, port, auto-negotiation, PCI locations and checksum offload on many @@ -35,11 +42,21 @@ make install DESTDIR=${RPM_BUILD_ROOT} %{_sbindir}/ethtool %{_mandir}/man8/ethtool.8* %{_datadir}/bash-completion/completions/ethtool +%{_datadir}/metainfo/org.kernel.software.network.ethtool.metainfo.xml %doc AUTHORS NEWS README %license COPYING %changelog +* Thu Dec 11 2025 Suresh Babu Chalamalasetty - 6.14-1 +- Upgrade version to 6.14. +- Update source path +* Tue Nov 11 2025 Mayank Singh - 6.9-4 +- Updated dependency handling for kexec-tools: + Changed from hard dependency on a single package. + Allows installation to satisfy dependency with either `ethtool` or `mlnx-ethtool`. + Ensures flexibility for image builds and user choice at install time. + Added mutual exclusivity between providers to prevent file conflicts. 
* Mon Sep 15 2025 Elaheh Dehghani - 6.9-3 - Enable ARM64 build by removing ExclusiveArch * Tue Dec 17 2024 Binu Jose Philip - 6.9-2 diff --git a/SPECS/mlnx-iproute2/mlnx-iproute2.signatures.json b/SPECS/mlnx-iproute2/mlnx-iproute2.signatures.json index 7a3487a6604..b264ea852c7 100644 --- a/SPECS/mlnx-iproute2/mlnx-iproute2.signatures.json +++ b/SPECS/mlnx-iproute2/mlnx-iproute2.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-iproute2-6.10.0.tar.gz": "f8333badc404ccd6e79eb29d2761a4f53db122eb86ac4193c65fdb10d6f916f1" + "mlnx-iproute2-6.15.0.tar.gz": "e4132919727d09a9d77c7f1709212ca3f1593775970293a6e258762fcff4a97a" } } \ No newline at end of file diff --git a/SPECS/mlnx-iproute2/mlnx-iproute2.spec b/SPECS/mlnx-iproute2/mlnx-iproute2.spec index a205128e61b..105b236f6a6 100644 --- a/SPECS/mlnx-iproute2/mlnx-iproute2.spec +++ b/SPECS/mlnx-iproute2/mlnx-iproute2.spec @@ -9,7 +9,7 @@ %global _prefix /opt/mellanox/iproute2 %global _exec_prefix %{_prefix} %global package_name mlnx-iproute2 -%global package_version 6.10.0 +%global package_version 6.15.0 %global configs_under_prefix 1 %global netns_package_name netns-mlnx @@ -32,20 +32,23 @@ %global netns_config_dir %{_sysconfdir}/%{netns_package_name} %endif -Summary: Advanced IP routing and network device configuration tools -Name: mlnx-iproute2 -Version: 6.10.0 -Release: 2%{?dist} -License: GPLv2 -Group: Networking/Admin +Summary: Advanced IP routing and network device configuration tools +Name: mlnx-iproute2 +Version: 6.15.0 +Release: 1%{?dist} +License: GPLv2 +Group: Networking/Admin Vendor: Microsoft Corporation Distribution: Azure Linux -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-iproute2-6.10.0.tar.gz#/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz URL: http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2 -ExclusiveArch: x86_64 BuildRequires: bison BuildRequires: flex +BuildRequires: libmnl-devel BuildRoot: /var/tmp/%{name}-%{version}-build %description @@ -111,6 +114,13 @@ rm -rf $RPM_BUILD_ROOT %{_sbindir}/* %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 6.15.0-1 +- Upgrade version to 6.15.0. +- Update source path + +* Wed Oct 29 2025 Elaheh Dehghani - 6.10.0-3 +- Enable build on aarch64. 
+ * Tue Dec 17 2024 Binu Jose Philip - Initial Azure Linux import from NVIDIA (license: GPLv2) - License verified diff --git a/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.signatures.json b/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.signatures.json index c4fe2cb95e4..5b178d76d18 100644 --- a/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.signatures.json +++ b/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-nfsrdma-24.10.tgz": "d2e66a9b6d6e40e621728ea25fa10b4b9ccd5ea3952fdaf2546e9420687a648c" + "mlnx-nfsrdma-25.07.tgz": "f320b79189d253c862f75a06a8c3bc01b9e4c43686693e78e3678b1bfa82b664" } } \ No newline at end of file diff --git a/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.spec b/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.spec index 93f6499da8a..e79afd41543 100644 --- a/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.spec +++ b/SPECS/mlnx-nfsrdma-hwe/mlnx-nfsrdma-hwe.spec @@ -29,7 +29,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -41,9 +41,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name mlnx-nfsrdma-hwe} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-24%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-2%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -67,16 +67,18 @@ Summary: %{_name} Driver Name: mlnx-nfsrdma-hwe -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-nfsrdma-24.10.tgz#/mlnx-nfsrdma-%{_version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/mlnx-nfsrdma-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 BuildRequires: gcc BuildRequires: make @@ -250,6 +252,14 @@ fi %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. 
+- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.signatures.json b/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.signatures.json index c4fe2cb95e4..5b178d76d18 100644 --- a/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.signatures.json +++ b/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-nfsrdma-24.10.tgz": "d2e66a9b6d6e40e621728ea25fa10b4b9ccd5ea3952fdaf2546e9420687a648c" + "mlnx-nfsrdma-25.07.tgz": "f320b79189d253c862f75a06a8c3bc01b9e4c43686693e78e3678b1bfa82b664" } } \ No newline at end of file diff --git a/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.spec b/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.spec index f8eca955e7d..c55b4fe59d9 100644 --- a/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.spec +++ b/SPECS/mlnx-nfsrdma/mlnx-nfsrdma.spec @@ -40,9 +40,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name mlnx-nfsrdma} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-21%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-1%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -66,12 +66,15 @@ Summary: %{_name} Driver Name: mlnx-nfsrdma -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-nfsrdma-24.10.tgz#/%{_name}-%{_version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{_name}-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux @@ -250,6 +253,10 @@ fi %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. +- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS/mlnx-ofa_kernel-hwe/001-fix-module-init-for-ibt.patch b/SPECS/mlnx-ofa_kernel-hwe/001-fix-module-init-for-ibt.patch deleted file mode 100644 index 3914f862473..00000000000 --- a/SPECS/mlnx-ofa_kernel-hwe/001-fix-module-init-for-ibt.patch +++ /dev/null @@ -1,20 +0,0 @@ -# This patch is based on -# https://opendev.org/starlingx/kernel/raw/commit/b6f8503866c3cae009d5b2e8e8db17149c917d9c/kernel-modules/mlnx-ofa_kernel/debian/patches/0003-Fix-the-obsolete-module-init.patch -# modified to apply cleanly with no change in logic. 
- ---- a/include/linux/compat-2.6.h -+++ b/include/linux/compat-2.6.h -@@ -31,9 +31,11 @@ - #define module_init(initfn) \ - static int __init __init_backport(void) \ - { \ -- mlx_backport_dependency_symbol(); \ -+ mlx_backport_dependency_symbol(); \ - return initfn(); \ - } \ -- int init_module(void) __copy(initfn) __attribute__((alias("__init_backport"))); -+ int init_module(void) __copy(initfn) \ -+ __attribute__((alias("__init_backport"))); \ -+ ___ADDRESSABLE(init_module, __initdata); - - #endif /* LINUX_26_COMPAT_H */ diff --git a/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.signatures.json b/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.signatures.json index 1bc6c70886f..6b8f7af1459 100644 --- a/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.signatures.json +++ b/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-ofa_kernel-24.10.tgz": "571588614a9f10409078703252357f69760577909191a3089b95258185b49d2f" + "mlnx-ofa_kernel-25.07.tgz": "1eb4ab97fb48e4db8df43f75efbba43cebd5126ddadd7bceb0fb7de5218951f0" } } \ No newline at end of file diff --git a/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.spec b/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.spec index 5c1db0559f0..546c9b88e00 100644 --- a/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.spec +++ b/SPECS/mlnx-ofa_kernel-hwe/mlnx-ofa_kernel-hwe.spec @@ -29,7 +29,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -85,9 +85,10 @@ %global base_name mlnx-ofa_kernel %{!?_name: %global _name %{base_name}-hwe} -%{!?_version: %global _version 24.10} -%{!?_release: %global _release OFED.24.10.0.7.0.1} +%{!?_version: %global _version 25.07} +%{!?_release: %global _release OFED.25.07.0.9.7.1} %global _kmp_rel %{_release}%{?_kmp_build_num}%{?_dist} +%global MLNX_OFA_DRV_SRC 24.10-0.7.0 %global utils_pname %{name} %global devel_pname %{name}-devel @@ -100,18 +101,19 @@ Summary: Infiniband HCA Driver Name: mlnx-ofa_kernel-hwe -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com/ Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-ofa_kernel-24.10.tgz#/mlnx-ofa_kernel-%{_version}.tgz -Patch0: 001-fix-module-init-for-ibt.patch +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/mlnx-ofa_kernel-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 Obsoletes: kernel-ib Obsoletes: mlnx-en @@ -139,7 +141,7 @@ BuildRequires: /usr/bin/perl %description InfiniBand "verbs", Access Layer and ULPs. Utilities rpm. -The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-24.10-0.7.0.tgz +The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-%{MLNX_OFA_DRV_SRC}.tgz # build KMP rpms? 
@@ -176,6 +178,9 @@ Obsoletes: mlnx-en-doc Obsoletes: mlnx-en-debuginfo Obsoletes: mlnx-en-sources Obsoletes: mlnx-rdma-rxe +Obsoletes: fwctl-hwe <= 24.10 +Provides: fwctl-hwe = %{version}-%{release} + Summary: Infiniband Driver and ULPs kernel modules Group: System Environment/Libraries @@ -197,7 +202,7 @@ Conflicts: mlnx-ofa_kernel-modules %description -n %{non_kmp_pname} Core, HW and ULPs kernel modules Non-KMP format kernel modules rpm. -The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-24.10-0.7.0.tgz +The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-%{MLNX_OFA_DRV_SRC}.tgz %endif #end if "%{KMP}" == "1" %package -n %{devel_pname} @@ -221,7 +226,7 @@ Summary: Infiniband Driver and ULPs kernel modules sources Group: System Environment/Libraries %description -n %{devel_pname} Core, HW and ULPs kernel modules sources -The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-24.10-0.7.0.tgz +The driver sources are located at: http://www.mellanox.com/downloads/ofed/ # # setup module sign scripts if paths to the keys are given @@ -278,7 +283,7 @@ The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-o %prep %setup -n mlnx-ofa_kernel-%{_version} -%patch 0 -p1 + set -- * mkdir source mv "$@" source/ @@ -444,6 +449,14 @@ update-alternatives --remove \ %{_prefix}/src/ofa_kernel/%{_arch}/[0-9]* %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. +- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/mlnx-ofa_kernel/001-fix-module-init-for-ibt.patch b/SPECS/mlnx-ofa_kernel/001-fix-module-init-for-ibt.patch deleted file mode 100644 index 3914f862473..00000000000 --- a/SPECS/mlnx-ofa_kernel/001-fix-module-init-for-ibt.patch +++ /dev/null @@ -1,20 +0,0 @@ -# This patch is based on -# https://opendev.org/starlingx/kernel/raw/commit/b6f8503866c3cae009d5b2e8e8db17149c917d9c/kernel-modules/mlnx-ofa_kernel/debian/patches/0003-Fix-the-obsolete-module-init.patch -# modified to apply cleanly with no change in logic. 
- ---- a/include/linux/compat-2.6.h -+++ b/include/linux/compat-2.6.h -@@ -31,9 +31,11 @@ - #define module_init(initfn) \ - static int __init __init_backport(void) \ - { \ -- mlx_backport_dependency_symbol(); \ -+ mlx_backport_dependency_symbol(); \ - return initfn(); \ - } \ -- int init_module(void) __copy(initfn) __attribute__((alias("__init_backport"))); -+ int init_module(void) __copy(initfn) \ -+ __attribute__((alias("__init_backport"))); \ -+ ___ADDRESSABLE(init_module, __initdata); - - #endif /* LINUX_26_COMPAT_H */ diff --git a/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.signatures.json b/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.signatures.json index 1bc6c70886f..6b8f7af1459 100644 --- a/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.signatures.json +++ b/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-ofa_kernel-24.10.tgz": "571588614a9f10409078703252357f69760577909191a3089b95258185b49d2f" + "mlnx-ofa_kernel-25.07.tgz": "1eb4ab97fb48e4db8df43f75efbba43cebd5126ddadd7bceb0fb7de5218951f0" } } \ No newline at end of file diff --git a/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.spec b/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.spec index f8346325c15..77d5bd244dd 100644 --- a/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.spec +++ b/SPECS/mlnx-ofa_kernel/mlnx-ofa_kernel.spec @@ -90,9 +90,10 @@ %{!?KERNEL_SOURCES: %global KERNEL_SOURCES /lib/modules/%{KVERSION}/source} %{!?_name: %global _name mlnx-ofa_kernel} -%{!?_version: %global _version 24.10} -%{!?_release: %global _release OFED.24.10.0.7.0.1} +%{!?_version: %global _version 25.07} +%{!?_release: %global _release OFED.25.07.0.9.7.1} %global _kmp_rel %{_release}%{?_kmp_build_num}%{?_dist} +%global MLNX_OFA_DRV_SRC 24.10-0.7.0 %global utils_pname %{_name} %global devel_pname %{_name}-devel @@ -105,13 +106,15 @@ Summary: Infiniband HCA Driver Name: mlnx-ofa_kernel -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com/ Group: System Environment/Base -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-ofa_kernel-24.10.tgz#/%{_name}-%{_version}.tgz -Patch0: 001-fix-module-init-for-ibt.patch +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{_name}-%{_version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation @@ -158,7 +161,7 @@ BuildRequires: /usr/bin/perl %description InfiniBand "verbs", Access Layer and ULPs. Utilities rpm. -The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-24.10-0.7.0.tgz +The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-%{MLNX_OFA_DRV_SRC}.tgz # build KMP rpms? @@ -197,6 +200,9 @@ Obsoletes: mlnx-en-doc Obsoletes: mlnx-en-debuginfo Obsoletes: mlnx-en-sources Obsoletes: mlnx-rdma-rxe +Obsoletes: fwctl <= 24.10 +Provides: fwctl = %{version}-%{release} + Summary: Infiniband Driver and ULPs kernel modules Group: System Environment/Libraries @@ -205,7 +211,7 @@ Requires: kernel = %{target_kernel_version_full} %description -n %{non_kmp_pname} Core, HW and ULPs kernel modules Non-KMP format kernel modules rpm. 
-The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-24.10-0.7.0.tgz +The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-%{MLNX_OFA_DRV_SRC}.tgz %endif %endif #end if "%{KMP}" == "1" @@ -230,7 +236,7 @@ Summary: Infiniband Driver and ULPs kernel modules sources Group: System Environment/Libraries %description -n %{devel_pname} Core, HW and ULPs kernel modules sources -The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-24.10-0.7.0.tgz +The driver sources are located at: http://www.mellanox.com/downloads/ofed/mlnx-ofa_kernel-%{MLNX_OFA_DRV_SRC}.tgz %package source Summary: Source of the MLNX_OFED main kernel driver @@ -297,7 +303,6 @@ drivers against it. %prep %setup -n %{_name}-%{_version} -%patch 0 -p1 set -- * mkdir source mv "$@" source/ @@ -716,6 +721,8 @@ update-alternatives --remove \ /lib/udev/auxdev-sf-netdev-rename /usr/sbin/setup_mr_cache.sh %_datadir/mlnx_ofed/mlnx_bf_assign_ct_cores.sh +%_datadir/mlnx_ofed/mlnx_drv_ctl +%_datadir/mlnx_ofed/mod_load_funcs %config(noreplace) /etc/modprobe.d/mlnx.conf %config(noreplace) /etc/modprobe.d/mlnx-bf.conf %{_sbindir}/* @@ -759,6 +766,10 @@ update-alternatives --remove \ %{_prefix}/src/mlnx-ofa_kernel-%version %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. +- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Adjusted package dependencies on user space components. diff --git a/SPECS/mlnx-tools/mlnx-tools.signatures.json b/SPECS/mlnx-tools/mlnx-tools.signatures.json index 5efcf940176..c33706f4123 100644 --- a/SPECS/mlnx-tools/mlnx-tools.signatures.json +++ b/SPECS/mlnx-tools/mlnx-tools.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mlnx-tools-24.10.tar.gz": "7459a03eebc0c7f939ff9356dc0bcca4ad64120f957623f03f74523d62ee392d" + "mlnx-tools-25.07.tar.gz": "c0185bdf81fe1279f013fee782c8140586599dc305bc6aaa96d111888eb86db2" } } diff --git a/SPECS/mlnx-tools/mlnx-tools.spec b/SPECS/mlnx-tools/mlnx-tools.spec index adfc50da576..f86b25dc4bf 100644 --- a/SPECS/mlnx-tools/mlnx-tools.spec +++ b/SPECS/mlnx-tools/mlnx-tools.spec @@ -26,18 +26,21 @@ # # -%global MLNX_OFED_VERSION 24.10-0.7.0.0 +%global MLNX_OFED_VERSION 25.07.0.9.7.1 Summary: Mellanox userland tools and scripts Name: mlnx-tools -Version: 24.10 +Version: 25.07 Release: 1%{?dist} License: GPLv2 or BSD Vendor: Microsoft Corporation Distribution: Azure Linux Group: Applications/System URL: https://github.com/Mellanox/mlnx-tools -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/%{MLNX_OFED_VERSION}/SRPMS/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz Obsoletes: mlnx-ofa_kernel < 5.4 Obsoletes: mlnx_en-utils < 5.4 @@ -103,6 +106,7 @@ rm -rf %{buildroot} /sbin/sysctl_perf_tuning /sbin/mlnx_bf_configure /sbin/mlnx-sf +/sbin/doca-hugepages %{_sbindir}/* %{_bindir}/* %{_mandir}/man8/*.8* @@ -112,6 +116,10 @@ rm -rf %{buildroot} /lib/udev/mlnx_bf_udev %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. 
+- Update source path + * Thu Jan 09 2025 Alberto David Perez Guevara 24.10-1 - Upgrade package to version 24.10 diff --git a/SPECS/mysql/mysql.signatures.json b/SPECS/mysql/mysql.signatures.json index 3a961ff8ce0..dfbe0c936dd 100644 --- a/SPECS/mysql/mysql.signatures.json +++ b/SPECS/mysql/mysql.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "mysql-boost-8.0.44.tar.gz": "a8cc09a35af63668c5235cf282aef789428c6f30c1d9a581b337c816ce8ce8bb" + "mysql-boost-8.0.45.tar.gz": "f679707d05f0c2b61e9b14961302e7f540c23e9e5e2bffd8ad9193599e295cee" } } \ No newline at end of file diff --git a/SPECS/mysql/mysql.spec b/SPECS/mysql/mysql.spec index 1389ff98986..25fc0bd8371 100644 --- a/SPECS/mysql/mysql.spec +++ b/SPECS/mysql/mysql.spec @@ -2,8 +2,8 @@ Summary: MySQL. Name: mysql -Version: 8.0.44 -Release: 2%{?dist} +Version: 8.0.45 +Release: 1%{?dist} License: GPLv2 with exceptions AND LGPLv2 AND BSD Vendor: Microsoft Corporation Distribution: Azure Linux @@ -114,6 +114,10 @@ sudo -u test ctest || { cat Testing/Temporary/LastTest.log || echo 'No log found %{_libdir}/pkgconfig/mysqlclient.pc %changelog +* Wed Jan 21 2026 Kanishk Bansal - 8.0.45-1 +- Upgrade to 8.0.45 for CVE-2026-21948, CVE-2026-21968, + CVE-2026-21941, CVE-2026-21964, CVE-2026-21936, CVE-2026-21937 + * Tue Oct 28 2025 Azure Linux Security Servicing Account - 8.0.44-2 - Patch for CVE-2025-62813 diff --git a/SPECS/net-snmp/net-snmp.signatures.json b/SPECS/net-snmp/net-snmp.signatures.json index b66781eb87d..0b9b16050aa 100644 --- a/SPECS/net-snmp/net-snmp.signatures.json +++ b/SPECS/net-snmp/net-snmp.signatures.json @@ -2,6 +2,6 @@ "Signatures": { "snmpd.service": "5e17bf9f66f2b77e1a6c6dff7356cecb8ed488ce3df361738a72b4436096b694", "snmptrapd.service": "ef3e3dbe80c8ab455b30cd83db23db136263c1295ce2f23dcc4a1a1b60799229", - "net-snmp-5.9.4.tar.gz": "8b4de01391e74e3c7014beb43961a2d6d6fa03acc34280b9585f4930745b0544" + "net-snmp-5.9.5.2.tar.gz": "16707719f833184a4b72835dac359ae188123b06b5e42817c00790d7dc1384bf" } } diff --git a/SPECS/net-snmp/net-snmp.spec b/SPECS/net-snmp/net-snmp.spec index 5b22ac7bc3e..e82dd62a5ea 100644 --- a/SPECS/net-snmp/net-snmp.spec +++ b/SPECS/net-snmp/net-snmp.spec @@ -1,7 +1,7 @@ %global __requires_exclude perl\\(.*\\) Summary: Net-SNMP is a suite of applications used to implement SNMP v1, SNMP v2c and SNMP v3 using both IPv4 and IPv6. 
Name: net-snmp -Version: 5.9.4 +Version: 5.9.5.2 Release: 1%{?dist} License: MIT Vendor: Microsoft Corporation @@ -80,6 +80,7 @@ install -d %{buildroot}%{_localstatedir}/lib/net-snmp install -d %{buildroot}%{_localstatedir}/lib/net-snmp/mib_indexes install -d %{buildroot}%{_localstatedir}/lib/net-snmp/cert_indexes install -d %{buildroot}%{_localstatedir}/run/net-snmp +rm -rf %{buildroot}/usr/share/licenses/net-snmp-devel %check pushd testing @@ -102,6 +103,7 @@ popd /sbin/* %files devel +%exclude /usr/share/licenses/net-snmp/COPYING %defattr(-,root,root) %{_datadir}/* %{_includedir}/* @@ -109,24 +111,25 @@ popd %{perl_vendorarch}/* %{_libdir}/*.so %exclude %{_libdir}/perl5/perllocal.pod +%exclude /usr/share/licenses/net-snmp/COPYING %files libs %license COPYING %doc README FAQ NEWS TODO %{_libdir}/*.so.* -%{_datadir}/snmp -%{_datadir}/snmp/mibs %{_datadir}/snmp/mibs/* -%{_localstatedir}/lib/net-snmp %{_localstatedir}/lib/net-snmp/mib_indexes %{_localstatedir}/lib/net-snmp/cert_indexes %{_localstatedir}/run/net-snmp %changelog +* Mon Dec 29 2025 Archana Shettigar - 5.9.5.2-1 +- Auto-upgrade to 5.9.5.2 - for CVE-2025-68615 + * Wed Feb 14 2024 CBL-Mariner Servicing Account - 5.9.4-1 - Auto-upgrade to 5.9.4 - none -* Fri Apr 07 2022 Minghe Ren - 5.9.1-2 +* Thu Apr 07 2022 Minghe Ren - 5.9.1-2 - Add net-snmp-lib subpackage and UCD-SNMP * Fri Mar 04 2022 Minghe Ren - 5.9.1-1 diff --git a/SPECS/nginx/0001-remove-Werror-in-upstream-build-scripts.patch b/SPECS/nginx/0001-remove-Werror-in-upstream-build-scripts.patch new file mode 100644 index 00000000000..cd3873c1c92 --- /dev/null +++ b/SPECS/nginx/0001-remove-Werror-in-upstream-build-scripts.patch @@ -0,0 +1,30 @@ +From d6baea50cd55bc1ae9df1b8d1327b998ac738e0e Mon Sep 17 00:00:00 2001 +From: Felix Kaechele +Date: Sun, 7 Jun 2020 12:14:02 -0400 +Subject: [PATCH 1/5] remove Werror in upstream build scripts + +removes -Werror in upstream build scripts. -Werror conflicts with +-D_FORTIFY_SOURCE=2 causing warnings to turn into errors. + +Signed-off-by: Felix Kaechele +--- + auto/cc/gcc | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/auto/cc/gcc b/auto/cc/gcc +index a5c5c18fba3f..cdbbadb54023 100644 +--- a/auto/cc/gcc ++++ b/auto/cc/gcc +@@ -166,7 +166,9 @@ esac + + + # stop on warning +-CFLAGS="$CFLAGS -Werror" ++# This combined with Fedora's FORTIFY_SOURCE=2 option causes it nginx ++# to not compile. 
++#CFLAGS="$CFLAGS -Werror" + + # debug + CFLAGS="$CFLAGS -g" +-- +2.52.0 diff --git a/SPECS/nginx/0002-fix-PIDFile-handling.patch b/SPECS/nginx/0002-fix-PIDFile-handling.patch new file mode 100644 index 00000000000..6c51db190cb --- /dev/null +++ b/SPECS/nginx/0002-fix-PIDFile-handling.patch @@ -0,0 +1,107 @@ +From 03b18ac401de8df804b1cc455ded06359dae2374 Mon Sep 17 00:00:00 2001 +From: Felix Kaechele +Date: Tue, 20 Apr 2021 21:28:18 -0400 +Subject: [PATCH 2/5] fix PIDFile handling + +Corresponding RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1869026 + +Rejected upstream: https://trac.nginx.org/nginx/ticket/1897 + +Taken from: https://git.launchpad.net/ubuntu/+source/nginx/tree/debian/patches/nginx-fix-pidfile.patch + +From original patch: +Author: Tj +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/nginx/+bug/1581864 +Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=876365 +Last-Update: 2020-06-24 + +Signed-off-by: Felix Kaechele +--- + src/core/nginx.c | 24 +++++++++++++++++++++--- + src/os/unix/ngx_daemon.c | 8 ++++++-- + 2 files changed, 27 insertions(+), 5 deletions(-) + +diff --git a/src/core/nginx.c b/src/core/nginx.c +index 0deb27b7f98a..23edb59ff105 100644 +--- a/src/core/nginx.c ++++ b/src/core/nginx.c +@@ -340,14 +340,21 @@ main(int argc, char *const *argv) + ngx_process = NGX_PROCESS_MASTER; + } + ++ /* tell-tale to detect if this is parent or child process */ ++ ngx_int_t child_pid = NGX_BUSY; ++ + #if !(NGX_WIN32) + + if (ngx_init_signals(cycle->log) != NGX_OK) { + return 1; + } + ++ /* tell-tale that this code has been executed */ ++ child_pid--; ++ + if (!ngx_inherited && ccf->daemon) { +- if (ngx_daemon(cycle->log) != NGX_OK) { ++ child_pid = ngx_daemon(cycle->log); ++ if (child_pid == NGX_ERROR) { + return 1; + } + +@@ -360,8 +367,19 @@ main(int argc, char *const *argv) + + #endif + +- if (ngx_create_pidfile(&ccf->pid, cycle->log) != NGX_OK) { +- return 1; ++ /* If ngx_daemon() returned the child's PID in the parent process ++ * after the fork() set ngx_pid to the child_pid, which gets ++ * written to the PID file, then exit. ++ * For NGX_WIN32 always write the PID file ++ * For others, only write it from the parent process */ ++ if (child_pid < NGX_OK || child_pid > NGX_OK) { ++ ngx_pid = child_pid > NGX_OK ? 
child_pid : ngx_pid; ++ if (ngx_create_pidfile(&ccf->pid, cycle->log) != NGX_OK) { ++ return 1; ++ } ++ } ++ if (child_pid > NGX_OK) { ++ exit(0); + } + + if (ngx_log_redirect_stderr(cycle) != NGX_OK) { +diff --git a/src/os/unix/ngx_daemon.c b/src/os/unix/ngx_daemon.c +index 385c49b6c3d1..3719854c52b0 100644 +--- a/src/os/unix/ngx_daemon.c ++++ b/src/os/unix/ngx_daemon.c +@@ -7,14 +7,17 @@ + + #include + #include ++#include + + + ngx_int_t + ngx_daemon(ngx_log_t *log) + { + int fd; ++ /* retain the return value for passing back to caller */ ++ pid_t pid_child = fork(); + +- switch (fork()) { ++ switch (pid_child) { + case -1: + ngx_log_error(NGX_LOG_EMERG, log, ngx_errno, "fork() failed"); + return NGX_ERROR; +@@ -23,7 +26,8 @@ ngx_daemon(ngx_log_t *log) + break; + + default: +- exit(0); ++ /* let caller do the exit() */ ++ return pid_child; + } + + ngx_parent = ngx_pid; +-- +2.52.0 diff --git a/SPECS/nginx/0003-Add-SSL-passphrase-dialog.patch b/SPECS/nginx/0003-Add-SSL-passphrase-dialog.patch new file mode 100644 index 00000000000..9966c521054 --- /dev/null +++ b/SPECS/nginx/0003-Add-SSL-passphrase-dialog.patch @@ -0,0 +1,805 @@ +From a9ba90c8c01ab88884c93b655ca22c2ade06e6d2 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= +Date: Tue, 8 Jul 2025 15:19:41 +0200 +Subject: [PATCH 3/5] Add SSL passphrase dialog + +--- + contrib/vim/syntax/nginx.vim | 1 + + src/event/ngx_event_openssl.c | 34 ++++++-- + src/event/ngx_event_openssl.h | 16 +++- + src/event/ngx_event_openssl_cache.c | 102 ++++++++++++++++++++++- + src/http/modules/ngx_http_grpc_module.c | 2 +- + src/http/modules/ngx_http_proxy_module.c | 2 +- + src/http/modules/ngx_http_ssl_module.c | 70 +++++++++++++++- + src/http/modules/ngx_http_ssl_module.h | 2 + + src/http/modules/ngx_http_uwsgi_module.c | 2 +- + src/mail/ngx_mail_ssl_module.c | 66 ++++++++++++++- + src/mail/ngx_mail_ssl_module.h | 2 + + src/stream/ngx_stream_proxy_module.c | 2 +- + src/stream/ngx_stream_ssl_module.c | 61 +++++++++++++- + src/stream/ngx_stream_ssl_module.h | 2 + + 14 files changed, 344 insertions(+), 20 deletions(-) + +diff --git a/contrib/vim/syntax/nginx.vim b/contrib/vim/syntax/nginx.vim +index 29eef7a23cd7..e7227eb7f873 100644 +--- a/contrib/vim/syntax/nginx.vim ++++ b/contrib/vim/syntax/nginx.vim +@@ -593,6 +593,7 @@ syn keyword ngxDirective contained ssl_ocsp + syn keyword ngxDirective contained ssl_ocsp_cache + syn keyword ngxDirective contained ssl_ocsp_responder + syn keyword ngxDirective contained ssl_password_file ++syn keyword ngxDirective contained ssl_pass_phrase_dialog + syn keyword ngxDirective contained ssl_prefer_server_ciphers + syn keyword ngxDirective contained ssl_preread + syn keyword ngxDirective contained ssl_protocols +diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c +index defffa583eb5..fb6a5e950504 100644 +--- a/src/event/ngx_event_openssl.c ++++ b/src/event/ngx_event_openssl.c +@@ -419,7 +419,7 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_t protocols, void *data) + + ngx_int_t + ngx_ssl_certificates(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_array_t *certs, +- ngx_array_t *keys, ngx_array_t *passwords) ++ ngx_array_t *keys, ngx_array_t *passwords, ngx_ssl_ppdialog_conf_t *dlg) + { + ngx_str_t *cert, *key; + ngx_uint_t i; +@@ -429,7 +429,7 @@ ngx_ssl_certificates(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_array_t *certs, + + for (i = 0; i < certs->nelts; i++) { + +- if (ngx_ssl_certificate(cf, ssl, &cert[i], &key[i], passwords) ++ if (ngx_ssl_certificate(cf, ssl, &cert[i], &key[i], passwords, dlg) + != 
NGX_OK) + { + return NGX_ERROR; +@@ -442,7 +442,7 @@ ngx_ssl_certificates(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_array_t *certs, + + ngx_int_t + ngx_ssl_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, +- ngx_str_t *key, ngx_array_t *passwords) ++ ngx_str_t *key, ngx_array_t *passwords, ngx_ssl_ppdialog_conf_t *dlg) + { + char *err; + X509 *x509, **elm; +@@ -450,6 +450,7 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, + EVP_PKEY *pkey; + ngx_uint_t mask; + STACK_OF(X509) *chain; ++ EVP_PKEY *pubkey; + + mask = 0; + elm = NULL; +@@ -457,7 +458,7 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, + retry: + + chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CERT | mask, +- &err, cert, NULL); ++ &err, cert, NULL, dlg); + if (chain == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, +@@ -562,9 +563,24 @@ retry: + + #endif + ++ pubkey = X509_get_pubkey(x509); ++ if (!pubkey) { ++ ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, ++ "X509_get_pubkey() failed"); ++ return NGX_ERROR; ++ } ++ ++ if (dlg) { ++ dlg->cryptosystem = EVP_PKEY_get_base_id(pubkey); ++ } ++ ++ EVP_PKEY_free(pubkey); ++ + pkey = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_PKEY | mask, +- &err, key, passwords); +- if (pkey == NULL) { ++ &err, key, passwords, dlg); ++ if (ngx_test_config) { ++ return NGX_OK; ++ } else if (pkey == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "cannot load certificate key \"%s\": %s", +@@ -751,7 +767,7 @@ ngx_ssl_client_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, + return NGX_ERROR; + } + +- chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CA, &err, cert, NULL); ++ chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CA, &err, cert, NULL, NULL); + if (chain == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, +@@ -849,7 +865,7 @@ ngx_ssl_trusted_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, + return NGX_ERROR; + } + +- chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CA, &err, cert, NULL); ++ chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CA, &err, cert, NULL, NULL); + if (chain == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, +@@ -905,7 +921,7 @@ ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl) + return NGX_ERROR; + } + +- chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CRL, &err, crl, NULL); ++ chain = ngx_ssl_cache_fetch(cf, NGX_SSL_CACHE_CRL, &err, crl, NULL, NULL); + if (chain == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, +diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h +index fe5107fb6915..43bddc7be736 100644 +--- a/src/event/ngx_event_openssl.h ++++ b/src/event/ngx_event_openssl.h +@@ -100,9 +100,19 @@ + #define SSL_group_to_name(s, nid) NULL + #endif + ++#define NGX_SSL_PASS_PHRASE_ARG_MAX_LEN 255 ++#define NGX_SSL_PASS_PHRASE_DEFAULT_VAL "builtin" ++#define NGX_SSL_SERVER_NULL "undefined" + + typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; + ++typedef struct ngx_ssl_ppdialog_conf_s ngx_ssl_ppdialog_conf_t; ++ ++struct ngx_ssl_ppdialog_conf_s { ++ ngx_str_t *data; ++ ngx_str_t *server; ++ ngx_int_t cryptosystem; ++}; + + struct ngx_ssl_s { + SSL_CTX *ctx; +@@ -230,9 +240,9 @@ ngx_int_t ngx_ssl_init(ngx_log_t *log); + ngx_int_t ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_t protocols, void *data); + + ngx_int_t ngx_ssl_certificates(ngx_conf_t *cf, ngx_ssl_t *ssl, +- ngx_array_t *certs, ngx_array_t *keys, ngx_array_t *passwords); ++ ngx_array_t *certs, ngx_array_t *keys, 
ngx_array_t *passwords, ngx_ssl_ppdialog_conf_t *dlg); + ngx_int_t ngx_ssl_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, +- ngx_str_t *cert, ngx_str_t *key, ngx_array_t *passwords); ++ ngx_str_t *cert, ngx_str_t *key, ngx_array_t *passwords, ngx_ssl_ppdialog_conf_t *dlg); + ngx_int_t ngx_ssl_connection_certificate(ngx_connection_t *c, ngx_pool_t *pool, + ngx_str_t *cert, ngx_str_t *key, ngx_ssl_cache_t *cache, + ngx_array_t *passwords); +@@ -261,7 +271,7 @@ ngx_int_t ngx_ssl_ocsp_cache_init(ngx_shm_zone_t *shm_zone, void *data); + ngx_ssl_cache_t *ngx_ssl_cache_init(ngx_pool_t *pool, ngx_uint_t max, + time_t valid, time_t inactive); + void *ngx_ssl_cache_fetch(ngx_conf_t *cf, ngx_uint_t index, char **err, +- ngx_str_t *path, void *data); ++ ngx_str_t *path, void *data, ngx_ssl_ppdialog_conf_t *dlg); + void *ngx_ssl_cache_connection_fetch(ngx_ssl_cache_t *cache, ngx_pool_t *pool, + ngx_uint_t index, char **err, ngx_str_t *path, void *data); + +diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c +index d5488f89a69b..83ddbbaeb97e 100644 +--- a/src/event/ngx_event_openssl_cache.c ++++ b/src/event/ngx_event_openssl_cache.c +@@ -15,6 +15,7 @@ + + #define NGX_SSL_CACHE_DISABLED (ngx_array_t *) (uintptr_t) -1 + ++#define NGX_PASS_PHRASE_ARG_MAX_LEN 255 + + #define ngx_ssl_cache_get_conf(cycle) \ + (ngx_ssl_cache_t *) ngx_get_conf(cycle->conf_ctx, ngx_openssl_cache_module) +@@ -116,6 +117,13 @@ static void ngx_ssl_cache_node_insert(ngx_rbtree_node_t *temp, + static void ngx_ssl_cache_node_free(ngx_rbtree_t *rbtree, + ngx_ssl_cache_node_t *cn); + ++static int ngx_ssl_cache_read_pstream(const char *cmd, char *buf, ++ ngx_int_t bufsize); ++ ++static int ngx_ssl_cache_pass_phrase_callback(char *buf, int bufsize, ++ int rwflag, void *u); ++ ++static ngx_ssl_ppdialog_conf_t *pass_dlg = NULL; + + static ngx_command_t ngx_openssl_cache_commands[] = { + +@@ -179,7 +187,7 @@ static ngx_ssl_cache_type_t ngx_ssl_cache_types[] = { + + void * + ngx_ssl_cache_fetch(ngx_conf_t *cf, ngx_uint_t index, char **err, +- ngx_str_t *path, void *data) ++ ngx_str_t *path, void *data, ngx_ssl_ppdialog_conf_t *dlg) + { + void *value; + time_t mtime; +@@ -263,7 +271,9 @@ ngx_ssl_cache_fetch(ngx_conf_t *cf, ngx_uint_t index, char **err, + } + + if (value == NULL) { ++ pass_dlg = dlg; + value = type->create(&id, err, &data); ++ pass_dlg = NULL; + + if (value == NULL || data == NGX_SSL_CACHE_DISABLED) { + return value; +@@ -674,6 +684,7 @@ static void * + ngx_ssl_cache_pkey_create(ngx_ssl_cache_key_t *id, char **err, void *data) + { + ngx_array_t **passwords = data; ++ ngx_ssl_ppdialog_conf_t *dlg = pass_dlg; + + BIO *bio; + EVP_PKEY *pkey; +@@ -745,6 +756,22 @@ ngx_ssl_cache_pkey_create(ngx_ssl_cache_key_t *id, char **err, void *data) + tries = 1; + pwd = NULL; + cb = NULL; ++ ++ /** directive format: ssl_pass_phrase_dialog builtin|exec:filepath */ ++ if (dlg && ngx_strncasecmp(dlg->data->data, (u_char *)"exec:", 5) == 0){ ++ pwd = (void *)dlg; ++ cb = ngx_ssl_cache_pass_phrase_callback; ++ cb_data.encrypted = 1; ++ } else { ++ pwd = NULL; ++ cb = NULL; ++ } ++ } ++ ++ /* skip decrypting private keys in config test phase to avoid ++ asking for pass phase twice */ ++ if (ngx_test_config){ ++ return NULL; + } + + for ( ;; ) { +@@ -1170,3 +1197,76 @@ ngx_ssl_cache_node_insert(ngx_rbtree_node_t *temp, + node->right = sentinel; + ngx_rbt_red(node); + } ++ ++static int ++ngx_ssl_cache_read_pstream(const char *cmd, char *buf, ngx_int_t bufsize) ++{ ++ FILE *fp; ++ ngx_int_t i; ++ char c; ++ ++ fp = 
popen(cmd, "r"); ++ if (fp == NULL) { ++ return -1; ++ } ++ ++ for (i = 0; (c = fgetc(fp)) != EOF && ++ (i < bufsize - 1); i++) { ++ ++ if (c == '\n' || c == '\r'){ ++ break; ++ } ++ ++ buf[i] = c; ++ } ++ buf[i] = '\0'; ++ ++ pclose(fp); ++ ++ return 0; ++} ++ ++static int ++ngx_ssl_cache_pass_phrase_callback(char *buf, int bufsize, int rwflag, void *u) ++{ ++ u_char cmd[NGX_PASS_PHRASE_ARG_MAX_LEN + 1] = {0}; ++ u_char *cmd_end; ++ ngx_ssl_ppdialog_conf_t *dlg = (ngx_ssl_ppdialog_conf_t *)u; ++ ngx_str_t *pass_phrase_dialog = dlg->data; ++ char cryptosystem[4] = {0}; ++ int ret; ++ ++ /* remove exec: str from pass_phrase_dialog */ ++ pass_phrase_dialog->data = pass_phrase_dialog->data + 5; ++ pass_phrase_dialog->len = pass_phrase_dialog->len - 5; ++ ++ switch (dlg->cryptosystem){ ++ case EVP_PKEY_RSA: ++ strncpy(cryptosystem, "RSA", 4); ++ break; ++ case EVP_PKEY_DSA: ++ strncpy(cryptosystem, "DSA", 4); ++ break; ++ case EVP_PKEY_EC: ++ strncpy(cryptosystem, "EC", 3); ++ break; ++ case EVP_PKEY_DH: ++ strncpy(cryptosystem, "DH", 3); ++ break; ++ default: ++ strncpy(cryptosystem, "UNK", 4); ++ break; ++ } ++ ++ cmd_end = ngx_snprintf(cmd, NGX_PASS_PHRASE_ARG_MAX_LEN, "%V %V %s", ++ pass_phrase_dialog, dlg->server, cryptosystem); ++ *cmd_end = '\0'; ++ ++ ngx_log_stderr(0, "Executing external script: %s\n", cmd); ++ ++ if ((ret = ngx_ssl_cache_read_pstream((char *)cmd, buf, bufsize)) != 0){ ++ return -1; ++ } ++ ++ return strlen(buf); ++} +diff --git a/src/http/modules/ngx_http_grpc_module.c b/src/http/modules/ngx_http_grpc_module.c +index 80046d6a4165..d4aa313f083d 100644 +--- a/src/http/modules/ngx_http_grpc_module.c ++++ b/src/http/modules/ngx_http_grpc_module.c +@@ -5088,7 +5088,7 @@ ngx_http_grpc_set_ssl(ngx_conf_t *cf, ngx_http_grpc_loc_conf_t *glcf) + if (ngx_ssl_certificate(cf, glcf->upstream.ssl, + &glcf->upstream.ssl_certificate->value, + &glcf->upstream.ssl_certificate_key->value, +- glcf->upstream.ssl_passwords) ++ glcf->upstream.ssl_passwords, NULL) + != NGX_OK) + { + return NGX_ERROR; +diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c +index 0778ec728d50..686168986001 100644 +--- a/src/http/modules/ngx_http_proxy_module.c ++++ b/src/http/modules/ngx_http_proxy_module.c +@@ -5349,7 +5349,7 @@ ngx_http_proxy_set_ssl(ngx_conf_t *cf, ngx_http_proxy_loc_conf_t *plcf) + if (ngx_ssl_certificate(cf, plcf->upstream.ssl, + &plcf->upstream.ssl_certificate->value, + &plcf->upstream.ssl_certificate_key->value, +- plcf->upstream.ssl_passwords) ++ plcf->upstream.ssl_passwords, NULL) + != NGX_OK) + { + return NGX_ERROR; +diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c +index 320d1ee044f2..47b84103a311 100644 +--- a/src/http/modules/ngx_http_ssl_module.c ++++ b/src/http/modules/ngx_http_ssl_module.c +@@ -21,6 +21,8 @@ typedef ngx_int_t (*ngx_ssl_variable_handler_pt)(ngx_connection_t *c, + #define NGX_DEFAULT_CIPHERS "HIGH:!aNULL:!MD5" + #define NGX_DEFAULT_ECDH_CURVE "auto" + ++static ngx_str_t ngx_ssl_server_null = ngx_string(NGX_SSL_SERVER_NULL); ++ + #define NGX_HTTP_ALPN_PROTOS "\x08http/1.1\x08http/1.0\x08http/0.9" + + +@@ -61,6 +63,9 @@ static ngx_int_t ngx_http_ssl_quic_compat_init(ngx_conf_t *cf, + ngx_http_conf_addr_t *addr); + #endif + ++static char *ngx_conf_set_pass_phrase_dialog(ngx_conf_t *cf, ngx_command_t *cmd, ++ void *conf); ++ + + static ngx_conf_bitmask_t ngx_http_ssl_protocols[] = { + { ngx_string("SSLv2"), NGX_SSL_SSLv2 }, +@@ -299,6 +304,13 @@ static ngx_command_t 
ngx_http_ssl_commands[] = { + offsetof(ngx_http_ssl_srv_conf_t, reject_handshake), + NULL }, + ++ { ngx_string("ssl_pass_phrase_dialog"), ++ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, ++ ngx_conf_set_pass_phrase_dialog, ++ NGX_HTTP_SRV_CONF_OFFSET, ++ offsetof(ngx_http_ssl_srv_conf_t, pass_phrase_dialog), ++ NULL }, ++ + ngx_null_command + }; + +@@ -618,6 +630,7 @@ ngx_http_ssl_create_srv_conf(ngx_conf_t *cf) + * sscf->ocsp_responder = { 0, NULL }; + * sscf->stapling_file = { 0, NULL }; + * sscf->stapling_responder = { 0, NULL }; ++ * sscf->pass_phrase_dialog = NULL; + */ + + sscf->prefer_server_ciphers = NGX_CONF_UNSET; +@@ -649,6 +662,8 @@ ngx_http_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + { + ngx_http_ssl_srv_conf_t *prev = parent; + ngx_http_ssl_srv_conf_t *conf = child; ++ ngx_http_core_srv_conf_t *cscf; ++ ngx_ssl_ppdialog_conf_t dlg; + + ngx_pool_cleanup_t *cln; + +@@ -705,6 +720,9 @@ ngx_http_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + ngx_conf_merge_str_value(conf->stapling_responder, + prev->stapling_responder, ""); + ++ ngx_conf_merge_str_value(conf->pass_phrase_dialog, ++ prev->pass_phrase_dialog, NGX_SSL_PASS_PHRASE_DEFAULT_VAL); ++ + conf->ssl.log = cf->log; + + if (conf->certificates) { +@@ -737,6 +755,30 @@ ngx_http_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + cln->handler = ngx_ssl_cleanup_ctx; + cln->data = &conf->ssl; + ++ /** directive format: ssl_pass_phrase_dialog builtin|exec:filepath */ ++ if (ngx_strncasecmp(conf->pass_phrase_dialog.data, (u_char *)"exec:", 5) == 0){ ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog config directive SET: %s ", conf->pass_phrase_dialog.data); ++ } else if (ngx_strncasecmp(conf->pass_phrase_dialog.data, (u_char *)NGX_SSL_PASS_PHRASE_DEFAULT_VAL, ++ sizeof(NGX_SSL_PASS_PHRASE_DEFAULT_VAL)) != 0){ ++ ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog config directive accepts only the following " ++ "values: %s | exec:filepath", NGX_SSL_PASS_PHRASE_DEFAULT_VAL); ++ ++ return NGX_CONF_ERROR; ++ } ++ ++ cscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_core_module); ++ ++ dlg.data = &conf->pass_phrase_dialog; ++ if (cscf->server_name.len != 0) { ++ dlg.server = &cscf->server_name; ++ } else { ++ dlg.server = &ngx_ssl_server_null; ++ } ++ ++ + #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME + + if (SSL_CTX_set_tlsext_servername_callback(conf->ssl.ctx, +@@ -787,7 +829,7 @@ ngx_http_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + /* configure certificates */ + + if (ngx_ssl_certificates(cf, &conf->ssl, conf->certificates, +- conf->certificate_keys, conf->passwords) ++ conf->certificate_keys, conf->passwords, &dlg) + != NGX_OK) + { + return NGX_CONF_ERROR; +@@ -1437,6 +1479,32 @@ ngx_http_ssl_init(ngx_conf_t *cf) + return NGX_OK; + } + ++static char * ++ngx_conf_set_pass_phrase_dialog(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) ++{ ++ ngx_http_ssl_srv_conf_t *sscf = conf; ++ ngx_str_t *value; ++ ++ if (sscf->pass_phrase_dialog.data){ ++ return "is duplicate"; ++ } ++ ++ value = cf->args->elts; ++ ++ sscf->pass_phrase_dialog = value[1]; ++ ++ if (sscf->pass_phrase_dialog.len == 0) { ++ return NGX_CONF_OK; ++ } else if (sscf->pass_phrase_dialog.len > NGX_SSL_PASS_PHRASE_ARG_MAX_LEN) { ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog argument length exceeded maximum possible length: %d", ++ NGX_SSL_PASS_PHRASE_ARG_MAX_LEN); ++ ++ return NGX_CONF_ERROR; ++ } ++ ++ return NGX_CONF_OK; ++} + + #if 
(NGX_QUIC_OPENSSL_COMPAT) + +diff --git a/src/http/modules/ngx_http_ssl_module.h b/src/http/modules/ngx_http_ssl_module.h +index 8650fab9376c..798cdda4c5ec 100644 +--- a/src/http/modules/ngx_http_ssl_module.h ++++ b/src/http/modules/ngx_http_ssl_module.h +@@ -64,6 +64,8 @@ typedef struct { + ngx_flag_t stapling_verify; + ngx_str_t stapling_file; + ngx_str_t stapling_responder; ++ ++ ngx_str_t pass_phrase_dialog; + } ngx_http_ssl_srv_conf_t; + + +diff --git a/src/http/modules/ngx_http_uwsgi_module.c b/src/http/modules/ngx_http_uwsgi_module.c +index c1d0035cc8b5..406018e65728 100644 +--- a/src/http/modules/ngx_http_uwsgi_module.c ++++ b/src/http/modules/ngx_http_uwsgi_module.c +@@ -2696,7 +2696,7 @@ ngx_http_uwsgi_set_ssl(ngx_conf_t *cf, ngx_http_uwsgi_loc_conf_t *uwcf) + if (ngx_ssl_certificate(cf, uwcf->upstream.ssl, + &uwcf->upstream.ssl_certificate->value, + &uwcf->upstream.ssl_certificate_key->value, +- uwcf->upstream.ssl_passwords) ++ uwcf->upstream.ssl_passwords, NULL) + != NGX_OK) + { + return NGX_ERROR; +diff --git a/src/mail/ngx_mail_ssl_module.c b/src/mail/ngx_mail_ssl_module.c +index 176e9c624724..a44bedcf77b6 100644 +--- a/src/mail/ngx_mail_ssl_module.c ++++ b/src/mail/ngx_mail_ssl_module.c +@@ -13,6 +13,7 @@ + #define NGX_DEFAULT_CIPHERS "HIGH:!aNULL:!MD5" + #define NGX_DEFAULT_ECDH_CURVE "auto" + ++static ngx_str_t ngx_ssl_server_null = ngx_string(NGX_SSL_SERVER_NULL); + + #ifdef TLSEXT_TYPE_application_layer_protocol_negotiation + static int ngx_mail_ssl_alpn_select(ngx_ssl_conn_t *ssl_conn, +@@ -33,6 +34,8 @@ static char *ngx_mail_ssl_session_cache(ngx_conf_t *cf, ngx_command_t *cmd, + static char *ngx_mail_ssl_conf_command_check(ngx_conf_t *cf, void *post, + void *data); + ++static char *ngx_conf_set_pass_phrase_dialog(ngx_conf_t *cf, ngx_command_t *cmd, ++ void *conf); + + static ngx_conf_enum_t ngx_mail_starttls_state[] = { + { ngx_string("off"), NGX_MAIL_STARTTLS_OFF }, +@@ -202,6 +205,13 @@ static ngx_command_t ngx_mail_ssl_commands[] = { + offsetof(ngx_mail_ssl_conf_t, conf_commands), + &ngx_mail_ssl_conf_command_post }, + ++ { ngx_string("ssl_pass_phrase_dialog"), ++ NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1, ++ ngx_conf_set_pass_phrase_dialog, ++ NGX_MAIL_SRV_CONF_OFFSET, ++ offsetof(ngx_mail_ssl_conf_t, pass_phrase_dialog), ++ NULL }, ++ + ngx_null_command + }; + +@@ -330,6 +340,8 @@ ngx_mail_ssl_merge_conf(ngx_conf_t *cf, void *parent, void *child) + { + ngx_mail_ssl_conf_t *prev = parent; + ngx_mail_ssl_conf_t *conf = child; ++ ngx_mail_core_srv_conf_t *cscf; ++ ngx_ssl_ppdialog_conf_t dlg; + + char *mode; + ngx_pool_cleanup_t *cln; +@@ -370,6 +382,8 @@ ngx_mail_ssl_merge_conf(ngx_conf_t *cf, void *parent, void *child) + + ngx_conf_merge_ptr_value(conf->conf_commands, prev->conf_commands, NULL); + ++ ngx_conf_merge_str_value(conf->pass_phrase_dialog, ++ prev->pass_phrase_dialog, NGX_SSL_PASS_PHRASE_DEFAULT_VAL); + + conf->ssl.log = cf->log; + +@@ -428,6 +442,29 @@ ngx_mail_ssl_merge_conf(ngx_conf_t *cf, void *parent, void *child) + cln->handler = ngx_ssl_cleanup_ctx; + cln->data = &conf->ssl; + ++ /** directive format: ssl_pass_phrase_dialog builtin|exec:filepath */ ++ if (ngx_strncasecmp(conf->pass_phrase_dialog.data, (u_char *)"exec:", 5) == 0){ ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog config directive SET: %s ", conf->pass_phrase_dialog.data); ++ } else if (ngx_strncasecmp(conf->pass_phrase_dialog.data, (u_char *)NGX_SSL_PASS_PHRASE_DEFAULT_VAL, ++ sizeof(NGX_SSL_PASS_PHRASE_DEFAULT_VAL)) != 0){ ++ ++ 
ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog config directive accepts only the following " ++ "values: %s | exec:filepath", NGX_SSL_PASS_PHRASE_DEFAULT_VAL); ++ ++ return NGX_CONF_ERROR; ++ } ++ ++ cscf = ngx_mail_conf_get_module_srv_conf(cf, ngx_mail_core_module); ++ ++ dlg.data = &conf->pass_phrase_dialog; ++ if (cscf->server_name.len != 0) { ++ dlg.server = &cscf->server_name; ++ } else { ++ dlg.server = &ngx_ssl_server_null; ++ } ++ + #ifdef TLSEXT_TYPE_application_layer_protocol_negotiation + SSL_CTX_set_alpn_select_cb(conf->ssl.ctx, ngx_mail_ssl_alpn_select, NULL); + #endif +@@ -440,7 +477,7 @@ ngx_mail_ssl_merge_conf(ngx_conf_t *cf, void *parent, void *child) + } + + if (ngx_ssl_certificates(cf, &conf->ssl, conf->certificates, +- conf->certificate_keys, conf->passwords) ++ conf->certificate_keys, conf->passwords, &dlg) + != NGX_OK) + { + return NGX_CONF_ERROR; +@@ -694,3 +731,30 @@ ngx_mail_ssl_conf_command_check(ngx_conf_t *cf, void *post, void *data) + return NGX_CONF_OK; + #endif + } ++ ++static char * ++ngx_conf_set_pass_phrase_dialog(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) ++{ ++ ngx_mail_ssl_conf_t *sscf = conf; ++ ngx_str_t *value; ++ ++ if (sscf->pass_phrase_dialog.data){ ++ return "is duplicate"; ++ } ++ ++ value = cf->args->elts; ++ ++ sscf->pass_phrase_dialog = value[1]; ++ ++ if (sscf->pass_phrase_dialog.len == 0) { ++ return NGX_CONF_OK; ++ } else if (sscf->pass_phrase_dialog.len > NGX_SSL_PASS_PHRASE_ARG_MAX_LEN) { ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog argument length exceeded maximum possible length: %d", ++ NGX_SSL_PASS_PHRASE_ARG_MAX_LEN); ++ ++ return NGX_CONF_ERROR; ++ } ++ ++ return NGX_CONF_OK; ++} +diff --git a/src/mail/ngx_mail_ssl_module.h b/src/mail/ngx_mail_ssl_module.h +index c0eb6a38f834..02b4d4f8f72b 100644 +--- a/src/mail/ngx_mail_ssl_module.h ++++ b/src/mail/ngx_mail_ssl_module.h +@@ -56,6 +56,8 @@ typedef struct { + + u_char *file; + ngx_uint_t line; ++ ++ ngx_str_t pass_phrase_dialog; + } ngx_mail_ssl_conf_t; + + +diff --git a/src/stream/ngx_stream_proxy_module.c b/src/stream/ngx_stream_proxy_module.c +index 6e51585f6e1f..3110b272952a 100644 +--- a/src/stream/ngx_stream_proxy_module.c ++++ b/src/stream/ngx_stream_proxy_module.c +@@ -2479,7 +2479,7 @@ ngx_stream_proxy_set_ssl(ngx_conf_t *cf, ngx_stream_proxy_srv_conf_t *pscf) + if (ngx_ssl_certificate(cf, pscf->ssl, + &pscf->ssl_certificate->value, + &pscf->ssl_certificate_key->value, +- pscf->ssl_passwords) ++ pscf->ssl_passwords, NULL) + != NGX_OK) + { + return NGX_ERROR; +diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c +index ea0b112b883c..63b49f1d56b9 100644 +--- a/src/stream/ngx_stream_ssl_module.c ++++ b/src/stream/ngx_stream_ssl_module.c +@@ -17,6 +17,8 @@ typedef ngx_int_t (*ngx_ssl_variable_handler_pt)(ngx_connection_t *c, + #define NGX_DEFAULT_CIPHERS "HIGH:!aNULL:!MD5" + #define NGX_DEFAULT_ECDH_CURVE "auto" + ++#define NGX_SSL_STREAM_NAME "NGX_STREAM_SSL_MODULE" ++static ngx_str_t ngx_ssl_stream_default_name = ngx_string(NGX_SSL_STREAM_NAME); + + static ngx_int_t ngx_stream_ssl_handler(ngx_stream_session_t *s); + static ngx_int_t ngx_stream_ssl_init_connection(ngx_ssl_t *ssl, +@@ -61,6 +63,9 @@ static char *ngx_stream_ssl_alpn(ngx_conf_t *cf, ngx_command_t *cmd, + static char *ngx_stream_ssl_conf_command_check(ngx_conf_t *cf, void *post, + void *data); + ++static char *ngx_conf_set_pass_phrase_dialog(ngx_conf_t *cf, ngx_command_t *cmd, ++ void *conf); ++ + static ngx_int_t 
ngx_stream_ssl_init(ngx_conf_t *cf); + + +@@ -301,6 +306,13 @@ static ngx_command_t ngx_stream_ssl_commands[] = { + 0, + NULL }, + ++ { ngx_string("ssl_pass_phrase_dialog"), ++ NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ++ ngx_conf_set_pass_phrase_dialog, ++ NGX_STREAM_SRV_CONF_OFFSET, ++ offsetof(ngx_stream_ssl_srv_conf_t, pass_phrase_dialog), ++ NULL }, ++ + ngx_null_command + }; + +@@ -902,6 +914,7 @@ ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + { + ngx_stream_ssl_srv_conf_t *prev = parent; + ngx_stream_ssl_srv_conf_t *conf = child; ++ ngx_ssl_ppdialog_conf_t dlg; + + ngx_pool_cleanup_t *cln; + +@@ -947,6 +960,9 @@ ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + + ngx_conf_merge_ptr_value(conf->conf_commands, prev->conf_commands, NULL); + ++ ngx_conf_merge_str_value(conf->pass_phrase_dialog, prev->pass_phrase_dialog, ++ NGX_SSL_PASS_PHRASE_DEFAULT_VAL); ++ + ngx_conf_merge_uint_value(conf->ocsp, prev->ocsp, 0); + ngx_conf_merge_str_value(conf->ocsp_responder, prev->ocsp_responder, ""); + ngx_conf_merge_ptr_value(conf->ocsp_cache_zone, +@@ -990,6 +1006,22 @@ ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + cln->handler = ngx_ssl_cleanup_ctx; + cln->data = &conf->ssl; + ++ /** directive format: ssl_pass_phrase_dialog builtin|exec:filepath */ ++ if (ngx_strncasecmp(conf->pass_phrase_dialog.data, (u_char *)"exec:", 5) == 0){ ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog config directive SET: %s ", conf->pass_phrase_dialog.data); ++ } else if (ngx_strncasecmp(conf->pass_phrase_dialog.data, (u_char *)NGX_SSL_PASS_PHRASE_DEFAULT_VAL, ++ sizeof(NGX_SSL_PASS_PHRASE_DEFAULT_VAL)) != 0){ ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog config directive accepts only the following " ++ "values: %s | exec:filepath", NGX_SSL_PASS_PHRASE_DEFAULT_VAL); ++ ++ return NGX_CONF_ERROR; ++ } ++ ++ dlg.data = &conf->pass_phrase_dialog; ++ dlg.server = &ngx_ssl_stream_default_name; ++ + #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME + SSL_CTX_set_tlsext_servername_callback(conf->ssl.ctx, + ngx_stream_ssl_servername); +@@ -1034,7 +1066,7 @@ ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) + /* configure certificates */ + + if (ngx_ssl_certificates(cf, &conf->ssl, conf->certificates, +- conf->certificate_keys, conf->passwords) ++ conf->certificate_keys, conf->passwords, &dlg) + != NGX_OK) + { + return NGX_CONF_ERROR; +@@ -1716,3 +1748,30 @@ ngx_stream_ssl_init(ngx_conf_t *cf) + + return NGX_OK; + } ++ ++static char * ++ngx_conf_set_pass_phrase_dialog(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) ++{ ++ ngx_stream_ssl_srv_conf_t *sscf = conf; ++ ngx_str_t *value; ++ ++ if (sscf->pass_phrase_dialog.data){ ++ return "is duplicate"; ++ } ++ ++ value = cf->args->elts; ++ ++ sscf->pass_phrase_dialog = value[1]; ++ ++ if (sscf->pass_phrase_dialog.len == 0) { ++ return NGX_CONF_OK; ++ } else if (sscf->pass_phrase_dialog.len > NGX_SSL_PASS_PHRASE_ARG_MAX_LEN) { ++ ngx_log_error(NGX_LOG_EMERG, cf->log, 0, ++ "ssl_pass_phrase_dialog argument length exceeded maximum possible length: %d", ++ NGX_SSL_PASS_PHRASE_ARG_MAX_LEN); ++ ++ return NGX_CONF_ERROR; ++ } ++ ++ return NGX_CONF_OK; ++} +diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h +index ffa03a6f3439..3dbbca7dcce8 100644 +--- a/src/stream/ngx_stream_ssl_module.h ++++ b/src/stream/ngx_stream_ssl_module.h +@@ -56,6 +56,8 @@ typedef struct { + ngx_flag_t session_tickets; + 
ngx_array_t *session_ticket_keys; + ++ ngx_str_t pass_phrase_dialog; ++ + ngx_uint_t ocsp; + ngx_str_t ocsp_responder; + ngx_shm_zone_t *ocsp_cache_zone; +-- +2.52.0 diff --git a/SPECS/nginx/0004-Disable-ENGINE-support.patch b/SPECS/nginx/0004-Disable-ENGINE-support.patch new file mode 100644 index 00000000000..8c2dbf28d5e --- /dev/null +++ b/SPECS/nginx/0004-Disable-ENGINE-support.patch @@ -0,0 +1,99 @@ +From 0b9485b645b487d08ed3ab38e94d433112553d67 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= +Date: Tue, 2 Jul 2024 18:29:18 +0200 +Subject: [PATCH 4/5] Disable ENGINE support + +--- + auto/options | 3 +++ + configure | 4 ++++ + src/event/ngx_event_openssl.c | 4 ++-- + src/event/ngx_event_openssl.h | 2 +- + src/event/ngx_event_openssl_cache.c | 2 +- + 5 files changed, 11 insertions(+), 4 deletions(-) + +diff --git a/auto/options b/auto/options +index 6a6e990a0f6b..3cc983dc68a2 100644 +--- a/auto/options ++++ b/auto/options +@@ -45,6 +45,8 @@ USE_THREADS=NO + + NGX_FILE_AIO=NO + ++NGX_SSL_NO_ENGINE=NO ++ + QUIC_BPF=NO + + HTTP=YES +@@ -373,6 +375,7 @@ use the \"--with-mail_ssl_module\" option instead" + + --with-openssl=*) OPENSSL="$value" ;; + --with-openssl-opt=*) OPENSSL_OPT="$value" ;; ++ --without-engine) NGX_SSL_NO_ENGINE=YES ;; + + --with-md5=*) + NGX_POST_CONF_MSG="$NGX_POST_CONF_MSG +diff --git a/configure b/configure +index 5b88ebb4cbe8..3a2129e499fc 100755 +--- a/configure ++++ b/configure +@@ -104,6 +104,10 @@ have=NGX_HTTP_UWSGI_TEMP_PATH value="\"$NGX_HTTP_UWSGI_TEMP_PATH\"" + have=NGX_HTTP_SCGI_TEMP_PATH value="\"$NGX_HTTP_SCGI_TEMP_PATH\"" + . auto/define + ++if [ $NGX_SSL_NO_ENGINE = YES ]; then ++ have=NGX_SSL_NO_ENGINE . auto/have ++fi ++ + . auto/make + . auto/lib/make + . auto/install +diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c +index fb6a5e950504..0b58d1383457 100644 +--- a/src/event/ngx_event_openssl.c ++++ b/src/event/ngx_event_openssl.c +@@ -5981,7 +5981,7 @@ ngx_openssl_create_conf(ngx_cycle_t *cycle) + static char * + ngx_openssl_engine(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) + { +-#ifndef OPENSSL_NO_ENGINE ++#if !defined(OPENSSL_NO_ENGINE) && !defined(NGX_SSL_NO_ENGINE) + + ngx_openssl_conf_t *oscf = conf; + +@@ -6032,7 +6032,7 @@ ngx_openssl_exit(ngx_cycle_t *cycle) + #if OPENSSL_VERSION_NUMBER < 0x10100003L + + EVP_cleanup(); +-#ifndef OPENSSL_NO_ENGINE ++#if !defined(OPENSSL_NO_ENGINE) && !defined(NGX_SSL_NO_ENGINE) + ENGINE_cleanup(); + #endif + +diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h +index 43bddc7be736..7ea2ad751e2b 100644 +--- a/src/event/ngx_event_openssl.h ++++ b/src/event/ngx_event_openssl.h +@@ -22,7 +22,7 @@ + #ifndef OPENSSL_NO_DH + #include + #endif +-#ifndef OPENSSL_NO_ENGINE ++#if !defined(OPENSSL_NO_ENGINE) && !defined(NGX_SSL_NO_ENGINE) + #include + #endif + #include +diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c +index 83ddbbaeb97e..b2df0941f5c1 100644 +--- a/src/event/ngx_event_openssl_cache.c ++++ b/src/event/ngx_event_openssl_cache.c +@@ -694,7 +694,7 @@ ngx_ssl_cache_pkey_create(ngx_ssl_cache_key_t *id, char **err, void *data) + + if (id->type == NGX_SSL_CACHE_ENGINE) { + +-#ifndef OPENSSL_NO_ENGINE ++#if !defined(OPENSSL_NO_ENGINE) && !defined(NGX_SSL_NO_ENGINE) + + u_char *p, *last; + ENGINE *engine; +-- +2.52.0 diff --git a/SPECS/nginx/0005-Compile-perl-module-with-O2.patch b/SPECS/nginx/0005-Compile-perl-module-with-O2.patch new file mode 100644 index 00000000000..5b76595fb26 --- /dev/null +++ 
b/SPECS/nginx/0005-Compile-perl-module-with-O2.patch @@ -0,0 +1,25 @@ +From 8c38540d5ab78c856b07d799015cb0c94d2f9a55 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= +Date: Wed, 31 Jul 2024 17:47:10 +0200 +Subject: [PATCH 5/5] Compile perl module with O2 + +--- + src/http/modules/perl/Makefile.PL | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/http/modules/perl/Makefile.PL b/src/http/modules/perl/Makefile.PL +index 7edadcb3d53e..2ebb7c4c6eb4 100644 +--- a/src/http/modules/perl/Makefile.PL ++++ b/src/http/modules/perl/Makefile.PL +@@ -14,7 +14,7 @@ WriteMakefile( + AUTHOR => 'Igor Sysoev', + + CCFLAGS => "$ENV{NGX_PM_CFLAGS}", +- OPTIMIZE => '-O', ++ OPTIMIZE => '-O2', + + LDDLFLAGS => "$ENV{NGX_PM_LDFLAGS}", + +-- +2.52.0 + diff --git a/SPECS/nginx/CVE-2024-7347.patch b/SPECS/nginx/CVE-2024-7347.patch deleted file mode 100644 index bbfad40576a..00000000000 --- a/SPECS/nginx/CVE-2024-7347.patch +++ /dev/null @@ -1,78 +0,0 @@ -From 7362d01658b61184108c21278443910da68f93b4 Mon Sep 17 00:00:00 2001 -From: Roman Arutyunyan -Date: Mon, 12 Aug 2024 18:20:43 +0400 -Subject: [PATCH] Mp4: fixed buffer underread while updating stsz atom. - -While cropping an stsc atom in ngx_http_mp4_crop_stsc_data(), a 32-bit integer -overflow could happen, which could result in incorrect seeking and a very large -value stored in "samples". This resulted in a large invalid value of -trak->end_chunk_samples. This value is further used to calculate the value of -trak->end_chunk_samples_size in ngx_http_mp4_update_stsz_atom(). While doing -this, a large invalid value of trak->end_chunk_samples could result in reading -memory before stsz atom start. This could potentially result in a segfault. ---- - src/http/modules/ngx_http_mp4_module.c | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - -diff --git a/src/http/modules/ngx_http_mp4_module.c b/src/http/modules/ngx_http_mp4_module.c -index 03175dea21..1cd017c274 100644 ---- a/src/http/modules/ngx_http_mp4_module.c -+++ b/src/http/modules/ngx_http_mp4_module.c -@@ -3099,7 +3099,8 @@ static ngx_int_t - ngx_http_mp4_crop_stsc_data(ngx_http_mp4_file_t *mp4, - ngx_http_mp4_trak_t *trak, ngx_uint_t start) - { -- uint32_t start_sample, chunk, samples, id, next_chunk, n, -+ uint64_t n; -+ uint32_t start_sample, chunk, samples, id, next_chunk, - prev_samples; - ngx_buf_t *data, *buf; - ngx_uint_t entries, target_chunk, chunk_samples; -@@ -3160,7 +3161,7 @@ ngx_http_mp4_crop_stsc_data(ngx_http_mp4_file_t *mp4, - "samples:%uD, id:%uD", - start_sample, chunk, next_chunk - chunk, samples, id); - -- n = (next_chunk - chunk) * samples; -+ n = (uint64_t) (next_chunk - chunk) * samples; - - if (start_sample < n) { - goto found; -@@ -3182,7 +3183,7 @@ ngx_http_mp4_crop_stsc_data(ngx_http_mp4_file_t *mp4, - "sample:%uD, chunk:%uD, chunks:%uD, samples:%uD", - start_sample, chunk, next_chunk - chunk, samples); - -- n = (next_chunk - chunk) * samples; -+ n = (uint64_t) (next_chunk - chunk) * samples; - - if (start_sample > n) { - ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, -From 88955b1044ef38315b77ad1a509d63631a790a0f Mon Sep 17 00:00:00 2001 -From: Roman Arutyunyan -Date: Mon, 12 Aug 2024 18:20:45 +0400 -Subject: [PATCH] Mp4: rejecting unordered chunks in stsc atom. - -Unordered chunks could result in trak->end_chunk smaller than trak->start_chunk -in ngx_http_mp4_crop_stsc_data(). Later in ngx_http_mp4_update_stco_atom() -this caused buffer overread while trying to calculate trak->end_offset. 
---- - src/http/modules/ngx_http_mp4_module.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/src/http/modules/ngx_http_mp4_module.c b/src/http/modules/ngx_http_mp4_module.c -index 1cd017c274..041ad263b5 100644 ---- a/src/http/modules/ngx_http_mp4_module.c -+++ b/src/http/modules/ngx_http_mp4_module.c -@@ -3156,6 +3156,13 @@ ngx_http_mp4_crop_stsc_data(ngx_http_mp4_file_t *mp4, - - next_chunk = ngx_mp4_get_32value(entry->chunk); - -+ if (next_chunk < chunk) { -+ ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, -+ "unordered mp4 stsc chunks in \"%s\"", -+ mp4->file.name.data); -+ return NGX_ERROR; -+ } -+ - ngx_log_debug5(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, - "sample:%uD, chunk:%uD, chunks:%uD, " - "samples:%uD, id:%uD", diff --git a/SPECS/nginx/CVE-2025-23419.patch b/SPECS/nginx/CVE-2025-23419.patch deleted file mode 100644 index eac62698187..00000000000 --- a/SPECS/nginx/CVE-2025-23419.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 117654149dea3a5ff72eae8c9ff2484c35f77732 Mon Sep 17 00:00:00 2001 -From: Sergey Kandaurov -Date: Wed, 22 Jan 2025 18:55:44 +0400 -Subject: [PATCH] SNI: added restriction for TLSv1.3 cross-SNI session - resumption. - -In OpenSSL, session resumption always happens in the default SSL context, -prior to invoking the SNI callback. Further, unlike in TLSv1.2 and older -protocols, SSL_get_servername() returns values received in the resumption -handshake, which may be different from the value in the initial handshake. -Notably, this makes the restriction added in b720f650b insufficient for -sessions resumed with different SNI server name. - -Considering the example from b720f650b, previously, a client was able to -request example.org by presenting a certificate for example.org, then to -resume and request example.com. - -The fix is to reject handshakes resumed with a different server name, if -verification of client certificates is enabled in a corresponding server -configuration. 
---- - src/http/ngx_http_request.c | 27 +++++++++++++++++++++++++-- - 1 file changed, 25 insertions(+), 2 deletions(-) - -diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c -index 3cca57c..9593b7f 100644 ---- a/src/http/ngx_http_request.c -+++ b/src/http/ngx_http_request.c -@@ -932,6 +932,31 @@ ngx_http_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg) - goto done; - } - -+ sscf = ngx_http_get_module_srv_conf(cscf->ctx, ngx_http_ssl_module); -+ -+#if (defined TLS1_3_VERSION \ -+ && !defined LIBRESSL_VERSION_NUMBER && !defined OPENSSL_IS_BORINGSSL) -+ -+ /* -+ * SSL_SESSION_get0_hostname() is only available in OpenSSL 1.1.1+, -+ * but servername being negotiated in every TLSv1.3 handshake -+ * is only returned in OpenSSL 1.1.1+ as well -+ */ -+ -+ if (sscf->verify) { -+ const char *hostname; -+ -+ hostname = SSL_SESSION_get0_hostname(SSL_get0_session(ssl_conn)); -+ -+ if (hostname != NULL && ngx_strcmp(hostname, servername) != 0) { -+ c->ssl->handshake_rejected = 1; -+ *ad = SSL_AD_ACCESS_DENIED; -+ return SSL_TLSEXT_ERR_ALERT_FATAL; -+ } -+ } -+ -+#endif -+ - hc->ssl_servername = ngx_palloc(c->pool, sizeof(ngx_str_t)); - if (hc->ssl_servername == NULL) { - goto error; -@@ -945,8 +970,6 @@ ngx_http_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg) - - ngx_set_connection_log(c, clcf->error_log); - -- sscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_ssl_module); -- - c->ssl->buffer_size = sscf->buffer_size; - - if (sscf->ssl.ctx) { --- -2.34.1 - diff --git a/SPECS/nginx/CVE-2025-53859.patch b/SPECS/nginx/CVE-2025-53859.patch deleted file mode 100644 index 8f670b314eb..00000000000 --- a/SPECS/nginx/CVE-2025-53859.patch +++ /dev/null @@ -1,141 +0,0 @@ -From 26aa1129e6d5920c7327991d693edda3aaa9abf3 Mon Sep 17 00:00:00 2001 -From: Azure Linux Security Servicing Account -Date: Tue, 19 Aug 2025 08:05:07 +0000 -Subject: [PATCH] CVE-2025-53859 - -Signed-off-by: Azure Linux Security Servicing Account -Upstream-reference: AI Backport of https://nginx.org/download/patch.2025.smtp.txt ---- - src/mail/ngx_mail_handler.c | 38 +++++++++++++++++++++---------------- - 1 file changed, 22 insertions(+), 16 deletions(-) - -diff --git a/src/mail/ngx_mail_handler.c b/src/mail/ngx_mail_handler.c -index 1167df3..d3be7f3 100644 ---- a/src/mail/ngx_mail_handler.c -+++ b/src/mail/ngx_mail_handler.c -@@ -523,7 +523,7 @@ ngx_mail_starttls_only(ngx_mail_session_t *s, ngx_connection_t *c) - ngx_int_t - ngx_mail_auth_plain(ngx_mail_session_t *s, ngx_connection_t *c, ngx_uint_t n) - { -- u_char *p, *last; -+ u_char *p, *pos, *last; - ngx_str_t *arg, plain; - - arg = s->args.elts; -@@ -555,7 +555,7 @@ ngx_mail_auth_plain(ngx_mail_session_t *s, ngx_connection_t *c, ngx_uint_t n) - return NGX_MAIL_PARSE_INVALID_COMMAND; - } - -- s->login.data = p; -+ pos = p; - - while (p < last && *p) { p++; } - -@@ -565,7 +565,8 @@ ngx_mail_auth_plain(ngx_mail_session_t *s, ngx_connection_t *c, ngx_uint_t n) - return NGX_MAIL_PARSE_INVALID_COMMAND; - } - -- s->login.len = p++ - s->login.data; -+ s->login.len = p++ - pos; -+ s->login.data = pos; - - s->passwd.len = last - p; - s->passwd.data = p; -@@ -583,24 +584,26 @@ ngx_int_t - ngx_mail_auth_login_username(ngx_mail_session_t *s, ngx_connection_t *c, - ngx_uint_t n) - { -- ngx_str_t *arg; -+ ngx_str_t *arg, login; - - arg = s->args.elts; - - ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, - "mail auth login username: \"%V\"", &arg[n]); - -- s->login.data = ngx_pnalloc(c->pool, ngx_base64_decoded_length(arg[n].len)); -- if (s->login.data == 
NULL) { -+ login.data = ngx_pnalloc(c->pool, ngx_base64_decoded_length(arg[n].len)); -+ if (login.data == NULL) { - return NGX_ERROR; - } - -- if (ngx_decode_base64(&s->login, &arg[n]) != NGX_OK) { -+ if (ngx_decode_base64(&login, &arg[n]) != NGX_OK) { - ngx_log_error(NGX_LOG_INFO, c->log, 0, - "client sent invalid base64 encoding in AUTH LOGIN command"); - return NGX_MAIL_PARSE_INVALID_COMMAND; - } - -+ s->login = login; -+ - ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, - "mail auth login username: \"%V\"", &s->login); - -@@ -611,7 +614,7 @@ ngx_mail_auth_login_username(ngx_mail_session_t *s, ngx_connection_t *c, - ngx_int_t - ngx_mail_auth_login_password(ngx_mail_session_t *s, ngx_connection_t *c) - { -- ngx_str_t *arg; -+ ngx_str_t *arg, passwd; - - arg = s->args.elts; - -@@ -620,18 +623,19 @@ ngx_mail_auth_login_password(ngx_mail_session_t *s, ngx_connection_t *c) - "mail auth login password: \"%V\"", &arg[0]); - #endif - -- s->passwd.data = ngx_pnalloc(c->pool, -- ngx_base64_decoded_length(arg[0].len)); -- if (s->passwd.data == NULL) { -+ passwd.data = ngx_pnalloc(c->pool, ngx_base64_decoded_length(arg[0].len)); -+ if (passwd.data == NULL) { - return NGX_ERROR; - } - -- if (ngx_decode_base64(&s->passwd, &arg[0]) != NGX_OK) { -+ if (ngx_decode_base64(&passwd, &arg[0]) != NGX_OK) { - ngx_log_error(NGX_LOG_INFO, c->log, 0, - "client sent invalid base64 encoding in AUTH LOGIN command"); - return NGX_MAIL_PARSE_INVALID_COMMAND; - } - -+ s->passwd = passwd; -+ - #if (NGX_DEBUG_MAIL_PASSWD) - ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, - "mail auth login password: \"%V\"", &s->passwd); -@@ -674,24 +678,26 @@ ngx_int_t - ngx_mail_auth_cram_md5(ngx_mail_session_t *s, ngx_connection_t *c) - { - u_char *p, *last; -- ngx_str_t *arg; -+ ngx_str_t *arg, login; - - arg = s->args.elts; - - ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, - "mail auth cram-md5: \"%V\"", &arg[0]); - -- s->login.data = ngx_pnalloc(c->pool, ngx_base64_decoded_length(arg[0].len)); -- if (s->login.data == NULL) { -+ login.data = ngx_pnalloc(c->pool, ngx_base64_decoded_length(arg[0].len)); -+ if (login.data == NULL) { - return NGX_ERROR; - } - -- if (ngx_decode_base64(&s->login, &arg[0]) != NGX_OK) { -+ if (ngx_decode_base64(&login, &arg[0]) != NGX_OK) { - ngx_log_error(NGX_LOG_INFO, c->log, 0, - "client sent invalid base64 encoding in AUTH CRAM-MD5 command"); - return NGX_MAIL_PARSE_INVALID_COMMAND; - } - -+ s->login = login; -+ - p = s->login.data; - last = p + s->login.len; - --- -2.45.4 - diff --git a/SPECS/nginx/nginx.signatures.json b/SPECS/nginx/nginx.signatures.json index fa4c0b10854..d3664c56162 100644 --- a/SPECS/nginx/nginx.signatures.json +++ b/SPECS/nginx/nginx.signatures.json @@ -1,8 +1,8 @@ { "Signatures": { - "nginx-1.25.4.tar.gz": "760729901acbaa517996e681ee6ea259032985e37c2768beef80df3a877deed9", - "nginx-njs-0.8.3.tar.gz": "5e1341ee8c1dfce420ea6456475dafa7d5f4b9aed310faca32597cf4d221cfe0", + "nginx-1.28.1.tar.gz": "40e7a0916d121e8905ef50f2a738b675599e42b2224a582dd938603fed15788e", + "nginx-njs-0.9.4.tar.gz": "7b3a9f14b0f09311d9031c2a252cb0e23c06baac2e586a7d12c75aa6cba4ca0e", "nginx-tests.tgz": "5847fdc454543df77e07026e7de737f9e7ff093c8ce4afcbc2093a64e570ff83", "nginx.service": "73a1321ae35eafc4e02614cde224fc0bf20ceba97f969b3373dd73c15c22a0e1" } -} \ No newline at end of file +} diff --git a/SPECS/nginx/nginx.spec b/SPECS/nginx/nginx.spec index 9c73c8fc85f..4ac4362d0ec 100644 --- a/SPECS/nginx/nginx.spec +++ b/SPECS/nginx/nginx.spec @@ -1,12 +1,12 @@ %global nginx_user nginx -%global njs_version 0.8.3 
+%global njs_version 0.9.4 Summary: High-performance HTTP server and reverse proxy Name: nginx # Currently on "stable" version of nginx from https://nginx.org/en/download.html. # Note: Stable versions are even (1.20), mainline versions are odd (1.21) -Version: 1.25.4 -Release: 6%{?dist} +Version: 1.28.1 +Release: 1%{?dist} License: BSD-2-Clause Vendor: Microsoft Corporation Distribution: Azure Linux @@ -20,9 +20,11 @@ Source2: https://github.com/nginx/njs/archive/refs/tags/%{njs_version}.ta Source3: nginx-tests.tgz %endif -Patch0: CVE-2024-7347.patch -Patch1: CVE-2025-23419.patch -Patch2: CVE-2025-53859.patch +Patch1: 0001-remove-Werror-in-upstream-build-scripts.patch +Patch2: 0002-fix-PIDFile-handling.patch +Patch3: 0003-Add-SSL-passphrase-dialog.patch +Patch4: 0004-Disable-ENGINE-support.patch +Patch5: 0005-Compile-perl-module-with-O2.patch BuildRequires: libxml2-devel BuildRequires: libxslt-devel BuildRequires: openssl-devel @@ -165,6 +167,9 @@ rm -rf nginx-tests %dir %{_sysconfdir}/%{name} %changelog +* Thu Oct 23 2025 Sandeep Karambelkar - 1.28.0-1 +- Upgrade to 1.28.0 Upstream Stable Version + * Tue Sep 09 2025 Mayank Singh - 1.25.4-6 - Enable stream ssl preread module diff --git a/SPECS/nmap/CVE-2025-11961.patch b/SPECS/nmap/CVE-2025-11961.patch new file mode 100644 index 00000000000..12bb8c22bf3 --- /dev/null +++ b/SPECS/nmap/CVE-2025-11961.patch @@ -0,0 +1,453 @@ +From 84207632f5f10024d93558c136a303ad9fb6dc12 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Mon, 5 Jan 2026 09:32:18 +0000 +Subject: [PATCH] CVE-2025-11961: Fix OOBR and OOBW in pcap_ether_aton(); add + strict parsing helpers and update comment in gencode.c; update CHANGES. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/the-tcpdump-group/libpcap/commit/b2d2f9a9a0581c40780bde509f7cc715920f1c02.patch + +--- + libpcap/CHANGES | 1 + + libpcap/gencode.c | 5 + + libpcap/nametoaddr.c | 378 ++++++++++++++++++++++++++++++++++++++++--- + 3 files changed, 362 insertions(+), 22 deletions(-) + +diff --git a/libpcap/CHANGES b/libpcap/CHANGES +index c574a27..ba87e2f 100644 +--- a/libpcap/CHANGES ++++ b/libpcap/CHANGES +@@ -239,6 +239,7 @@ Wednesday, June 9, 2021: + Fix "type XXX subtype YYY" giving a parse error + Source code: + Add PCAP_AVAILABLE_1_11. ++ CVE-2025-11961: Fix OOBR and OOBW in pcap_ether_aton(). + Building and testing: + Rename struct bpf_aux_data to avoid NetBSD compile errors + Squelch some compiler warnings +diff --git a/libpcap/gencode.c b/libpcap/gencode.c +index 496e02f..8826e12 100644 +--- a/libpcap/gencode.c ++++ b/libpcap/gencode.c +@@ -7228,6 +7228,11 @@ gen_ecode(compiler_state_t *cstate, const char *s, struct qual q) + return (NULL); + + if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) { ++ /* ++ * Because the lexer guards the input string format, in this ++ * context the function returns NULL iff the implicit malloc() ++ * has failed. ++ */ + cstate->e = pcap_ether_aton(s); + if (cstate->e == NULL) + bpf_error(cstate, "malloc"); +diff --git a/libpcap/nametoaddr.c b/libpcap/nametoaddr.c +index 7a04a61..96b2ff2 100644 +--- a/libpcap/nametoaddr.c ++++ b/libpcap/nametoaddr.c +@@ -620,6 +620,18 @@ pcap_nametoeproto(const char *s) + return PROTO_UNDEF; + } + ++/* Hex digit to 8-bit unsigned integer. 
*/ ++static inline u_char ++pcapint_xdtoi(u_char c) ++{ ++ if (c >= '0' && c <= '9') ++ return (u_char)(c - '0'); ++ else if (c >= 'a' && c <= 'f') ++ return (u_char)(c - 'a' + 10); ++ else ++ return (u_char)(c - 'A' + 10); ++} ++ + #include "llc.h" + + /* Static data base of LLC values. */ +@@ -703,39 +715,361 @@ __pcap_atodn(const char *s, bpf_u_int32 *addr) + return(32); + } + ++// Man page: "xxxxxxxxxxxx", regexp: "^[0-9a-fA-F]{12}$". ++static u_char ++pcapint_atomac48_xxxxxxxxxxxx(const char *s, uint8_t *addr) ++{ ++ if (strlen(s) == 12 && ++ PCAP_ISXDIGIT(s[0]) && ++ PCAP_ISXDIGIT(s[1]) && ++ PCAP_ISXDIGIT(s[2]) && ++ PCAP_ISXDIGIT(s[3]) && ++ PCAP_ISXDIGIT(s[4]) && ++ PCAP_ISXDIGIT(s[5]) && ++ PCAP_ISXDIGIT(s[6]) && ++ PCAP_ISXDIGIT(s[7]) && ++ PCAP_ISXDIGIT(s[8]) && ++ PCAP_ISXDIGIT(s[9]) && ++ PCAP_ISXDIGIT(s[10]) && ++ PCAP_ISXDIGIT(s[11])) { ++ addr[0] = pcapint_xdtoi(s[0]) << 4 | pcapint_xdtoi(s[1]); ++ addr[1] = pcapint_xdtoi(s[2]) << 4 | pcapint_xdtoi(s[3]); ++ addr[2] = pcapint_xdtoi(s[4]) << 4 | pcapint_xdtoi(s[5]); ++ addr[3] = pcapint_xdtoi(s[6]) << 4 | pcapint_xdtoi(s[7]); ++ addr[4] = pcapint_xdtoi(s[8]) << 4 | pcapint_xdtoi(s[9]); ++ addr[5] = pcapint_xdtoi(s[10]) << 4 | pcapint_xdtoi(s[11]); ++ return 1; ++ } ++ return 0; ++} ++ ++// Man page: "xxxx.xxxx.xxxx", regexp: "^[0-9a-fA-F]{4}(\.[0-9a-fA-F]{4}){2}$". ++static u_char ++pcapint_atomac48_xxxx_3_times(const char *s, uint8_t *addr) ++{ ++ const char sep = '.'; ++ if (strlen(s) == 14 && ++ PCAP_ISXDIGIT(s[0]) && ++ PCAP_ISXDIGIT(s[1]) && ++ PCAP_ISXDIGIT(s[2]) && ++ PCAP_ISXDIGIT(s[3]) && ++ s[4] == sep && ++ PCAP_ISXDIGIT(s[5]) && ++ PCAP_ISXDIGIT(s[6]) && ++ PCAP_ISXDIGIT(s[7]) && ++ PCAP_ISXDIGIT(s[8]) && ++ s[9] == sep && ++ PCAP_ISXDIGIT(s[10]) && ++ PCAP_ISXDIGIT(s[11]) && ++ PCAP_ISXDIGIT(s[12]) && ++ PCAP_ISXDIGIT(s[13])) { ++ addr[0] = pcapint_xdtoi(s[0]) << 4 | pcapint_xdtoi(s[1]); ++ addr[1] = pcapint_xdtoi(s[2]) << 4 | pcapint_xdtoi(s[3]); ++ addr[2] = pcapint_xdtoi(s[5]) << 4 | pcapint_xdtoi(s[6]); ++ addr[3] = pcapint_xdtoi(s[7]) << 4 | pcapint_xdtoi(s[8]); ++ addr[4] = pcapint_xdtoi(s[10]) << 4 | pcapint_xdtoi(s[11]); ++ addr[5] = pcapint_xdtoi(s[12]) << 4 | pcapint_xdtoi(s[13]); ++ return 1; ++ } ++ return 0; ++} ++ + /* +- * Convert 's', which can have the one of the forms: ++ * Man page: "xx:xx:xx:xx:xx:xx", regexp: "^[0-9a-fA-F]{1,2}(:[0-9a-fA-F]{1,2}){5}$". ++ * Man page: "xx-xx-xx-xx-xx-xx", regexp: "^[0-9a-fA-F]{1,2}(-[0-9a-fA-F]{1,2}){5}$". ++ * Man page: "xx.xx.xx.xx.xx.xx", regexp: "^[0-9a-fA-F]{1,2}(\.[0-9a-fA-F]{1,2}){5}$". ++ * (Any "xx" above can be "x", which is equivalent to "0x".) + * +- * "xx:xx:xx:xx:xx:xx" +- * "xx.xx.xx.xx.xx.xx" +- * "xx-xx-xx-xx-xx-xx" +- * "xxxx.xxxx.xxxx" +- * "xxxxxxxxxxxx" ++ * An equivalent (and parametrisable for EUI-64) FSM could be implemented using ++ * a smaller graph, but that graph would be neither acyclic nor planar nor ++ * trivial to verify. ++ * ++ * | ++ * [.] v ++ * +<---------- START ++ * | | ++ * | | [0-9a-fA-F] ++ * | [.] v ++ * +<--------- BYTE0_X ----------+ ++ * | | | ++ * | | [0-9a-fA-F] | ++ * | [.] v | ++ * +<--------- BYTE0_XX | [:\.-] ++ * | | | ++ * | | [:\.-] | ++ * | [.] v | ++ * +<----- BYTE0_SEP_BYTE1 <-----+ ++ * | | ++ * | | [0-9a-fA-F] ++ * | [.] v ++ * +<--------- BYTE1_X ----------+ ++ * | | | ++ * | | [0-9a-fA-F] | ++ * | [.] v | ++ * +<--------- BYTE1_XX | ++ * | | | ++ * | | | ++ * | [.] v | ++ * +<----- BYTE1_SEP_BYTE2 <-----+ ++ * | | ++ * | | [0-9a-fA-F] ++ * | [.] 
v ++ * +<--------- BYTE2_X ----------+ ++ * | | | ++ * | | [0-9a-fA-F] | ++ * | [.] v | ++ * +<--------- BYTE2_XX | ++ * | | | ++ * | | | ++ * | [.] v | ++ * +<----- BYTE2_SEP_BYTE3 <-----+ ++ * | | ++ * | | [0-9a-fA-F] ++ * | [.] v ++ * +<--------- BYTE3_X ----------+ ++ * | | | ++ * | | [0-9a-fA-F] | ++ * | [.] v | ++ * +<--------- BYTE3_XX | ++ * | | | ++ * | | | ++ * | [.] v | ++ * +<----- BYTE3_SEP_BYTE4 <-----+ ++ * | | ++ * | | [0-9a-fA-F] ++ * | [.] v ++ * +<--------- BYTE4_X ----------+ ++ * | | | ++ * | | [0-9a-fA-F] | ++ * | [.] v | ++ * +<--------- BYTE4_XX | ++ * | | | ++ * | | | ++ * | [.] v | ++ * +<----- BYTE4_SEP_BYTE5 <-----+ ++ * | | ++ * | | [0-9a-fA-F] ++ * | [.] v ++ * +<--------- BYTE5_X ----------+ ++ * | | | ++ * | | [0-9a-fA-F] | ++ * | [.] v | ++ * +<--------- BYTE5_XX | \0 ++ * | | | ++ * | | \0 | ++ * | | v ++ * +--> (reject) +---------> (accept) + * +- * (or various mixes of ':', '.', and '-') into a new +- * ethernet address. Assumes 's' is well formed. ++ */ ++static u_char ++pcapint_atomac48_x_xx_6_times(const char *s, uint8_t *addr) ++{ ++ enum { ++ START, ++ BYTE0_X, ++ BYTE0_XX, ++ BYTE0_SEP_BYTE1, ++ BYTE1_X, ++ BYTE1_XX, ++ BYTE1_SEP_BYTE2, ++ BYTE2_X, ++ BYTE2_XX, ++ BYTE2_SEP_BYTE3, ++ BYTE3_X, ++ BYTE3_XX, ++ BYTE3_SEP_BYTE4, ++ BYTE4_X, ++ BYTE4_XX, ++ BYTE4_SEP_BYTE5, ++ BYTE5_X, ++ BYTE5_XX, ++ } fsm_state = START; ++ uint8_t buf[6]; ++ const char *seplist = ":.-"; ++ char sep; ++ ++ while (*s) { ++ switch (fsm_state) { ++ case START: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[0] = pcapint_xdtoi(*s); ++ fsm_state = BYTE0_X; ++ break; ++ } ++ goto reject; ++ case BYTE0_X: ++ if (strchr(seplist, *s)) { ++ sep = *s; ++ fsm_state = BYTE0_SEP_BYTE1; ++ break; ++ } ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[0] = buf[0] << 4 | pcapint_xdtoi(*s); ++ fsm_state = BYTE0_XX; ++ break; ++ } ++ goto reject; ++ case BYTE0_XX: ++ if (strchr(seplist, *s)) { ++ sep = *s; ++ fsm_state = BYTE0_SEP_BYTE1; ++ break; ++ } ++ goto reject; ++ case BYTE0_SEP_BYTE1: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[1] = pcapint_xdtoi(*s); ++ fsm_state = BYTE1_X; ++ break; ++ } ++ goto reject; ++ case BYTE1_X: ++ if (*s == sep) { ++ fsm_state = BYTE1_SEP_BYTE2; ++ break; ++ } ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[1] = buf[1] << 4 | pcapint_xdtoi(*s); ++ fsm_state = BYTE1_XX; ++ break; ++ } ++ goto reject; ++ case BYTE1_XX: ++ if (*s == sep) { ++ fsm_state = BYTE1_SEP_BYTE2; ++ break; ++ } ++ goto reject; ++ case BYTE1_SEP_BYTE2: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[2] = pcapint_xdtoi(*s); ++ fsm_state = BYTE2_X; ++ break; ++ } ++ goto reject; ++ case BYTE2_X: ++ if (*s == sep) { ++ fsm_state = BYTE2_SEP_BYTE3; ++ break; ++ } ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[2] = buf[2] << 4 | pcapint_xdtoi(*s); ++ fsm_state = BYTE2_XX; ++ break; ++ } ++ goto reject; ++ case BYTE2_XX: ++ if (*s == sep) { ++ fsm_state = BYTE2_SEP_BYTE3; ++ break; ++ } ++ goto reject; ++ case BYTE2_SEP_BYTE3: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[3] = pcapint_xdtoi(*s); ++ fsm_state = BYTE3_X; ++ break; ++ } ++ goto reject; ++ case BYTE3_X: ++ if (*s == sep) { ++ fsm_state = BYTE3_SEP_BYTE4; ++ break; ++ } ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[3] = buf[3] << 4 | pcapint_xdtoi(*s); ++ fsm_state = BYTE3_XX; ++ break; ++ } ++ goto reject; ++ case BYTE3_XX: ++ if (*s == sep) { ++ fsm_state = BYTE3_SEP_BYTE4; ++ break; ++ } ++ goto reject; ++ case BYTE3_SEP_BYTE4: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[4] = pcapint_xdtoi(*s); ++ fsm_state = BYTE4_X; ++ break; ++ } ++ goto reject; ++ case BYTE4_X: ++ if (*s == sep) { ++ fsm_state = BYTE4_SEP_BYTE5; ++ break; 
++ } ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[4] = buf[4] << 4 | pcapint_xdtoi(*s); ++ fsm_state = BYTE4_XX; ++ break; ++ } ++ goto reject; ++ case BYTE4_XX: ++ if (*s == sep) { ++ fsm_state = BYTE4_SEP_BYTE5; ++ break; ++ } ++ goto reject; ++ case BYTE4_SEP_BYTE5: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[5] = pcapint_xdtoi(*s); ++ fsm_state = BYTE5_X; ++ break; ++ } ++ goto reject; ++ case BYTE5_X: ++ if (PCAP_ISXDIGIT(*s)) { ++ buf[5] = buf[5] << 4 | pcapint_xdtoi(*s); ++ fsm_state = BYTE5_XX; ++ break; ++ } ++ goto reject; ++ case BYTE5_XX: ++ goto reject; ++ } // switch ++ s++; ++ } // while ++ ++ if (fsm_state == BYTE5_X || fsm_state == BYTE5_XX) { ++ // accept ++ memcpy(addr, buf, sizeof(buf)); ++ return 1; ++ } ++ ++reject: ++ return 0; ++} ++ ++// The 'addr' argument must point to an array of at least 6 elements. ++static int ++pcapint_atomac48(const char *s, uint8_t *addr) ++{ ++ return s && ( ++ pcapint_atomac48_xxxxxxxxxxxx(s, addr) || ++ pcapint_atomac48_xxxx_3_times(s, addr) || ++ pcapint_atomac48_x_xx_6_times(s, addr) ++ ); ++} ++ ++/* ++ * If 's' is a MAC-48 address in one of the forms documented in pcap-filter(7) ++ * for "ether host", return a pointer to an allocated buffer with the binary ++ * value of the address. Return NULL on any error. + */ + u_char * + pcap_ether_aton(const char *s) + { +- register u_char *ep, *e; +- register u_char d; ++ uint8_t tmp[6]; ++ if (! pcapint_atomac48(s, tmp)) ++ return (NULL); + +- e = ep = (u_char *)malloc(6); ++ u_char *e = malloc(6); + if (e == NULL) + return (NULL); + +- while (*s) { +- if (*s == ':' || *s == '.' || *s == '-') +- s += 1; +- d = xdtoi(*s++); +- if (PCAP_ISXDIGIT(*s)) { +- d <<= 4; +- d |= xdtoi(*s++); +- } +- *ep++ = d; +- } +- ++ memcpy(e, tmp, sizeof(tmp)); + return (e); + } + +-- +2.45.4 + diff --git a/SPECS/nmap/nmap.spec b/SPECS/nmap/nmap.spec index a5bc1575a63..9b2bf1146bc 100644 --- a/SPECS/nmap/nmap.spec +++ b/SPECS/nmap/nmap.spec @@ -1,7 +1,7 @@ Summary: Nmap Network Mapper Name: nmap Version: 7.95 -Release: 2%{?dist} +Release: 3%{?dist} License: Nmap Vendor: Microsoft Corporation Distribution: Azure Linux @@ -21,6 +21,7 @@ BuildRequires: zlib-devel Patch0: remove_openssl_macro.patch Patch1: CVE-2024-8006.patch Patch2: CVE-2023-7256.patch +Patch3: CVE-2025-11961.patch %description Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing. 
@@ -65,6 +66,9 @@ ln -s ncat %{buildroot}%{_bindir}/nc %{_bindir}/nc %changelog +* Mon Jan 05 2026 Azure Linux Security Servicing Account - 7.95-3 +- Patch for CVE-2025-11961 + * Mon Nov 18 2024 Kavya Sree Kaitepalli - 7.95-2 - Backport to fix CVE-2024-8006 - Fix CVE-2023-7256.patch diff --git a/SPECS/nodejs/CVE-2025-55130.patch b/SPECS/nodejs/CVE-2025-55130.patch new file mode 100644 index 00000000000..5eb9cb18549 --- /dev/null +++ b/SPECS/nodejs/CVE-2025-55130.patch @@ -0,0 +1,312 @@ +From b9f628994a4c17f42fa9fc96337db89ccaeef0f9 Mon Sep 17 00:00:00 2001 +From: RafaelGSS +Date: Mon, 10 Nov 2025 19:27:51 -0300 +Subject: [PATCH] lib,permission: require full read and write to symlink APIs + +Refs: https://hackerone.com/reports/3417819 +PR-URL: https://github.com/nodejs-private/node-private/pull/760 +Reviewed-By: Matteo Collina +CVE-ID: CVE-2025-55130 +Signed-off-by: RafaelGSS +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/nodejs/node/commit/494f62dc23.patch +--- + lib/fs.js | 34 ++++++------------- + lib/internal/fs/promises.js | 20 +++-------- + .../permission/fs-symlink-target-write.js | 18 ++-------- + test/fixtures/permission/fs-symlink.js | 18 ++++++++-- + .../test-permission-fs-symlink-relative.js | 10 +++--- + test/parallel/test-permission-fs-symlink.js | 14 ++++++++ + 6 files changed, 52 insertions(+), 62 deletions(-) + +diff --git a/lib/fs.js b/lib/fs.js +index fd339900..85f8301a 100644 +--- a/lib/fs.js ++++ b/lib/fs.js +@@ -59,7 +59,6 @@ const { + } = constants; + + const pathModule = require('path'); +-const { isAbsolute } = pathModule; + const { isArrayBufferView } = require('internal/util/types'); + + const binding = internalBinding('fs'); +@@ -1736,18 +1735,12 @@ function symlink(target, path, type_, callback_) { + const type = (typeof type_ === 'string' ? type_ : null); + const callback = makeCallback(arguments[arguments.length - 1]); + +- if (permission.isEnabled()) { +- // The permission model's security guarantees fall apart in the presence of +- // relative symbolic links. Thus, we have to prevent their creation. +- if (BufferIsBuffer(target)) { +- if (!isAbsolute(BufferToString(target))) { +- callback(new ERR_ACCESS_DENIED('relative symbolic link target')); +- return; +- } +- } else if (typeof target !== 'string' || !isAbsolute(toPathIfFileURL(target))) { +- callback(new ERR_ACCESS_DENIED('relative symbolic link target')); +- return; +- } ++ // Due to the nature of Node.js runtime, symlinks has different edge cases that can bypass ++ // the permission model security guarantees. Thus, this API is disabled unless fs.read ++ // and fs.write permission has been given. ++ if (permission.isEnabled() && !permission.has('fs')) { ++ callback(new ERR_ACCESS_DENIED('fs.symlink API requires full fs.read and fs.write permissions.')); ++ return; + } + + target = getValidatedPath(target, 'target'); +@@ -1807,16 +1800,11 @@ function symlinkSync(target, path, type) { + } + } + +- if (permission.isEnabled()) { +- // The permission model's security guarantees fall apart in the presence of +- // relative symbolic links. Thus, we have to prevent their creation. 
+- if (BufferIsBuffer(target)) { +- if (!isAbsolute(BufferToString(target))) { +- throw new ERR_ACCESS_DENIED('relative symbolic link target'); +- } +- } else if (typeof target !== 'string' || !isAbsolute(toPathIfFileURL(target))) { +- throw new ERR_ACCESS_DENIED('relative symbolic link target'); +- } ++ // Due to the nature of Node.js runtime, symlinks has different edge cases that can bypass ++ // the permission model security guarantees. Thus, this API is disabled unless fs.read ++ // and fs.write permission has been given. ++ if (permission.isEnabled() && !permission.has('fs')) { ++ throw new ERR_ACCESS_DENIED('fs.symlink API requires full fs.read and fs.write permissions.'); + } + + target = getValidatedPath(target, 'target'); +diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js +index 1544c34e..5584b2e3 100644 +--- a/lib/internal/fs/promises.js ++++ b/lib/internal/fs/promises.js +@@ -18,7 +18,6 @@ const { + SymbolAsyncDispose, + Uint8Array, + FunctionPrototypeBind, +- uncurryThis, + } = primordials; + + const { fs: constants } = internalBinding('constants'); +@@ -32,8 +31,6 @@ const { + + const binding = internalBinding('fs'); + const { Buffer } = require('buffer'); +-const { isBuffer: BufferIsBuffer } = Buffer; +-const BufferToString = uncurryThis(Buffer.prototype.toString); + + const { + codes: { +@@ -89,8 +86,6 @@ const { + kValidateObjectAllowNullable, + } = require('internal/validators'); + const pathModule = require('path'); +-const { isAbsolute } = pathModule; +-const { toPathIfFileURL } = require('internal/url'); + const { + kEmptyObject, + lazyDOMException, +@@ -980,16 +975,11 @@ async function symlink(target, path, type_) { + } + } + +- if (permission.isEnabled()) { +- // The permission model's security guarantees fall apart in the presence of +- // relative symbolic links. Thus, we have to prevent their creation. +- if (BufferIsBuffer(target)) { +- if (!isAbsolute(BufferToString(target))) { +- throw new ERR_ACCESS_DENIED('relative symbolic link target'); +- } +- } else if (typeof target !== 'string' || !isAbsolute(toPathIfFileURL(target))) { +- throw new ERR_ACCESS_DENIED('relative symbolic link target'); +- } ++ // Due to the nature of Node.js runtime, symlinks has different edge cases that can bypass ++ // the permission model security guarantees. Thus, this API is disabled unless fs.read ++ // and fs.write permission has been given. 
++ if (permission.isEnabled() && !permission.has('fs')) { ++ throw new ERR_ACCESS_DENIED('fs.symlink API requires full fs.read and fs.write permissions.'); + } + + target = getValidatedPath(target, 'target'); +diff --git a/test/fixtures/permission/fs-symlink-target-write.js b/test/fixtures/permission/fs-symlink-target-write.js +index c17d674d..6e07bfa8 100644 +--- a/test/fixtures/permission/fs-symlink-target-write.js ++++ b/test/fixtures/permission/fs-symlink-target-write.js +@@ -26,8 +26,7 @@ const writeOnlyFolder = process.env.WRITEONLYFOLDER; + fs.symlinkSync(path.join(readOnlyFolder, 'file'), path.join(readWriteFolder, 'link-to-read-only'), 'file'); + }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', +- permission: 'FileSystemWrite', +- resource: path.toNamespacedPath(path.join(readOnlyFolder, 'file')), ++ message: 'fs.symlink API requires full fs.read and fs.write permissions.', + })); + assert.throws(() => { + fs.linkSync(path.join(readOnlyFolder, 'file'), path.join(readWriteFolder, 'link-to-read-only')); +@@ -37,18 +36,6 @@ const writeOnlyFolder = process.env.WRITEONLYFOLDER; + resource: path.toNamespacedPath(path.join(readOnlyFolder, 'file')), + })); + +- // App will be able to symlink to a writeOnlyFolder +- fs.symlink(path.join(readWriteFolder, 'file'), path.join(writeOnlyFolder, 'link-to-read-write'), 'file', (err) => { +- assert.ifError(err); +- // App will won't be able to read the symlink +- fs.readFile(path.join(writeOnlyFolder, 'link-to-read-write'), common.expectsError({ +- code: 'ERR_ACCESS_DENIED', +- permission: 'FileSystemRead', +- })); +- +- // App will be able to write to the symlink +- fs.writeFile(path.join(writeOnlyFolder, 'link-to-read-write'), 'some content', common.mustSucceed()); +- }); + fs.link(path.join(readWriteFolder, 'file'), path.join(writeOnlyFolder, 'link-to-read-write2'), (err) => { + assert.ifError(err); + // App will won't be able to read the link +@@ -66,8 +53,7 @@ const writeOnlyFolder = process.env.WRITEONLYFOLDER; + fs.symlinkSync(path.join(readWriteFolder, 'file'), path.join(readOnlyFolder, 'link-to-read-only'), 'file'); + }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', +- permission: 'FileSystemWrite', +- resource: path.toNamespacedPath(path.join(readOnlyFolder, 'link-to-read-only')), ++ message: 'fs.symlink API requires full fs.read and fs.write permissions.', + })); + assert.throws(() => { + fs.linkSync(path.join(readWriteFolder, 'file'), path.join(readOnlyFolder, 'link-to-read-only')); +diff --git a/test/fixtures/permission/fs-symlink.js b/test/fixtures/permission/fs-symlink.js +index 4cf3b45f..ba60f781 100644 +--- a/test/fixtures/permission/fs-symlink.js ++++ b/test/fixtures/permission/fs-symlink.js +@@ -54,7 +54,6 @@ const symlinkFromBlockedFile = process.env.EXISTINGSYMLINK; + fs.readFileSync(blockedFile); + }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', +- permission: 'FileSystemRead', + })); + assert.throws(() => { + fs.appendFileSync(blockedFile, 'data'); +@@ -68,7 +67,6 @@ const symlinkFromBlockedFile = process.env.EXISTINGSYMLINK; + fs.symlinkSync(regularFile, blockedFolder + '/asdf', 'file'); + }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', +- permission: 'FileSystemWrite', + })); + assert.throws(() => { + fs.linkSync(regularFile, blockedFolder + '/asdf'); +@@ -82,7 +80,6 @@ const symlinkFromBlockedFile = process.env.EXISTINGSYMLINK; + fs.symlinkSync(blockedFile, path.join(__dirname, '/asdf'), 'file'); + }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', +- permission: 'FileSystemRead', + })); + 
assert.throws(() => { + fs.linkSync(blockedFile, path.join(__dirname, '/asdf')); +@@ -90,4 +87,19 @@ const symlinkFromBlockedFile = process.env.EXISTINGSYMLINK; + code: 'ERR_ACCESS_DENIED', + permission: 'FileSystemRead', + })); ++} ++ ++// fs.symlink API is blocked by default ++{ ++ assert.throws(() => { ++ fs.symlinkSync(regularFile, regularFile); ++ }, common.expectsError({ ++ message: 'fs.symlink API requires full fs.read and fs.write permissions.', ++ code: 'ERR_ACCESS_DENIED', ++ })); ++ ++ fs.symlink(regularFile, regularFile, common.expectsError({ ++ message: 'fs.symlink API requires full fs.read and fs.write permissions.', ++ code: 'ERR_ACCESS_DENIED', ++ })); + } +\ No newline at end of file +diff --git a/test/parallel/test-permission-fs-symlink-relative.js b/test/parallel/test-permission-fs-symlink-relative.js +index 4cc7d920..9080f16c 100644 +--- a/test/parallel/test-permission-fs-symlink-relative.js ++++ b/test/parallel/test-permission-fs-symlink-relative.js +@@ -1,4 +1,4 @@ +-// Flags: --experimental-permission --allow-fs-read=* --allow-fs-write=* ++// Flags: --experimental-permission --allow-fs-read=* + 'use strict'; + + const common = require('../common'); +@@ -10,7 +10,7 @@ const { symlinkSync, symlink, promises: { symlink: symlinkAsync } } = require('f + + const error = { + code: 'ERR_ACCESS_DENIED', +- message: /relative symbolic link target/, ++ message: /symlink API requires full fs\.read and fs\.write permissions/, + }; + + for (const targetString of ['a', './b/c', '../d', 'e/../f', 'C:drive-relative', 'ntfs:alternate']) { +@@ -27,14 +27,14 @@ for (const targetString of ['a', './b/c', '../d', 'e/../f', 'C:drive-relative', + } + } + +-// Absolute should not throw ++// Absolute should throw too + for (const targetString of [path.resolve('.')]) { + for (const target of [targetString, Buffer.from(targetString)]) { + for (const path of [__filename]) { + symlink(target, path, common.mustCall((err) => { + assert(err); +- assert.strictEqual(err.code, 'EEXIST'); +- assert.match(err.message, /file already exists/); ++ assert.strictEqual(err.code, error.code); ++ assert.match(err.message, error.message); + })); + } + } +diff --git a/test/parallel/test-permission-fs-symlink.js b/test/parallel/test-permission-fs-symlink.js +index c7d753c2..268a8ecb 100644 +--- a/test/parallel/test-permission-fs-symlink.js ++++ b/test/parallel/test-permission-fs-symlink.js +@@ -21,15 +21,26 @@ const commonPathWildcard = path.join(__filename, '../../common*'); + const blockedFile = fixtures.path('permission', 'deny', 'protected-file.md'); + const blockedFolder = tmpdir.resolve('subdirectory'); + const symlinkFromBlockedFile = tmpdir.resolve('example-symlink.md'); ++const allowedFolder = tmpdir.resolve('allowed-folder'); ++const traversalSymlink = path.join(allowedFolder, 'deep1', 'deep2', 'deep3', 'gotcha'); + + { + tmpdir.refresh(); + fs.mkdirSync(blockedFolder); ++ // Create deep directory structure for path traversal test ++ fs.mkdirSync(allowedFolder); ++ fs.writeFileSync(path.resolve(allowedFolder, '../protected-file.md'), 'protected'); ++ fs.mkdirSync(path.join(allowedFolder, 'deep1')); ++ fs.mkdirSync(path.join(allowedFolder, 'deep1', 'deep2')); ++ fs.mkdirSync(path.join(allowedFolder, 'deep1', 'deep2', 'deep3')); + } + + { + // Symlink previously created ++ // fs.symlink API is allowed when full-read and full-write access + fs.symlinkSync(blockedFile, symlinkFromBlockedFile); ++ // Create symlink for path traversal test - symlink points to parent directory ++ fs.symlinkSync(allowedFolder, 
traversalSymlink); + } + + { +@@ -38,6 +49,7 @@ const symlinkFromBlockedFile = tmpdir.resolve('example-symlink.md'); + [ + '--experimental-permission', + `--allow-fs-read=${file}`, `--allow-fs-read=${commonPathWildcard}`, `--allow-fs-read=${symlinkFromBlockedFile}`, ++ `--allow-fs-read=${allowedFolder}`, + `--allow-fs-write=${symlinkFromBlockedFile}`, + file, + ], +@@ -47,6 +59,8 @@ const symlinkFromBlockedFile = tmpdir.resolve('example-symlink.md'); + BLOCKEDFOLDER: blockedFolder, + BLOCKEDFILE: blockedFile, + EXISTINGSYMLINK: symlinkFromBlockedFile, ++ TRAVERSALSYMLINK: traversalSymlink, ++ ALLOWEDFOLDER: allowedFolder, + }, + } + ); +-- +2.45.4 + diff --git a/SPECS/nodejs/CVE-2025-55131.patch b/SPECS/nodejs/CVE-2025-55131.patch new file mode 100644 index 00000000000..ce673f10c33 --- /dev/null +++ b/SPECS/nodejs/CVE-2025-55131.patch @@ -0,0 +1,297 @@ +From de3c7586220032e30e83529f24e0d63d2cfeed75 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=D0=A1=D0=BA=D0=BE=D0=B2=D0=BE=D1=80=D0=BE=D0=B4=D0=B0=20?= + =?UTF-8?q?=D0=9D=D0=B8=D0=BA=D0=B8=D1=82=D0=B0=20=D0=90=D0=BD=D0=B4=D1=80?= + =?UTF-8?q?=D0=B5=D0=B5=D0=B2=D0=B8=D1=87?= +Date: Fri, 7 Nov 2025 11:50:57 -0300 +Subject: [PATCH] src,lib: refactor unsafe buffer creation to remove zero-fill + toggle + +This removes the zero-fill toggle mechanism that allowed JavaScript +to control ArrayBuffer initialization via shared memory. Instead, +unsafe buffer creation now uses a dedicated C++ API. + +Refs: https://hackerone.com/reports/3405778 +Co-Authored-By: Rafael Gonzaga +Co-Authored-By: Joyee Cheung +Signed-off-by: RafaelGSS +PR-URL: https://github.com/nodejs-private/node-private/pull/759 +Backport-PR-URL: https://github.com/nodejs-private/node-private/pull/799 +CVE-ID: CVE-2025-55131 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/nodejs/node/commit/51f4de4b4a.patch +--- + deps/v8/include/v8-array-buffer.h | 7 +++ + deps/v8/src/api/api.cc | 17 ++++++ + lib/internal/buffer.js | 23 ++------ + lib/internal/process/pre_execution.js | 2 - + src/api/environment.cc | 3 +- + src/node_buffer.cc | 84 ++++++++++++++++----------- + 6 files changed, 82 insertions(+), 54 deletions(-) + +diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h +index 804fc42c..e03ed1a6 100644 +--- a/deps/v8/include/v8-array-buffer.h ++++ b/deps/v8/include/v8-array-buffer.h +@@ -244,6 +244,13 @@ class V8_EXPORT ArrayBuffer : public Object { + */ + static std::unique_ptr NewBackingStore(Isolate* isolate, + size_t byte_length); ++ /** ++ * Returns a new standalone BackingStore with uninitialized memory and ++ * return nullptr on failure. ++ * This variant is for not breaking ABI on Node.js LTS. DO NOT USE. ++ */ ++ static std::unique_ptr NewBackingStoreForNodeLTS( ++ Isolate* isolate, size_t byte_length); + /** + * Returns a new standalone BackingStore that takes over the ownership of + * the given buffer. 
The destructor of the BackingStore invokes the given +diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc +index a06394e6..da0c960f 100644 +--- a/deps/v8/src/api/api.cc ++++ b/deps/v8/src/api/api.cc +@@ -8743,6 +8743,23 @@ std::unique_ptr v8::ArrayBuffer::NewBackingStore( + static_cast(backing_store.release())); + } + ++std::unique_ptr v8::ArrayBuffer::NewBackingStoreForNodeLTS( ++ Isolate* v8_isolate, size_t byte_length) { ++ i::Isolate* i_isolate = reinterpret_cast(v8_isolate); ++ API_RCS_SCOPE(i_isolate, ArrayBuffer, NewBackingStore); ++ CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength); ++ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); ++ std::unique_ptr backing_store = ++ i::BackingStore::Allocate(i_isolate, byte_length, ++ i::SharedFlag::kNotShared, ++ i::InitializedFlag::kUninitialized); ++ if (!backing_store) { ++ return nullptr; ++ } ++ return std::unique_ptr( ++ static_cast(backing_store.release())); ++} ++ + std::unique_ptr v8::ArrayBuffer::NewBackingStore( + void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter, + void* deleter_data) { +diff --git a/lib/internal/buffer.js b/lib/internal/buffer.js +index fbe9de24..23df382f 100644 +--- a/lib/internal/buffer.js ++++ b/lib/internal/buffer.js +@@ -30,7 +30,7 @@ const { + hexWrite, + ucs2Write, + utf8Write, +- getZeroFillToggle, ++ createUnsafeArrayBuffer, + } = internalBinding('buffer'); + + const { +@@ -1053,26 +1053,14 @@ function markAsUntransferable(obj) { + obj[untransferable_object_private_symbol] = true; + } + +-// A toggle used to access the zero fill setting of the array buffer allocator +-// in C++. +-// |zeroFill| can be undefined when running inside an isolate where we +-// do not own the ArrayBuffer allocator. Zero fill is always on in that case. +-let zeroFill = getZeroFillToggle(); + function createUnsafeBuffer(size) { +- zeroFill[0] = 0; +- try { ++ if (size <= 64) { ++ // Allocated in heap, doesn't call backing store anyway ++ // This is the same that the old impl did implicitly, but explicit now + return new FastBuffer(size); +- } finally { +- zeroFill[0] = 1; + } +-} + +-// The connection between the JS land zero fill toggle and the +-// C++ one in the NodeArrayBufferAllocator gets lost if the toggle +-// is deserialized from the snapshot, because V8 owns the underlying +-// memory of this toggle. This resets the connection. +-function reconnectZeroFillToggle() { +- zeroFill = getZeroFillToggle(); ++ return new FastBuffer(createUnsafeArrayBuffer(size)); + } + + module.exports = { +@@ -1082,5 +1070,4 @@ module.exports = { + createUnsafeBuffer, + readUInt16BE, + readUInt32BE, +- reconnectZeroFillToggle, + }; +diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js +index 16e29148..4a5dc638 100644 +--- a/lib/internal/process/pre_execution.js ++++ b/lib/internal/process/pre_execution.js +@@ -27,7 +27,6 @@ const { + refreshOptions, + getEmbedderOptions, + } = require('internal/options'); +-const { reconnectZeroFillToggle } = require('internal/buffer'); + const { + exposeInterface, + exposeLazyInterfaces, +@@ -98,7 +97,6 @@ function prepareExecution(options) { + const { expandArgv1, initializeModules, isMainThread } = options; + + refreshRuntimeOptions(); +- reconnectZeroFillToggle(); + + // Patch the process object and get the resolved main entry point. 
+ const mainEntry = patchProcessObject(expandArgv1); +diff --git a/src/api/environment.cc b/src/api/environment.cc +index cdc2f7aa..776b6f10 100644 +--- a/src/api/environment.cc ++++ b/src/api/environment.cc +@@ -107,8 +107,9 @@ void* NodeArrayBufferAllocator::Allocate(size_t size) { + ret = allocator_->Allocate(size); + else + ret = allocator_->AllocateUninitialized(size); +- if (LIKELY(ret != nullptr)) ++ if (ret != nullptr) [[likely]] { + total_mem_usage_.fetch_add(size, std::memory_order_relaxed); ++ } + return ret; + } + +diff --git a/src/node_buffer.cc b/src/node_buffer.cc +index e63318b6..6604e53e 100644 +--- a/src/node_buffer.cc ++++ b/src/node_buffer.cc +@@ -74,7 +74,6 @@ using v8::Object; + using v8::SharedArrayBuffer; + using v8::String; + using v8::Uint32; +-using v8::Uint32Array; + using v8::Uint8Array; + using v8::Value; + +@@ -1170,35 +1169,6 @@ void SetBufferPrototype(const FunctionCallbackInfo& args) { + realm->set_buffer_prototype_object(proto); + } + +-void GetZeroFillToggle(const FunctionCallbackInfo& args) { +- Environment* env = Environment::GetCurrent(args); +- NodeArrayBufferAllocator* allocator = env->isolate_data()->node_allocator(); +- Local ab; +- // It can be a nullptr when running inside an isolate where we +- // do not own the ArrayBuffer allocator. +- if (allocator == nullptr) { +- // Create a dummy Uint32Array - the JS land can only toggle the C++ land +- // setting when the allocator uses our toggle. With this the toggle in JS +- // land results in no-ops. +- ab = ArrayBuffer::New(env->isolate(), sizeof(uint32_t)); +- } else { +- uint32_t* zero_fill_field = allocator->zero_fill_field(); +- std::unique_ptr backing = +- ArrayBuffer::NewBackingStore(zero_fill_field, +- sizeof(*zero_fill_field), +- [](void*, size_t, void*) {}, +- nullptr); +- ab = ArrayBuffer::New(env->isolate(), std::move(backing)); +- } +- +- ab->SetPrivate( +- env->context(), +- env->untransferable_object_private_symbol(), +- True(env->isolate())).Check(); +- +- args.GetReturnValue().Set(Uint32Array::New(ab, 0, 1)); +-} +- + void DetachArrayBuffer(const FunctionCallbackInfo& args) { + Environment* env = Environment::GetCurrent(args); + if (args[0]->IsArrayBuffer()) { +@@ -1378,6 +1348,54 @@ void CopyArrayBuffer(const FunctionCallbackInfo& args) { + memcpy(dest, src, bytes_to_copy); + } + ++// Converts a number parameter to size_t suitable for ArrayBuffer sizes ++// Could be larger than uint32_t ++// See v8::internal::TryNumberToSize and v8::internal::NumberToSize ++inline size_t CheckNumberToSize(Local number) { ++ CHECK(number->IsNumber()); ++ double value = number.As()->Value(); ++ // See v8::internal::TryNumberToSize on this (and on < comparison) ++ double maxSize = static_cast(std::numeric_limits::max()); ++ CHECK(value >= 0 && value < maxSize); ++ size_t size = static_cast(value); ++#ifdef V8_ENABLE_SANDBOX ++ CHECK_LE(size, kMaxSafeBufferSizeForSandbox); ++#endif ++ return size; ++} ++ ++void CreateUnsafeArrayBuffer(const FunctionCallbackInfo& args) { ++ Environment* env = Environment::GetCurrent(args); ++ if (args.Length() != 1) { ++ env->ThrowRangeError("Invalid array buffer length"); ++ return; ++ } ++ ++ size_t size = CheckNumberToSize(args[0]); ++ ++ Isolate* isolate = env->isolate(); ++ ++ Local buf; ++ ++ NodeArrayBufferAllocator* allocator = env->isolate_data()->node_allocator(); ++ // 0-length, or zero-fill flag is set, or building snapshot ++ if (size == 0 || per_process::cli_options->zero_fill_all_buffers || ++ allocator == nullptr) { ++ buf = ArrayBuffer::New(isolate, 
size); ++ } else { ++ std::unique_ptr store = ++ ArrayBuffer::NewBackingStoreForNodeLTS(isolate, size); ++ if (!store) { ++ // This slightly differs from the old behavior, ++ // as in v8 that's a RangeError, and this is an Error with code ++ return env->ThrowRangeError("Array buffer allocation failed"); ++ } ++ buf = ArrayBuffer::New(isolate, std::move(store)); ++ } ++ ++ args.GetReturnValue().Set(buf); ++} ++ + void Initialize(Local target, + Local unused, + Local context, +@@ -1406,6 +1424,8 @@ void Initialize(Local target, + + SetMethod(context, target, "detachArrayBuffer", DetachArrayBuffer); + SetMethod(context, target, "copyArrayBuffer", CopyArrayBuffer); ++ SetMethodNoSideEffect( ++ context, target, "createUnsafeArrayBuffer", CreateUnsafeArrayBuffer); + + SetMethod(context, target, "swap16", Swap16); + SetMethod(context, target, "swap32", Swap32); +@@ -1442,8 +1462,6 @@ void Initialize(Local target, + SetMethod(context, target, "hexWrite", StringWrite); + SetMethod(context, target, "ucs2Write", StringWrite); + SetMethod(context, target, "utf8Write", StringWrite); +- +- SetMethod(context, target, "getZeroFillToggle", GetZeroFillToggle); + } + + } // anonymous namespace +@@ -1485,10 +1503,10 @@ void RegisterExternalReferences(ExternalReferenceRegistry* registry) { + registry->Register(StringWrite); + registry->Register(StringWrite); + registry->Register(StringWrite); +- registry->Register(GetZeroFillToggle); + + registry->Register(DetachArrayBuffer); + registry->Register(CopyArrayBuffer); ++ registry->Register(CreateUnsafeArrayBuffer); + + registry->Register(Atob); + registry->Register(Btoa); +-- +2.45.4 + diff --git a/SPECS/nodejs/CVE-2025-55132.patch b/SPECS/nodejs/CVE-2025-55132.patch new file mode 100644 index 00000000000..71322c0fd66 --- /dev/null +++ b/SPECS/nodejs/CVE-2025-55132.patch @@ -0,0 +1,177 @@ +From 19bb6aa25dac2e0bec0024dbf92c49f7d6b889d6 Mon Sep 17 00:00:00 2001 +From: RafaelGSS +Date: Tue, 21 Oct 2025 18:25:31 -0300 +Subject: [PATCH] lib: disable futimes when permission model is enabled + +Refs: https://hackerone.com/reports/3390084 +PR-URL: https://github.com/nodejs-private/node-private/pull/748 +Reviewed-By: Matteo Collina +Reviewed-By: Anna Henningsen +CVE-ID: CVE-2025-55132 +PR-URL: https://github.com/nodejs-private/node-private/pull/802 +Reviewed-By: Rafael Gonzaga +CVE-ID: CVE-2025-55132 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/nodejs/node/commit/14fbbb510c.patch +--- + lib/fs.js | 24 ++++++++++ + test/fixtures/permission/fs-write.js | 45 +++++++++++++++++++ + test/parallel/test-permission-fs-supported.js | 17 ++++++- + 3 files changed, 85 insertions(+), 1 deletion(-) + +diff --git a/lib/fs.js b/lib/fs.js +index 2231ad74..fd339900 100644 +--- a/lib/fs.js ++++ b/lib/fs.js +@@ -1275,6 +1275,11 @@ function rmSync(path, options) { + function fdatasync(fd, callback) { + const req = new FSReqCallback(); + req.oncomplete = makeCallback(callback); ++ ++ if (permission.isEnabled()) { ++ callback(new ERR_ACCESS_DENIED('fdatasync API is disabled when Permission Model is enabled.')); ++ return; ++ } + binding.fdatasync(fd, req); + } + +@@ -1286,6 +1291,9 @@ function fdatasync(fd, callback) { + * @returns {void} + */ + function fdatasyncSync(fd) { ++ if (permission.isEnabled()) { ++ throw new ERR_ACCESS_DENIED('fdatasync API is disabled when Permission Model is enabled.'); ++ } + binding.fdatasync(fd); + } + +@@ -1299,6 +1307,10 @@ function fdatasyncSync(fd) { + function fsync(fd, callback) { + const req = new 
FSReqCallback(); + req.oncomplete = makeCallback(callback); ++ if (permission.isEnabled()) { ++ callback(new ERR_ACCESS_DENIED('fsync API is disabled when Permission Model is enabled.')); ++ return; ++ } + binding.fsync(fd, req); + } + +@@ -1309,6 +1321,9 @@ function fsync(fd, callback) { + * @returns {void} + */ + function fsyncSync(fd) { ++ if (permission.isEnabled()) { ++ throw new ERR_ACCESS_DENIED('fsync API is disabled when Permission Model is enabled.'); ++ } + binding.fsync(fd); + } + +@@ -2143,6 +2158,11 @@ function futimes(fd, atime, mtime, callback) { + mtime = toUnixTimestamp(mtime, 'mtime'); + callback = makeCallback(callback); + ++ if (permission.isEnabled()) { ++ callback(new ERR_ACCESS_DENIED('futimes API is disabled when Permission Model is enabled.')); ++ return; ++ } ++ + const req = new FSReqCallback(); + req.oncomplete = callback; + binding.futimes(fd, atime, mtime, req); +@@ -2158,6 +2178,10 @@ function futimes(fd, atime, mtime, callback) { + * @returns {void} + */ + function futimesSync(fd, atime, mtime) { ++ if (permission.isEnabled()) { ++ throw new ERR_ACCESS_DENIED('futimes API is disabled when Permission Model is enabled.'); ++ } ++ + binding.futimes( + fd, + toUnixTimestamp(atime, 'atime'), +diff --git a/test/fixtures/permission/fs-write.js b/test/fixtures/permission/fs-write.js +index 828f953e..f875c321 100644 +--- a/test/fixtures/permission/fs-write.js ++++ b/test/fixtures/permission/fs-write.js +@@ -464,4 +464,49 @@ const absoluteProtectedFolder = path.resolve(relativeProtectedFolder); + permission: 'FileSystemWrite', + resource: path.toNamespacedPath(blockedFile), + }); ++} ++ ++// fs.utimes with read-only fd ++{ ++ assert.throws(() => { ++ // blocked file is allowed to read ++ const fd = fs.openSync(blockedFile, 'r'); ++ const date = new Date(); ++ date.setFullYear(2100,0,1); ++ ++ fs.futimes(fd, date, date, common.expectsError({ ++ code: 'ERR_ACCESS_DENIED', ++ })); ++ fs.futimesSync(fd, date, date); ++ }, { ++ code: 'ERR_ACCESS_DENIED', ++ }); ++} ++ ++// fs.fdatasync with read-only fd ++{ ++ assert.throws(() => { ++ // blocked file is allowed to read ++ const fd = fs.openSync(blockedFile, 'r'); ++ fs.fdatasync(fd, common.expectsError({ ++ code: 'ERR_ACCESS_DENIED', ++ })); ++ fs.fdatasyncSync(fd); ++ }, { ++ code: 'ERR_ACCESS_DENIED', ++ }); ++} ++ ++// fs.fsync with read-only fd ++{ ++ assert.throws(() => { ++ // blocked file is allowed to read ++ const fd = fs.openSync(blockedFile, 'r'); ++ fs.fsync(fd, common.expectsError({ ++ code: 'ERR_ACCESS_DENIED', ++ })); ++ fs.fsyncSync(fd); ++ }, { ++ code: 'ERR_ACCESS_DENIED', ++ }); + } +\ No newline at end of file +diff --git a/test/parallel/test-permission-fs-supported.js b/test/parallel/test-permission-fs-supported.js +index 10621177..805365f2 100644 +--- a/test/parallel/test-permission-fs-supported.js ++++ b/test/parallel/test-permission-fs-supported.js +@@ -77,7 +77,22 @@ const ignoreList = [ + 'unwatchFile', + ...syncAndAsyncAPI('lstat'), + ...syncAndAsyncAPI('realpath'), +- // fd required methods ++ // File descriptor–based metadata operations ++ // ++ // The kernel does not allow opening a file descriptor for an inode ++ // with write access if the inode itself is read-only. However, it still ++ // permits modifying the inode’s metadata (e.g., permission bits, ownership, ++ // timestamps) because you own the file. These changes can be made either ++ // by referring to the file by name (e.g., chmod) or through any existing ++ // file descriptor that identifies the same inode (e.g., fchmod). 
++ // ++ // If the kernel required write access to change metadata, it would be ++ // impossible to modify the permissions of a file once it was made read-only. ++ // For that reason, syscalls such as fchmod, fchown, and futimes bypass ++ // the file descriptor’s access mode. Even a read-only ('r') descriptor ++ // can still update metadata. To prevent unintended modifications, ++ // these APIs are therefore blocked by default when permission model is ++ // enabled. + ...syncAndAsyncAPI('close'), + ...syncAndAsyncAPI('fchown'), + ...syncAndAsyncAPI('fchmod'), +-- +2.45.4 + diff --git a/SPECS/nodejs/CVE-2025-59465.patch b/SPECS/nodejs/CVE-2025-59465.patch new file mode 100644 index 00000000000..0463a187c35 --- /dev/null +++ b/SPECS/nodejs/CVE-2025-59465.patch @@ -0,0 +1,46 @@ +From a60da6716ed4a82259a04b8834b30d023a588f2d Mon Sep 17 00:00:00 2001 +From: RafaelGSS +Date: Fri, 31 Oct 2025 16:27:48 -0300 +Subject: [PATCH] lib: add TLSSocket default error handler + +This prevents the server from crashing due to an unhandled rejection +when a TLSSocket connection is abruptly destroyed during initialization +and the user has not attached an error handler to the socket. +e.g: + +```js +const server = http2.createSecureServer({ ... }) +server.on('secureConnection', socket => { + socket.on('error', err => { + console.log(err) + }) +}) +``` + +PR-URL: https://github.com/nodejs-private/node-private/pull/797 +Fixes: https://github.com/nodejs/node/issues/44751 +Refs: https://hackerone.com/bugs?subject=nodejs&report_id=3262404 +Reviewed-By: Matteo Collina +Reviewed-By: Anna Henningsen +CVE-ID: CVE-2025-59465 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/nodejs/node/commit/1febc48d5b.patch +--- + lib/_tls_wrap.js | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js +index 1b9b9189..0aeebc0f 100644 +--- a/lib/_tls_wrap.js ++++ b/lib/_tls_wrap.js +@@ -1249,6 +1249,7 @@ function tlsConnectionListener(rawSocket) { + socket[kErrorEmitted] = false; + socket.on('close', onSocketClose); + socket.on('_tlsError', onSocketTLSError); ++ socket.on('error', onSocketTLSError); + } + + // AUTHENTICATION MODES +-- +2.45.4 + diff --git a/SPECS/nodejs/CVE-2025-59466.patch b/SPECS/nodejs/CVE-2025-59466.patch new file mode 100644 index 00000000000..dcfee0c2c97 --- /dev/null +++ b/SPECS/nodejs/CVE-2025-59466.patch @@ -0,0 +1,505 @@ +From fc8ef6351633b1a62ea3272f47c0109e75a05a65 Mon Sep 17 00:00:00 2001 +From: Matteo Collina +Date: Tue, 9 Dec 2025 23:50:18 +0100 +Subject: [PATCH] src: rethrow stack overflow exceptions in async_hooks + +When a stack overflow exception occurs during async_hooks callbacks +(which use TryCatchScope::kFatal), detect the specific "Maximum call +stack size exceeded" RangeError and re-throw it instead of immediately +calling FatalException. This allows user code to catch the exception +with try-catch blocks instead of requiring uncaughtException handlers. + +The implementation adds IsStackOverflowError() helper to detect stack +overflow RangeErrors and re-throws them in TryCatchScope destructor +instead of calling FatalException. + +This fixes the issue where async_hooks would cause stack overflow +exceptions to exit with code 7 (kExceptionInFatalExceptionHandler) +instead of being catchable. 
+ +Fixes: https://github.com/nodejs/node/issues/37989 +Ref: https://hackerone.com/reports/3456295 +PR-URL: https://github.com/nodejs-private/node-private/pull/773 +Refs: https://hackerone.com/reports/3456295 +Reviewed-By: Robert Nagy +Reviewed-By: Paolo Insogna +Reviewed-By: Marco Ippolito +Reviewed-By: Rafael Gonzaga +Reviewed-By: Anna Henningsen +CVE-ID: CVE-2025-59466 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/nodejs/node/commit/d7a5c587c0.patch +--- + src/async_wrap.cc | 9 ++- + src/debug_utils.cc | 3 +- + src/node_errors.cc | 71 ++++++++++++++-- + src/node_errors.h | 2 +- + src/node_report.cc | 3 +- + ...async-hooks-stack-overflow-nested-async.js | 80 +++++++++++++++++++ + ...st-async-hooks-stack-overflow-try-catch.js | 47 +++++++++++ + .../test-async-hooks-stack-overflow.js | 47 +++++++++++ + ...andler-stack-overflow-on-stack-overflow.js | 29 +++++++ + ...caught-exception-handler-stack-overflow.js | 29 +++++++ + 10 files changed, 306 insertions(+), 14 deletions(-) + create mode 100644 test/parallel/test-async-hooks-stack-overflow-nested-async.js + create mode 100644 test/parallel/test-async-hooks-stack-overflow-try-catch.js + create mode 100644 test/parallel/test-async-hooks-stack-overflow.js + create mode 100644 test/parallel/test-uncaught-exception-handler-stack-overflow-on-stack-overflow.js + create mode 100644 test/parallel/test-uncaught-exception-handler-stack-overflow.js + +diff --git a/src/async_wrap.cc b/src/async_wrap.cc +index 8b784cdd..6902aa1d 100644 +--- a/src/async_wrap.cc ++++ b/src/async_wrap.cc +@@ -67,7 +67,8 @@ static const char* const provider_names[] = { + void AsyncWrap::DestroyAsyncIdsCallback(Environment* env) { + Local fn = env->async_hooks_destroy_function(); + +- TryCatchScope try_catch(env, TryCatchScope::CatchMode::kFatal); ++ TryCatchScope try_catch(env, ++ TryCatchScope::CatchMode::kFatalRethrowStackOverflow); + + do { + std::vector destroy_async_id_list; +@@ -96,7 +97,8 @@ void Emit(Environment* env, double async_id, AsyncHooks::Fields type, + + HandleScope handle_scope(env->isolate()); + Local async_id_value = Number::New(env->isolate(), async_id); +- TryCatchScope try_catch(env, TryCatchScope::CatchMode::kFatal); ++ TryCatchScope try_catch(env, ++ TryCatchScope::CatchMode::kFatalRethrowStackOverflow); + USE(fn->Call(env->context(), Undefined(env->isolate()), 1, &async_id_value)); + } + +@@ -660,7 +662,8 @@ void AsyncWrap::EmitAsyncInit(Environment* env, + object, + }; + +- TryCatchScope try_catch(env, TryCatchScope::CatchMode::kFatal); ++ TryCatchScope try_catch(env, ++ TryCatchScope::CatchMode::kFatalRethrowStackOverflow); + USE(init_fn->Call(env->context(), object, arraysize(argv), argv)); + } + +diff --git a/src/debug_utils.cc b/src/debug_utils.cc +index 82e65870..db9ca329 100644 +--- a/src/debug_utils.cc ++++ b/src/debug_utils.cc +@@ -324,7 +324,8 @@ void DumpJavaScriptBacktrace(FILE* fp) { + } + + Local stack; +- if (!GetCurrentStackTrace(isolate).ToLocal(&stack)) { ++ if (!GetCurrentStackTrace(isolate).ToLocal(&stack) || ++ stack->GetFrameCount() == 0) { + return; + } + +diff --git a/src/node_errors.cc b/src/node_errors.cc +index ff091fd2..e803c4ac 100644 +--- a/src/node_errors.cc ++++ b/src/node_errors.cc +@@ -188,7 +188,7 @@ static std::string GetErrorSource(Isolate* isolate, + } + + static std::atomic is_in_oom{false}; +-static std::atomic is_retrieving_js_stacktrace{false}; ++static thread_local std::atomic is_retrieving_js_stacktrace{false}; + MaybeLocal GetCurrentStackTrace(Isolate* 
isolate, int frame_count) { + if (isolate == nullptr) { + return MaybeLocal(); +@@ -216,9 +216,6 @@ MaybeLocal GetCurrentStackTrace(Isolate* isolate, int frame_count) { + StackTrace::CurrentStackTrace(isolate, frame_count, options); + + is_retrieving_js_stacktrace.store(false); +- if (stack->GetFrameCount() == 0) { +- return MaybeLocal(); +- } + + return scope.Escape(stack); + } +@@ -293,7 +290,8 @@ void PrintStackTrace(Isolate* isolate, + + void PrintCurrentStackTrace(Isolate* isolate, StackTracePrefix prefix) { + Local stack; +- if (GetCurrentStackTrace(isolate).ToLocal(&stack)) { ++ if (GetCurrentStackTrace(isolate).ToLocal(&stack) && ++ stack->GetFrameCount() > 0) { + PrintStackTrace(isolate, stack, prefix); + } + } +@@ -661,13 +659,52 @@ v8::ModifyCodeGenerationFromStringsResult ModifyCodeGenerationFromStrings( + }; + } + ++// Check if an exception is a stack overflow error (RangeError with ++// "Maximum call stack size exceeded" message). This is used to handle ++// stack overflow specially in TryCatchScope - instead of immediately ++// exiting, we can use the red zone to re-throw to user code. ++static bool IsStackOverflowError(Isolate* isolate, Local exception) { ++ if (!exception->IsNativeError()) return false; ++ ++ Local err_obj = exception.As(); ++ Local constructor_name = err_obj->GetConstructorName(); ++ ++ // Must be a RangeError ++ Utf8Value name(isolate, constructor_name); ++ if (name.ToStringView() != "RangeError") return false; ++ ++ // Check for the specific stack overflow message ++ Local context = isolate->GetCurrentContext(); ++ Local message_val; ++ if (!err_obj->Get(context, String::NewFromUtf8Literal(isolate, "message")) ++ .ToLocal(&message_val)) { ++ return false; ++ } ++ ++ if (!message_val->IsString()) return false; ++ ++ Utf8Value message(isolate, message_val.As()); ++ return message.ToStringView() == "Maximum call stack size exceeded"; ++} ++ + namespace errors { + + TryCatchScope::~TryCatchScope() { +- if (HasCaught() && !HasTerminated() && mode_ == CatchMode::kFatal) { ++ if (HasCaught() && !HasTerminated() && mode_ != CatchMode::kNormal) { + HandleScope scope(env_->isolate()); + Local exception = Exception(); + Local message = Message(); ++ ++ // Special handling for stack overflow errors in async_hooks: instead of ++ // immediately exiting, re-throw the exception. This allows the exception ++ // to propagate to user code's try-catch blocks. ++ if (mode_ == CatchMode::kFatalRethrowStackOverflow && ++ IsStackOverflowError(env_->isolate(), exception)) { ++ ReThrow(); ++ Reset(); ++ return; ++ } ++ + EnhanceFatalException enhance = CanContinue() ? + EnhanceFatalException::kEnhance : EnhanceFatalException::kDontEnhance; + if (message.IsEmpty()) +@@ -1218,8 +1255,26 @@ void TriggerUncaughtException(Isolate* isolate, + if (env->can_call_into_js()) { + // We do not expect the global uncaught exception itself to throw any more + // exceptions. If it does, exit the current Node.js instance. +- errors::TryCatchScope try_catch(env, +- errors::TryCatchScope::CatchMode::kFatal); ++ // Special case: if the original error was a stack overflow and calling ++ // _fatalException causes another stack overflow, rethrow it to allow ++ // user code's try-catch blocks to potentially catch it. ++ auto is_stack_overflow = [&] { ++ return IsStackOverflowError(env->isolate(), error); ++ }; ++ // Without a JS stack, rethrowing may or may not do anything. 
++ // TODO(addaleax): In V8, expose a way to check whether there is a JS stack ++ // or TryCatch that would capture the rethrown exception. ++ auto has_js_stack = [&] { ++ HandleScope handle_scope(env->isolate()); ++ Local stack; ++ return GetCurrentStackTrace(env->isolate(), 1).ToLocal(&stack) && ++ stack->GetFrameCount() > 0; ++ }; ++ errors::TryCatchScope::CatchMode mode = ++ is_stack_overflow() && has_js_stack() ++ ? errors::TryCatchScope::CatchMode::kFatalRethrowStackOverflow ++ : errors::TryCatchScope::CatchMode::kFatal; ++ errors::TryCatchScope try_catch(env, mode); + // Explicitly disable verbose exception reporting - + // if process._fatalException() throws an error, we don't want it to + // trigger the per-isolate message listener which will call this +diff --git a/src/node_errors.h b/src/node_errors.h +index 30f66a76..c77590ac 100644 +--- a/src/node_errors.h ++++ b/src/node_errors.h +@@ -257,7 +257,7 @@ namespace errors { + + class TryCatchScope : public v8::TryCatch { + public: +- enum class CatchMode { kNormal, kFatal }; ++ enum class CatchMode { kNormal, kFatal, kFatalRethrowStackOverflow }; + + explicit TryCatchScope(Environment* env, CatchMode mode = CatchMode::kNormal) + : v8::TryCatch(env->isolate()), env_(env), mode_(mode) {} +diff --git a/src/node_report.cc b/src/node_report.cc +index 46881e32..d3a6599e 100644 +--- a/src/node_report.cc ++++ b/src/node_report.cc +@@ -463,7 +463,8 @@ static void PrintJavaScriptStack(JSONWriter* writer, + const char* trigger) { + HandleScope scope(isolate); + Local stack; +- if (!GetCurrentStackTrace(isolate, MAX_FRAME_COUNT).ToLocal(&stack)) { ++ if (!GetCurrentStackTrace(isolate, MAX_FRAME_COUNT).ToLocal(&stack) || ++ stack->GetFrameCount() == 0) { + PrintEmptyJavaScriptStack(writer); + return; + } +diff --git a/test/parallel/test-async-hooks-stack-overflow-nested-async.js b/test/parallel/test-async-hooks-stack-overflow-nested-async.js +new file mode 100644 +index 00000000..779f8d75 +--- /dev/null ++++ b/test/parallel/test-async-hooks-stack-overflow-nested-async.js +@@ -0,0 +1,80 @@ ++'use strict'; ++ ++// This test verifies that stack overflow during deeply nested async operations ++// with async_hooks enabled can be caught by try-catch. This simulates real-world ++// scenarios like processing deeply nested JSON structures where each level ++// creates async operations (e.g., database calls, API requests). 
++ ++require('../common'); ++const assert = require('assert'); ++const { spawnSync } = require('child_process'); ++ ++if (process.argv[2] === 'child') { ++ const { createHook } = require('async_hooks'); ++ ++ // Enable async_hooks with all callbacks (simulates APM tools) ++ createHook({ ++ init() {}, ++ before() {}, ++ after() {}, ++ destroy() {}, ++ promiseResolve() {}, ++ }).enable(); ++ ++ // Simulate an async operation (like a database call or API request) ++ async function fetchThing(id) { ++ return { id, data: `data-${id}` }; ++ } ++ ++ // Recursively process deeply nested data structure ++ // This will cause stack overflow when the nesting is deep enough ++ function processData(data, depth = 0) { ++ if (Array.isArray(data)) { ++ for (const item of data) { ++ // Create a promise to trigger async_hooks init callback ++ fetchThing(depth); ++ processData(item, depth + 1); ++ } ++ } ++ } ++ ++ // Create deeply nested array structure iteratively (to avoid stack overflow ++ // during creation) ++ function createNestedArray(depth) { ++ let result = 'leaf'; ++ for (let i = 0; i < depth; i++) { ++ result = [result]; ++ } ++ return result; ++ } ++ ++ // Create a very deep nesting that will cause stack overflow during processing ++ const deeplyNested = createNestedArray(50000); ++ ++ try { ++ processData(deeplyNested); ++ // Should not complete successfully - the nesting is too deep ++ console.log('UNEXPECTED: Processing completed without error'); ++ process.exit(1); ++ } catch (err) { ++ assert.strictEqual(err.name, 'RangeError'); ++ assert.match(err.message, /Maximum call stack size exceeded/); ++ console.log('SUCCESS: try-catch caught the stack overflow in nested async'); ++ process.exit(0); ++ } ++} else { ++ // Parent process - spawn the child and check exit code ++ const result = spawnSync( ++ process.execPath, ++ [__filename, 'child'], ++ { encoding: 'utf8', timeout: 30000 } ++ ); ++ ++ // Should exit successfully (try-catch worked) ++ assert.strictEqual(result.status, 0, ++ `Expected exit code 0, got ${result.status}.\n` + ++ `stdout: ${result.stdout}\n` + ++ `stderr: ${result.stderr}`); ++ // Verify the error was handled by try-catch ++ assert.match(result.stdout, /SUCCESS: try-catch caught the stack overflow/); ++} +diff --git a/test/parallel/test-async-hooks-stack-overflow-try-catch.js b/test/parallel/test-async-hooks-stack-overflow-try-catch.js +new file mode 100644 +index 00000000..43338905 +--- /dev/null ++++ b/test/parallel/test-async-hooks-stack-overflow-try-catch.js +@@ -0,0 +1,47 @@ ++'use strict'; ++ ++// This test verifies that when a stack overflow occurs with async_hooks ++// enabled, the exception can be caught by try-catch blocks in user code. 
++ ++require('../common'); ++const assert = require('assert'); ++const { spawnSync } = require('child_process'); ++ ++if (process.argv[2] === 'child') { ++ const { createHook } = require('async_hooks'); ++ ++ createHook({ init() {} }).enable(); ++ ++ function recursive(depth = 0) { ++ // Create a promise to trigger async_hooks init callback ++ new Promise(() => {}); ++ return recursive(depth + 1); ++ } ++ ++ try { ++ recursive(); ++ // Should not reach here ++ process.exit(1); ++ } catch (err) { ++ assert.strictEqual(err.name, 'RangeError'); ++ assert.match(err.message, /Maximum call stack size exceeded/); ++ console.log('SUCCESS: try-catch caught the stack overflow'); ++ process.exit(0); ++ } ++ ++ // Should not reach here ++ process.exit(2); ++} else { ++ // Parent process - spawn the child and check exit code ++ const result = spawnSync( ++ process.execPath, ++ [__filename, 'child'], ++ { encoding: 'utf8', timeout: 30000 } ++ ); ++ ++ assert.strictEqual(result.status, 0, ++ `Expected exit code 0 (try-catch worked), got ${result.status}.\n` + ++ `stdout: ${result.stdout}\n` + ++ `stderr: ${result.stderr}`); ++ assert.match(result.stdout, /SUCCESS: try-catch caught the stack overflow/); ++} +diff --git a/test/parallel/test-async-hooks-stack-overflow.js b/test/parallel/test-async-hooks-stack-overflow.js +new file mode 100644 +index 00000000..aff41969 +--- /dev/null ++++ b/test/parallel/test-async-hooks-stack-overflow.js +@@ -0,0 +1,47 @@ ++'use strict'; ++ ++// This test verifies that when a stack overflow occurs with async_hooks ++// enabled, the uncaughtException handler is still called instead of the ++// process crashing with exit code 7. ++ ++const common = require('../common'); ++const assert = require('assert'); ++const { spawnSync } = require('child_process'); ++ ++if (process.argv[2] === 'child') { ++ const { createHook } = require('async_hooks'); ++ ++ let handlerCalled = false; ++ ++ function recursive() { ++ // Create a promise to trigger async_hooks init callback ++ new Promise(() => {}); ++ return recursive(); ++ } ++ ++ createHook({ init() {} }).enable(); ++ ++ process.on('uncaughtException', common.mustCall((err) => { ++ assert.strictEqual(err.name, 'RangeError'); ++ assert.match(err.message, /Maximum call stack size exceeded/); ++ // Ensure handler is only called once ++ assert.strictEqual(handlerCalled, false); ++ handlerCalled = true; ++ })); ++ ++ setImmediate(recursive); ++} else { ++ // Parent process - spawn the child and check exit code ++ const result = spawnSync( ++ process.execPath, ++ [__filename, 'child'], ++ { encoding: 'utf8', timeout: 30000 } ++ ); ++ ++ // Should exit with code 0 (handler was called and handled the exception) ++ // Previously would exit with code 7 (kExceptionInFatalExceptionHandler) ++ assert.strictEqual(result.status, 0, ++ `Expected exit code 0, got ${result.status}.\n` + ++ `stdout: ${result.stdout}\n` + ++ `stderr: ${result.stderr}`); ++} +diff --git a/test/parallel/test-uncaught-exception-handler-stack-overflow-on-stack-overflow.js b/test/parallel/test-uncaught-exception-handler-stack-overflow-on-stack-overflow.js +new file mode 100644 +index 00000000..1923b7f2 +--- /dev/null ++++ b/test/parallel/test-uncaught-exception-handler-stack-overflow-on-stack-overflow.js +@@ -0,0 +1,29 @@ ++'use strict'; ++ ++// This test verifies that when the uncaughtException handler itself causes ++// a stack overflow, the process exits with a non-zero exit code. ++// This is important to ensure we don't silently swallow errors. 
++ ++require('../common'); ++const assert = require('assert'); ++const { spawnSync } = require('child_process'); ++ ++if (process.argv[2] === 'child') { ++ function f() { f(); } ++ process.on('uncaughtException', f); ++ f(); ++} else { ++ // Parent process - spawn the child and check exit code ++ const result = spawnSync( ++ process.execPath, ++ [__filename, 'child'], ++ { encoding: 'utf8', timeout: 30000 } ++ ); ++ ++ // Should exit with non-zero exit code since the uncaughtException handler ++ // itself caused a stack overflow. ++ assert.notStrictEqual(result.status, 0, ++ `Expected non-zero exit code, got ${result.status}.\n` + ++ `stdout: ${result.stdout}\n` + ++ `stderr: ${result.stderr}`); ++} +diff --git a/test/parallel/test-uncaught-exception-handler-stack-overflow.js b/test/parallel/test-uncaught-exception-handler-stack-overflow.js +new file mode 100644 +index 00000000..050cd092 +--- /dev/null ++++ b/test/parallel/test-uncaught-exception-handler-stack-overflow.js +@@ -0,0 +1,29 @@ ++'use strict'; ++ ++// This test verifies that when the uncaughtException handler itself causes ++// a stack overflow, the process exits with a non-zero exit code. ++// This is important to ensure we don't silently swallow errors. ++ ++require('../common'); ++const assert = require('assert'); ++const { spawnSync } = require('child_process'); ++ ++if (process.argv[2] === 'child') { ++ function f() { f(); } ++ process.on('uncaughtException', f); ++ throw new Error('X'); ++} else { ++ // Parent process - spawn the child and check exit code ++ const result = spawnSync( ++ process.execPath, ++ [__filename, 'child'], ++ { encoding: 'utf8', timeout: 30000 } ++ ); ++ ++ // Should exit with non-zero exit code since the uncaughtException handler ++ // itself caused a stack overflow. ++ assert.notStrictEqual(result.status, 0, ++ `Expected non-zero exit code, got ${result.status}.\n` + ++ `stdout: ${result.stdout}\n` + ++ `stderr: ${result.stderr}`); ++} +-- +2.45.4 + diff --git a/SPECS/nodejs/CVE-2026-21637.patch b/SPECS/nodejs/CVE-2026-21637.patch new file mode 100644 index 00000000000..f5082d64696 --- /dev/null +++ b/SPECS/nodejs/CVE-2026-21637.patch @@ -0,0 +1,219 @@ +From fcdf9bcb96bb7d2e1467397503e4f3f98f999388 Mon Sep 17 00:00:00 2001 +From: Matteo Collina +Date: Mon, 22 Dec 2025 18:25:33 +0100 +Subject: [PATCH] tls: route callback exceptions through error handlers + +Wrap pskCallback and ALPNCallback invocations in try-catch blocks +to route exceptions through owner.destroy() instead of letting them +become uncaught exceptions. This prevents remote attackers from +crashing TLS servers or causing resource exhaustion. 
+ +Fixes: https://hackerone.com/reports/3473882 +PR-URL: https://github.com/nodejs-private/node-private/pull/782 +PR-URL: https://github.com/nodejs-private/node-private/pull/796 +Reviewed-By: Matteo Collina +CVE-ID: CVE-2026-21637 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/nodejs/node/commit/85f73e7057.patch +--- + lib/_tls_wrap.js | 157 ++++++++++++++++++++++++++--------------------- + 1 file changed, 87 insertions(+), 70 deletions(-) + +diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js +index 0aeebc0f..324559df 100644 +--- a/lib/_tls_wrap.js ++++ b/lib/_tls_wrap.js +@@ -241,39 +241,44 @@ function callALPNCallback(protocolsBuffer) { + const handle = this; + const socket = handle[owner_symbol]; + +- const servername = handle.getServername(); ++ try { ++ const servername = handle.getServername(); + +- // Collect all the protocols from the given buffer: +- const protocols = []; +- let offset = 0; +- while (offset < protocolsBuffer.length) { +- const protocolLen = protocolsBuffer[offset]; +- offset += 1; ++ // Collect all the protocols from the given buffer: ++ const protocols = []; ++ let offset = 0; ++ while (offset < protocolsBuffer.length) { ++ const protocolLen = protocolsBuffer[offset]; ++ offset += 1; + +- const protocol = protocolsBuffer.slice(offset, offset + protocolLen); +- offset += protocolLen; ++ const protocol = protocolsBuffer.slice(offset, offset + protocolLen); ++ offset += protocolLen; + +- protocols.push(protocol.toString('ascii')); +- } ++ protocols.push(protocol.toString('ascii')); ++ } + +- const selectedProtocol = socket[kALPNCallback]({ +- servername, +- protocols, +- }); ++ const selectedProtocol = socket[kALPNCallback]({ ++ servername, ++ protocols, ++ }); + +- // Undefined -> all proposed protocols rejected +- if (selectedProtocol === undefined) return undefined; ++ // Undefined -> all proposed protocols rejected ++ if (selectedProtocol === undefined) return undefined; + +- const protocolIndex = protocols.indexOf(selectedProtocol); +- if (protocolIndex === -1) { +- throw new ERR_TLS_ALPN_CALLBACK_INVALID_RESULT(selectedProtocol, protocols); +- } +- let protocolOffset = 0; +- for (let i = 0; i < protocolIndex; i++) { +- protocolOffset += 1 + protocols[i].length; +- } ++ const protocolIndex = protocols.indexOf(selectedProtocol); ++ if (protocolIndex === -1) { ++ throw new ERR_TLS_ALPN_CALLBACK_INVALID_RESULT(selectedProtocol, protocols); ++ } ++ let protocolOffset = 0; ++ for (let i = 0; i < protocolIndex; i++) { ++ protocolOffset += 1 + protocols[i].length; ++ } + +- return protocolOffset; ++ return protocolOffset; ++ } catch (err) { ++ socket.destroy(err); ++ return undefined; ++ } + } + + function requestOCSP(socket, info) { +@@ -380,63 +385,75 @@ function onnewsession(sessionId, session) { + + function onPskServerCallback(identity, maxPskLen) { + const owner = this[owner_symbol]; +- const ret = owner[kPskCallback](owner, identity); +- if (ret == null) +- return undefined; + +- let psk; +- if (isArrayBufferView(ret)) { +- psk = ret; +- } else { +- if (typeof ret !== 'object') { +- throw new ERR_INVALID_ARG_TYPE( +- 'ret', +- ['Object', 'Buffer', 'TypedArray', 'DataView'], +- ret, ++ try { ++ const ret = owner[kPskCallback](owner, identity); ++ if (ret == null) ++ return undefined; ++ ++ let psk; ++ if (isArrayBufferView(ret)) { ++ psk = ret; ++ } else { ++ if (typeof ret !== 'object') { ++ throw new ERR_INVALID_ARG_TYPE( ++ 'ret', ++ ['Object', 'Buffer', 'TypedArray', 'DataView'], ++ ret, ++ ); ++ } ++ psk = 
ret.psk; ++ validateBuffer(psk, 'psk'); ++ } ++ ++ if (psk.length > maxPskLen) { ++ throw new ERR_INVALID_ARG_VALUE( ++ 'psk', ++ psk, ++ `Pre-shared key exceeds ${maxPskLen} bytes`, + ); + } +- psk = ret.psk; +- validateBuffer(psk, 'psk'); +- } + +- if (psk.length > maxPskLen) { +- throw new ERR_INVALID_ARG_VALUE( +- 'psk', +- psk, +- `Pre-shared key exceeds ${maxPskLen} bytes`, +- ); ++ return psk; ++ } catch (err) { ++ owner.destroy(err); ++ return undefined; + } +- +- return psk; + } + + function onPskClientCallback(hint, maxPskLen, maxIdentityLen) { + const owner = this[owner_symbol]; +- const ret = owner[kPskCallback](hint); +- if (ret == null) +- return undefined; + +- validateObject(ret, 'ret'); ++ try { ++ const ret = owner[kPskCallback](hint); ++ if (ret == null) ++ return undefined; ++ ++ validateObject(ret, 'ret'); ++ ++ validateBuffer(ret.psk, 'psk'); ++ if (ret.psk.length > maxPskLen) { ++ throw new ERR_INVALID_ARG_VALUE( ++ 'psk', ++ ret.psk, ++ `Pre-shared key exceeds ${maxPskLen} bytes`, ++ ); ++ } + +- validateBuffer(ret.psk, 'psk'); +- if (ret.psk.length > maxPskLen) { +- throw new ERR_INVALID_ARG_VALUE( +- 'psk', +- ret.psk, +- `Pre-shared key exceeds ${maxPskLen} bytes`, +- ); +- } ++ validateString(ret.identity, 'identity'); ++ if (Buffer.byteLength(ret.identity) > maxIdentityLen) { ++ throw new ERR_INVALID_ARG_VALUE( ++ 'identity', ++ ret.identity, ++ `PSK identity exceeds ${maxIdentityLen} bytes`, ++ ); ++ } + +- validateString(ret.identity, 'identity'); +- if (Buffer.byteLength(ret.identity) > maxIdentityLen) { +- throw new ERR_INVALID_ARG_VALUE( +- 'identity', +- ret.identity, +- `PSK identity exceeds ${maxIdentityLen} bytes`, +- ); ++ return { psk: ret.psk, identity: ret.identity }; ++ } catch (err) { ++ owner.destroy(err); ++ return undefined; + } +- +- return { psk: ret.psk, identity: ret.identity }; + } + + function onkeylog(line) { +-- +2.45.4 + diff --git a/SPECS/nodejs/nodejs.spec b/SPECS/nodejs/nodejs.spec index 6b80ae56ff7..8cc7eba332f 100644 --- a/SPECS/nodejs/nodejs.spec +++ b/SPECS/nodejs/nodejs.spec @@ -5,7 +5,7 @@ Name: nodejs # WARNINGS: MUST check and update the 'npm_version' macro for every version update of this package. # The version of NPM can be found inside the sources under 'deps/npm/package.json'. Version: 20.14.0 -Release: 10%{?dist} +Release: 13%{?dist} License: BSD AND MIT AND Public Domain AND NAIST-2003 AND Artistic-2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -30,6 +30,12 @@ Patch11: CVE-2025-47279.patch Patch12: CVE-2025-23165.patch Patch13: CVE-2025-23166.patch Patch14: CVE-2025-5222.patch +Patch15: CVE-2025-55131.patch +Patch16: CVE-2025-55132.patch +Patch17: CVE-2025-59465.patch +Patch18: CVE-2025-59466.patch +Patch19: CVE-2026-21637.patch +Patch20: CVE-2025-55130.patch BuildRequires: brotli-devel BuildRequires: c-ares-devel BuildRequires: coreutils >= 8.22 @@ -44,6 +50,7 @@ Requires: brotli Requires: c-ares Requires: coreutils >= 8.22 Requires: openssl >= 1.1.1 +Provides: nodejs %description Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. 
@@ -141,6 +148,15 @@ make cctest %{_prefix}/lib/node_modules/* %changelog +* Mon Feb 02 2026 Azure Linux Security Servicing Account - 20.14.0-13 +- Patch for CVE-2025-55130 + +* Thu Jan 29 2026 Sandeep Karambelkar - 20.14.0-12 +- Add nodejs provides to manage co existence with nodejs24 + +* Wed Jan 28 2026 Azure Linux Security Servicing Account - 20.14.0-11 +- Patch for CVE-2026-21637, CVE-2025-59466, CVE-2025-59465, CVE-2025-55132, CVE-2025-55131 + * Fri Nov 07 2025 Azure Linux Security Servicing Account - 20.14.0-10 - Patch for CVE-2025-5222 diff --git a/SPECS/nodejs24/CVE-2019-10906.patch b/SPECS/nodejs24/CVE-2019-10906.patch new file mode 100644 index 00000000000..e4aca456fc5 --- /dev/null +++ b/SPECS/nodejs24/CVE-2019-10906.patch @@ -0,0 +1,197 @@ +From ce71e5f5911b12ebc36711a7d86dab0a11bd1c4d Mon Sep 17 00:00:00 2001 +From: Suresh Thelkar +Date: Fri, 20 Sep 2024 09:55:21 +0530 +Subject: [PATCH] Changed needed to upgrade jinja2 to 2.10.1 + +--- + .../jinja2/Jinja2-2.10.1.tar.gz.md5 | 1 + + .../jinja2/Jinja2-2.10.1.tar.gz.sha512 | 1 + + .../jinja2/Jinja2-2.10.tar.gz.md5 | 1 - + .../jinja2/Jinja2-2.10.tar.gz.sha512 | 1 - + tools/inspector_protocol/jinja2/LICENSE | 62 +++++++++---------- + tools/inspector_protocol/jinja2/__init__.py | 2 +- + tools/inspector_protocol/jinja2/get_jinja2.sh | 4 +- + tools/inspector_protocol/jinja2/sandbox.py | 17 ++++- + 8 files changed, 50 insertions(+), 39 deletions(-) + create mode 100644 tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.md5 + create mode 100644 tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.sha512 + delete mode 100644 tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.md5 + delete mode 100644 tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.sha512 + +diff --git a/tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.md5 b/tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.md5 +new file mode 100644 +index 00000000..254f4371 +--- /dev/null ++++ b/tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.md5 +@@ -0,0 +1 @@ ++0ae535be40fd215a8114a090c8b68e5a Jinja2-2.10.1.tar.gz +\ No newline at end of file +diff --git a/tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.sha512 b/tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.sha512 +new file mode 100644 +index 00000000..7c379ff1 +--- /dev/null ++++ b/tools/inspector_protocol/jinja2/Jinja2-2.10.1.tar.gz.sha512 +@@ -0,0 +1 @@ ++a00153a0e07bb7d67f301b4eaf7af657726a1985e9ffc7ae2d76bdbb4c062d672efc8065e398767e1039b18a483a0092e206deac91e4047aad64920b56869623 Jinja2-2.10.1.tar.gz +\ No newline at end of file +diff --git a/tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.md5 b/tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.md5 +deleted file mode 100644 +index 9137ee12..00000000 +--- a/tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.md5 ++++ /dev/null +@@ -1 +0,0 @@ +-61ef1117f945486472850819b8d1eb3d Jinja2-2.10.tar.gz +diff --git a/tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.sha512 b/tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.sha512 +deleted file mode 100644 +index 087d24c1..00000000 +--- a/tools/inspector_protocol/jinja2/Jinja2-2.10.tar.gz.sha512 ++++ /dev/null +@@ -1 +0,0 @@ +-0ea7371be67ffcf19e46dfd06523a45a0806e678a407d54f5f2f3e573982f0959cf82ec5d07b203670309928a62ef71109701ab16547a9bba2ebcdc178cb67f2 Jinja2-2.10.tar.gz +diff --git a/tools/inspector_protocol/jinja2/LICENSE b/tools/inspector_protocol/jinja2/LICENSE +index 31bf900e..10145a26 100644 +--- a/tools/inspector_protocol/jinja2/LICENSE ++++ b/tools/inspector_protocol/jinja2/LICENSE +@@ -1,31 +1,31 
@@ +-Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. +- +-Some rights reserved. +- +-Redistribution and use in source and binary forms, with or without +-modification, are permitted provided that the following conditions are +-met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- +- * Redistributions in binary form must reproduce the above +- copyright notice, this list of conditions and the following +- disclaimer in the documentation and/or other materials provided +- with the distribution. +- +- * The names of the contributors may not be used to endorse or +- promote products derived from this software without specific +- prior written permission. +- +-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. ++ ++Some rights reserved. ++ ++Redistribution and use in source and binary forms, with or without ++modification, are permitted provided that the following conditions are ++met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ ++ * Redistributions in binary form must reproduce the above ++ copyright notice, this list of conditions and the following ++ disclaimer in the documentation and/or other materials provided ++ with the distribution. ++ ++ * The names of the contributors may not be used to endorse or ++ promote products derived from this software without specific ++ prior written permission. ++ ++THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +diff --git a/tools/inspector_protocol/jinja2/__init__.py b/tools/inspector_protocol/jinja2/__init__.py +index 42aa763d..15e13b6f 100644 +--- a/tools/inspector_protocol/jinja2/__init__.py ++++ b/tools/inspector_protocol/jinja2/__init__.py +@@ -27,7 +27,7 @@ + :license: BSD, see LICENSE for more details. 
+ """ + __docformat__ = 'restructuredtext en' +-__version__ = '2.10' ++__version__ = '2.10.1' + + # high level interface + from jinja2.environment import Environment, Template +diff --git a/tools/inspector_protocol/jinja2/get_jinja2.sh b/tools/inspector_protocol/jinja2/get_jinja2.sh +index bc6c4c30..b0fa6e8e 100755 +--- a/tools/inspector_protocol/jinja2/get_jinja2.sh ++++ b/tools/inspector_protocol/jinja2/get_jinja2.sh +@@ -7,8 +7,8 @@ + # Download page: + # https://pypi.python.org/pypi/Jinja2 + PACKAGE='Jinja2' +-VERSION='2.10' +-SRC_URL='https://pypi.python.org/packages/56/e6/332789f295cf22308386cf5bbd1f4e00ed11484299c5d7383378cf48ba47/Jinja2-2.10.tar.gz' ++VERSION='2.10.1' ++SRC_URL='https://files.pythonhosted.org/packages/93/ea/d884a06f8c7f9b7afbc8138b762e80479fb17aedbbe2b06515a12de9378d/Jinja2-2.10.1.tar.gz' + PACKAGE_DIR='jinja2' + + CHROMIUM_FILES="README.chromium OWNERS get_jinja2.sh" +diff --git a/tools/inspector_protocol/jinja2/sandbox.py b/tools/inspector_protocol/jinja2/sandbox.py +index 93fb9d45..752e8128 100644 +--- a/tools/inspector_protocol/jinja2/sandbox.py ++++ b/tools/inspector_protocol/jinja2/sandbox.py +@@ -137,7 +137,7 @@ class _MagicFormatMapping(Mapping): + def inspect_format_method(callable): + if not isinstance(callable, (types.MethodType, + types.BuiltinMethodType)) or \ +- callable.__name__ != 'format': ++ callable.__name__ not in ('format', 'format_map'): + return None + obj = callable.__self__ + if isinstance(obj, string_types): +@@ -402,7 +402,7 @@ class SandboxedEnvironment(Environment): + obj.__class__.__name__ + ), name=attribute, obj=obj, exc=SecurityError) + +- def format_string(self, s, args, kwargs): ++ def format_string(self, s, args, kwargs, format_func=None): + """If a format call is detected, then this is routed through this + method so that our safety sandbox can be used for it. + """ +@@ -410,6 +410,17 @@ class SandboxedEnvironment(Environment): + formatter = SandboxedEscapeFormatter(self, s.escape) + else: + formatter = SandboxedFormatter(self) ++ ++ if format_func is not None and format_func.__name__ == 'format_map': ++ if len(args) != 1 or kwargs: ++ raise TypeError( ++ 'format_map() takes exactly one argument %d given' ++ % (len(args) + (kwargs is not None)) ++ ) ++ ++ kwargs = args[0] ++ args = None ++ + kwargs = _MagicFormatMapping(args, kwargs) + rv = formatter.vformat(s, args, kwargs) + return type(s)(rv) +@@ -418,7 +429,7 @@ class SandboxedEnvironment(Environment): + """Call an object from sandboxed code.""" + fmt = inspect_format_method(__obj) + if fmt is not None: +- return __self.format_string(fmt, args, kwargs) ++ return __self.format_string(fmt, args, kwargs, __obj) + + # the double prefixes are to avoid double keyword argument + # errors when proxying the call. 
+-- +2.34.1 + diff --git a/SPECS/nodejs24/CVE-2020-28493.patch b/SPECS/nodejs24/CVE-2020-28493.patch new file mode 100644 index 00000000000..c7ea9c4129b --- /dev/null +++ b/SPECS/nodejs24/CVE-2020-28493.patch @@ -0,0 +1,134 @@ +From 1416131a2c937e08dd313f622f6c8b928c64e477 Mon Sep 17 00:00:00 2001 +From: Kevin Lockwood +Date: Wed, 5 Feb 2025 16:33:58 -0800 +Subject: [PATCH] [Medium] Patch nodejs to fix CVE-2020-28493 + +Link: https://github.com/pallets/jinja/pull/1343/commits/ef658dc3b6389b091d608e710a810ce8b87995b3.patch +--- + tools/inspector_protocol/jinja2/utils.py | 93 ++++++++++++++---------- + 1 file changed, 56 insertions(+), 37 deletions(-) + +diff --git a/tools/inspector_protocol/jinja2/utils.py b/tools/inspector_protocol/jinja2/utils.py +index 502a311c..00664b56 100644 +--- a/tools/inspector_protocol/jinja2/utils.py ++++ b/tools/inspector_protocol/jinja2/utils.py +@@ -12,24 +12,13 @@ import re + import json + import errno + from collections import deque ++from string import ascii_letters as _letters ++from string import digits as _digits + from threading import Lock + from jinja2._compat import text_type, string_types, implements_iterator, \ + url_quote + + +-_word_split_re = re.compile(r'(\s+)') +-_punctuation_re = re.compile( +- '^(?P(?:%s)*)(?P.*?)(?P(?:%s)*)$' % ( +- '|'.join(map(re.escape, ('(', '<', '<'))), +- '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '>'))) +- ) +-) +-_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$') +-_striptags_re = re.compile(r'(|<[^>]*>)') +-_entity_re = re.compile(r'&([^;]+);') +-_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' +-_digits = '0123456789' +- + # special singleton representing missing values for the runtime + missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})() + +@@ -203,35 +192,65 @@ def urlize(text, trim_url_limit=None, rel=None, target=None): + trim_url = lambda x, limit=trim_url_limit: limit is not None \ + and (x[:limit] + (len(x) >=limit and '...' + or '')) or x +- words = _word_split_re.split(text_type(escape(text))) ++ words = re.split(r"(\s+)", text_type(escape(text))) + rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or '' + target_attr = target and ' target="%s"' % escape(target) or '' + + for i, word in enumerate(words): +- match = _punctuation_re.match(word) ++ head, middle, tail = "", word, "" ++ match = re.match(r"^([(<]|<)+", middle) ++ + if match: +- lead, middle, trail = match.groups() +- if middle.startswith('www.') or ( +- '@' not in middle and +- not middle.startswith('http://') and +- not middle.startswith('https://') and +- len(middle) > 0 and +- middle[0] in _letters + _digits and ( +- middle.endswith('.org') or +- middle.endswith('.net') or +- middle.endswith('.com') +- )): +- middle = '%s' % (middle, +- rel_attr, target_attr, trim_url(middle)) +- if middle.startswith('http://') or \ +- middle.startswith('https://'): +- middle = '%s' % (middle, +- rel_attr, target_attr, trim_url(middle)) +- if '@' in middle and not middle.startswith('www.') and \ +- not ':' in middle and _simple_email_re.match(middle): +- middle = '%s' % (middle, middle) +- if lead + middle + trail != word: +- words[i] = lead + middle + trail ++ head = match.group() ++ middle = middle[match.end() :] ++ ++ # Unlike lead, which is anchored to the start of the string, ++ # need to check that the string ends with any of the characters ++ # before trying to match all of them, to avoid backtracking. 
++ if middle.endswith((")", ">", ".", ",", "\n", ">")): ++ match = re.search(r"([)>.,\n]|>)+$", middle) ++ ++ if match: ++ tail = match.group() ++ middle = middle[: match.start()] ++ ++ if middle.startswith("www.") or ( ++ "@" not in middle ++ and not middle.startswith("http://") ++ and not middle.startswith("https://") ++ and len(middle) > 0 ++ and middle[0] in _letters + _digits ++ and ( ++ middle.endswith(".org") ++ or middle.endswith(".net") ++ or middle.endswith(".com") ++ ) ++ ): ++ middle = '%s' % ( ++ middle, ++ rel_attr, ++ target_attr, ++ trim_url(middle), ++ ) ++ ++ if middle.startswith("http://") or middle.startswith("https://"): ++ middle = '%s' % ( ++ middle, ++ rel_attr, ++ target_attr, ++ trim_url(middle), ++ ) ++ ++ if ( ++ "@" in middle ++ and not middle.startswith("www.") ++ and ":" not in middle ++ and re.match(r"^\S+@\w[\w.-]*\.\w+$", middle) ++ ): ++ middle = '%s' % (middle, middle) ++ ++ words[i] = head + middle + tail ++ + return u''.join(words) + + +-- +2.34.1 + diff --git a/SPECS/nodejs24/CVE-2024-22195.patch b/SPECS/nodejs24/CVE-2024-22195.patch new file mode 100644 index 00000000000..eddb8a6f92e --- /dev/null +++ b/SPECS/nodejs24/CVE-2024-22195.patch @@ -0,0 +1,64 @@ +From 7150425bb9ea718fc9a8007ab3ba6a421f97e0a6 Mon Sep 17 00:00:00 2001 +From: Kanishk-Bansal +Date: Wed, 12 Feb 2025 05:35:22 +0000 +Subject: [PATCH] Fix CVE-2024-22195 + +--- + deps/v8/third_party/jinja2/filters.py | 28 ++++++++++++++++++++------- + 1 file changed, 21 insertions(+), 7 deletions(-) + +diff --git a/deps/v8/third_party/jinja2/filters.py b/deps/v8/third_party/jinja2/filters.py +index ed07c4c0..c7ecc9bb 100644 +--- a/deps/v8/third_party/jinja2/filters.py ++++ b/deps/v8/third_party/jinja2/filters.py +@@ -248,13 +248,17 @@ def do_items(value: t.Union[t.Mapping[K, V], Undefined]) -> t.Iterator[t.Tuple[K + yield from value.items() + + ++_space_re = re.compile(r"\s", flags=re.ASCII) ++ ++ + @pass_eval_context + def do_xmlattr( + eval_ctx: "EvalContext", d: t.Mapping[str, t.Any], autospace: bool = True + ) -> str: + """Create an SGML/XML attribute string based on the items in a dict. +- All values that are neither `none` nor `undefined` are automatically +- escaped: ++ ++ If any key contains a space, this fails with a ``ValueError``. Values that ++ are neither ``none`` nor ``undefined`` are automatically escaped. + + .. sourcecode:: html+jinja + +@@ -273,12 +277,22 @@ def do_xmlattr( + + As you can see it automatically prepends a space in front of the item + if the filter returned something unless the second parameter is false. ++ ++ .. versionchanged:: 3.1.3 ++ Keys with spaces are not allowed. 
+ """ +- rv = " ".join( +- f'{escape(key)}="{escape(value)}"' +- for key, value in d.items() +- if value is not None and not isinstance(value, Undefined) +- ) ++ items = [] ++ ++ for key, value in d.items(): ++ if value is None or isinstance(value, Undefined): ++ continue ++ ++ if _space_re.search(key) is not None: ++ raise ValueError(f"Spaces are not allowed in attributes: '{key}'") ++ ++ items.append(f'{escape(key)}="{escape(value)}"') ++ ++ rv = " ".join(items) + + if autospace and rv: + rv = " " + rv +-- +2.45.2 + diff --git a/SPECS/nodejs24/CVE-2024-34064.patch b/SPECS/nodejs24/CVE-2024-34064.patch new file mode 100644 index 00000000000..74f949e639f --- /dev/null +++ b/SPECS/nodejs24/CVE-2024-34064.patch @@ -0,0 +1,68 @@ +From 7d26e047cce618fe6e8ccb26bf3d4685d4a7815e Mon Sep 17 00:00:00 2001 +From: Kevin Lockwood +Date: Thu, 3 Apr 2025 12:04:25 -0700 +Subject: [PATCH] [Medium] Patch nodejs for CVE-2024-34064 + +--- + deps/v8/third_party/jinja2/filters.py | 24 +++++++++++++++++++----- + 1 file changed, 19 insertions(+), 5 deletions(-) + +diff --git a/deps/v8/third_party/jinja2/filters.py b/deps/v8/third_party/jinja2/filters.py +index c7ecc9bb..eace6ed8 100644 +--- a/deps/v8/third_party/jinja2/filters.py ++++ b/deps/v8/third_party/jinja2/filters.py +@@ -248,7 +248,10 @@ def do_items(value: t.Union[t.Mapping[K, V], Undefined]) -> t.Iterator[t.Tuple[K + yield from value.items() + + +-_space_re = re.compile(r"\s", flags=re.ASCII) ++# Check for characters that would move the parser state from key to value. ++# https://html.spec.whatwg.org/#attribute-name-state ++_attr_key_re = re.compile(r"[\s/>=]", flags=re.ASCII) ++ + + + @pass_eval_context +@@ -257,8 +260,14 @@ def do_xmlattr( + ) -> str: + """Create an SGML/XML attribute string based on the items in a dict. + +- If any key contains a space, this fails with a ``ValueError``. Values that +- are neither ``none`` nor ``undefined`` are automatically escaped. ++ **Values** that are neither ``none`` nor ``undefined`` are automatically ++ escaped, safely allowing untrusted user input. ++ ++ User input should not be used as **keys** to this filter. If any key ++ contains a space, ``/`` solidus, ``>`` greater-than sign, or ``=`` equals ++ sign, this fails with a ``ValueError``. Regardless of this, user input ++ should never be used as keys to this filter, or must be separately validated ++ first. + + .. sourcecode:: html+jinja + +@@ -280,6 +289,11 @@ def do_xmlattr( + + .. versionchanged:: 3.1.3 + Keys with spaces are not allowed. ++ ++ .. versionchanged:: 3.1.4 ++ Keys with ``/`` solidus, ``>`` greater-than sign, or ``=`` equals sign ++ are not allowed. 
++ + """ + items = [] + +@@ -287,8 +301,8 @@ def do_xmlattr( + if value is None or isinstance(value, Undefined): + continue + +- if _space_re.search(key) is not None: +- raise ValueError(f"Spaces are not allowed in attributes: '{key}'") ++ if _attr_key_re.search(key) is not None: ++ raise ValueError(f"Invalid character in attribute name: {key!r}") + + items.append(f'{escape(key)}="{escape(value)}"') + +-- +2.34.1 + diff --git a/SPECS/nodejs24/CVE-2025-27516.patch b/SPECS/nodejs24/CVE-2025-27516.patch new file mode 100644 index 00000000000..f29c39628f1 --- /dev/null +++ b/SPECS/nodejs24/CVE-2025-27516.patch @@ -0,0 +1,68 @@ +From 065334d1ee5b7210e1a0a93c37238c86858f2af7 Mon Sep 17 00:00:00 2001 +From: David Lord +Date: Wed, 5 Mar 2025 10:08:48 -0800 +Subject: [PATCH] attr filter uses env.getattr + +--- + deps/v8/third_party/jinja2/filters.py | 37 ++++++++++++++++--------------------- + 3 files changed, 30 insertions(+), 21 deletions(-) + +diff --git a/deps/v8/third_party/jinja2/filters.py b/deps/v8/third_party/jinja2/filters.py +index e5b5a00c5..2bcba4fbd 100644 +--- a/deps/v8/third_party/jinja2/filters.py ++++ b/deps/v8/third_party/jinja2/filters.py +@@ -6,6 +6,7 @@ + import typing + import typing as t + from collections import abc ++from inspect import getattr_static + from itertools import chain + from itertools import groupby + +@@ -1411,31 +1412,25 @@ def do_reverse(value: t.Union[str, t.Iterable[V]]) -> t.Union[str, t.Iterable[V] + def do_attr( + environment: "Environment", obj: t.Any, name: str + ) -> t.Union[Undefined, t.Any]: +- """Get an attribute of an object. ``foo|attr("bar")`` works like +- ``foo.bar`` just that always an attribute is returned and items are not +- looked up. ++ """Get an attribute of an object. ``foo|attr("bar")`` works like ++ ``foo.bar``, but returns undefined instead of falling back to ``foo["bar"]`` ++ if the attribute doesn't exist. + + See :ref:`Notes on subscriptions ` for more details. + """ ++ # Environment.getattr will fall back to obj[name] if obj.name doesn't exist. ++ # But we want to call env.getattr to get behavior such as sandboxing. ++ # Determine if the attr exists first, so we know the fallback won't trigger. + try: +- name = str(name) +- except UnicodeError: +- pass +- else: +- try: +- value = getattr(obj, name) +- except AttributeError: +- pass +- else: +- if environment.sandboxed: +- environment = t.cast("SandboxedEnvironment", environment) +- +- if not environment.is_safe_attribute(obj, name, value): +- return environment.unsafe_undefined(obj, name) +- +- return value +- +- return environment.undefined(obj=obj, name=name) ++ # This avoids executing properties/descriptors, but misses __getattr__ ++ # and __getattribute__ dynamic attrs. ++ getattr_static(obj, name) ++ except AttributeError: ++ # This finds dynamic attrs, and we know it's not a descriptor at this point. ++ if not hasattr(obj, name): ++ return environment.undefined(obj=obj, name=name) ++ ++ return environment.getattr(obj, name) + + + @typing.overload diff --git a/SPECS/nodejs24/btest402.js b/SPECS/nodejs24/btest402.js new file mode 100644 index 00000000000..835deb31122 --- /dev/null +++ b/SPECS/nodejs24/btest402.js @@ -0,0 +1,149 @@ +// Copyright (C) 2014 IBM Corporation and Others. All Rights Reserved. +// This file is part of the Node.JS ICU enablement work +// https://github.com/joyent/node/pull/7719 +// and is under the same license. +// +// This is a very, very, very basic test of es402 +// +// URL: https://github.com/srl295/btest402 +// Author: Steven R. 
Loomis +// +// for a complete test, see http://test262.ecmascript.org +// +// Usage: node btest402.js + +try { + console.log("You have console.log."); +} catch(e) { + // this works on d8 + console = { log: print }; + console.log("Now you have console.log."); +} + +function runbtest() { + var summary = {}; + + try { + summary.haveIntl = true; + console.log("+ Congrats, you have the Intl object."); + } catch(e) { + console.log("You don't have the Intl object: " + e); + } + + if(summary.haveIntl) { + var locs = [ "en", "mt", "ja","tlh"]; + var d = new Date(196400000); + for ( var n=0; n 0 ) { + lsummary.haveSlo = true; + } + } catch (e) { + console.log("SLO err: " + e); + } + try { + lsummary.dstr = d.toLocaleString(loc,{month: "long",day:"numeric",weekday:"long",year:"numeric"}); + console.log(" date: (supported:"+sl+") " + lsummary.dstr); + } catch (e) { + console.log(" Date Format err: " + e); + } + try { + new Intl.v8BreakIterator(); + console.log(" Intl.v8BreakIterator:" + + Intl.v8BreakIterator.supportedLocalesOf(loc) + " Supported, first()==" + + new Intl.v8BreakIterator(loc).first() ); + lsummary.brkOk = true; + } catch ( e) { + console.log(" Intl.v8BreakIterator error (NOT part of EcmaScript402): " + e); + } + console.log(); + } + } + + // print summary + console.log(); + console.log("--------- Analysis ---------"); + var stxt = ""; + if( summary.haveIntl ) { + console.log("* You have the 'Intl' object. Congratulations! You have the possibility of being EcmaScript 402 compliant."); + stxt += "Have Intl, "; + + if ( !summary.en.haveSlo ) { + stxt += "Date:no EN, "; + console.log("* English isn't a supported language by the date formatter. Perhaps the data isn't installed properly?"); + } + if ( !summary.tlh.haveSlo ) { + stxt += "Date:no 'tlh', "; + console.log("* Klingon isn't a supported language by the date formatter. It is without honor!"); + } + // now, what is it actually saying + if( summary.en.dstr.indexOf("1970") == -1) { + stxt += "Date:bad 'en', "; + console.log("* the English date format text looks bad to me. Doesn't even have the year."); + } else { + if( summary.en.dstr.indexOf("Jan") == -1) { + stxt += "Date:bad 'en', "; + console.log("* The English date format text looks bad to me. Doesn't have the right month."); + } + } + + if( summary.mt.dstr == summary.en.dstr ) { + stxt += "Date:'mt'=='en', "; + console.log("* The English and Maltese look the same to me. Probably a 'small' build."); + } else if( summary.mt.dstr.indexOf("1970") == -1) { + stxt += "Date:bad 'mt', "; + console.log("* the Maltese date format text looks bad to me. Doesn't even have the year. (This data is missing from the Chromium ICU build)"); + } else { + if( summary.mt.dstr.indexOf("Jann") == -1) { + stxt += "Date:bad 'mt', "; + console.log("* The Maltese date format text looks bad to me. Doesn't have the right month. (This data is missing from the Chromium ICU build)"); + } + } + + if ( !summary.ja.haveSlo ) { + stxt += "Date:no 'ja', "; + console.log("* Japanese isn't a supported language by the date formatter. Could be a 'small' build."); + } else { + if( summary.ja.dstr.indexOf("1970") == -1) { + stxt += "Date:bad 'ja', "; + console.log("* the Japanese date format text looks bad to me. Doesn't even have the year."); + } else { + if( summary.ja.dstr.indexOf("日") == -1) { + stxt += "Date:bad 'ja', "; + console.log("* The Japanese date format text looks bad to me."); + } + } + } + if ( summary.en.brkOk ) { + stxt += "FYI: v8Brk:have 'en', "; + console.log("* You have Intl.v8BreakIterator support. 
(Note: not part of ES402.)"); + } + } else { + console.log("* You don't have the 'Intl' object. You aren't EcmaScript 402 compliant."); + stxt += " NO Intl. "; + } + + // 1-liner. + console.log(); + console.log("----------------"); + console.log( "SUMMARY:" + stxt ); +} + +var dorun = true; + +try { + if(btest402_noautorun) { + dorun = false; + } +} catch(e) {} + +if(dorun) { + console.log("Running btest.."); + runbtest(); +} diff --git a/SPECS/nodejs24/disable-tlsv1-tlsv1-1.patch b/SPECS/nodejs24/disable-tlsv1-tlsv1-1.patch new file mode 100644 index 00000000000..0a40760b4f7 --- /dev/null +++ b/SPECS/nodejs24/disable-tlsv1-tlsv1-1.patch @@ -0,0 +1,42 @@ +diff -ru node-v16.14.0-orig/src/crypto/crypto_context.cc node-v16.14.0/src/crypto/crypto_context.cc +--- node-v16.14.0-orig/src/crypto/crypto_context.cc 2022-02-08 04:37:50.000000000 -0800 ++++ node-v16.14.0/src/crypto/crypto_context.cc 2022-02-25 09:17:21.964960342 -0800 +@@ -467,28 +467,16 @@ + min_version = 0; + max_version = kMaxSupportedVersion; + method = TLS_client_method(); +- } else if (sslmethod == "TLSv1_method") { +- min_version = TLS1_VERSION; +- max_version = TLS1_VERSION; +- } else if (sslmethod == "TLSv1_server_method") { +- min_version = TLS1_VERSION; +- max_version = TLS1_VERSION; +- method = TLS_server_method(); +- } else if (sslmethod == "TLSv1_client_method") { +- min_version = TLS1_VERSION; +- max_version = TLS1_VERSION; +- method = TLS_client_method(); +- } else if (sslmethod == "TLSv1_1_method") { +- min_version = TLS1_1_VERSION; +- max_version = TLS1_1_VERSION; +- } else if (sslmethod == "TLSv1_1_server_method") { +- min_version = TLS1_1_VERSION; +- max_version = TLS1_1_VERSION; +- method = TLS_server_method(); +- } else if (sslmethod == "TLSv1_1_client_method") { +- min_version = TLS1_1_VERSION; +- max_version = TLS1_1_VERSION; +- method = TLS_client_method(); ++ } else if (sslmethod == "TLSv1_method" || ++ sslmethod == "TLSv1_server_method" || ++ sslmethod == "TLSv1_client_method") { ++ THROW_ERR_TLS_INVALID_PROTOCOL_METHOD(env, "TLSv1 methods disabled"); ++ return; ++ } else if (sslmethod == "TLSv1_1_method" || ++ sslmethod == "TLSv1_1_server_method" || ++ sslmethod == "TLSv1_1_client_method") { ++ THROW_ERR_TLS_INVALID_PROTOCOL_METHOD(env, "TLSv1_1 methods disabled"); ++ return; + } else if (sslmethod == "TLSv1_2_method") { + min_version = TLS1_2_VERSION; + max_version = TLS1_2_VERSION; diff --git a/SPECS/nodejs24/generate_source_tarball.sh b/SPECS/nodejs24/generate_source_tarball.sh new file mode 100755 index 00000000000..550ba1d444d --- /dev/null +++ b/SPECS/nodejs24/generate_source_tarball.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +# Quit on failure +set -e + +# +# The nodejs source tarball contains a copy of the OpenSSL source tree. +# OpenSSL contains patented algorithms that should not be distributed +# as part of the SRPM. Since we use the shared OpenSSL libraries, we +# can just remove the entire OpenSSL source tree from the tarball. 
+ +PKG_VERSION="" +SRC_TARBALL="" +OUT_FOLDER="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# parameters: +# +# --srcTarball : src tarball file +# this file contains the 'initial' source code of the component +# and should be replaced with the new/modified src code +# --outFolder : folder where to copy the new tarball(s) +# --pkgVersion : package version +# +PARAMS="" +while (( "$#" )); do + case "$1" in + --srcTarball) + if [ -n "$2" ] && [ ${2:0:1} != "-" ]; then + SRC_TARBALL=$2 + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + --outFolder) + if [ -n "$2" ] && [ ${2:0:1} != "-" ]; then + OUT_FOLDER=$2 + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + --pkgVersion) + if [ -n "$2" ] && [ ${2:0:1} != "-" ]; then + PKG_VERSION=$2 + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; + -*|--*=) # unsupported flags + echo "Error: Unsupported flag $1" >&2 + exit 1 + ;; + *) # preserve positional arguments + PARAMS="$PARAMS $1" + shift + ;; + esac +done + +echo "--srcTarball -> $SRC_TARBALL" +echo "--outFolder -> $OUT_FOLDER" +echo "--pkgVersion -> $PKG_VERSION" + +if [ -z "$PKG_VERSION" ]; then + echo "--pkgVersion parameter cannot be empty" + exit 1 +fi + +echo "-- create temp folder" +tmpdir=$(mktemp -d) +function cleanup { + echo "+++ cleanup -> remove $tmpdir" + rm -rf $tmpdir +} +trap cleanup EXIT + +pushd $tmpdir > /dev/null + +namever="node-v${PKG_VERSION}" + +if [[ -n $SRC_TARBALL ]]; then + upstream_tarball_name="$SRC_TARBALL" + clean_tarball_name="$OUT_FOLDER/$(basename $SRC_TARBALL)" +else + upstream_tarball_name="${namever}.tar.xz" + clean_tarball_name="$OUT_FOLDER/${namever}-clean.tar.xz" + download_url="https://nodejs.org/download/release/v${PKG_VERSION}/${upstream_tarball_name}" + + echo "Downloading upstream source tarball..." + curl -s -O $download_url +fi + +echo "Unpacking upstream source tarball..." +tar -xf $upstream_tarball_name + +echo "Removing bad vendored dependencies from source tree..." +rm -rf ./$namever/deps/openssl/openssl + +# Create a reproducible tarball +# Credit to https://reproducible-builds.org/docs/archives/ for instructions +# Do not update mtime value for new versions- keep the same value for ease of +# reproducing old tarball versions in the future if necessary +echo "Repacking source tarball..." 
+tar --sort=name --mtime="2021-11-10 00:00Z" \ + --owner=0 --group=0 --numeric-owner \ + --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime \ + -cJf $clean_tarball_name ./$namever + +popd > /dev/null +echo "Clean nodejs source tarball available at $clean_tarball_name" diff --git a/SPECS/nodejs24/nodejs24.signatures.json b/SPECS/nodejs24/nodejs24.signatures.json new file mode 100644 index 00000000000..b601b952650 --- /dev/null +++ b/SPECS/nodejs24/nodejs24.signatures.json @@ -0,0 +1,8 @@ +{ + "Signatures": { + "btest402.js": "fabaf4dacc13e93d54f825b87ffde18573214b149388a5f96176236dd31d7768", + "icu4c-77_1-data-bin-b.zip": "d8be12e03f782da350508b15354738ed97a3289008a787b6bd2a85434374bff4", + "icu4c-77_1-data-bin-l.zip": "0913674ff673c585f8bc08370916b6a6ccc30ffb6408a5c1bc3edbf5a687fd96", + "node-v24.13.0.tar.xz": "320fe909cbb347dcf516201e4964ef177b8138df9a7f810d0d54950481b3158b" + } +} \ No newline at end of file diff --git a/SPECS/nodejs24/nodejs24.spec b/SPECS/nodejs24/nodejs24.spec new file mode 100644 index 00000000000..b207848f27e --- /dev/null +++ b/SPECS/nodejs24/nodejs24.spec @@ -0,0 +1,327 @@ +# Retrieved from 'deps/npm/package.json' inside the sources tarball. +%define npm_version 11.6.2 + +%global nodejs_datadir %{_datarootdir}/nodejs + +# ICU - from tools/icu/current_ver.dep +%global icu_major 77 +%global icu_minor 1 +%global icu_version %{icu_major}.%{icu_minor} + +%global icudatadir %{nodejs_datadir}/icudata +%{!?little_endian: %global little_endian %(%{python3} -c "import sys;print (0 if sys.byteorder=='big' else 1)")} + +Summary: A JavaScript runtime built on Chrome's V8 JavaScript engine. +Name: nodejs24 +# WARNINGS: MUST check and update the 'npm_version' macro for every version update of this package. +# The version of NPM can be found inside the sources under 'deps/npm/package.json'. +Version: 24.13.0 +Release: 1%{?dist} +License: BSD AND MIT AND Public Domain AND NAIST-2003 AND Artistic-2.0 +Vendor: Microsoft Corporation +Distribution: Azure Linux +Group: Applications/System +URL: https://github.com/nodejs/node +# !!!! Nodejs code has a vendored version of OpenSSL code that must be removed from source tarball +# !!!! because it contains patented algorithms. +# !!! => use generate_source_tarball.sh script to create a clean and reproducible source tarball. 
+Source0: https://nodejs.org/download/release/v%{version}/node-v%{version}.tar.xz +Source1: https://github.com/unicode-org/icu/releases/download/release-%{icu_major}-%{icu_minor}/icu4c-%{icu_major}_%{icu_minor}-data-bin-b.zip +Source2: https://github.com/unicode-org/icu/releases/download/release-%{icu_major}-%{icu_minor}/icu4c-%{icu_major}_%{icu_minor}-data-bin-l.zip +Source3: btest402.js +Patch0: disable-tlsv1-tlsv1-1.patch +Patch1: CVE-2019-10906.patch +Patch2: CVE-2024-22195.patch +Patch3: CVE-2020-28493.patch +Patch4: CVE-2024-34064.patch +Patch5: CVE-2025-27516.patch +BuildRequires: brotli-devel +BuildRequires: c-ares-devel +BuildRequires: coreutils >= 8.22 +BuildRequires: gcc +BuildRequires: make +BuildRequires: ninja-build +BuildRequires: openssl-devel >= 1.1.1 +BuildRequires: python3 +BuildRequires: which +BuildRequires: zlib-devel +Requires: brotli +Requires: c-ares +Requires: coreutils >= 8.22 +Requires: openssl >= 1.1.1 +Provides: nodejs24 +# Until we make this as formal nodejs release, lets make it conflicting with nodejs20 +# This will uninstall nodejs20 during installation of nodejs24 +Conflicts: nodejs + +Recommends: nodejs-full-i18n = %{version}-%{release} +Provides: bundled(icu) = %{icu_version} + +%description +Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. +Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient. +The Node.js package ecosystem, npm, is the largest ecosystem of open source libraries in the world. + +%package devel +Summary: Development files node +Group: System Environment/Base +Requires: %{name} = %{version}-%{release} +Requires: brotli-devel +Requires: openssl-devel >= 1.1.1 +Requires: zlib-devel + +%description devel +The nodejs-devel package contains libraries, header files and documentation +for developing applications that use nodejs. + +%package full-i18n +Summary: Non-English locale data for Node.js +Requires: %{name} = %{version}-%{release} + +%description full-i18n +Optional data files to provide full-icu support for Node.js. Remove this +package to save space if non-English locales are not needed. + +%package npm +Summary: Node.js Package Manager +Group: System Environment/Base +Requires: %{name} = %{version}-%{release} +Provides: npm = %{npm_version}.%{version}-%{release} + +%description npm +npm is a package manager for node.js. You can use it to install and publish +your node programs. It manages dependencies and does other cool stuff. + +%prep +%autosetup -p1 -n node-v%{version} + +%build +# remove unsupported TLSv1.3 cipher: +# Mariner's OpenSSL configuration does not allow for this TLSv1.3 +# cipher. OpenSSL does not like being asked to use TLSv1.3 ciphers +# it doesn't support (despite being fine processing similar cipher +# requests for TLS < 1.3). This cipher's presence in the default +# cipher list causes failures when initializing secure contexts +# in the context of Node's TLS library. 
+sed -i '/TLS_CHACHA20_POLY1305_SHA256/d' ./src/node_constants.h + +# remove brotli and zlib source code from deps folder +# keep the .gyp and .gypi files that are still used during configuration +find deps/zlib -name *.[ch] -delete +find deps/brotli -name *.[ch] -delete + +python3 configure.py \ + --prefix=%{_prefix} \ + --ninja \ + --shared-openssl \ + --shared-zlib \ + --shared-brotli \ + --with-intl=small-icu \ + --with-icu-source=deps/icu-small \ + --with-icu-default-data-dir=%{icudatadir} \ + --openssl-use-def-ca-store \ + --shared-cares + +JOBS=4 make %{?_smp_mflags} V=0 + +%install + +make %{?_smp_mflags} install DESTDIR=%{buildroot} +install -m 755 -d %{buildroot}%{_libdir}/node_modules/ +install -m 755 -d %{buildroot}%{_datadir}/%{name} + +# Remove junk files from node_modules/ - we should probably take care of +# this in the installer. +for FILE in .gitmodules .gitignore .npmignore .travis.yml \*.py[co]; do + find %{buildroot}%{_libdir}/node_modules/ -name "$FILE" -delete +done + +# Install the full-icu data files +mkdir -p %{buildroot}%{icudatadir} +%if 0%{?little_endian} +unzip -d %{buildroot}%{icudatadir} %{SOURCE2} icudt%{icu_major}l.dat +%else +unzip -d %{buildroot}%{icudatadir} %{SOURCE1} icudt%{icu_major}b.dat +%endif + +%check +# Make sure i18n support is working +NODE_PATH=%{buildroot}%{_prefix}/lib/node_modules:%{buildroot}%{_prefix}/lib/node_modules/npm/node_modules LD_LIBRARY_PATH=%{buildroot}%{_libdir} %{buildroot}/%{_bindir}/node --icu-data-dir=%{buildroot}%{icudatadir} %{SOURCE3} + +make cctest + +%post -p /sbin/ldconfig + +%files +%defattr(-,root,root) +%license LICENSE +%doc CHANGELOG.md README.md +%{_bindir}/node +%dir %{_prefix}/lib/node_modules +%{_mandir}/man*/* + +%files devel +%defattr(-,root,root) +%{_includedir}/* +%{_docdir}/* + +%files full-i18n +%dir %{icudatadir} +%{icudatadir}/icudt%{icu_major}*.dat + +%files npm +%defattr(-,root,root) +%{_bindir}/npm +%{_bindir}/npx +%{_bindir}/corepack +%{_prefix}/lib/node_modules/* + +%changelog +* Tue Dec 23 2025 Sandeep Karambelkar - 24.13.0-1 +- Upgrade to 24.13.0 +- Add support for passing runtime internationalization data + +* Fri Nov 07 2025 Azure Linux Security Servicing Account - 20.14.0-10 +- Patch for CVE-2025-5222 + +* Tue May 27 2025 Aninda Pradhan - 20.14.0-9 +- Patch CVE-2025-23165, CVE-2025-23166 + +* Wed May 21 2025 Aninda Pradhan - 20.14.0-8 +- Patch CVE-2025-47279 + +* Mon Mar 10 2025 Sandeep Karambelkar - 20.14.0-7 +- Patch CVE-2025-27516 + +* Wed Feb 12 2025 Kevin Lockwood - 20.14.0-6 +- Patch CVE-2020-28493 +- Patch CVE-2024-34064 + +* Tue Feb 11 2025 Kanishk Bansal - 20.14.0-5 +- Patch CVE-2025-22150, CVE-2025-23085, CVE-2024-22020, CVE-2024-22195 + +* Mon Jan 27 2025 Sumedh Sharma - 20.14.0-4 +- Patch CVE-2025-23083 + +* Tue Nov 19 2024 Bala - 20.14.0-3 +- Patch CVE-2024-21538 + +* Thu Sep 19 2024 Suresh Thelkar - 20.14.0-2 +- Patch CVE-2019-10906 + +* Fri Jun 07 2024 Nicolas Guibourge - 20.14.0-1 +- Upgrade to 20.14.0 to address CVEs + +* Thu Jun 06 2024 Riken Maharjan - 20.10.0-3 +- Separate npm from node using Fedora 50 (LICENSE: MIT) + +* Tue May 21 2024 Neha Agarwal - 20.10.0-2 +- Bump release to build with new libuv to fix CVE-2024-24806 + +* Wed Jan 31 2024 Saul Paredes - 20.10.0-1 +- Upgrade to nodejs to 20.10.0 and npm to 10.2.3 + +* Wed Sep 06 2023 Brian Fjeldstad - 16.20.2-2 +- Patch CVE-2023-35945 + +* Wed Sep 06 2023 Brian Fjeldstad - 16.20.2-1 +- Patch CVE-2023-32002 CVE-2023-32006 CVE-2023-32559 + +* Wed Jul 12 2023 Olivia Crain - 16.20.1-2 +- Backport upstream patches to fix 
CVE-2022-25883 + +* Wed Jun 28 2023 David Steele - 16.20.1-1 +- Upgrade to nodejs to 16.20.1 and npm to 8.19.4 + +* Tue May 30 2023 Dallas Delaney - 16.19.1-2 +- Fix CVE-2023-32067, CVE-2023-31130, CVE-2023-31147 by using system c-ares + +* Wed Mar 01 2023 CBL-Mariner Servicing Account - 16.19.1-1 +- Auto-upgrade to 16.19.1 - to fix CVE-2023-23936 +- Update npm version to 8.19.3 to reflect the actual version of npm bundled with v16.19.1 + +* Tue Dec 13 2022 Andrew Phelps - 16.18.1-2 +- Update license to reference Artistic 2.0 + +* Fri Dec 09 2022 CBL-Mariner Servicing Account - 16.18.1-1 +- Auto-upgrade to 16.18.1 - CVE-2022-43548 + +* Tue Oct 25 2022 Nicolas Guibourge - 16.17.1-2 +- Change npm_version to 8.15.0 to reflect the actual version of npm bundled with v16.17.1 + +* Mon Oct 24 2022 CBL-Mariner Servicing Account - 16.17.1-1 +- Upgrade to 16.17.1 + +* Thu Aug 18 2022 Cameron Baird - 16.16.0-2 +- Change npm_version to 8.11.0 to reflect the actual version of npm bundled with v16.16.0 + +* Tue Aug 02 2022 Cameron Baird - 16.16.0-1 +- Update to v16.16.0 (security update) to resolve CVE-2022-32213, CVE-2022-32214, CVE-2022-32215 + +* Mon May 16 2022 Mandeep Plaha - 16.14.2-2 +- Remove python3 as a runtime dependency as it is not needed during runtime. + +* Tue Apr 19 2022 Mandeep Plaha - 16.14.2-1 +- Update to 16.14.2. + +* Thu Feb 24 2022 Nicolas Guibourge - 16.14.0-1 +- Upgrade to 16.14.0. + +* Thu Nov 18 2021 Thomas Crain - 14.18.1-1 +- Update to version 14.18.1 to fix CVE-2021-22959, CVE-2021-22960, CVE-2021-37701, + CVE-2021-37712, CVE-2021-37713, CVE-2021-39134, CVE-2021-39135 +- Add patch to remove problematic cipher from default list +- Add config flag to use OpenSSL cert store instead of built-in Mozilla certs +- Add script to remove vendored OpenSSL tree from source tarball +- Update required OpenSSL version to 1.1.1 +- Use python configure script directly +- Lint spec + +* Thu Sep 23 2021 Pawel Winogrodzki - 14.17.2-2 +- Adding 'Provides' for 'npm'. + +* Mon Jul 19 2021 Neha Agarwal - 14.17.2-1 +- Update to version 14.17.2 to fix CVE-2021-22918 + +* Mon Jun 07 2021 Henry Beberman - 14.17.0-1 +- Update to nodejs version 14.17.0 + +* Sat May 09 2020 Nick Samson - 9.11.2-7 +- Added %%license line automatically + +* Mon May 04 2020 Paul Monson 9.11.2-6 +- Add patch that enables building openssl without TLS versions less 1.2 + +* Thu Apr 09 2020 Nicolas Ontiveros 9.11.2-5 +- Remove toybox and only use coreutils for requires. + +* Wed Apr 08 2020 Pawel Winogrodzki 9.11.2-4 +- License verified. +- Removed "%%define sha1". + +* Tue Sep 03 2019 Mateusz Malisz 9.11.2-3 +- Initial CBL-Mariner import from Photon (license: Apache2). 
+ +* Tue Jan 08 2019 Alexey Makhalov 9.11.2-2 +- Added BuildRequires python2, which + +* Thu Sep 20 2018 Him Kalyan Bordoloi 9.11.2-1 +- Updated to version 9.11.2 + +* Mon Sep 10 2018 Him Kalyan Bordoloi 9.9.0-1 +- Updated to version 9.9.0 + +* Wed Feb 14 2018 Xiaolin Li 8.3.0-1 +- Updated to version 8.3.0 + +* Fri Oct 13 2017 Alexey Makhalov 7.7.4-4 +- Remove BuildArch + +* Mon Sep 18 2017 Alexey Makhalov 7.7.4-3 +- Requires coreutils or toybox + +* Fri Jul 14 2017 Chang Lee 7.7.4-2 +- Updated %check + +* Mon Mar 20 2017 Xiaolin Li 7.7.4-1 +- Initial packaging for Photon diff --git a/SPECS/ntopng/CVE-2021-44964.patch b/SPECS/ntopng/CVE-2021-44964.patch new file mode 100644 index 00000000000..ec36c16f525 --- /dev/null +++ b/SPECS/ntopng/CVE-2021-44964.patch @@ -0,0 +1,394 @@ +From 0bfc572e51d9035a615ef6e9523f736c9ffa8e57 Mon Sep 17 00:00:00 2001 +From: Roberto Ierusalimschy +Date: Mon, 13 Dec 2021 10:41:17 -0300 +Subject: [PATCH] Bug: GC is not reentrant + +As the GC is not reentrant, finalizers should not be able to invoke it. + +Upstream patch reference: https://github.com/lua/lua/commit/0bfc572e51d9035a615ef6e9523f736c9ffa8e57.patch +--- + third-party/lua-5.4.3/src/lapi.c | 17 ++++----- + third-party/lua-5.4.3/src/lbaselib.c | 19 ++++++++-- + third-party/lua-5.4.3/src/ldebug.c | 54 +++++++++++++++++----------- + third-party/lua-5.4.3/src/lgc.c | 17 +++++---- + third-party/lua-5.4.3/src/lgc.h | 10 ++++++ + third-party/lua-5.4.3/src/llimits.h | 2 +- + third-party/lua-5.4.3/src/lstate.c | 4 +-- + third-party/lua-5.4.3/src/lstate.h | 4 +-- + 8 files changed, 84 insertions(+), 43 deletions(-) + +diff --git a/third-party/lua-5.4.3/src/lapi.c b/third-party/lua-5.4.3/src/lapi.c +index f8f70cd..b7e4711 100644 +--- a/third-party/lua-5.4.3/src/lapi.c ++++ b/third-party/lua-5.4.3/src/lapi.c +@@ -1126,18 +1126,19 @@ LUA_API int lua_status (lua_State *L) { + LUA_API int lua_gc (lua_State *L, int what, ...) { + va_list argp; + int res = 0; +- global_State *g; ++ global_State *g = G(L); ++ if (g->gcstp & GCSTPGC) /* internal stop? */ ++ return -1; /* all options are invalid when stopped */ + lua_lock(L); +- g = G(L); + va_start(argp, what); + switch (what) { + case LUA_GCSTOP: { +- g->gcrunning = 0; ++ g->gcstp = GCSTPUSR; /* stopped by the user */ + break; + } + case LUA_GCRESTART: { + luaE_setdebt(g, 0); +- g->gcrunning = 1; ++ g->gcstp = 0; /* (GCSTPGC must be already zero here) */ + break; + } + case LUA_GCCOLLECT: { +@@ -1156,8 +1157,8 @@ LUA_API int lua_gc (lua_State *L, int what, ...) { + case LUA_GCSTEP: { + int data = va_arg(argp, int); + l_mem debt = 1; /* =1 to signal that it did an actual step */ +- lu_byte oldrunning = g->gcrunning; +- g->gcrunning = 1; /* allow GC to run */ ++ lu_byte oldstp = g->gcstp; ++ g->gcstp = 0; /* allow GC to run (GCSTPGC must be zero here) */ + if (data == 0) { + luaE_setdebt(g, 0); /* do a basic step */ + luaC_step(L); +@@ -1167,7 +1168,7 @@ LUA_API int lua_gc (lua_State *L, int what, ...) { + luaE_setdebt(g, debt); + luaC_checkGC(L); + } +- g->gcrunning = oldrunning; /* restore previous state */ ++ g->gcstp = oldstp; /* restore previous state */ + if (debt > 0 && g->gcstate == GCSpause) /* end of cycle? */ + res = 1; /* signal it */ + break; +@@ -1185,7 +1186,7 @@ LUA_API int lua_gc (lua_State *L, int what, ...) 
{ + break; + } + case LUA_GCISRUNNING: { +- res = g->gcrunning; ++ res = gcrunning(g); + break; + } + case LUA_GCGEN: { +diff --git a/third-party/lua-5.4.3/src/lbaselib.c b/third-party/lua-5.4.3/src/lbaselib.c +index 83ad306..82abd94 100644 +--- a/third-party/lua-5.4.3/src/lbaselib.c ++++ b/third-party/lua-5.4.3/src/lbaselib.c +@@ -182,12 +182,20 @@ static int luaB_rawset (lua_State *L) { + + + static int pushmode (lua_State *L, int oldmode) { +- lua_pushstring(L, (oldmode == LUA_GCINC) ? "incremental" +- : "generational"); ++ if (oldmode == -1) ++ luaL_pushfail(L); /* invalid call to 'lua_gc' */ ++ else ++ lua_pushstring(L, (oldmode == LUA_GCINC) ? "incremental" ++ : "generational"); + return 1; + } + + ++/* ++** check whether call to 'lua_gc' was valid (not inside a finalizer) ++*/ ++#define checkvalres(res) { if (res == -1) break; } ++ + static int luaB_collectgarbage (lua_State *L) { + static const char *const opts[] = {"stop", "restart", "collect", + "count", "step", "setpause", "setstepmul", +@@ -200,12 +208,14 @@ static int luaB_collectgarbage (lua_State *L) { + case LUA_GCCOUNT: { + int k = lua_gc(L, o); + int b = lua_gc(L, LUA_GCCOUNTB); ++ checkvalres(k); + lua_pushnumber(L, (lua_Number)k + ((lua_Number)b/1024)); + return 1; + } + case LUA_GCSTEP: { + int step = (int)luaL_optinteger(L, 2, 0); + int res = lua_gc(L, o, step); ++ checkvalres(res); + lua_pushboolean(L, res); + return 1; + } +@@ -213,11 +223,13 @@ static int luaB_collectgarbage (lua_State *L) { + case LUA_GCSETSTEPMUL: { + int p = (int)luaL_optinteger(L, 2, 0); + int previous = lua_gc(L, o, p); ++ checkvalres(previous); + lua_pushinteger(L, previous); + return 1; + } + case LUA_GCISRUNNING: { + int res = lua_gc(L, o); ++ checkvalres(res); + lua_pushboolean(L, res); + return 1; + } +@@ -234,10 +246,13 @@ static int luaB_collectgarbage (lua_State *L) { + } + default: { + int res = lua_gc(L, o); ++ checkvalres(res); + lua_pushinteger(L, res); + return 1; + } + } ++ luaL_pushfail(L); /* invalid call (inside a finalizer) */ ++ return 1; + } + + +diff --git a/third-party/lua-5.4.3/src/ldebug.c b/third-party/lua-5.4.3/src/ldebug.c +index 5524fae..43d77bb 100644 +--- a/third-party/lua-5.4.3/src/ldebug.c ++++ b/third-party/lua-5.4.3/src/ldebug.c +@@ -34,8 +34,8 @@ + #define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_VCCL) + + +-static const char *funcnamefromcode (lua_State *L, CallInfo *ci, +- const char **name); ++static const char *funcnamefromcall (lua_State *L, CallInfo *ci, ++ const char **name); + + + static int currentpc (CallInfo *ci) { +@@ -310,15 +310,9 @@ static void collectvalidlines (lua_State *L, Closure *f) { + + + static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) { +- if (ci == NULL) /* no 'ci'? */ +- return NULL; /* no info */ +- else if (ci->callstatus & CIST_FIN) { /* is this a finalizer? */ +- *name = "__gc"; +- return "metamethod"; /* report it as such */ +- } +- /* calling function is a known Lua function? */ +- else if (!(ci->callstatus & CIST_TAIL) && isLua(ci->previous)) +- return funcnamefromcode(L, ci->previous, name); ++ /* calling function is a known function? */ ++ if (ci != NULL && !(ci->callstatus & CIST_TAIL)) ++ return funcnamefromcall(L, ci->previous, name); + else return NULL; /* no way to find a name */ + } + +@@ -590,16 +584,10 @@ static const char *getobjname (const Proto *p, int lastpc, int reg, + ** Returns what the name is (e.g., "for iterator", "method", + ** "metamethod") and sets '*name' to point to the name. 
+ */ +-static const char *funcnamefromcode (lua_State *L, CallInfo *ci, +- const char **name) { ++static const char *funcnamefromcode (lua_State *L, const Proto *p, ++ int pc, const char **name) { + TMS tm = (TMS)0; /* (initial value avoids warnings) */ +- const Proto *p = ci_func(ci)->p; /* calling function */ +- int pc = currentpc(ci); /* calling instruction index */ + Instruction i = p->code[pc]; /* calling instruction */ +- if (ci->callstatus & CIST_HOOKED) { /* was it called inside a hook? */ +- *name = "?"; +- return "hook"; +- } + switch (GET_OPCODE(i)) { + case OP_CALL: + case OP_TAILCALL: +@@ -636,6 +624,26 @@ static const char *funcnamefromcode (lua_State *L, CallInfo *ci, + return "metamethod"; + } + ++ ++/* ++** Try to find a name for a function based on how it was called. ++*/ ++static const char *funcnamefromcall (lua_State *L, CallInfo *ci, ++ const char **name) { ++ if (ci->callstatus & CIST_HOOKED) { /* was it called inside a hook? */ ++ *name = "?"; ++ return "hook"; ++ } ++ else if (ci->callstatus & CIST_FIN) { /* was it called as a finalizer? */ ++ *name = "__gc"; ++ return "metamethod"; /* report it as such */ ++ } ++ else if (isLua(ci)) ++ return funcnamefromcode(L, ci_func(ci)->p, currentpc(ci), name); ++ else ++ return NULL; ++} ++ + /* }====================================================== */ + + +@@ -694,11 +702,15 @@ l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) { + luaG_runerror(L, "attempt to %s a %s value%s", op, t, varinfo(L, o)); + } + +- ++/* ++** Raise an error for calling a non-callable object. Try to find a name ++** for the object based on how it was called ('funcnamefromcall'); if it ++** cannot get a name there, try 'varinfo'. ++*/ + l_noret luaG_callerror (lua_State *L, const TValue *o) { + CallInfo *ci = L->ci; + const char *name = NULL; /* to avoid warnings */ +- const char *what = (isLua(ci)) ? funcnamefromcode(L, ci, &name) : NULL; ++ const char *what = funcnamefromcall(L, ci, &name); + if (what != NULL) { + const char *t = luaT_objtypename(L, o); + luaG_runerror(L, "%s '%s' is not callable (a %s value)", what, name, t); +diff --git a/third-party/lua-5.4.3/src/lgc.c b/third-party/lua-5.4.3/src/lgc.c +index b360eed..42a73d8 100644 +--- a/third-party/lua-5.4.3/src/lgc.c ++++ b/third-party/lua-5.4.3/src/lgc.c +@@ -906,18 +906,18 @@ static void GCTM (lua_State *L) { + if (!notm(tm)) { /* is there a finalizer? */ + int status; + lu_byte oldah = L->allowhook; +- int running = g->gcrunning; ++ int oldgcstp = g->gcstp; ++ g->gcstp |= GCSTPGC; /* avoid GC steps */ + L->allowhook = 0; /* stop debug hooks during GC metamethod */ +- g->gcrunning = 0; /* avoid GC steps */ + setobj2s(L, L->top++, tm); /* push finalizer... */ + setobj2s(L, L->top++, &v); /* ... and its argument */ + L->ci->callstatus |= CIST_FIN; /* will run a finalizer */ + status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0); + L->ci->callstatus &= ~CIST_FIN; /* not running a finalizer anymore */ + L->allowhook = oldah; /* restore hooks */ +- g->gcrunning = running; /* restore state */ ++ g->gcstp = oldgcstp; /* restore state */ + if (l_unlikely(status != LUA_OK)) { /* error while running __gc? */ +- luaE_warnerror(L, "__gc metamethod"); ++ luaE_warnerror(L, "__gc"); + L->top--; /* pops error object */ + } + } +@@ -1011,7 +1011,8 @@ static void correctpointers (global_State *g, GCObject *o) { + void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) { + global_State *g = G(L); + if (tofinalize(o) || /* obj. is already marked... 
*/ +- gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */ ++ gfasttm(g, mt, TM_GC) == NULL || /* or has no finalizer... */ ++ (g->gcstp & GCSTPCLS)) /* or closing state? */ + return; /* nothing to be done */ + else { /* move 'o' to 'finobj' list */ + GCObject **p; +@@ -1502,12 +1503,13 @@ static void deletelist (lua_State *L, GCObject *p, GCObject *limit) { + */ + void luaC_freeallobjects (lua_State *L) { + global_State *g = G(L); ++ g->gcstp = GCSTPCLS; /* no extra finalizers after here */ + luaC_changemode(L, KGC_INC); + separatetobefnz(g, 1); /* separate all objects with finalizers */ + lua_assert(g->finobj == NULL); + callallpendingfinalizers(L); + deletelist(L, g->allgc, obj2gco(g->mainthread)); +- deletelist(L, g->finobj, NULL); ++ lua_assert(g->finobj == NULL); /* no new finalizers */ + deletelist(L, g->fixedgc, NULL); /* collect fixed objects */ + lua_assert(g->strt.nuse == 0); + } +@@ -1647,6 +1649,7 @@ void luaC_runtilstate (lua_State *L, int statesmask) { + } + + ++ + /* + ** Performs a basic incremental step. The debt and step size are + ** converted from bytes to "units of work"; then the function loops +@@ -1678,7 +1681,7 @@ static void incstep (lua_State *L, global_State *g) { + void luaC_step (lua_State *L) { + global_State *g = G(L); + lua_assert(!g->gcemergency); +- if (g->gcrunning) { /* running? */ ++ if (gcrunning(g)) { /* running? */ + if(isdecGCmodegen(g)) + genstep(L, g); + else +diff --git a/third-party/lua-5.4.3/src/lgc.h b/third-party/lua-5.4.3/src/lgc.h +index 073e2a4..4a12563 100644 +--- a/third-party/lua-5.4.3/src/lgc.h ++++ b/third-party/lua-5.4.3/src/lgc.h +@@ -148,6 +148,16 @@ + */ + #define isdecGCmodegen(g) (g->gckind == KGC_GEN || g->lastatomic != 0) + ++ ++/* ++** Control when GC is running: ++*/ ++#define GCSTPUSR 1 /* bit true when GC stopped by user */ ++#define GCSTPGC 2 /* bit true when GC stopped by itself */ ++#define GCSTPCLS 4 /* bit true when closing Lua state */ ++#define gcrunning(g) ((g)->gcstp == 0) ++ ++ + /* + ** Does one step of collection when debt becomes positive. 'pre'/'pos' + ** allows some adjustments to be done only when needed. 
macro +diff --git a/third-party/lua-5.4.3/src/llimits.h b/third-party/lua-5.4.3/src/llimits.h +index 025f1c8..9a68a66 100644 +--- a/third-party/lua-5.4.3/src/llimits.h ++++ b/third-party/lua-5.4.3/src/llimits.h +@@ -347,7 +347,7 @@ typedef l_uint32 Instruction; + #define condchangemem(L,pre,pos) ((void)0) + #else + #define condchangemem(L,pre,pos) \ +- { if (G(L)->gcrunning) { pre; luaC_fullgc(L, 0); pos; } } ++ { if (gcrunning(G(L))) { pre; luaC_fullgc(L, 0); pos; } } + #endif + + #endif +diff --git a/third-party/lua-5.4.3/src/lstate.c b/third-party/lua-5.4.3/src/lstate.c +index 38da773..59b4f21 100644 +--- a/third-party/lua-5.4.3/src/lstate.c ++++ b/third-party/lua-5.4.3/src/lstate.c +@@ -236,7 +236,7 @@ static void f_luaopen (lua_State *L, void *ud) { + luaS_init(L); + luaT_init(L); + luaX_init(L); +- g->gcrunning = 1; /* allow gc */ ++ g->gcstp = 0; /* allow gc */ + setnilvalue(&g->nilvalue); /* now state is complete */ + luai_userstateopen(L); + } +@@ -373,7 +373,7 @@ LUA_API lua_State *lua_newstate (lua_Alloc f, void *ud) { + g->ud_warn = NULL; + g->mainthread = L; + g->seed = luai_makeseed(L); +- g->gcrunning = 0; /* no GC while building state */ ++ g->gcstp = GCSTPGC; /* no GC while building state */ + g->strt.size = g->strt.nuse = 0; + g->strt.hash = NULL; + setnilvalue(&g->l_registry); +diff --git a/third-party/lua-5.4.3/src/lstate.h b/third-party/lua-5.4.3/src/lstate.h +index c1283bb..11f27fd 100644 +--- a/third-party/lua-5.4.3/src/lstate.h ++++ b/third-party/lua-5.4.3/src/lstate.h +@@ -209,7 +209,7 @@ typedef struct CallInfo { + #define CIST_YPCALL (1<<4) /* doing a yieldable protected call */ + #define CIST_TAIL (1<<5) /* call was tail called */ + #define CIST_HOOKYIELD (1<<6) /* last hook called yielded */ +-#define CIST_FIN (1<<7) /* call is running a finalizer */ ++#define CIST_FIN (1<<7) /* function "called" a finalizer */ + #define CIST_TRAN (1<<8) /* 'ci' has transfer information */ + #define CIST_CLSRET (1<<9) /* function is closing tbc variables */ + /* Bits 10-12 are used for CIST_RECST (see below) */ +@@ -263,7 +263,7 @@ typedef struct global_State { + lu_byte gcstopem; /* stops emergency collections */ + lu_byte genminormul; /* control for minor generational collections */ + lu_byte genmajormul; /* control for major generational collections */ +- lu_byte gcrunning; /* true if GC is running */ ++ lu_byte gcstp; /* control whether GC is running */ + lu_byte gcemergency; /* true if this is an emergency collection */ + lu_byte gcpause; /* size of pause between successive GCs */ + lu_byte gcstepmul; /* GC "speed" */ +-- +2.45.4 + diff --git a/SPECS/ntopng/ntopng.spec b/SPECS/ntopng/ntopng.spec index 1124941ce5d..533a67f89d9 100644 --- a/SPECS/ntopng/ntopng.spec +++ b/SPECS/ntopng/ntopng.spec @@ -2,7 +2,7 @@ Summary: Web-based Network Traffic Monitoring Application Name: ntopng Version: 5.2.1 -Release: 5%{?dist} +Release: 6%{?dist} License: GPLv3 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -18,6 +18,7 @@ Patch3: CVE-2017-18214.patch Patch4: CVE-2022-33099.patch Patch5: CVE-2021-44647.patch Patch6: CVE-2021-43519.patch +Patch7: CVE-2021-44964.patch BuildRequires: curl-devel BuildRequires: gcc BuildRequires: glib-devel @@ -67,6 +68,9 @@ mv nDPI-%{nDPIver} nDPI %{_datadir}/ntopng/* %changelog +* Fri Dec 26 2025 Jyoti Kanase - 5.2.1-6 +- Patch to fix CVE-2021-44964 + * Thu Feb 06 2025 Jyoti Kanase - 5.2.1-5 - Patch to fix CVE-2021-44647 and CVE-2021-43519. 
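The CVE-2021-44964 patch above replaces Lua's boolean gcrunning flag with a gcstp bit field so that several independent reasons for pausing the collector (a user stop, a running finalizer, state shutdown) can be tracked at once, and the collector only steps when no bit is set. The standalone C sketch below reuses the bit values the patch defines in lgc.h (GCSTPUSR, GCSTPGC, GCSTPCLS) but is otherwise hypothetical; it only illustrates the flag scheme and is not code from Lua or ntopng.

#include <stdio.h>

/* Same stop bits the patch defines in lgc.h. */
#define GCSTPUSR 1   /* stopped by the user (lua_gc(L, LUA_GCSTOP)) */
#define GCSTPGC  2   /* stopped by the GC itself, e.g. while a finalizer runs */
#define GCSTPCLS 4   /* stopped because the Lua state is being closed */

/* The collector may step only when no stop bit is set. */
#define gcrunning(stp) ((stp) == 0)

/* Hypothetical stand-in for the part of global_State we care about. */
struct toy_state { unsigned char gcstp; };

/* Mimics GCTM(): block GC steps around a finalizer, then restore. */
static void run_finalizer(struct toy_state *g) {
    unsigned char oldgcstp = g->gcstp;
    g->gcstp |= GCSTPGC;                 /* avoid reentrant GC steps */
    printf("  in finalizer: gcrunning=%d\n", gcrunning(g->gcstp));
    g->gcstp = oldgcstp;                 /* restore previous state */
}

int main(void) {
    struct toy_state g = { 0 };
    printf("fresh state:    gcrunning=%d\n", gcrunning(g.gcstp));

    g.gcstp |= GCSTPUSR;                 /* collectgarbage("stop") */
    run_finalizer(&g);                   /* GC stays off for two reasons at once */
    printf("user stop held: gcrunning=%d\n", gcrunning(g.gcstp));

    g.gcstp = 0;                         /* restart; GCSTPGC is already zero here */
    printf("after restart:  gcrunning=%d\n", gcrunning(g.gcstp));

    g.gcstp |= GCSTPCLS;                 /* lua_close(): no new finalizers after here */
    printf("while closing:  gcrunning=%d\n", gcrunning(g.gcstp));
    return 0;
}

This is also why the patched lua_gc() returns -1 whenever GCSTPGC is set: a call made from inside a finalizer is rejected outright instead of silently re-entering the collector.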
diff --git a/SPECS/ofed-docs/ofed-docs.signatures.json b/SPECS/ofed-docs/ofed-docs.signatures.json index b3f36eb9671..21758251b41 100644 --- a/SPECS/ofed-docs/ofed-docs.signatures.json +++ b/SPECS/ofed-docs/ofed-docs.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ofed-docs-24.10.tar.gz": "ca307e3ef7407d9f4386fe021dd3f130d114f793884e60493ae129fdc83f7478" + "ofed-docs-25.07.tar.gz": "eb4ead69a6bf16616f807a9ffe7d0fa34ae4df372bdedef6bdcd1f9b78d171c3" } } diff --git a/SPECS/ofed-docs/ofed-docs.spec b/SPECS/ofed-docs/ofed-docs.spec index 15de25c3357..c9fafc5fc56 100644 --- a/SPECS/ofed-docs/ofed-docs.spec +++ b/SPECS/ofed-docs/ofed-docs.spec @@ -28,16 +28,19 @@ # $Id: ofed-docs.spec 7948 2006-06-13 12:42:34Z vlad $ # -%global MLNX_OFED_VERSION 24.10-0.7.0.0 +%global MLNX_OFED_VERSION 25.07.0.9.7.1 Summary: OFED docs Name: ofed-docs -Version: 24.10 +Version: 25.07 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://www.openfabrics.org -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/%{MLNX_OFED_VERSION}/SRPMS/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz Group: Documentation/Man BuildRoot: %{?build_root:%{build_root}}%{!?build_root:/var/tmp/%{name}-%{version}-root} @@ -61,6 +64,10 @@ rm -rf $RPM_BUILD_ROOT %{_defaultdocdir}/%{name}-%{version} %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. +- Update source path + * Wed Jan 08 2025 Alberto David Perez Guevara 24.10-1 - Initial Azure Linux import from NVIDIA (license: GPLv2). - License verified. diff --git a/SPECS/ofed-scripts/ofed-scripts.signatures.json b/SPECS/ofed-scripts/ofed-scripts.signatures.json index 02c6b9a470c..dc7cdf53c21 100644 --- a/SPECS/ofed-scripts/ofed-scripts.signatures.json +++ b/SPECS/ofed-scripts/ofed-scripts.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "ofed-scripts-24.10.tar.gz": "c27e6739dd878e8879945e8d15b5d0950a5b80597a21b60e1018bf707e281f20" + "ofed-scripts-25.07.tar.gz": "ccf17949c543811cb6fd0a718341eb5df7206985c7ac90837dd3aab79a7fbeec" } } diff --git a/SPECS/ofed-scripts/ofed-scripts.spec b/SPECS/ofed-scripts/ofed-scripts.spec index f0bdf14bdd6..c532ab70f96 100644 --- a/SPECS/ofed-scripts/ofed-scripts.spec +++ b/SPECS/ofed-scripts/ofed-scripts.spec @@ -28,18 +28,21 @@ # $Id: ofed-scripts.spec 8402 2006-07-06 06:35:57Z vlad $ # -%global MLNX_OFED_VERSION 24.10-0.7.0.0 +%global MLNX_OFED_VERSION 25.07.0.9.7.1 Summary: OFED scripts Name: ofed-scripts -Version: 24.10 +Version: 25.07 Release: 1%{?dist} License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux Group: System Environment/Base URL: https://www.openfabrics.org -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/%{MLNX_OFED_VERSION}/SRPMS/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. 
+# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz BuildRoot: %{?build_root:%{build_root}}%{!?build_root:/var/tmp/%{name}-%{version}-root} @@ -147,6 +150,10 @@ echo "/etc/ld.so.conf.d/ofed.conf" >> ofed-files %{_prefix}/sbin/* %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. +- Update source path + * Wed Jan 08 2025 Alberto David Perez Guevara 24.10-1 - Upgrade version to 24.10.0 diff --git a/SPECS/openssl/0001-Add-NULL-check-to-PKCS12_item_decrypt_d2i_ex.patch b/SPECS/openssl/0001-Add-NULL-check-to-PKCS12_item_decrypt_d2i_ex.patch new file mode 100644 index 00000000000..29e6358893e --- /dev/null +++ b/SPECS/openssl/0001-Add-NULL-check-to-PKCS12_item_decrypt_d2i_ex.patch @@ -0,0 +1,31 @@ +From 0a2ecb95993b588d2156dd6527459cc3983aabd5 Mon Sep 17 00:00:00 2001 +From: Andrew Dinh +Date: Thu, 8 Jan 2026 01:24:30 +0900 +Subject: [PATCH] Add NULL check to PKCS12_item_decrypt_d2i_ex + +Address CVE-2025-69421 + +Add NULL check for oct parameter +--- + crypto/pkcs12/p12_decr.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/crypto/pkcs12/p12_decr.c b/crypto/pkcs12/p12_decr.c +index 606713b9ee..1614da4404 100644 +--- a/crypto/pkcs12/p12_decr.c ++++ b/crypto/pkcs12/p12_decr.c +@@ -146,6 +146,11 @@ void *PKCS12_item_decrypt_d2i_ex(const X509_ALGOR *algor, const ASN1_ITEM *it, + void *ret; + int outlen = 0; + ++ if (oct == NULL) { ++ ERR_raise(ERR_LIB_PKCS12, ERR_R_PASSED_NULL_PARAMETER); ++ return NULL; ++ } ++ + if (!PKCS12_pbe_crypt_ex(algor, pass, passlen, oct->data, oct->length, + &out, &outlen, 0, libctx, propq)) + return NULL; +-- +2.52.0 + diff --git a/SPECS/openssl/0001-Check-the-received-uncompressed-certificate-length-t.patch b/SPECS/openssl/0001-Check-the-received-uncompressed-certificate-length-t.patch new file mode 100644 index 00000000000..0fb4c0f8a6d --- /dev/null +++ b/SPECS/openssl/0001-Check-the-received-uncompressed-certificate-length-t.patch @@ -0,0 +1,33 @@ +From 04a93ac145041e3ef0121a2688cf7c1b23780519 Mon Sep 17 00:00:00 2001 +From: Igor Ustinov +Date: Thu, 8 Jan 2026 14:02:54 +0100 +Subject: [PATCH] Check the received uncompressed certificate length to prevent + excessive pre-decompression allocation. + +The patch was proposed by Tomas Dulka and Stanislav Fort (Aisle Research). 
+ +Fixes: CVE-2025-66199 +--- + ssl/statem/statem_lib.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/ssl/statem/statem_lib.c b/ssl/statem/statem_lib.c +index 9e0c853c0d..f82d8dcdac 100644 +--- a/ssl/statem/statem_lib.c ++++ b/ssl/statem/statem_lib.c +@@ -2877,6 +2877,12 @@ MSG_PROCESS_RETURN tls13_process_compressed_certificate(SSL_CONNECTION *sc, + goto err; + } + ++ /* Prevent excessive pre-decompression allocation */ ++ if (expected_length > sc->max_cert_list) { ++ SSLfatal(sc, SSL_AD_ILLEGAL_PARAMETER, SSL_R_EXCESSIVE_MESSAGE_SIZE); ++ goto err; ++ } ++ + if (PACKET_remaining(pkt) != comp_length || comp_length == 0) { + SSLfatal(sc, SSL_AD_DECODE_ERROR, SSL_R_BAD_DECOMPRESSION); + goto err; +-- +2.52.0 + diff --git a/SPECS/openssl/0001-Correct-handling-of-AEAD-encrypted-CMS-with-inadmiss.patch b/SPECS/openssl/0001-Correct-handling-of-AEAD-encrypted-CMS-with-inadmiss.patch new file mode 100644 index 00000000000..5633532ac6b --- /dev/null +++ b/SPECS/openssl/0001-Correct-handling-of-AEAD-encrypted-CMS-with-inadmiss.patch @@ -0,0 +1,31 @@ +From 190ba58c0a1d995d4da8b017054d4b74d138291c Mon Sep 17 00:00:00 2001 +From: Igor Ustinov +Date: Mon, 12 Jan 2026 12:13:35 +0100 +Subject: [PATCH 1/3] Correct handling of AEAD-encrypted CMS with inadmissibly + long IV + +Fixes CVE-2025-15467 +--- + crypto/evp/evp_lib.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/crypto/evp/evp_lib.c b/crypto/evp/evp_lib.c +index 9eae1d421c..58fa7ce43b 100644 +--- a/crypto/evp/evp_lib.c ++++ b/crypto/evp/evp_lib.c +@@ -228,10 +228,9 @@ int evp_cipher_get_asn1_aead_params(EVP_CIPHER_CTX *c, ASN1_TYPE *type, + if (type == NULL || asn1_params == NULL) + return 0; + +- i = ossl_asn1_type_get_octetstring_int(type, &tl, NULL, EVP_MAX_IV_LENGTH); +- if (i <= 0) ++ i = ossl_asn1_type_get_octetstring_int(type, &tl, iv, EVP_MAX_IV_LENGTH); ++ if (i <= 0 || i > EVP_MAX_IV_LENGTH) + return -1; +- ossl_asn1_type_get_octetstring_int(type, &tl, iv, i); + + memcpy(asn1_params->iv, iv, i); + asn1_params->iv_len = i; +-- +2.52.0 + diff --git a/SPECS/openssl/0001-Fix-OCB-AES-NI-HW-stream-path-unauthenticated-unencr.patch b/SPECS/openssl/0001-Fix-OCB-AES-NI-HW-stream-path-unauthenticated-unencr.patch new file mode 100644 index 00000000000..668455a689c --- /dev/null +++ b/SPECS/openssl/0001-Fix-OCB-AES-NI-HW-stream-path-unauthenticated-unencr.patch @@ -0,0 +1,70 @@ +From 1a556ff619473af9e179b202284a961590d5a2bd Mon Sep 17 00:00:00 2001 +From: Norbert Pocs +Date: Thu, 8 Jan 2026 15:04:54 +0100 +Subject: [PATCH] Fix OCB AES-NI/HW stream path unauthenticated/unencrypted + trailing bytes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +When ctx->stream (e.g., AES‑NI or ARMv8 CE) is available, the fast path +encrypts/decrypts full blocks but does not advance in/out pointers. The +tail-handling code then operates on the base pointers, effectively reprocessing +the beginning of the buffer while leaving the actual trailing bytes +unencrypted (encryption) or using the wrong plaintext (decryption). The +authentication checksum excludes the true tail. 
+ +CVE-2025-69418 + +Fixes: https://github.com/openssl/srt/issues/58 + +Signed-off-by: Norbert Pocs +--- + crypto/modes/ocb128.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/crypto/modes/ocb128.c b/crypto/modes/ocb128.c +index ce72baf6da..8a5d7c7db0 100644 +--- a/crypto/modes/ocb128.c ++++ b/crypto/modes/ocb128.c +@@ -337,7 +337,7 @@ int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx, + + if (num_blocks && all_num_blocks == (size_t)all_num_blocks + && ctx->stream != NULL) { +- size_t max_idx = 0, top = (size_t)all_num_blocks; ++ size_t max_idx = 0, top = (size_t)all_num_blocks, processed_bytes = 0; + + /* + * See how many L_{i} entries we need to process data at hand +@@ -351,6 +351,9 @@ int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx, + ctx->stream(in, out, num_blocks, ctx->keyenc, + (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c, + (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c); ++ processed_bytes = num_blocks * 16; ++ in += processed_bytes; ++ out += processed_bytes; + } else { + /* Loop through all full blocks to be encrypted */ + for (i = ctx->sess.blocks_processed + 1; i <= all_num_blocks; i++) { +@@ -429,7 +432,7 @@ int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx, + + if (num_blocks && all_num_blocks == (size_t)all_num_blocks + && ctx->stream != NULL) { +- size_t max_idx = 0, top = (size_t)all_num_blocks; ++ size_t max_idx = 0, top = (size_t)all_num_blocks, processed_bytes = 0; + + /* + * See how many L_{i} entries we need to process data at hand +@@ -443,6 +446,9 @@ int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx, + ctx->stream(in, out, num_blocks, ctx->keydec, + (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c, + (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c); ++ processed_bytes = num_blocks * 16; ++ in += processed_bytes; ++ out += processed_bytes; + } else { + OCB_BLOCK tmp; + +-- +2.52.0 + diff --git a/SPECS/openssl/0001-Fix-heap-buffer-overflow-in-BIO_f_linebuffer.patch b/SPECS/openssl/0001-Fix-heap-buffer-overflow-in-BIO_f_linebuffer.patch new file mode 100644 index 00000000000..7aff251f9c1 --- /dev/null +++ b/SPECS/openssl/0001-Fix-heap-buffer-overflow-in-BIO_f_linebuffer.patch @@ -0,0 +1,67 @@ +From fb41a020b838f8145d07586275053568469a999c Mon Sep 17 00:00:00 2001 +From: Neil Horman +Date: Wed, 7 Jan 2026 11:52:09 -0500 +Subject: [PATCH] Fix heap buffer overflow in BIO_f_linebuffer + +When a FIO_f_linebuffer is part of a bio chain, and the next BIO +preforms short writes, the remainder of the unwritten buffer is copied +unconditionally to the internal buffer ctx->obuf, which may not be +sufficiently sized to handle the remaining data, resulting in a buffer +overflow. + +Fix it by only copying data when ctx->obuf has space, flushing to the +next BIO to increase available storage if needed. + +Fixes CVE-2025-68160 +--- + crypto/bio/bf_lbuf.c | 32 ++++++++++++++++++++++++++------ + 1 file changed, 26 insertions(+), 6 deletions(-) + +diff --git a/crypto/bio/bf_lbuf.c b/crypto/bio/bf_lbuf.c +index eed3dc4..ce71231 100644 +--- a/crypto/bio/bf_lbuf.c ++++ b/crypto/bio/bf_lbuf.c +@@ -186,14 +186,34 @@ static int linebuffer_write(BIO *b, const char *in, int inl) + while (foundnl && inl > 0); + /* + * We've written as much as we can. The rest of the input buffer, if +- * any, is text that doesn't and with a NL and therefore needs to be +- * saved for the next trip. ++ * any, is text that doesn't end with a NL and therefore we need to try ++ * free up some space in our obuf so we can make forward progress. 
+ */ +- if (inl > 0) { +- memcpy(&(ctx->obuf[ctx->obuf_len]), in, inl); +- ctx->obuf_len += inl; +- num += inl; ++ while (inl > 0) { ++ size_t avail = (size_t)ctx->obuf_size - (size_t)ctx->obuf_len; ++ size_t to_copy; ++ ++ if (avail == 0) { ++ /* Flush buffered data to make room */ ++ i = BIO_write(b->next_bio, ctx->obuf, ctx->obuf_len); ++ if (i <= 0) { ++ BIO_copy_next_retry(b); ++ return num > 0 ? num : i; ++ } ++ if (i < ctx->obuf_len) ++ memmove(ctx->obuf, ctx->obuf + i, ctx->obuf_len - i); ++ ctx->obuf_len -= i; ++ continue; ++ } ++ ++ to_copy = inl > (int)avail ? avail : (size_t)inl; ++ memcpy(&(ctx->obuf[ctx->obuf_len]), in, to_copy); ++ ctx->obuf_len += (int)to_copy; ++ in += to_copy; ++ inl -= (int)to_copy; ++ num += (int)to_copy; + } ++ + return num; + } + +-- +2.45.4 + diff --git a/SPECS/openssl/0001-Verify-ASN1-object-s-types-before-attempting-to-acce.patch b/SPECS/openssl/0001-Verify-ASN1-object-s-types-before-attempting-to-acce.patch new file mode 100644 index 00000000000..3a2d5250a09 --- /dev/null +++ b/SPECS/openssl/0001-Verify-ASN1-object-s-types-before-attempting-to-acce.patch @@ -0,0 +1,40 @@ +From 6453d278557c8719233793730ec500c84aea55d9 Mon Sep 17 00:00:00 2001 +From: Bob Beck +Date: Wed, 7 Jan 2026 11:29:48 -0700 +Subject: [PATCH] Verify ASN1 object's types before attempting to access them + as a particular type + +Issue was reported in ossl_ess_get_signing_cert but is also present in +ossl_ess_get_signing_cert_v2. + +Fixes: https://github.com/openssl/srt/issues/61 +Fixes CVE-2025-69420 +--- + crypto/ts/ts_rsp_verify.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/crypto/ts/ts_rsp_verify.c b/crypto/ts/ts_rsp_verify.c +index 3876e30f47..40dab687d1 100644 +--- a/crypto/ts/ts_rsp_verify.c ++++ b/crypto/ts/ts_rsp_verify.c +@@ -209,7 +209,7 @@ static ESS_SIGNING_CERT *ossl_ess_get_signing_cert(const PKCS7_SIGNER_INFO *si) + const unsigned char *p; + + attr = PKCS7_get_signed_attribute(si, NID_id_smime_aa_signingCertificate); +- if (attr == NULL) ++ if (attr == NULL || attr->type != V_ASN1_SEQUENCE) + return NULL; + p = attr->value.sequence->data; + return d2i_ESS_SIGNING_CERT(NULL, &p, attr->value.sequence->length); +@@ -221,7 +221,7 @@ static ESS_SIGNING_CERT_V2 *ossl_ess_get_signing_cert_v2(const PKCS7_SIGNER_INFO + const unsigned char *p; + + attr = PKCS7_get_signed_attribute(si, NID_id_smime_aa_signingCertificateV2); +- if (attr == NULL) ++ if (attr == NULL || attr->type != V_ASN1_SEQUENCE) + return NULL; + p = attr->value.sequence->data; + return d2i_ESS_SIGNING_CERT_V2(NULL, &p, attr->value.sequence->length); +-- +2.52.0 + diff --git a/SPECS/openssl/0001-ossl_quic_get_cipher_by_char-Add-a-NULL-guard-before.patch b/SPECS/openssl/0001-ossl_quic_get_cipher_by_char-Add-a-NULL-guard-before.patch new file mode 100644 index 00000000000..8545630588a --- /dev/null +++ b/SPECS/openssl/0001-ossl_quic_get_cipher_by_char-Add-a-NULL-guard-before.patch @@ -0,0 +1,27 @@ +From 7da6afe3dac7d65b30f87f2c5d305b6e699bc5dc Mon Sep 17 00:00:00 2001 +From: Daniel Kubec +Date: Fri, 9 Jan 2026 14:33:24 +0100 +Subject: [PATCH] ossl_quic_get_cipher_by_char(): Add a NULL guard before + dereferencing SSL_CIPHER + +Fixes CVE-2025-15468 +--- + ssl/quic/quic_impl.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/ssl/quic/quic_impl.c b/ssl/quic/quic_impl.c +index 87c1370a8d..89c108a973 100644 +--- a/ssl/quic/quic_impl.c ++++ b/ssl/quic/quic_impl.c +@@ -5222,6 +5222,8 @@ const SSL_CIPHER *ossl_quic_get_cipher_by_char(const unsigned char *p) + { + const SSL_CIPHER 
*ciph = ssl3_get_cipher_by_char(p); + ++ if (ciph == NULL) ++ return NULL; + if ((ciph->algorithm2 & SSL_QUIC) == 0) + return NULL; + +-- +2.52.0 + diff --git a/SPECS/openssl/0002-Some-comments-to-clarify-functions-usage.patch b/SPECS/openssl/0002-Some-comments-to-clarify-functions-usage.patch new file mode 100644 index 00000000000..aa1c7459ce4 --- /dev/null +++ b/SPECS/openssl/0002-Some-comments-to-clarify-functions-usage.patch @@ -0,0 +1,57 @@ +From 6fb47957bfb0aef2deaa7df7aebd4eb52ffe20ce Mon Sep 17 00:00:00 2001 +From: Igor Ustinov +Date: Mon, 12 Jan 2026 12:15:42 +0100 +Subject: [PATCH 2/3] Some comments to clarify functions usage + +--- + crypto/asn1/evp_asn1.c | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/crypto/asn1/evp_asn1.c b/crypto/asn1/evp_asn1.c +index 382576364b..e73bda64e3 100644 +--- a/crypto/asn1/evp_asn1.c ++++ b/crypto/asn1/evp_asn1.c +@@ -60,6 +60,12 @@ static ossl_inline void asn1_type_init_oct(ASN1_OCTET_STRING *oct, + oct->flags = 0; + } + ++/* ++ * This function copies 'anum' to 'num' and the data of 'oct' to 'data'. ++ * If the length of 'data' > 'max_len', copies only the first 'max_len' ++ * bytes, but returns the full length of 'oct'; this allows distinguishing ++ * whether all the data was copied. ++ */ + static int asn1_type_get_int_oct(ASN1_OCTET_STRING *oct, int32_t anum, + long *num, unsigned char *data, int max_len) + { +@@ -106,6 +112,13 @@ int ASN1_TYPE_set_int_octetstring(ASN1_TYPE *a, long num, unsigned char *data, + return 0; + } + ++/* ++ * This function decodes an int-octet sequence and copies the integer to 'num' ++ * and the data of octet to 'data'. ++ * If the length of 'data' > 'max_len', copies only the first 'max_len' ++ * bytes, but returns the full length of 'oct'; this allows distinguishing ++ * whether all the data was copied. ++ */ + int ASN1_TYPE_get_int_octetstring(const ASN1_TYPE *a, long *num, + unsigned char *data, int max_len) + { +@@ -162,6 +175,13 @@ int ossl_asn1_type_set_octetstring_int(ASN1_TYPE *a, long num, + return 0; + } + ++/* ++ * This function decodes an octet-int sequence and copies the data of octet ++ * to 'data' and the integer to 'num'. ++ * If the length of 'data' > 'max_len', copies only the first 'max_len' ++ * bytes, but returns the full length of 'oct'; this allows distinguishing ++ * whether all the data was copied. 
++ */ + int ossl_asn1_type_get_octetstring_int(const ASN1_TYPE *a, long *num, + unsigned char *data, int max_len) + { +-- +2.52.0 + diff --git a/SPECS/openssl/0003-Test-for-handling-of-AEAD-encrypted-CMS-with-inadmis.patch b/SPECS/openssl/0003-Test-for-handling-of-AEAD-encrypted-CMS-with-inadmis.patch new file mode 100644 index 00000000000..54927d90ca9 --- /dev/null +++ b/SPECS/openssl/0003-Test-for-handling-of-AEAD-encrypted-CMS-with-inadmis.patch @@ -0,0 +1,126 @@ +From 1e8f5c7cd2c46b25a2877e8f3f4bbf954fbcdf77 Mon Sep 17 00:00:00 2001 +From: Igor Ustinov +Date: Sun, 11 Jan 2026 11:35:15 +0100 +Subject: [PATCH 3/3] Test for handling of AEAD-encrypted CMS with inadmissibly + long IV + +--- + test/cmsapitest.c | 39 ++++++++++++++++++- + test/recipes/80-test_cmsapi.t | 3 +- + .../encDataWithTooLongIV.pem | 11 ++++++ + 3 files changed, 50 insertions(+), 3 deletions(-) + create mode 100644 test/recipes/80-test_cmsapi_data/encDataWithTooLongIV.pem + +diff --git a/test/cmsapitest.c b/test/cmsapitest.c +index 88d519fd14..472d30c9e5 100644 +--- a/test/cmsapitest.c ++++ b/test/cmsapitest.c +@@ -9,10 +9,10 @@ + + #include + ++#include + #include + #include + #include +-#include + #include "../crypto/cms/cms_local.h" /* for d.signedData and d.envelopedData */ + + #include "testutil.h" +@@ -20,6 +20,7 @@ + static X509 *cert = NULL; + static EVP_PKEY *privkey = NULL; + static char *derin = NULL; ++static char *too_long_iv_cms_in = NULL; + + static int test_encrypt_decrypt(const EVP_CIPHER *cipher) + { +@@ -382,6 +383,38 @@ end: + return ret; + } + ++static int test_cms_aesgcm_iv_too_long(void) ++{ ++ int ret = 0; ++ BIO *cmsbio = NULL, *out = NULL; ++ CMS_ContentInfo *cms = NULL; ++ unsigned long err = 0; ++ ++ if (!TEST_ptr(cmsbio = BIO_new_file(too_long_iv_cms_in, "r"))) ++ goto end; ++ ++ if (!TEST_ptr(cms = PEM_read_bio_CMS(cmsbio, NULL, NULL, NULL))) ++ goto end; ++ ++ /* Must fail cleanly (no crash) */ ++ if (!TEST_false(CMS_decrypt(cms, privkey, cert, NULL, out, 0))) ++ goto end; ++ err = ERR_peek_last_error(); ++ if (!TEST_ulong_ne(err, 0)) ++ goto end; ++ if (!TEST_int_eq(ERR_GET_LIB(err), ERR_LIB_CMS)) ++ goto end; ++ if (!TEST_int_eq(ERR_GET_REASON(err), CMS_R_CIPHER_PARAMETER_INITIALISATION_ERROR)) ++ goto end; ++ ++ ret = 1; ++end: ++ CMS_ContentInfo_free(cms); ++ BIO_free(cmsbio); ++ BIO_free(out); ++ return ret; ++} ++ + OPT_TEST_DECLARE_USAGE("certfile privkeyfile derfile\n") + + int setup_tests(void) +@@ -396,7 +429,8 @@ int setup_tests(void) + + if (!TEST_ptr(certin = test_get_argument(0)) + || !TEST_ptr(privkeyin = test_get_argument(1)) +- || !TEST_ptr(derin = test_get_argument(2))) ++ || !TEST_ptr(derin = test_get_argument(2)) ++ || !TEST_ptr(too_long_iv_cms_in = test_get_argument(3))) + return 0; + + certbio = BIO_new_file(certin, "r"); +@@ -429,6 +463,7 @@ int setup_tests(void) + ADD_TEST(test_CMS_add1_cert); + ADD_TEST(test_d2i_CMS_bio_NULL); + ADD_ALL_TESTS(test_d2i_CMS_decode, 2); ++ ADD_TEST(test_cms_aesgcm_iv_too_long); + return 1; + } + +diff --git a/test/recipes/80-test_cmsapi.t b/test/recipes/80-test_cmsapi.t +index af00355a9d..182629e71a 100644 +--- a/test/recipes/80-test_cmsapi.t ++++ b/test/recipes/80-test_cmsapi.t +@@ -18,5 +18,6 @@ plan tests => 1; + + ok(run(test(["cmsapitest", srctop_file("test", "certs", "servercert.pem"), + srctop_file("test", "certs", "serverkey.pem"), +- srctop_file("test", "recipes", "80-test_cmsapi_data", "encryptedData.der")])), ++ srctop_file("test", "recipes", "80-test_cmsapi_data", "encryptedData.der"), ++ srctop_file("test", "recipes", 
"80-test_cmsapi_data", "encDataWithTooLongIV.pem")])), + "running cmsapitest"); +diff --git a/test/recipes/80-test_cmsapi_data/encDataWithTooLongIV.pem b/test/recipes/80-test_cmsapi_data/encDataWithTooLongIV.pem +new file mode 100644 +index 0000000000..4323cd2fb0 +--- /dev/null ++++ b/test/recipes/80-test_cmsapi_data/encDataWithTooLongIV.pem +@@ -0,0 +1,11 @@ ++-----BEGIN CMS----- ++MIIBmgYLKoZIhvcNAQkQARegggGJMIIBhQIBADGCATMwggEvAgEAMBcwEjEQMA4G ++A1UEAwwHUm9vdCBDQQIBAjANBgkqhkiG9w0BAQEFAASCAQC8ZqP1OqbletcUre1V ++b4XOobZzQr6wKMSsdjtGzVbZowUVv5DkOn9VOefrpg4HxMq/oi8IpzVYj8ZiKRMV ++NTJ+/d8FwwBwUUNNP/IDnfEpX+rT1+pGS5zAa7NenLoZgGBNjPy5I2OHP23fPnEd ++sm8YkFjzubkhAD1lod9pEOEqB3V2kTrTTiwzSNtMHggna1zPox6TkdZwFmMnp8d2 ++CVa6lIPGx26gFwCuIDSaavmQ2URJ615L8gAvpYUlpsDqjFsabWsbaOFbMz3bIGJu ++GkrX2ezX7CpuC1wjix26ojlTySJHv+L0IrpcaIzLlC5lB1rqtuija8dGm3rBNm/P ++AAUNMDcGCSqGSIb3DQEHATAjBglghkgBZQMEAQYwFgQRzxwoRQzOHVooVn3CpaWl ++paUCARCABUNdolo6BBA55E9hYaYO2S8C/ZnD8dRO ++-----END CMS----- +-- +2.52.0 + diff --git a/SPECS/openssl/CVE-2025-69419.patch b/SPECS/openssl/CVE-2025-69419.patch new file mode 100644 index 00000000000..811801edd8a --- /dev/null +++ b/SPECS/openssl/CVE-2025-69419.patch @@ -0,0 +1,48 @@ +From a26a90d38edec3748566129d824e664b54bee2e2 Mon Sep 17 00:00:00 2001 +From: Norbert Pocs +Date: Thu, 11 Dec 2025 12:49:00 +0100 +Subject: [PATCH] Check return code of UTF8_putc + +Signed-off-by: Norbert Pocs + +Reviewed-by: Nikola Pajkovsky +Reviewed-by: Viktor Dukhovni +(Merged from https://github.com/openssl/openssl/pull/29376) +--- + crypto/asn1/a_strex.c | 6 ++++-- + crypto/pkcs12/p12_utl.c | 5 +++++ + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/crypto/asn1/a_strex.c b/crypto/asn1/a_strex.c +index 683b8a06fc540..68c2e31a70a28 100644 +--- a/crypto/asn1/a_strex.c ++++ b/crypto/asn1/a_strex.c +@@ -198,8 +198,10 @@ static int do_buf(unsigned char *buf, int buflen, + orflags = CHARTYPE_LAST_ESC_2253; + if (type & BUF_TYPE_CONVUTF8) { + unsigned char utfbuf[6]; +- int utflen; +- utflen = UTF8_putc(utfbuf, sizeof(utfbuf), c); ++ int utflen = UTF8_putc(utfbuf, sizeof(utfbuf), c); ++ ++ if (utflen < 0) ++ return -1; /* error happened with UTF8 */ + for (i = 0; i < utflen; i++) { + /* + * We don't need to worry about setting orflags correctly +diff --git a/crypto/pkcs12/p12_utl.c b/crypto/pkcs12/p12_utl.c +index 1669ef5b07939..9360f9930713f 100644 +--- a/crypto/pkcs12/p12_utl.c ++++ b/crypto/pkcs12/p12_utl.c +@@ -206,6 +206,11 @@ char *OPENSSL_uni2utf8(const unsigned char *uni, int unilen) + /* re-run the loop emitting UTF-8 string */ + for (asclen = 0, i = 0; i < unilen; ) { + j = bmp_to_utf8(asctmp+asclen, uni+i, unilen-i); ++ /* when UTF8_putc fails */ ++ if (j < 0) { ++ OPENSSL_free(asctmp); ++ return NULL; ++ } + if (j == 4) i += 4; + else i += 2; + asclen += j; diff --git a/SPECS/openssl/CVE-2026-22796.patch b/SPECS/openssl/CVE-2026-22796.patch new file mode 100644 index 00000000000..c1dc210699f --- /dev/null +++ b/SPECS/openssl/CVE-2026-22796.patch @@ -0,0 +1,73 @@ +From eeee3cbd4d682095ed431052f00403004596373e Mon Sep 17 00:00:00 2001 +From: Bob Beck +Date: Wed, 7 Jan 2026 11:29:48 -0700 +Subject: [PATCH] Ensure ASN1 types are checked before use. + +Some of these were fixed by LibreSSL in commit https://github.com/openbsd/src/commit/aa1f637d454961d22117b4353f98253e984b3ba8 +this fix includes the other fixes in that commit, as well as fixes for others found by a scan +for a similar unvalidated access paradigm in the tree. 
+ +Reviewed-by: Kurt Roeckx +Reviewed-by: Shane Lontis +Reviewed-by: Tomas Mraz +(Merged from https://github.com/openssl/openssl/pull/29582) + +Fixes CVE-2026-22796, CVE-2026-22795 +--- + apps/s_client.c | 3 ++- + crypto/pkcs12/p12_kiss.c | 10 ++++++++-- + crypto/pkcs7/pk7_doit.c | 2 ++ + 3 files changed, 12 insertions(+), 3 deletions(-) + +diff --git a/apps/s_client.c b/apps/s_client.c +index c5b7384a290a4..1f52cf378fbbc 100644 +--- a/apps/s_client.c ++++ b/apps/s_client.c +@@ -2832,8 +2832,9 @@ int s_client_main(int argc, char **argv) + goto end; + } + atyp = ASN1_generate_nconf(genstr, cnf); +- if (atyp == NULL) { ++ if (atyp == NULL || atyp->type != V_ASN1_SEQUENCE) { + NCONF_free(cnf); ++ ASN1_TYPE_free(atyp); + BIO_printf(bio_err, "ASN1_generate_nconf failed\n"); + goto end; + } +diff --git a/crypto/pkcs12/p12_kiss.c b/crypto/pkcs12/p12_kiss.c +index 10b581612dbb2..d0236e34fe9df 100644 +--- a/crypto/pkcs12/p12_kiss.c ++++ b/crypto/pkcs12/p12_kiss.c +@@ -196,11 +196,17 @@ static int parse_bag(PKCS12_SAFEBAG *bag, const char *pass, int passlen, + ASN1_BMPSTRING *fname = NULL; + ASN1_OCTET_STRING *lkid = NULL; + +- if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_friendlyName))) ++ if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_friendlyName))) { ++ if (attrib->type != V_ASN1_BMPSTRING) ++ return 0; + fname = attrib->value.bmpstring; ++ } + +- if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_localKeyID))) ++ if ((attrib = PKCS12_SAFEBAG_get0_attr(bag, NID_localKeyID))) { ++ if (attrib->type != V_ASN1_OCTET_STRING) ++ return 0; + lkid = attrib->value.octet_string; ++ } + + switch (PKCS12_SAFEBAG_get_nid(bag)) { + case NID_keyBag: +diff --git a/crypto/pkcs7/pk7_doit.c b/crypto/pkcs7/pk7_doit.c +index 74f863af8fa52..6353fec47c068 100644 +--- a/crypto/pkcs7/pk7_doit.c ++++ b/crypto/pkcs7/pk7_doit.c +@@ -1178,6 +1178,8 @@ ASN1_OCTET_STRING *PKCS7_digest_from_attributes(STACK_OF(X509_ATTRIBUTE) *sk) + ASN1_TYPE *astype; + if ((astype = get_attribute(sk, NID_pkcs9_messageDigest)) == NULL) + return NULL; ++ if (astype->type != V_ASN1_OCTET_STRING) ++ return NULL; + return astype->value.octet_string; + } + diff --git a/SPECS/openssl/openssl.spec b/SPECS/openssl/openssl.spec index 6d4f21f9200..cb290482336 100644 --- a/SPECS/openssl/openssl.spec +++ b/SPECS/openssl/openssl.spec @@ -9,7 +9,7 @@ Summary: Utilities from the general purpose cryptography library with TLS implementation Name: openssl Version: 3.3.5 -Release: 1%{?dist} +Release: 3%{?dist} Vendor: Microsoft Corporation Distribution: Azure Linux Source: https://github.com/openssl/openssl/releases/download/openssl-%{version}/openssl-%{version}.tar.gz @@ -66,6 +66,17 @@ Patch80: 0001-Replacing-deprecated-functions-with-NULL-or-highest.patch # algorithms that are used in the speed tests. This patch skips those tests. # If SymCrypt adds support, we should change and eventually remove this patch. 
Patch82: prevent-unsupported-calls-into-symcrypt-in-speed.patch +Patch100: 0001-Correct-handling-of-AEAD-encrypted-CMS-with-inadmiss.patch +Patch101: 0002-Some-comments-to-clarify-functions-usage.patch +Patch102: 0003-Test-for-handling-of-AEAD-encrypted-CMS-with-inadmis.patch +Patch103: 0001-ossl_quic_get_cipher_by_char-Add-a-NULL-guard-before.patch +Patch104: 0001-Check-the-received-uncompressed-certificate-length-t.patch +Patch105: 0001-Fix-heap-buffer-overflow-in-BIO_f_linebuffer.patch +Patch106: 0001-Fix-OCB-AES-NI-HW-stream-path-unauthenticated-unencr.patch +Patch107: 0001-Verify-ASN1-object-s-types-before-attempting-to-acce.patch +Patch108: 0001-Add-NULL-check-to-PKCS12_item_decrypt_d2i_ex.patch +Patch109: CVE-2025-69419.patch +Patch110: CVE-2026-22796.patch License: Apache-2.0 URL: http://www.openssl.org/ @@ -85,13 +96,10 @@ BuildRequires: perl(lib) BuildRequires: perl(Pod::Html) BuildRequires: perl(Text::Template) BuildRequires: sed - -%if 0%{?with_check} BuildRequires: perl(Math::BigInt) BuildRequires: perl(Test::Harness) BuildRequires: perl(Test::More) BuildRequires: perl(Time::Piece) -%endif Requires: %{name}-libs%{?_isa} = %{version}-%{release} @@ -362,6 +370,13 @@ install -m644 %{SOURCE9} \ %ldconfig_scriptlets libs %changelog +* Thu Jan 29 2026 Lynsey Rydberg - 3.3.5-3 +- Patch CVE-2025-69419, CVE-2026-22795, and CVE-2026-22796 + +* Tue Jan 27 2026 Lynsey Rydberg - 3.3.5-2 +- Patch CVE-2025-15467, CVE-2025-15468, CVE-2025-66199, CVE-2025-68160, + CVE-2025-69418, CVE-2025-69420, and CVE-2025-69421 + * Thu Oct 02 2025 CBL-Mariner Servicing Account - 3.3.5-1 - Auto-upgrade to 3.3.5 for CVE-2025-9230 and CVE-2025-9232 diff --git a/SPECS/openvswitch/0001-tests-Fix-SSL-db-implementation-test-with-openssl-3..patch b/SPECS/openvswitch/0001-tests-Fix-SSL-db-implementation-test-with-openssl-3..patch new file mode 100644 index 00000000000..3fee58644ac --- /dev/null +++ b/SPECS/openvswitch/0001-tests-Fix-SSL-db-implementation-test-with-openssl-3..patch @@ -0,0 +1,38 @@ +From 7c4c453945ce68b04f36162637dac8a651a1601a Mon Sep 17 00:00:00 2001 +Message-ID: <7c4c453945ce68b04f36162637dac8a651a1601a.1709801660.git.tredaelli@redhat.com> +From: Timothy Redaelli +Date: Tue, 5 Mar 2024 19:50:10 +0100 +Subject: [PATCH] tests: Fix "SSL db: implementation" test with openssl > + 3.2.0. + +In OpenSSL 3.2.0 (81b741f) all the "alert" error messages was updated to +replace "sslv3" with "ssl/tls". + +This commit updates the "SSL db: implementation" test to support both the +pre-openssl 3.2.0 error message: "sslv3 alert certificate unknown" and the +post-openssl 3.2.0 error message: "ssl/tls alert certificate unknown". + +Signed-off-by: Timothy Redaelli +--- + tests/ovsdb-server.at | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/tests/ovsdb-server.at b/tests/ovsdb-server.at +index b8ccc4c8e..35447a52e 100644 +--- a/tests/ovsdb-server.at ++++ b/tests/ovsdb-server.at +@@ -936,8 +936,10 @@ AT_CHECK_UNQUOTED( + [ignore]) + # The error message for being unable to negotiate a shared ciphersuite + # is 'sslv3 alert handshake failure'. This is not the clearest message. ++# In openssl 3.2.0 all the error messages was updated to replace "sslv3" with ++# "ssl/tls". 
+ AT_CHECK_UNQUOTED( +- [grep "sslv3 alert handshake failure" output], [0], ++ [grep -E "(sslv3|ssl/tls) alert handshake failure" output], [0], + [stdout], + [ignore]) + OVSDB_SERVER_SHUTDOWN([" +-- +2.44.0 + diff --git a/SPECS/openvswitch/0001-tests-Fix-compatibility-issue-with-Python-3.13-in-vl.patch b/SPECS/openvswitch/0001-tests-Fix-compatibility-issue-with-Python-3.13-in-vl.patch new file mode 100644 index 00000000000..d74d4ff457e --- /dev/null +++ b/SPECS/openvswitch/0001-tests-Fix-compatibility-issue-with-Python-3.13-in-vl.patch @@ -0,0 +1,42 @@ +From 9185793e75435d890f18d391eaaeab0ade6f1415 Mon Sep 17 00:00:00 2001 +Message-ID: <9185793e75435d890f18d391eaaeab0ade6f1415.1716386938.git.tredaelli@redhat.com> +From: Frode Nordahl +Date: Mon, 8 Apr 2024 23:24:14 +0200 +Subject: [PATCH] tests: Fix compatibility issue with Python 3.13 in vlog.at. + +The vlog - Python3 test makes use of output from Python +Tracebacks in its test assertion. + +In Python 3.13 a line with tophat (``^``) markers is added below +Tracebacks from calls to assert [0], which makes the test fail. +This change of behavior is also backported to the Python 3.12 and +3.11 stable branches [1]. + +Strip lines containing one or more occurrence of the ``^`` +character from the output before performing the test assertions. + +0: https://github.com/python/cpython/pull/105935 +1: https://github.com/python/cpython/issues/116034 + +Reported-at: https://launchpad.net/bugs/2060434 +Signed-off-by: Frode Nordahl +Signed-off-by: Ilya Maximets +--- + tests/vlog.at | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tests/vlog.at b/tests/vlog.at +index 785014956..efe91479a 100644 +--- a/tests/vlog.at ++++ b/tests/vlog.at +@@ -8,6 +8,7 @@ AT_CHECK([$PYTHON3 $srcdir/test-vlog.py --log-file log_file \ + + AT_CHECK([sed -e 's/.*-.*-.*T..:..:..Z |//' \ + -e 's/File ".*", line [[0-9]][[0-9]]*,/File , line ,/' \ ++-e '/\^\+/d' \ + stderr_log], [0], [dnl + 0 | module_0 | EMER | emergency + 1 | module_0 | ERR | error +-- +2.45.0 + diff --git a/SPECS/openvswitch/openvswitch.spec b/SPECS/openvswitch/openvswitch.spec index 65294b861ff..93420038476 100644 --- a/SPECS/openvswitch/openvswitch.spec +++ b/SPECS/openvswitch/openvswitch.spec @@ -20,7 +20,7 @@ Summary: Open vSwitch daemon/database/utilities Name: openvswitch Version: 3.3.0 -Release: 1%{?dist} +Release: 2%{?dist} License: ASL 2.0 AND LGPLv2+ AND SISSL Vendor: Microsoft Corporation Distribution: Azure Linux @@ -32,6 +32,8 @@ Source1: openvswitch.sysusers # ovs-patches # OVS (including OVN) backports (0 - 300) +Patch0: 0001-tests-Fix-SSL-db-implementation-test-with-openssl-3..patch +Patch10: 0001-tests-Fix-compatibility-issue-with-Python-3.13-in-vl.patch BuildRequires: gcc gcc-c++ make BuildRequires: autoconf automake libtool @@ -394,7 +396,7 @@ fi %{_sbindir}/ovs-bugtool %{_mandir}/man8/ovs-dpctl-top.8* %{_mandir}/man8/ovs-bugtool.8* -%doc LICENSE +%license LICENSE %files test %{_bindir}/ovs-pcap @@ -495,13 +497,18 @@ fi %{_mandir}/man8/ovs-vswitchd.8* %{_mandir}/man8/ovs-parse-backtrace.8* %{_udevrulesdir}/91-vfio.rules -%doc LICENSE NOTICE README.rst NEWS rhel/README.RHEL.rst +%license LICENSE NOTICE +%doc README.rst NEWS rhel/README.RHEL.rst /var/lib/openvswitch %attr(750,openvswitch,openvswitch) %verify(not owner group) /var/log/openvswitch %ghost %attr(755,root,root) %verify(not owner group) %{_rundir}/openvswitch %{_sysusersdir}/openvswitch.conf %changelog +* Thu Jan 08 2026 Tobias Brick - 3.3.0-2 +- Add patches from fedora f40 to fix tests with new versions of openssl and python. 
+- Update to use correct locations for license files. + * Wed Feb 21 2024 Thien Trung Vuong - 3.3.0-1 - Update to version 3.3.0 diff --git a/SPECS/perftest/perftest.signatures.json b/SPECS/perftest/perftest.signatures.json index 30096a64469..23f91967b09 100644 --- a/SPECS/perftest/perftest.signatures.json +++ b/SPECS/perftest/perftest.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "perftest-24.10.0.tar.gz": "e81308122e667361ddaac9efb5af7fbfdf177df92bc19587291a8ea78322f239" + "perftest-25.07.0-0.104.g0c03534.tar.gz": "4e402c97aadd0305622245f750e808b1a4ba7a004ce7cc4dc834e81ec112628f" } } diff --git a/SPECS/perftest/perftest.spec b/SPECS/perftest/perftest.spec index be339ec9c87..4c60169058c 100644 --- a/SPECS/perftest/perftest.spec +++ b/SPECS/perftest/perftest.spec @@ -1,16 +1,19 @@ -%global extended_release 0.65.g9093bae -%global MLNX_OFED_VERSION 24.10-0.7.0.0 +%global extended_release 0.104.g0c03534 +%global MLNX_OFED_VERSION 25.07-0.9.7.1 Summary: IB Performance tests Name: perftest # Update extended_release with version updates -Version: 24.10.0 +Version: 25.07.0 Release: 1%{?dist} License: BSD or GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux Group: Productivity/Networking/Diagnostic URL: https://www.openfabrics.org -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/%{MLNX_OFED_VERSION}/SRPMS/%{name}-%{version}-%{extended_release}.tar.gz#/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}-%{extended_release}.tar.gz BuildRequires: libibumad-devel BuildRequires: libibverbs-devel BuildRequires: librdmacm-devel @@ -38,6 +41,10 @@ chmod -x runme %_mandir/man1/*.1* %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07.0-1 +- Upgrade version to 25.07.0. +- Update source path + * Wed Jan 08 2025 Alberto David Perez Guevara - 24.10.0-1 - Upgrade version to 24.10.0 diff --git a/SPECS/pgbouncer/pgbouncer.signatures.json b/SPECS/pgbouncer/pgbouncer.signatures.json index bb8cc45a1dd..e3853c1a3ab 100644 --- a/SPECS/pgbouncer/pgbouncer.signatures.json +++ b/SPECS/pgbouncer/pgbouncer.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { "pgbouncer.service": "9c158af014827b4b96577caacce1d5fbf1e186ebb481c96f4f071a0f05425fe1", - "pgbouncer-1.24.1.tar.gz": "da72a3aba13072876d055a3e58dd4aba4a5de4ed6148e73033185245598fd3e0" + "pgbouncer-1.25.1.tar.gz": "6e566ae92fe3ef7f6a1b9e26d6049f7d7ca39c40e29e7b38f6d5500ae15d8465" } } diff --git a/SPECS/pgbouncer/pgbouncer.spec b/SPECS/pgbouncer/pgbouncer.spec index 1f86faceabd..36e5e482c58 100644 --- a/SPECS/pgbouncer/pgbouncer.spec +++ b/SPECS/pgbouncer/pgbouncer.spec @@ -1,6 +1,6 @@ Summary: Connection pooler for PostgreSQL. Name: pgbouncer -Version: 1.24.1 +Version: 1.25.1 Release: 1%{?dist} License: ISC License URL: https://www.pgbouncer.org/ @@ -22,6 +22,9 @@ Pgbouncer is a light-weight, robust connection pooler for PostgreSQL. 
%prep %setup +#Prevent the installation of manpages since it depends on the pandoc package +sed -i 's|dist_man_MANS = doc/pgbouncer.1 doc/pgbouncer.5|dist_man_MANS =|' Makefile + %build %configure --datadir=%{_datadir} @@ -75,11 +78,12 @@ fi %{_bindir}/* /etc/systemd/system/%{name}.service %config(noreplace) %{_sysconfdir}/%{name}.ini -%{_mandir}/man1/%{name}.* -%{_mandir}/man5/%{name}.* /usr/share/doc/pgbouncer/* %changelog +* Thu Dec 04 2025 CBL-Mariner Servicing Account - 1.25.1-1 +- Auto-upgrade to 1.25.1 - for CVE-2025-12819 + * Tue Apr 22 2025 CBL-Mariner Servicing Account - 1.24.1-1 - Auto-upgrade to 1.24.1 - bump version to fix CVE-2025-2291 diff --git a/SPECS/php/php.signatures.json b/SPECS/php/php.signatures.json index f47d317aef7..6d4045d8437 100644 --- a/SPECS/php/php.signatures.json +++ b/SPECS/php/php.signatures.json @@ -1,19 +1,19 @@ { - "Signatures": { - "10-opcache.ini": "6065beb2ace54d6cb5a8cde751330ea358bd23692073c6e3d2c57f7c97bec869", - "20-ffi.ini": "f5e968fdd3eca54f3dab2399e243931cf16cd9da034f0364800aefab222271c0", - "macros.php": "917104496e8239e1ed1d4812871be772a5fa8b38cf80c4c59ec3e0c36d48310e", - "nginx-fpm.conf": "5a222ab2c3fc0145cb67a1c5125471bbf097de304e77c9858e7077a3b4fcad59", - "nginx-php.conf": "b3b3f744c4c122302fcb11f39cac78d01cef15ee6f8bd67e98b3438efcf8dc95", - "opcache-default.blacklist": "4eef0875e1a0c6a75b8a2bafd4ddc029b83be74dd336a6a99214b0c32808cb38", - "php-8.3.23.tar.xz": "08be64700f703bca6ff1284bf1fdaffa37ae1b9734b6559f8350248e8960a6db", - "php-fpm-www.conf": "1cacdd4962c01a0a968933c38db503023940ad9105f021bdab85d6cdc46dcbb8", - "php-fpm.conf": "bb261d53b9b42bb163a7637bb373ffa18a20dddf27a3efe6cb5ed1b1cf5981a9", - "php-fpm.logrotate": "7d8279bebb9ffabc596a2699150e93d4ce4513245890b9b786d337288b19fa79", - "php-fpm.service": "574f50dec5a0edd60e60e44e7cc2d03575bc728bdc0b0cab021ce3c55abc0117", - "php-fpm.wants": "846297e91ba02bd0e29b6635eeddcca01a7ad4faf5a8f27113543804331d0328", - "php.conf": "e2388be032eccf7c0197d597ba72259a095bf8434438a184e6a640edb4b59de2", - "php.ini": "8fd5a4d891c19320c07010fbbbac982c886b422bc8d062acaeae49d70c136fc8", - "php.modconf": "dc7303ea584452d2f742d002a648abe74905025aabf240259c7e8bd01746d278" - } -} \ No newline at end of file + "Signatures": { + "10-opcache.ini": "6065beb2ace54d6cb5a8cde751330ea358bd23692073c6e3d2c57f7c97bec869", + "20-ffi.ini": "f5e968fdd3eca54f3dab2399e243931cf16cd9da034f0364800aefab222271c0", + "macros.php": "917104496e8239e1ed1d4812871be772a5fa8b38cf80c4c59ec3e0c36d48310e", + "nginx-fpm.conf": "5a222ab2c3fc0145cb67a1c5125471bbf097de304e77c9858e7077a3b4fcad59", + "nginx-php.conf": "b3b3f744c4c122302fcb11f39cac78d01cef15ee6f8bd67e98b3438efcf8dc95", + "opcache-default.blacklist": "4eef0875e1a0c6a75b8a2bafd4ddc029b83be74dd336a6a99214b0c32808cb38", + "php-fpm-www.conf": "1cacdd4962c01a0a968933c38db503023940ad9105f021bdab85d6cdc46dcbb8", + "php-fpm.conf": "bb261d53b9b42bb163a7637bb373ffa18a20dddf27a3efe6cb5ed1b1cf5981a9", + "php-fpm.logrotate": "7d8279bebb9ffabc596a2699150e93d4ce4513245890b9b786d337288b19fa79", + "php-fpm.service": "574f50dec5a0edd60e60e44e7cc2d03575bc728bdc0b0cab021ce3c55abc0117", + "php-fpm.wants": "846297e91ba02bd0e29b6635eeddcca01a7ad4faf5a8f27113543804331d0328", + "php.conf": "e2388be032eccf7c0197d597ba72259a095bf8434438a184e6a640edb4b59de2", + "php.ini": "8fd5a4d891c19320c07010fbbbac982c886b422bc8d062acaeae49d70c136fc8", + "php.modconf": "dc7303ea584452d2f742d002a648abe74905025aabf240259c7e8bd01746d278", + "php-8.3.29.tar.xz": 
"f7950ca034b15a78f5de9f1b22f4d9bad1dd497114d175cb1672a4ca78077af5" + } +} diff --git a/SPECS/php/php.spec b/SPECS/php/php.spec index 51f7b8f6f18..48b16be4dd5 100644 --- a/SPECS/php/php.spec +++ b/SPECS/php/php.spec @@ -32,7 +32,7 @@ %global with_qdbm 0 Summary: PHP scripting language for creating dynamic web sites Name: php -Version: 8.3.23 +Version: 8.3.29 Release: 1%{?dist} # All files licensed under PHP version 3.01, except # Zend is licensed under Zend @@ -1514,6 +1514,9 @@ systemctl try-restart php-fpm.service >/dev/null 2>&1 || : %dir %{_datadir}/php/preload %changelog +* Sun Dec 28 2025 CBL-Mariner Servicing Account - 8.3.29-1 +- Auto-upgrade to 8.3.29 - for CVE-2025-14177, CVE-2025-14178, CVE-2025-14180 + * Tue Jul 15 2025 Aninda Pradhan - 8.3.23-1 - Upgrade to 8.3.23 to fix CVE-2025-1735, CVE-2025-6491, CVE-2025-1220 - Fixed build issue by replacing php-8.0.0-embed.patch with php-8.3.20-embed.patch diff --git a/SPECS/python-filelock/python-filelock.signatures.json b/SPECS/python-filelock/python-filelock.signatures.json index 76b2ab838fc..9d4863d343e 100644 --- a/SPECS/python-filelock/python-filelock.signatures.json +++ b/SPECS/python-filelock/python-filelock.signatures.json @@ -1,5 +1,5 @@ { - "Signatures": { - "python-filelock-3.14.0.tar.gz": "6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a" - } + "Signatures": { + "python-filelock-3.20.3.tar.gz": "18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1" + } } diff --git a/SPECS/python-filelock/python-filelock.spec b/SPECS/python-filelock/python-filelock.spec index 15f76974f5f..8a4c96d361e 100644 --- a/SPECS/python-filelock/python-filelock.spec +++ b/SPECS/python-filelock/python-filelock.spec @@ -1,13 +1,14 @@ %global srcname filelock Summary: A platform independent file lock Name: python-filelock -Version: 3.14.0 +Version: 3.20.3 Release: 1%{?dist} License: Unlicense Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://github.com/toxdev/filelock Source0: https://files.pythonhosted.org/packages/source/f/%{srcname}/%{srcname}-%{version}.tar.gz#/%{name}-%{version}.tar.gz +Patch0: remove-python-3.14-classifier.patch BuildArch: noarch %description @@ -30,6 +31,7 @@ BuildRequires: python%{python3_pkgversion}-setuptools BuildRequires: python%{python3_pkgversion}-setuptools_scm BuildRequires: python%{python3_pkgversion}-trove-classifiers %if %{with check} +BuildRequires: python%{python3_pkgversion}-pytest-asyncio BuildRequires: python%{python3_pkgversion}-pytest BuildRequires: python%{python3_pkgversion}-pytest-mock %endif @@ -55,14 +57,25 @@ the same lock object twice, it will not block. 
%pyproject_save_files %{srcname} %check +sed -i '/asyncio_default_fixture_loop_scope/d' pyproject.toml +sed -i '/verbosity_assertions/d' pyproject.toml pip3 install iniconfig -%pytest +pip3 install pytest-virtualenv +pip3 install pytest-timeout +pip3 install pytest-asyncio +%pytest -k "not test_mtime_zero_exit_branch" -v %files -n python%{python3_pkgversion}-%{srcname} -f %{pyproject_files} %doc README.md %license %{python3_sitelib}/%{srcname}-%{version}.dist-info/licenses/LICENSE %changelog +* Tue Jan 13 2026 CBL-Mariner Servicing Account - 3.20.3-1 +- Auto-upgrade to 3.20.3 - for CVE-2026-22701 + +* Tue Dec 23 2025 Archana Shettigar - 3.20.1-1 +- Auto-upgrade to 3.20.1 - for CVE-2025-68146 + * Fri Apr 26 2024 Osama Esmail - 3.14.0-1 - Lot of redoing to use pyproject - Removing 'docs' subpackage since the new src doesn't include that folder diff --git a/SPECS/python-filelock/remove-python-3.14-classifier.patch b/SPECS/python-filelock/remove-python-3.14-classifier.patch new file mode 100644 index 00000000000..d02ea061888 --- /dev/null +++ b/SPECS/python-filelock/remove-python-3.14-classifier.patch @@ -0,0 +1,13 @@ +diff --git a/pyproject.toml b/pyproject.toml +index 7b09dd5..568f3dd 100644 +--- a/pyproject.toml ++++ b/pyproject.toml +@@ -31,8 +31,6 @@ classifiers = [ + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +- "Programming Language :: Python :: 3.13", +- "Programming Language :: Python :: 3.14", + "Topic :: Internet", + "Topic :: Software Development :: Libraries", + "Topic :: System", diff --git a/SPECS/python-urllib3/CVE-2025-66418.patch b/SPECS/python-urllib3/CVE-2025-66418.patch new file mode 100644 index 00000000000..28a7ef4c999 --- /dev/null +++ b/SPECS/python-urllib3/CVE-2025-66418.patch @@ -0,0 +1,76 @@ +From 70238cb513ac67a51d431efa46acb4239659144a Mon Sep 17 00:00:00 2001 +From: Illia Volochii +Date: Fri, 5 Dec 2025 16:41:33 +0200 +Subject: [PATCH] Merge commit from fork + +* Add a hard-coded limit for the decompression chain + +* Reuse new list + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/urllib3/urllib3/commit/24d7b67eac89f94e11003424bcf0d8f7b72222a8.patch +--- + changelog/GHSA-gm62-xv2j-4w53.security.rst | 4 ++++ + src/urllib3/response.py | 12 +++++++++++- + test/test_response.py | 10 ++++++++++ + 3 files changed, 25 insertions(+), 1 deletion(-) + create mode 100644 changelog/GHSA-gm62-xv2j-4w53.security.rst + +diff --git a/changelog/GHSA-gm62-xv2j-4w53.security.rst b/changelog/GHSA-gm62-xv2j-4w53.security.rst +new file mode 100644 +index 0000000..6646eaa +--- /dev/null ++++ b/changelog/GHSA-gm62-xv2j-4w53.security.rst +@@ -0,0 +1,4 @@ ++Fixed a security issue where an attacker could compose an HTTP response with ++virtually unlimited links in the ``Content-Encoding`` header, potentially ++leading to a denial of service (DoS) attack by exhausting system resources ++during decoding. The number of allowed chained encodings is now limited to 5. +diff --git a/src/urllib3/response.py b/src/urllib3/response.py +index 12097ea..6156ee2 100644 +--- a/src/urllib3/response.py ++++ b/src/urllib3/response.py +@@ -192,8 +192,18 @@ class MultiDecoder(ContentDecoder): + they were applied. + """ + ++ # Maximum allowed number of chained HTTP encodings in the ++ # Content-Encoding header. 
++ max_decode_links = 5 ++ + def __init__(self, modes: str) -> None: +- self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] ++ encodings = [m.strip() for m in modes.split(",")] ++ if len(encodings) > self.max_decode_links: ++ raise DecodeError( ++ "Too many content encodings in the chain: " ++ f"{len(encodings)} > {self.max_decode_links}" ++ ) ++ self._decoders = [_get_decoder(e) for e in encodings] + + def flush(self) -> bytes: + return self._decoders[0].flush() +diff --git a/test/test_response.py b/test/test_response.py +index c6d9d15..8be23c9 100644 +--- a/test/test_response.py ++++ b/test/test_response.py +@@ -434,6 +434,16 @@ class TestResponse: + assert r.read(9 * 37) == b"foobarbaz" * 37 + assert r.read() == b"" + ++ def test_read_multi_decoding_too_many_links(self) -> None: ++ fp = BytesIO(b"foo") ++ with pytest.raises( ++ DecodeError, match="Too many content encodings in the chain: 6 > 5" ++ ): ++ HTTPResponse( ++ fp, ++ headers={"content-encoding": "gzip, deflate, br, zstd, gzip, deflate"}, ++ ) ++ + def test_body_blob(self) -> None: + resp = HTTPResponse(b"foo") + assert resp.data == b"foo" +-- +2.45.4 + diff --git a/SPECS/python-urllib3/CVE-2025-66471.patch b/SPECS/python-urllib3/CVE-2025-66471.patch new file mode 100644 index 00000000000..89cb2cb0458 --- /dev/null +++ b/SPECS/python-urllib3/CVE-2025-66471.patch @@ -0,0 +1,829 @@ +From c19571de34c47de3a766541b041637ba5f716ed7 Mon Sep 17 00:00:00 2001 +From: Illia Volochii +Date: Fri, 5 Dec 2025 16:40:41 +0200 +Subject: [PATCH] Merge commit from fork + +Modified to apply to Azure Linux + +Upstream Patch Reference: https://github.com/urllib3/urllib3/commit/c19571de34c47de3a766541b041637ba5f716ed7.patch +--- + docs/advanced-usage.rst | 3 +- + docs/user-guide.rst | 4 +- + noxfile.py | 11 +- + pyproject.toml | 5 +- + src/urllib3/response.py | 260 +++++++++++++++++++++++++++++++++------ + test/test_response.py | 264 +++++++++++++++++++++++++++++++++++++++- + 6 files changed, 499 insertions(+), 48 deletions(-) + +diff --git a/docs/advanced-usage.rst b/docs/advanced-usage.rst +index 36a51e6..a12c714 100644 +--- a/docs/advanced-usage.rst ++++ b/docs/advanced-usage.rst +@@ -66,7 +66,8 @@ When using ``preload_content=True`` (the default setting) the + response body will be read immediately into memory and the HTTP connection + will be released back into the pool without manual intervention. + +-However, when dealing with large responses it's often better to stream the response ++However, when dealing with responses of large or unknown length, ++it's often better to stream the response + content using ``preload_content=False``. Setting ``preload_content`` to ``False`` means + that urllib3 will only read from the socket when data is requested. + +diff --git a/docs/user-guide.rst b/docs/user-guide.rst +index 9416fe1..b003793 100644 +--- a/docs/user-guide.rst ++++ b/docs/user-guide.rst +@@ -143,8 +143,8 @@ to a byte string representing the response content: + print(resp.data) + # b"\xaa\xa5H?\x95\xe9\x9b\x11" + +-.. note:: For larger responses, it's sometimes better to :ref:`stream ` +- the response. ++.. note:: For responses of large or unknown length, it's sometimes better to ++ :ref:`stream ` the response. 
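As a rough standalone sketch of the streaming pattern the documentation note above recommends (assuming urllib3 v2, a placeholder URL, and an arbitrary 64 KiB chunk size; this is not code from the patch), a caller avoids preloading like this:

# Minimal sketch, not taken from the patch: stream a response of large or
# unknown length instead of preloading the whole body into memory.
import urllib3

http = urllib3.PoolManager()
resp = http.request("GET", "https://example.com/large-file", preload_content=False)
try:
    for chunk in resp.stream(64 * 1024):   # read at most 64 KiB at a time
        ...                                # process each chunk as it arrives
finally:
    resp.release_conn()                    # hand the connection back to the pool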
+ + Using io Wrappers with Response Content + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +diff --git a/noxfile.py b/noxfile.py +index 4573370..f8e99ce 100644 +--- a/noxfile.py ++++ b/noxfile.py +@@ -10,11 +10,14 @@ import nox + def tests_impl( + session: nox.Session, + extras: str = "socks,secure,brotli,zstd", ++ extra_dependencies: list[str] | None = None, + byte_string_comparisons: bool = True, + ) -> None: + # Install deps and the package itself. + session.install("-r", "dev-requirements.txt") + session.install(f".[{extras}]") ++ if extra_dependencies: ++ session.install(*extra_dependencies) + + # Show the pip version. + session.run("pip", "--version") +@@ -68,8 +71,12 @@ def test_brotlipy(session: nox.Session) -> None: + """Check that if 'brotlipy' is installed instead of 'brotli' or + 'brotlicffi' that we still don't blow up. + """ +- session.install("brotlipy") +- tests_impl(session, extras="socks,secure", byte_string_comparisons=False) ++ tests_impl( ++ session, ++ extras="socks,secure", ++ extra_dependencies=["brotlipy"], ++ byte_string_comparisons=False, ++ ) + + + def git_clone(session: nox.Session, git_url: str) -> None: +diff --git a/pyproject.toml b/pyproject.toml +index c8c75b9..99652de 100644 +--- a/pyproject.toml ++++ b/pyproject.toml +@@ -40,8 +40,8 @@ dynamic = ["version"] + + [project.optional-dependencies] + brotli = [ +- "brotli>=1.0.9; platform_python_implementation == 'CPython'", +- "brotlicffi>=0.8.0; platform_python_implementation != 'CPython'" ++ "brotli>=1.2.0; platform_python_implementation == 'CPython'", ++ "brotlicffi>=1.2.0.0; platform_python_implementation != 'CPython'" + ] + zstd = [ + "zstandard>=0.18.0", +@@ -98,6 +98,7 @@ filterwarnings = [ + '''default:ssl\.PROTOCOL_TLSv1_2 is deprecated:DeprecationWarning''', + '''default:unclosed .*:ResourceWarning''', + '''default:ssl NPN is deprecated, use ALPN instead:DeprecationWarning''', ++ '''default:Brotli >= 1.2.0 is required to prevent decompression bombs\.:urllib3.exceptions.DependencyWarning''' + ] + + [tool.isort] +diff --git a/src/urllib3/response.py b/src/urllib3/response.py +index 12097ea..2fc5546 100644 +--- a/src/urllib3/response.py ++++ b/src/urllib3/response.py +@@ -45,6 +45,7 @@ from .connection import BaseSSLError, HTTPConnection, HTTPException + from .exceptions import ( + BodyNotHttplibCompatible, + DecodeError, ++ DependencyWarning, + HTTPError, + IncompleteRead, + InvalidChunkLength, +@@ -66,7 +67,11 @@ log = logging.getLogger(__name__) + + + class ContentDecoder: +- def decompress(self, data: bytes) -> bytes: ++ def decompress(self, data: bytes, max_length: int = -1) -> bytes: ++ raise NotImplementedError() ++ ++ @property ++ def has_unconsumed_tail(self) -> bool: + raise NotImplementedError() + + def flush(self) -> bytes: +@@ -76,30 +81,57 @@ class ContentDecoder: + class DeflateDecoder(ContentDecoder): + def __init__(self) -> None: + self._first_try = True +- self._data = b"" ++ self._first_try_data = b"" ++ self._unfed_data = b"" + self._obj = zlib.decompressobj() + +- def decompress(self, data: bytes) -> bytes: +- if not data: ++ def decompress(self, data: bytes, max_length: int = -1) -> bytes: ++ data = self._unfed_data + data ++ self._unfed_data = b"" ++ if not data and not self._obj.unconsumed_tail: + return data ++ original_max_length = max_length ++ if original_max_length < 0: ++ max_length = 0 ++ elif original_max_length == 0: ++ # We should not pass 0 to the zlib decompressor because 0 is ++ # the default value that will make zlib decompress without a ++ # length limit. 
++ # Data should be stored for subsequent calls. ++ self._unfed_data = data ++ return b"" + ++ # Subsequent calls always reuse `self._obj`. zlib requires ++ # passing the unconsumed tail if decompression is to continue. + if not self._first_try: +- return self._obj.decompress(data) ++ return self._obj.decompress( ++ self._obj.unconsumed_tail + data, max_length=max_length ++ ) + +- self._data += data ++ # First call tries with RFC 1950 ZLIB format. ++ self._first_try_data += data + try: +- decompressed = self._obj.decompress(data) ++ decompressed = self._obj.decompress(data, max_length=max_length) + if decompressed: + self._first_try = False +- self._data = None # type: ignore[assignment] ++ self._first_try_data = b"" + return decompressed ++ # On failure, it falls back to RFC 1951 DEFLATE format. + except zlib.error: + self._first_try = False + self._obj = zlib.decompressobj(-zlib.MAX_WBITS) + try: +- return self.decompress(self._data) ++ return self.decompress( ++ self._first_try_data, max_length=original_max_length ++ ) + finally: +- self._data = None # type: ignore[assignment] ++ self._first_try_data = b"" ++ ++ @property ++ def has_unconsumed_tail(self) -> bool: ++ return bool(self._unfed_data) or ( ++ bool(self._obj.unconsumed_tail) and not self._first_try ++ ) + + def flush(self) -> bytes: + return self._obj.flush() +@@ -115,27 +147,61 @@ class GzipDecoder(ContentDecoder): + def __init__(self) -> None: + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + self._state = GzipDecoderState.FIRST_MEMBER ++ self._unconsumed_tail = b"" + +- def decompress(self, data: bytes) -> bytes: ++ def decompress(self, data: bytes, max_length: int = -1) -> bytes: + ret = bytearray() +- if self._state == GzipDecoderState.SWALLOW_DATA or not data: ++ if self._state == GzipDecoderState.SWALLOW_DATA: + return bytes(ret) ++ ++ if max_length == 0: ++ # We should not pass 0 to the zlib decompressor because 0 is ++ # the default value that will make zlib decompress without a ++ # length limit. ++ # Data should be stored for subsequent calls. ++ self._unconsumed_tail += data ++ return b"" ++ ++ # zlib requires passing the unconsumed tail to the subsequent ++ # call if decompression is to continue. ++ data = self._unconsumed_tail + data ++ if not data and self._obj.eof: ++ return bytes(ret) ++ + while True: + try: +- ret += self._obj.decompress(data) ++ ret += self._obj.decompress( ++ data, max_length=max(max_length - len(ret), 0) ++ ) + except zlib.error: + previous_state = self._state + # Ignore data after the first error + self._state = GzipDecoderState.SWALLOW_DATA ++ self._unconsumed_tail = b"" + if previous_state == GzipDecoderState.OTHER_MEMBERS: + # Allow trailing garbage acceptable in other gzip clients + return bytes(ret) + raise +- data = self._obj.unused_data ++ ++ self._unconsumed_tail = data = ( ++ self._obj.unconsumed_tail or self._obj.unused_data ++ ) ++ if max_length > 0 and len(ret) >= max_length: ++ break ++ + if not data: + return bytes(ret) +- self._state = GzipDecoderState.OTHER_MEMBERS +- self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) ++ # When the end of a gzip member is reached, a new decompressor ++ # must be created for unused (possibly future) data. 
++ if self._obj.eof: ++ self._state = GzipDecoderState.OTHER_MEMBERS ++ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) ++ ++ return bytes(ret) ++ ++ @property ++ def has_unconsumed_tail(self) -> bool: ++ return bool(self._unconsumed_tail) + + def flush(self) -> bytes: + return self._obj.flush() +@@ -150,9 +216,35 @@ if brotli is not None: + def __init__(self) -> None: + self._obj = brotli.Decompressor() + if hasattr(self._obj, "decompress"): +- setattr(self, "decompress", self._obj.decompress) ++ setattr(self, "_decompress", self._obj.decompress) + else: +- setattr(self, "decompress", self._obj.process) ++ setattr(self, "_decompress", self._obj.process) ++ ++ # Requires Brotli >= 1.2.0 for `output_buffer_limit`. ++ def _decompress(self, data: bytes, output_buffer_limit: int = -1) -> bytes: ++ raise NotImplementedError() ++ ++ def decompress(self, data: bytes, max_length: int = -1) -> bytes: ++ try: ++ if max_length > 0: ++ return self._decompress(data, output_buffer_limit=max_length) ++ else: ++ return self._decompress(data) ++ except TypeError: ++ # Fallback for Brotli/brotlicffi/brotlipy versions without ++ # the `output_buffer_limit` parameter. ++ warnings.warn( ++ "Brotli >= 1.2.0 is required to prevent decompression bombs.", ++ DependencyWarning, ++ ) ++ return self._decompress(data) ++ ++ @property ++ def has_unconsumed_tail(self) -> bool: ++ try: ++ return not self._obj.can_accept_more_data() ++ except AttributeError: ++ return False + + def flush(self) -> bytes: + if hasattr(self._obj, "flush"): +@@ -166,16 +258,46 @@ if zstd is not None: + def __init__(self) -> None: + self._obj = zstd.ZstdDecompressor().decompressobj() + +- def decompress(self, data: bytes) -> bytes: +- if not data: ++ def decompress(self, data: bytes, max_length: int = -1) -> bytes: ++ if not data and not self.has_unconsumed_tail: + return b"" +- data_parts = [self._obj.decompress(data)] +- while self._obj.eof and self._obj.unused_data: ++ if self._obj.eof: ++ data = self._obj.unused_data + data ++ self._obj = zstd.ZstdDecompressor() ++ part = self._obj.decompress(data, max_length=max_length) ++ length = len(part) ++ data_parts = [part] ++ # Every loop iteration is supposed to read data from a separate frame. ++ # The loop breaks when: ++ # - enough data is read; ++ # - no more unused data is available; ++ # - end of the last read frame has not been reached (i.e., ++ # more data has to be fed). 
++ while ( ++ self._obj.eof ++ and self._obj.unused_data ++ and (max_length < 0 or length < max_length) ++ ): + unused_data = self._obj.unused_data +- self._obj = zstd.ZstdDecompressor().decompressobj() +- data_parts.append(self._obj.decompress(unused_data)) ++ if not self._obj.needs_input: ++ self._obj = zstd.ZstdDecompressor() ++ part = self._obj.decompress( ++ unused_data, ++ max_length=(max_length - length) if max_length > 0 else -1, ++ ) ++ if part_length := len(part): ++ data_parts.append(part) ++ length += part_length ++ elif self._obj.needs_input: ++ break + return b"".join(data_parts) + ++ @property ++ def has_unconsumed_tail(self) -> bool: ++ return not (self._obj.needs_input or self._obj.eof) or bool( ++ self._obj.unused_data ++ ) ++ + def flush(self) -> bytes: + ret = self._obj.flush() # note: this is a no-op + if not self._obj.eof: +@@ -198,10 +320,35 @@ class MultiDecoder(ContentDecoder): + def flush(self) -> bytes: + return self._decoders[0].flush() + +- def decompress(self, data: bytes) -> bytes: +- for d in reversed(self._decoders): +- data = d.decompress(data) +- return data ++ def decompress(self, data: bytes, max_length: int = -1) -> bytes: ++ if max_length <= 0: ++ for d in reversed(self._decoders): ++ data = d.decompress(data) ++ return data ++ ++ ret = bytearray() ++ # Every while loop iteration goes through all decoders once. ++ # It exits when enough data is read or no more data can be read. ++ # It is possible that the while loop iteration does not produce ++ # any data because we retrieve up to `max_length` from every ++ # decoder, and the amount of bytes may be insufficient for the ++ # next decoder to produce enough/any output. ++ while True: ++ any_data = False ++ for d in reversed(self._decoders): ++ data = d.decompress(data, max_length=max_length - len(ret)) ++ if data: ++ any_data = True ++ # We should not break when no data is returned because ++ # next decoders may produce data even with empty input. ++ ret += data ++ if not any_data or len(ret) >= max_length: ++ return bytes(ret) ++ data = b"" ++ ++ @property ++ def has_unconsumed_tail(self) -> bool: ++ return any(d.has_unconsumed_tail for d in self._decoders) + + + def _get_decoder(mode: str) -> ContentDecoder: +@@ -232,9 +379,6 @@ class BytesQueueBuffer: + + * self.buffer, which contains the full data + * the largest chunk that we will copy in get() +- +- The worst case scenario is a single chunk, in which case we'll make a full copy of +- the data inside get(). + """ + + def __init__(self) -> None: +@@ -256,6 +400,10 @@ class BytesQueueBuffer: + elif n < 0: + raise ValueError("n should be > 0") + ++ if len(self.buffer[0]) == n and isinstance(self.buffer[0], bytes): ++ self._size -= n ++ return self.buffer.popleft() ++ + fetched = 0 + ret = io.BytesIO() + while fetched < n: +@@ -427,7 +575,11 @@ class BaseHTTPResponse(io.IOBase): + self._decoder = _get_decoder(content_encoding) + + def _decode( +- self, data: bytes, decode_content: bool | None, flush_decoder: bool ++ self, ++ data: bytes, ++ decode_content: bool | None, ++ flush_decoder: bool, ++ max_length: int | None = None, + ) -> bytes: + """ + Decode the data passed in and potentially flush the decoder. 
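The hunks above thread a max_length budget through every content decoder so that a single read can only produce a bounded amount of decompressed output. A minimal sketch of the underlying zlib mechanism the patched decoders rely on (the payload and the 64 KiB cap below are arbitrary illustration, not code from the patch):

# Minimal sketch, not taken from the patch: zlib can cap the output of a
# single decompress() call and keeps the not-yet-processed input in
# unconsumed_tail for later calls, which is what bounded reads build on.
import zlib

payload = zlib.compress(b"A" * 1_000_000)   # ~1 KB of input, 1 MB when inflated
d = zlib.decompressobj()

first = d.decompress(payload, 64 * 1024)    # output of this call is capped
print(len(first))                           # never more than 64 KiB
print(len(d.unconsumed_tail))               # leftover input saved for later calls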
+@@ -440,9 +592,12 @@ class BaseHTTPResponse(io.IOBase): + ) + return data + ++ if max_length is None or flush_decoder: ++ max_length = -1 ++ + try: + if self._decoder: +- data = self._decoder.decompress(data) ++ data = self._decoder.decompress(data, max_length=max_length) + self._has_decoded_content = True + except self.DECODER_ERROR_CLASSES as e: + content_encoding = self.headers.get("content-encoding", "").lower() +@@ -873,6 +1028,14 @@ class HTTPResponse(BaseHTTPResponse): + if amt is not None: + cache_content = False + ++ if self._decoder and self._decoder.has_unconsumed_tail: ++ decoded_data = self._decode( ++ b"", ++ decode_content, ++ flush_decoder=False, ++ max_length=amt - len(self._decoded_buffer), ++ ) ++ self._decoded_buffer.put(decoded_data) + if len(self._decoded_buffer) >= amt: + return self._decoded_buffer.get(amt) + +@@ -880,7 +1043,11 @@ class HTTPResponse(BaseHTTPResponse): + + flush_decoder = amt is None or (amt != 0 and not data) + +- if not data and len(self._decoded_buffer) == 0: ++ if ( ++ not data ++ and len(self._decoded_buffer) == 0 ++ and not (self._decoder and self._decoder.has_unconsumed_tail) ++ ): + return data + + if amt is None: +@@ -897,7 +1064,12 @@ class HTTPResponse(BaseHTTPResponse): + ) + return data + +- decoded_data = self._decode(data, decode_content, flush_decoder) ++ decoded_data = self._decode( ++ data, ++ decode_content, ++ flush_decoder, ++ max_length=amt - len(self._decoded_buffer), ++ ) + self._decoded_buffer.put(decoded_data) + + while len(self._decoded_buffer) < amt and data: +@@ -905,7 +1077,12 @@ class HTTPResponse(BaseHTTPResponse): + # For example, the GZ file header takes 10 bytes, we don't want to read + # it one byte at a time + data = self._raw_read(amt) +- decoded_data = self._decode(data, decode_content, flush_decoder) ++ decoded_data = self._decode( ++ data, ++ decode_content, ++ flush_decoder, ++ max_length=amt - len(self._decoded_buffer), ++ ) + self._decoded_buffer.put(decoded_data) + data = self._decoded_buffer.get(amt) + +@@ -932,7 +1109,11 @@ class HTTPResponse(BaseHTTPResponse): + if self.chunked and self.supports_chunked_reads(): + yield from self.read_chunked(amt, decode_content=decode_content) + else: +- while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0: ++ while ( ++ not is_fp_closed(self._fp) ++ or len(self._decoded_buffer) > 0 ++ or (self._decoder and self._decoder.has_unconsumed_tail) ++ ): + data = self.read(amt=amt, decode_content=decode_content) + + if data: +@@ -1075,7 +1256,10 @@ class HTTPResponse(BaseHTTPResponse): + break + chunk = self._handle_chunk(amt) + decoded = self._decode( +- chunk, decode_content=decode_content, flush_decoder=False ++ chunk, ++ decode_content=decode_content, ++ flush_decoder=False, ++ max_length=amt, + ) + if decoded: + yield decoded +diff --git a/test/test_response.py b/test/test_response.py +index c6d9d15..ab9a254 100644 +--- a/test/test_response.py ++++ b/test/test_response.py +@@ -1,6 +1,7 @@ + from __future__ import annotations + + import contextlib ++import gzip + import http.client as httplib + import socket + import ssl +@@ -36,6 +37,32 @@ from urllib3.response import ( # type: ignore[attr-defined] + from urllib3.util.response import is_fp_closed + from urllib3.util.retry import RequestHistory, Retry + ++def zstd_compress(data: bytes) -> bytes: ++ if sys.version_info >= (3, 14): ++ from compression import zstd ++ else: ++ from backports import zstd ++ return zstd.compress(data) ++ ++ ++def deflate2_compress(data: bytes) -> bytes: ++ compressor = 
zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS) ++ return compressor.compress(data) + compressor.flush() ++ ++ ++if brotli: ++ try: ++ brotli.Decompressor().process(b"", output_buffer_limit=1024) ++ _brotli_gte_1_2_0_available = True ++ except (AttributeError, TypeError): ++ _brotli_gte_1_2_0_available = False ++else: ++ _brotli_gte_1_2_0_available = False ++try: ++ zstd_compress(b"") ++ _zstd_available = True ++except ModuleNotFoundError: ++ _zstd_available = False + + class TestBytesQueueBuffer: + def test_single_chunk(self) -> None: +@@ -333,7 +360,26 @@ class TestResponse: + assert r.data == b"foo" + + @onlyZstd() +- def test_decode_multiframe_zstd(self) -> None: ++ @pytest.mark.parametrize( ++ "read_amt", ++ ( ++ # Read all data at once. ++ None, ++ # Read one byte at a time, data of frames will be returned ++ # separately. ++ 1, ++ # Read two bytes at a time, the second read should return ++ # data from both frames. ++ 2, ++ # Read three bytes at a time, the whole frames will be ++ # returned separately in two calls. ++ 3, ++ # Read four bytes at a time, the first read should return ++ # data from the first frame and a part of the second frame. ++ 4, ++ ), ++ ) ++ def test_decode_multiframe_zstd(self, read_amt: int | None) -> None: + data = ( + # Zstandard frame + zstd.compress(b"foo") +@@ -348,8 +394,57 @@ class TestResponse: + ) + + fp = BytesIO(data) +- r = HTTPResponse(fp, headers={"content-encoding": "zstd"}) +- assert r.data == b"foobar" ++ result = bytearray() ++ r = HTTPResponse( ++ fp, headers={"content-encoding": "zstd"}, preload_content=False ++ ) ++ total_length = 6 ++ while len(result) < total_length: ++ chunk = r.read(read_amt, decode_content=True) ++ if read_amt is None: ++ assert len(chunk) == total_length ++ else: ++ assert len(chunk) == min(read_amt, total_length - len(result)) ++ result += chunk ++ assert bytes(result) == b"foobar" ++ ++ @onlyZstd() ++ def test_decode_multiframe_zstd_with_max_length_close_to_compressed_data_size( ++ self, ++ ) -> None: ++ """ ++ Test decoding when the first read from the socket returns all ++ the compressed frames, but then it has to be decompressed in a ++ couple of read calls. ++ """ ++ data = ( ++ # Zstandard frame ++ zstd_compress(b"x" * 1024) ++ # skippable frame (must be ignored) ++ + bytes.fromhex( ++ "50 2A 4D 18" # Magic_Number (little-endian) ++ "07 00 00 00" # Frame_Size (little-endian) ++ "00 00 00 00 00 00 00" # User_Data ++ ) ++ # Zstandard frame ++ + zstd_compress(b"y" * 1024) ++ ) ++ ++ fp = BytesIO(data) ++ r = HTTPResponse( ++ fp, headers={"content-encoding": "zstd"}, preload_content=False ++ ) ++ # Read the whole first frame. ++ assert r.read(1024) == b"x" * 1024 ++ assert len(r._decoded_buffer) == 0 ++ # Read the whole second frame in two reads. ++ assert r.read(512) == b"y" * 512 ++ assert len(r._decoded_buffer) == 0 ++ assert r.read(512) == b"y" * 512 ++ assert len(r._decoded_buffer) == 0 ++ # Ensure no more data is left. 
++ assert r.read() == b"" ++ assert len(r._decoded_buffer) == 0 + + @onlyZstd() + def test_chunked_decoding_zstd(self) -> None: +@@ -385,6 +480,169 @@ class TestResponse: + with pytest.raises(DecodeError): + HTTPResponse(fp, headers={"content-encoding": "zstd"}) + ++ _test_compressor_params: list[ ++ tuple[str, tuple[str, typing.Callable[[bytes], bytes]] | None] ++ ] = [ ++ ("deflate1", ("deflate", zlib.compress)), ++ ("deflate2", ("deflate", deflate2_compress)), ++ ("gzip", ("gzip", gzip.compress)), ++ ] ++ if _brotli_gte_1_2_0_available: ++ _test_compressor_params.append(("brotli", ("br", brotli.compress))) ++ else: ++ _test_compressor_params.append(("brotli", None)) ++ if _zstd_available: ++ _test_compressor_params.append(("zstd", ("zstd", zstd_compress))) ++ else: ++ _test_compressor_params.append(("zstd", None)) ++ ++ @pytest.mark.parametrize("read_method", ("read",)) ++ @pytest.mark.parametrize( ++ "data", ++ [d[1] for d in _test_compressor_params], ++ ids=[d[0] for d in _test_compressor_params], ++ ) ++ def test_read_with_all_data_already_in_decompressor( ++ self, ++ request: pytest.FixtureRequest, ++ read_method: str, ++ data: tuple[str, typing.Callable[[bytes], bytes]] | None, ++ ) -> None: ++ if data is None: ++ pytest.skip(f"Proper {request.node.callspec.id} decoder is not available") ++ original_data = b"bar" * 1000 ++ name, compress_func = data ++ compressed_data = compress_func(original_data) ++ fp = mock.Mock(read=mock.Mock(return_value=b"")) ++ r = HTTPResponse(fp, headers={"content-encoding": name}, preload_content=False) ++ # Put all data in the decompressor's buffer. ++ r._init_decoder() ++ assert r._decoder is not None # for mypy ++ decoded = r._decoder.decompress(compressed_data, max_length=0) ++ if name == "br": ++ # It's known that some Brotli libraries do not respect ++ # `max_length`. ++ r._decoded_buffer.put(decoded) ++ else: ++ assert decoded == b"" ++ # Read the data via `HTTPResponse`. ++ read = getattr(r, read_method) ++ assert read(0) == b"" ++ assert read(2500) == original_data[:2500] ++ assert read(500) == original_data[2500:] ++ assert read(0) == b"" ++ assert read() == b"" ++ ++ @pytest.mark.parametrize( ++ "delta", ++ ( ++ 0, # First read from socket returns all compressed data. ++ -1, # First read from socket returns all but one byte of compressed data. ++ ), ++ ) ++ @pytest.mark.parametrize("read_method", ("read",)) ++ @pytest.mark.parametrize( ++ "data", ++ [d[1] for d in _test_compressor_params], ++ ids=[d[0] for d in _test_compressor_params], ++ ) ++ def test_decode_with_max_length_close_to_compressed_data_size( ++ self, ++ request: pytest.FixtureRequest, ++ delta: int, ++ read_method: str, ++ data: tuple[str, typing.Callable[[bytes], bytes]] | None, ++ ) -> None: ++ """ ++ Test decoding when the first read from the socket returns all or ++ almost all the compressed data, but then it has to be ++ decompressed in a couple of read calls. 
++ """ ++ if data is None: ++ pytest.skip(f"Proper {request.node.callspec.id} decoder is not available") ++ ++ original_data = b"foo" * 1000 ++ name, compress_func = data ++ compressed_data = compress_func(original_data) ++ fp = BytesIO(compressed_data) ++ r = HTTPResponse(fp, headers={"content-encoding": name}, preload_content=False) ++ initial_limit = len(compressed_data) + delta ++ read = getattr(r, read_method) ++ initial_chunk = read(amt=initial_limit, decode_content=True) ++ assert len(initial_chunk) == initial_limit ++ assert ( ++ len(read(amt=len(original_data), decode_content=True)) ++ == len(original_data) - initial_limit ++ ) ++ ++ # Prepare 50 MB of compressed data outside of the test measuring ++ # memory usage. ++ _test_memory_usage_decode_with_max_length_params: list[ ++ tuple[str, tuple[str, bytes] | None] ++ ] = [ ++ ( ++ params[0], ++ (params[1][0], params[1][1](b"A" * (50 * 2**20))) if params[1] else None, ++ ) ++ for params in _test_compressor_params ++ ] ++ ++ @pytest.mark.parametrize( ++ "data", ++ [d[1] for d in _test_memory_usage_decode_with_max_length_params], ++ ids=[d[0] for d in _test_memory_usage_decode_with_max_length_params], ++ ) ++ @pytest.mark.parametrize("read_method", ("read", "read_chunked", "stream")) ++ # Decoders consume different amounts of memory during decompression. ++ # We set the 10 MB limit to ensure that the whole decompressed data ++ # is not stored unnecessarily. ++ # ++ # FYI, the following consumption was observed for the test with ++ # `read` on CPython 3.14.0: ++ # - deflate: 2.3 MiB ++ # - deflate2: 2.1 MiB ++ # - gzip: 2.1 MiB ++ # - brotli: ++ # - brotli v1.2.0: 9 MiB ++ # - brotlicffi v1.2.0.0: 6 MiB ++ # - brotlipy v0.7.0: 105.8 MiB ++ # - zstd: 4.5 MiB ++ @pytest.mark.limit_memory("10 MB", current_thread_only=True) ++ def test_memory_usage_decode_with_max_length( ++ self, ++ request: pytest.FixtureRequest, ++ read_method: str, ++ data: tuple[str, bytes] | None, ++ ) -> None: ++ if data is None: ++ pytest.skip(f"Proper {request.node.callspec.id} decoder is not available") ++ ++ name, compressed_data = data ++ limit = 1024 * 1024 # 1 MiB ++ if read_method in ("read_chunked", "stream"): ++ httplib_r = httplib.HTTPResponse(MockSock) # type: ignore[arg-type] ++ httplib_r.fp = MockChunkedEncodingResponse([compressed_data]) # type: ignore[assignment] ++ r = HTTPResponse( ++ httplib_r, ++ preload_content=False, ++ headers={"transfer-encoding": "chunked", "content-encoding": name}, ++ ) ++ next(getattr(r, read_method)(amt=limit, decode_content=True)) ++ else: ++ fp = BytesIO(compressed_data) ++ r = HTTPResponse( ++ fp, headers={"content-encoding": name}, preload_content=False ++ ) ++ getattr(r, read_method)(amt=limit, decode_content=True) ++ ++ # Check that the internal decoded buffer is empty unless brotli ++ # is used. ++ # Google's brotli library does not fully respect the output ++ # buffer limit: https://github.com/google/brotli/issues/1396 ++ # And unmaintained brotlipy cannot limit the output buffer size. 
++ if name != "br" or brotli.__name__ == "brotlicffi": ++ assert len(r._decoded_buffer) == 0 ++ + def test_multi_decoding_deflate_deflate(self) -> None: + data = zlib.compress(zlib.compress(b"foo")) + +-- +2.43.0 + diff --git a/SPECS/python-urllib3/CVE-2026-21441.patch b/SPECS/python-urllib3/CVE-2026-21441.patch new file mode 100644 index 00000000000..2daeb19a47f --- /dev/null +++ b/SPECS/python-urllib3/CVE-2026-21441.patch @@ -0,0 +1,89 @@ +From 8864ac407bba8607950025e0979c4c69bc7abc7b Mon Sep 17 00:00:00 2001 +From: Illia Volochii +Date: Wed, 7 Jan 2026 18:07:30 +0200 +Subject: [PATCH] Merge commit from fork + +* Stop decoding response content during redirects needlessly + +* Rename the new query parameter + +* Add a changelog entry + +Upstream patch Reference: https://github.com/urllib3/urllib3/commit/8864ac407bba8607950025e0979c4c69bc7abc7b.patch +--- + dummyserver/handlers.py | 9 ++++++++- + src/urllib3/response.py | 6 +++++- + test/with_dummyserver/test_connectionpool.py | 19 +++++++++++++++++++ + 3 files changed, 32 insertions(+), 2 deletions(-) + +diff --git a/dummyserver/handlers.py b/dummyserver/handlers.py +index 86201a1..2bef080 100644 +--- a/dummyserver/handlers.py ++++ b/dummyserver/handlers.py +@@ -205,8 +205,15 @@ class TestingApp(RequestHandler): + if len(status) == 3: + status = f"{status} Redirect" + ++ compressed = params.get("compressed") == b"true" ++ + headers = [("Location", target)] +- return Response(status=status, headers=headers) ++ if compressed: ++ headers.append(("Content-Encoding", "gzip")) ++ data = gzip.compress(b"foo") ++ else: ++ data = b"" ++ return Response(body=data, status=status, headers=headers) + + def not_found(self, request: httputil.HTTPServerRequest) -> Response: + return Response("Not found", status="404 Not Found") +diff --git a/src/urllib3/response.py b/src/urllib3/response.py +index a06ecfb..6ddcbd6 100644 +--- a/src/urllib3/response.py ++++ b/src/urllib3/response.py +@@ -773,7 +773,11 @@ class HTTPResponse(BaseHTTPResponse): + Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. + """ + try: +- self.read() ++ self.read( ++ # Do not spend resources decoding the content unless ++ # decoding has already been initiated. ++ decode_content=self._has_decoded_content, ++ ) + except (HTTPError, OSError, BaseSSLError, HTTPException): + pass + +diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py +index ebfaf38..4f82136 100644 +--- a/test/with_dummyserver/test_connectionpool.py ++++ b/test/with_dummyserver/test_connectionpool.py +@@ -480,6 +480,25 @@ class TestConnectionPool(HTTPDummyServerTestCase): + assert r.status == 200 + assert r.data == b"Dummy server!" + ++ @mock.patch("urllib3.response.GzipDecoder.decompress") ++ def test_no_decoding_with_redirect_when_preload_disabled( ++ self, gzip_decompress: mock.MagicMock ++ ) -> None: ++ """ ++ Test that urllib3 does not attempt to decode a gzipped redirect ++ response when `preload_content` is set to `False`. ++ """ ++ with HTTPConnectionPool(self.host, self.port) as pool: ++ # Three requests are expected: two redirects and one final / 200 OK. 
++ response = pool.request( ++ "GET", ++ "/redirect", ++ fields={"target": "/redirect?compressed=true", "compressed": "true"}, ++ preload_content=False, ++ ) ++ assert response.status == 200 ++ gzip_decompress.assert_not_called() ++ + def test_303_redirect_makes_request_lose_body(self) -> None: + with HTTPConnectionPool(self.host, self.port) as pool: + response = pool.request( +-- +2.43.0 + diff --git a/SPECS/python-urllib3/python-urllib3.spec b/SPECS/python-urllib3/python-urllib3.spec index 95048bf773e..25331f6cbdd 100644 --- a/SPECS/python-urllib3/python-urllib3.spec +++ b/SPECS/python-urllib3/python-urllib3.spec @@ -1,7 +1,7 @@ Summary: A powerful, sanity-friendly HTTP client for Python. Name: python-urllib3 Version: 2.0.7 -Release: 2%{?dist} +Release: 4%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -13,6 +13,9 @@ Patch0: urllib3_test_recent_date.patch Patch1: change-backend-to-flit_core.patch Patch2: CVE-2024-37891.patch Patch3: CVE-2025-50181.patch +Patch4: CVE-2025-66418.patch +Patch5: CVE-2025-66471.patch +Patch6: CVE-2026-21441.patch %description A powerful, sanity-friendly HTTP client for Python. @@ -84,6 +87,12 @@ skiplist+=" or test_respect_retry_after_header_sleep" %{python3_sitelib}/* %changelog +* Fri Jan 09 2026 Azure Linux Security Servicing Account - 2.0.7-4 +- Patch for CVE-2026-21441 + +* Wed Dec 10 2025 Azure Linux Security Servicing Account - 2.0.7-3 +- Patch for CVE-2025-66418, CVE-2025-66471 + * Tue Jun 24 2025 Durga Jagadeesh Palli - 2.0.7-2 - add patch for CVE-2025-50181 diff --git a/SPECS/python-wheel/Use-vendored-packaging-to-canonicalize-requirements.patch b/SPECS/python-wheel/Use-vendored-packaging-to-canonicalize-requirements.patch new file mode 100644 index 00000000000..7d7e37d91e3 --- /dev/null +++ b/SPECS/python-wheel/Use-vendored-packaging-to-canonicalize-requirements.patch @@ -0,0 +1,38 @@ +From c35c77748f7ed54c0deee7dcf874a7acb4856008 Mon Sep 17 00:00:00 2001 +From: Archana Shettigar +Date: Tue, 3 Feb 2026 12:59:16 +0530 +Subject: [PATCH] Use vendored packaging to canonicalize requirements +Upstream Reference Patch: https://github.com/pypa/wheel/commit/4ec2ae368bb30b0a92617824f833ae615aca18cf + +--- + tests/test_metadata.py | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/tests/test_metadata.py b/tests/test_metadata.py +index db0ab0c..3719c6f 100644 +--- a/tests/test_metadata.py ++++ b/tests/test_metadata.py +@@ -12,9 +12,9 @@ def test_pkginfo_to_metadata(tmp_path: Path) -> None: + ("Metadata-Version", "2.1"), + ("Name", "spam"), + ("Version", "0.1"), +- ("Requires-Dist", "pip @ https://github.com/pypa/pip/archive/1.3.1.zip"), ++ ("Requires-Dist", "pip@ https://github.com/pypa/pip/archive/1.3.1.zip"), + ("Requires-Dist", 'pywin32; sys_platform == "win32"'), +- ("Requires-Dist", 'foo @ http://host/foo.zip ; sys_platform == "win32"'), ++ ("Requires-Dist", 'foo@ http://host/foo.zip ; sys_platform == "win32"'), + ("Provides-Extra", "signatures"), + ( + "Requires-Dist", +@@ -22,7 +22,7 @@ def test_pkginfo_to_metadata(tmp_path: Path) -> None: + ), + ("Provides-Extra", "empty_extra"), + ("Provides-Extra", "extra"), +- ("Requires-Dist", 'bar @ http://host/bar.zip ; extra == "extra"'), ++ ("Requires-Dist", 'bar@ http://host/bar.zip ; extra == "extra"'), + ("Provides-Extra", "faster-signatures"), + ("Requires-Dist", 'ed25519ll; extra == "faster-signatures"'), + ("Provides-Extra", "rest"), +-- +2.45.4 + diff --git a/SPECS/python-wheel/python-wheel.signatures.json 
b/SPECS/python-wheel/python-wheel.signatures.json index 204e84b55c9..e37c83f8aa1 100644 --- a/SPECS/python-wheel/python-wheel.signatures.json +++ b/SPECS/python-wheel/python-wheel.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "wheel-0.43.0.tar.gz": "23060d7cc8afafc2930554624b4bae7d58031830672048622c926675ab91e3b0" + "wheel-0.46.3.tar.gz": "36327d3bba035d9c3509421a42b59914fe9aab79d894b21cb9be17353abf6d2c" } -} \ No newline at end of file +} diff --git a/SPECS/python-wheel/python-wheel.spec b/SPECS/python-wheel/python-wheel.spec index c6880ac23ce..2f212f5516d 100644 --- a/SPECS/python-wheel/python-wheel.spec +++ b/SPECS/python-wheel/python-wheel.spec @@ -1,15 +1,17 @@ # The function of bootstrap is that it disables the wheel subpackage %bcond_with bootstrap +%global pypi_name wheel %bcond main_python 1 Summary: Built-package format for Python Name: python-%{pypi_name} -Version: 0.43.0 +Version: 0.46.3 Release: 1%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://github.com/pypa/wheel Source0: %{url}/archive/%{version}/%{pypi_name}-%{version}.tar.gz +Patch0: Use-vendored-packaging-to-canonicalize-requirements.patch %global pypi_name wheel %global python_wheel_name %{pypi_name}-%{version}-py3-none-any.whl %global python_wheeldir %{_datadir}/python-wheels @@ -58,6 +60,9 @@ A Python wheel of wheel to use with virtualenv. %prep %autosetup -n %{pypi_name}-%{version} -p1 +# flit_core expects [project].license to be a table/dict, not a string +sed -i 's/^license = "MIT"$/license = { text = "MIT" }/' pyproject.toml + %generate_buildrequires %pyproject_buildrequires @@ -115,6 +120,9 @@ pip3 install iniconfig %endif %changelog +* Wed Jan 28 2026 Azure Linux Security Servicing Account - 0.46.3-1 +- Updated to 0.46.3 to fix CVE-2026-24049 + * Fri May 10 2024 Betty Lakes - 0.43.0-1 - Updated to 0.43.0 diff --git a/SPECS/python3/CVE-2025-11468.patch b/SPECS/python3/CVE-2025-11468.patch new file mode 100644 index 00000000000..30bab5fabb7 --- /dev/null +++ b/SPECS/python3/CVE-2025-11468.patch @@ -0,0 +1,114 @@ +From e3c17d8ad56afeb3579052e5aebc6aa5ae115eef Mon Sep 17 00:00:00 2001 +From: Seth Michael Larson +Date: Mon, 19 Jan 2026 06:38:22 -0600 +Subject: [PATCH] gh-143935: Email preserve parens when folding comments + (GH-143936) + +Fix a bug in the folding of comments when flattening an email message +using a modern email policy. Comments consisting of a very long sequence of +non-foldable characters could trigger a forced line wrap that omitted the +required leading space on the continuation line, causing the remainder of +the comment to be interpreted as a new header field. This enabled header +injection with carefully crafted inputs. 
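The paragraph above describes the failure mode: a long run of non-foldable characters inside an address comment forced a wrap whose continuation line lacked the required leading space, so the rest of the comment could be read as a new header field. A small, self-contained way to observe the folding behaviour (the address, comment text, and 40-column limit below are made up for illustration; this is not part of the patch):

# Minimal sketch, not taken from the patch: fold a header carrying a long
# comment under a modern email policy and inspect the result. On a patched
# interpreter, every continuation line of the folded header starts with
# whitespace, so no new header field can be smuggled in.
from email.message import EmailMessage
from email.policy import default

msg = EmailMessage(policy=default.clone(max_line_length=40))
msg["To"] = "user@example.com (loremipsum dolorsitametconsecteturadipiscingelit)"
print(msg.as_string())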
+(cherry picked from commit 17d1490aa97bd6b98a42b1a9b324ead84e7fd8a2) + +Co-authored-by: Seth Michael Larson +Co-authored-by: Denis Ledoux +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/python/cpython/pull/144036.patch +--- + Lib/email/_header_value_parser.py | 15 +++++++++++- + .../test_email/test__header_value_parser.py | 23 +++++++++++++++++++ + ...-01-16-14-40-31.gh-issue-143935.U2YtKl.rst | 6 +++++ + 3 files changed, 43 insertions(+), 1 deletion(-) + create mode 100644 Misc/NEWS.d/next/Security/2026-01-16-14-40-31.gh-issue-143935.U2YtKl.rst + +diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py +index 3d845c0..4608f94 100644 +--- a/Lib/email/_header_value_parser.py ++++ b/Lib/email/_header_value_parser.py +@@ -101,6 +101,12 @@ def make_quoted_pairs(value): + return str(value).replace('\\', '\\\\').replace('"', '\\"') + + ++def make_parenthesis_pairs(value): ++ """Escape parenthesis and backslash for use within a comment.""" ++ return str(value).replace('\\', '\\\\') \ ++ .replace('(', '\\(').replace(')', '\\)') ++ ++ + def quote_string(value): + escaped = make_quoted_pairs(value) + return f'"{escaped}"' +@@ -933,7 +939,7 @@ class WhiteSpaceTerminal(Terminal): + return ' ' + + def startswith_fws(self): +- return True ++ return self and self[0] in WSP + + + class ValueTerminal(Terminal): +@@ -2922,6 +2928,13 @@ def _refold_parse_tree(parse_tree, *, policy): + [ValueTerminal(make_quoted_pairs(p), 'ptext') + for p in newparts] + + [ValueTerminal('"', 'ptext')]) ++ if part.token_type == 'comment': ++ newparts = ( ++ [ValueTerminal('(', 'ptext')] + ++ [ValueTerminal(make_parenthesis_pairs(p), 'ptext') ++ if p.token_type == 'ptext' else p ++ for p in newparts] + ++ [ValueTerminal(')', 'ptext')]) + if not part.as_ew_allowed: + wrap_as_ew_blocked += 1 + newparts.append(end_ew_not_allowed) +diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py +index efd1695..8ca170e 100644 +--- a/Lib/test/test_email/test__header_value_parser.py ++++ b/Lib/test/test_email/test__header_value_parser.py +@@ -3116,6 +3116,29 @@ class TestFolding(TestEmailBase): + with self.subTest(to=to): + self._test(parser.get_address_list(to)[0], folded, policy=policy) + ++ def test_address_list_with_long_unwrapable_comment(self): ++ policy = self.policy.clone(max_line_length=40) ++ cases = [ ++ # (to, folded) ++ ('(loremipsumdolorsitametconsecteturadipi)', ++ '(loremipsumdolorsitametconsecteturadipi)\n'), ++ ('(loremipsumdolorsitametconsecteturadipi)', ++ '(loremipsumdolorsitametconsecteturadipi)\n'), ++ ('(loremipsum dolorsitametconsecteturadipi)', ++ '(loremipsum dolorsitametconsecteturadipi)\n'), ++ ('(loremipsum dolorsitametconsecteturadipi)', ++ '(loremipsum\n dolorsitametconsecteturadipi)\n'), ++ ('(Escaped \\( \\) chars \\\\ in comments stay escaped)', ++ '(Escaped \\( \\) chars \\\\ in comments stay\n escaped)\n'), ++ ('((loremipsum)(loremipsum)(loremipsum)(loremipsum))', ++ '((loremipsum)(loremipsum)(loremipsum)(loremipsum))\n'), ++ ('((loremipsum)(loremipsum)(loremipsum) (loremipsum))', ++ '((loremipsum)(loremipsum)(loremipsum)\n (loremipsum))\n'), ++ ] ++ for (to, folded) in cases: ++ with self.subTest(to=to): ++ self._test(parser.get_address_list(to)[0], folded, policy=policy) ++ + # XXX Need tests with comments on various sides of a unicode token, + # and with unicode tokens in the comments. Spaces inside the quotes + # currently don't do the right thing. 
+diff --git a/Misc/NEWS.d/next/Security/2026-01-16-14-40-31.gh-issue-143935.U2YtKl.rst b/Misc/NEWS.d/next/Security/2026-01-16-14-40-31.gh-issue-143935.U2YtKl.rst +new file mode 100644 +index 0000000..c3d8649 +--- /dev/null ++++ b/Misc/NEWS.d/next/Security/2026-01-16-14-40-31.gh-issue-143935.U2YtKl.rst +@@ -0,0 +1,6 @@ ++Fixed a bug in the folding of comments when flattening an email message ++using a modern email policy. Comments consisting of a very long sequence of ++non-foldable characters could trigger a forced line wrap that omitted the ++required leading space on the continuation line, causing the remainder of ++the comment to be interpreted as a new header field. This enabled header ++injection with carefully crafted inputs. +-- +2.45.4 + diff --git a/SPECS/python3/CVE-2025-12084.patch b/SPECS/python3/CVE-2025-12084.patch new file mode 100644 index 00000000000..3e764dc90ed --- /dev/null +++ b/SPECS/python3/CVE-2025-12084.patch @@ -0,0 +1,271 @@ +From 51f916d05ccca54ee82210ffb54b0cb117443a67 Mon Sep 17 00:00:00 2001 +From: Seth Michael Larson +Date: Wed, 3 Dec 2025 01:16:37 -0600 +Subject: [PATCH 1/4] gh-142145: Remove quadratic behavior in node ID cache + clearing (GH-142146) + +* Remove quadratic behavior in node ID cache clearing + +Co-authored-by: Jacob Walls <38668450+jacobtylerwalls@users.noreply.github.com> + +* Add news fragment + +--------- +(cherry picked from commit 08d8e18ad81cd45bc4a27d6da478b51ea49486e4) + +Co-authored-by: Seth Michael Larson +Co-authored-by: Jacob Walls <38668450+jacobtylerwalls@users.noreply.github.com> +--- + Lib/test/test_minidom.py | 18 ++++++++++++++++++ + Lib/xml/dom/minidom.py | 9 +-------- + ...5-12-01-09-36-45.gh-issue-142145.tcAUhg.rst | 1 + + 3 files changed, 20 insertions(+), 8 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst + +diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py +index 699265c..a83cb88 100644 +--- a/Lib/test/test_minidom.py ++++ b/Lib/test/test_minidom.py +@@ -2,6 +2,7 @@ + + import copy + import pickle ++import time + import io + from test import support + import unittest +@@ -176,6 +177,23 @@ class MinidomTest(unittest.TestCase): + self.confirm(dom.documentElement.childNodes[-1].data == "Hello") + dom.unlink() + ++ def testAppendChildNoQuadraticComplexity(self): ++ impl = getDOMImplementation() ++ ++ newdoc = impl.createDocument(None, "some_tag", None) ++ top_element = newdoc.documentElement ++ children = [newdoc.createElement(f"child-{i}") for i in range(1, 2 ** 15 + 1)] ++ element = top_element ++ ++ start = time.time() ++ for child in children: ++ element.appendChild(child) ++ element = child ++ end = time.time() ++ ++ # This example used to take at least 30 seconds. ++ self.assertLess(end - start, 1) ++ + def testAppendChildFragment(self): + dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes() + dom.documentElement.appendChild(frag) +diff --git a/Lib/xml/dom/minidom.py b/Lib/xml/dom/minidom.py +index ef8a159..83f717e 100644 +--- a/Lib/xml/dom/minidom.py ++++ b/Lib/xml/dom/minidom.py +@@ -292,13 +292,6 @@ def _append_child(self, node): + childNodes.append(node) + node.parentNode = self + +-def _in_document(node): +- # return True iff node is part of a document tree +- while node is not None: +- if node.nodeType == Node.DOCUMENT_NODE: +- return True +- node = node.parentNode +- return False + + def _write_data(writer, data): + "Writes datachars to writer." 
+@@ -1539,7 +1532,7 @@ def _clear_id_cache(node): + if node.nodeType == Node.DOCUMENT_NODE: + node._id_cache.clear() + node._id_search_stack = None +- elif _in_document(node): ++ elif node.ownerDocument: + node.ownerDocument._id_cache.clear() + node.ownerDocument._id_search_stack= None + +diff --git a/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst b/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst +new file mode 100644 +index 0000000..440bc77 +--- /dev/null ++++ b/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst +@@ -0,0 +1 @@ ++Remove quadratic behavior in ``xml.minidom`` node ID cache clearing. +-- +2.45.4 + + +From 64c592467bed5abd63c63926b1c701fb59b245f4 Mon Sep 17 00:00:00 2001 +From: "Miss Islington (bot)" + <31488909+miss-islington@users.noreply.github.com> +Date: Sun, 21 Dec 2025 00:56:47 +0100 +Subject: [PATCH 2/4] gh-142754: Ensure that Element & Attr instances have the + ownerDocument attribute (GH-142794) (#142818) + +gh-142754: Ensure that Element & Attr instances have the ownerDocument attribute (GH-142794) +(cherry picked from commit 1cc7551b3f9f71efbc88d96dce90f82de98b2454) + +Co-authored-by: Petr Viktorin +Co-authored-by: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> +--- + Lib/test/test_minidom.py | 10 +++++++++- + Lib/xml/dom/minidom.py | 2 ++ + .../2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst | 4 ++++ + 3 files changed, 15 insertions(+), 1 deletion(-) + create mode 100644 Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst + +diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py +index a83cb88..5a1f96d 100644 +--- a/Lib/test/test_minidom.py ++++ b/Lib/test/test_minidom.py +@@ -9,7 +9,7 @@ import unittest + + import xml.dom.minidom + +-from xml.dom.minidom import parse, Attr, Node, Document, parseString ++from xml.dom.minidom import parse, Attr, Node, Document, Element, parseString + from xml.dom.minidom import getDOMImplementation + from xml.parsers.expat import ExpatError + +@@ -194,6 +194,14 @@ class MinidomTest(unittest.TestCase): + # This example used to take at least 30 seconds. 
+ self.assertLess(end - start, 1) + ++ def testSetAttributeNodeWithoutOwnerDocument(self): ++ # regression test for gh-142754 ++ elem = Element("test") ++ attr = Attr("id") ++ attr.value = "test-id" ++ elem.setAttributeNode(attr) ++ self.assertEqual(elem.getAttribute("id"), "test-id") ++ + def testAppendChildFragment(self): + dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes() + dom.documentElement.appendChild(frag) +diff --git a/Lib/xml/dom/minidom.py b/Lib/xml/dom/minidom.py +index 83f717e..cada981 100644 +--- a/Lib/xml/dom/minidom.py ++++ b/Lib/xml/dom/minidom.py +@@ -348,6 +348,7 @@ class Attr(Node): + def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None, + prefix=None): + self.ownerElement = None ++ self.ownerDocument = None + self._name = qName + self.namespaceURI = namespaceURI + self._prefix = prefix +@@ -673,6 +674,7 @@ class Element(Node): + + def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None, + localName=None): ++ self.ownerDocument = None + self.parentNode = None + self.tagName = self.nodeName = tagName + self.prefix = prefix +diff --git a/Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst b/Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst +new file mode 100644 +index 0000000..d4e158c +--- /dev/null ++++ b/Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst +@@ -0,0 +1,4 @@ ++Add the *ownerDocument* attribute to :mod:`xml.dom.minidom` elements and attributes ++created by directly instantiating the ``Element`` or ``Attr`` class. Note that ++this way of creating nodes is not supported; creator functions like ++:py:meth:`xml.dom.Document.documentElement` should be used instead. +-- +2.45.4 + + +From d69b5fca81bec892e0cf410f74eab3315027dea2 Mon Sep 17 00:00:00 2001 +From: "Gregory P. Smith" <68491+gpshead@users.noreply.github.com> +Date: Sat, 20 Dec 2025 15:42:06 -0800 +Subject: [PATCH 3/4] gh-142145: relax the no-longer-quadratic test timing + (GH-143030) + +* gh-142145: relax the no-longer-quadratic test timing + +* require cpu resource +(cherry picked from commit 8d2d7bb2e754f8649a68ce4116271a4932f76907) + +Co-authored-by: Gregory P. Smith <68491+gpshead@users.noreply.github.com> +--- + Lib/test/test_minidom.py | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py +index 5a1f96d..ab4823c 100644 +--- a/Lib/test/test_minidom.py ++++ b/Lib/test/test_minidom.py +@@ -177,6 +177,7 @@ class MinidomTest(unittest.TestCase): + self.confirm(dom.documentElement.childNodes[-1].data == "Hello") + dom.unlink() + ++ @support.requires_resource('cpu') + def testAppendChildNoQuadraticComplexity(self): + impl = getDOMImplementation() + +@@ -185,14 +186,18 @@ class MinidomTest(unittest.TestCase): + children = [newdoc.createElement(f"child-{i}") for i in range(1, 2 ** 15 + 1)] + element = top_element + +- start = time.time() ++ start = time.monotonic() + for child in children: + element.appendChild(child) + element = child +- end = time.time() ++ end = time.monotonic() + + # This example used to take at least 30 seconds. +- self.assertLess(end - start, 1) ++ # Conservative assertion due to the wide variety of systems and ++ # build configs timing based tests wind up run under. ++ # A --with-address-sanitizer --with-pydebug build on a rpi5 still ++ # completes this loop in <0.5 seconds. 
++ self.assertLess(end - start, 4) + + def testSetAttributeNodeWithoutOwnerDocument(self): + # regression test for gh-142754 +-- +2.45.4 + + +From 1e5c25d54597d5464533b00cff681dcc33757eb4 Mon Sep 17 00:00:00 2001 +From: "Gregory P. Smith" +Date: Sun, 21 Dec 2025 00:05:37 +0000 +Subject: [PATCH 4/4] merge NEWS entries into one + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/python/cpython/pull/142211.patch +--- + .../Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst | 4 ---- + .../2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst | 7 ++++++- + 2 files changed, 6 insertions(+), 5 deletions(-) + delete mode 100644 Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst + +diff --git a/Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst b/Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst +deleted file mode 100644 +index d4e158c..0000000 +--- a/Misc/NEWS.d/next/Library/2025-12-16-11-55-55.gh-issue-142754.xuCrt3.rst ++++ /dev/null +@@ -1,4 +0,0 @@ +-Add the *ownerDocument* attribute to :mod:`xml.dom.minidom` elements and attributes +-created by directly instantiating the ``Element`` or ``Attr`` class. Note that +-this way of creating nodes is not supported; creator functions like +-:py:meth:`xml.dom.Document.documentElement` should be used instead. +diff --git a/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst b/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst +index 440bc77..05c7df3 100644 +--- a/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst ++++ b/Misc/NEWS.d/next/Security/2025-12-01-09-36-45.gh-issue-142145.tcAUhg.rst +@@ -1 +1,6 @@ +-Remove quadratic behavior in ``xml.minidom`` node ID cache clearing. ++Remove quadratic behavior in ``xml.minidom`` node ID cache clearing. In order ++to do this without breaking existing users, we also add the *ownerDocument* ++attribute to :mod:`xml.dom.minidom` elements and attributes created by directly ++instantiating the ``Element`` or ``Attr`` class. Note that this way of creating ++nodes is not supported; creator functions like ++:py:meth:`xml.dom.Document.documentElement` should be used instead. +-- +2.45.4 + diff --git a/SPECS/python3/CVE-2025-13836.patch b/SPECS/python3/CVE-2025-13836.patch new file mode 100644 index 00000000000..3aaaf022332 --- /dev/null +++ b/SPECS/python3/CVE-2025-13836.patch @@ -0,0 +1,160 @@ +From 40a210f6712ee19d310a34fb7b764cf3d4cd043a Mon Sep 17 00:00:00 2001 +From: Serhiy Storchaka +Date: Mon, 1 Dec 2025 17:26:07 +0200 +Subject: [PATCH] gh-119451: Fix a potential denial of service in http.client + (GH-119454) + +Reading the whole body of the HTTP response could cause OOM if +the Content-Length value is too large even if the server does not send +a large amount of data. Now the HTTP client reads large data by chunks, +therefore the amount of consumed memory is proportional to the amount +of sent data. 
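The description above is the heart of the fix: instead of preallocating a buffer as large as the advertised Content-Length, the client reads the body in bounded chunks so memory grows with the data actually received. A simplified, fixed-chunk sketch of that idea (the helper name, chunk size, and sizes are illustrative only; the real patch grows its read size geometrically rather than using a fixed chunk):

# Minimal sketch, not taken from the patch: read an expected number of bytes
# from a file-like object in bounded chunks, so a huge advertised length
# cannot force a huge allocation before any data has actually arrived.
import io

CHUNK = 1 << 20  # 1 MiB per read

def bounded_read(fp, expected):
    buf = io.BytesIO()
    while buf.tell() < expected:
        chunk = fp.read(min(CHUNK, expected - buf.tell()))
        if not chunk:  # the peer sent less than it promised
            raise EOFError(f"expected {expected} bytes, got {buf.tell()}")
        buf.write(chunk)
    return buf.getvalue()

# A well-behaved "server" that advertises 8 MiB and actually sends it.
assert len(bounded_read(io.BytesIO(b"x" * (8 << 20)), 8 << 20)) == 8 << 20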
+(cherry picked from commit 5a4c4a033a4a54481be6870aa1896fad732555b5) + +Co-authored-by: Serhiy Storchaka +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/python/cpython/pull/142140.patch +--- + Lib/http/client.py | 28 ++++++-- + Lib/test/test_httplib.py | 66 +++++++++++++++++++ + ...-05-23-11-47-48.gh-issue-119451.qkJe9-.rst | 5 ++ + 3 files changed, 95 insertions(+), 4 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2024-05-23-11-47-48.gh-issue-119451.qkJe9-.rst + +diff --git a/Lib/http/client.py b/Lib/http/client.py +index fb29923..70451d6 100644 +--- a/Lib/http/client.py ++++ b/Lib/http/client.py +@@ -111,6 +111,11 @@ responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()} + _MAXLINE = 65536 + _MAXHEADERS = 100 + ++# Data larger than this will be read in chunks, to prevent extreme ++# overallocation. ++_MIN_READ_BUF_SIZE = 1 << 20 ++ ++ + # Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2) + # + # VCHAR = %x21-7E +@@ -639,10 +644,25 @@ class HTTPResponse(io.BufferedIOBase): + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + """ +- data = self.fp.read(amt) +- if len(data) < amt: +- raise IncompleteRead(data, amt-len(data)) +- return data ++ cursize = min(amt, _MIN_READ_BUF_SIZE) ++ data = self.fp.read(cursize) ++ if len(data) >= amt: ++ return data ++ if len(data) < cursize: ++ raise IncompleteRead(data, amt - len(data)) ++ ++ data = io.BytesIO(data) ++ data.seek(0, 2) ++ while True: ++ # This is a geometric increase in read size (never more than ++ # doubling out the current length of data per loop iteration). ++ delta = min(cursize, amt - cursize) ++ data.write(self.fp.read(delta)) ++ if data.tell() >= amt: ++ return data.getvalue() ++ cursize += delta ++ if data.tell() < cursize: ++ raise IncompleteRead(data.getvalue(), amt - data.tell()) + + def _safe_readinto(self, b): + """Same as _safe_read, but for reading into a buffer.""" +diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py +index 01f5a10..e46dac0 100644 +--- a/Lib/test/test_httplib.py ++++ b/Lib/test/test_httplib.py +@@ -1452,6 +1452,72 @@ class BasicTest(TestCase): + thread.join() + self.assertEqual(result, b"proxied data\n") + ++ def test_large_content_length(self): ++ serv = socket.create_server((HOST, 0)) ++ self.addCleanup(serv.close) ++ ++ def run_server(): ++ [conn, address] = serv.accept() ++ with conn: ++ while conn.recv(1024): ++ conn.sendall( ++ b"HTTP/1.1 200 Ok\r\n" ++ b"Content-Length: %d\r\n" ++ b"\r\n" % size) ++ conn.sendall(b'A' * (size//3)) ++ conn.sendall(b'B' * (size - size//3)) ++ ++ thread = threading.Thread(target=run_server) ++ thread.start() ++ self.addCleanup(thread.join, 1.0) ++ ++ conn = client.HTTPConnection(*serv.getsockname()) ++ try: ++ for w in range(15, 27): ++ size = 1 << w ++ conn.request("GET", "/") ++ with conn.getresponse() as response: ++ self.assertEqual(len(response.read()), size) ++ finally: ++ conn.close() ++ thread.join(1.0) ++ ++ def test_large_content_length_truncated(self): ++ serv = socket.create_server((HOST, 0)) ++ self.addCleanup(serv.close) ++ ++ def run_server(): ++ while True: ++ [conn, address] = serv.accept() ++ with conn: ++ conn.recv(1024) ++ if not size: ++ break ++ conn.sendall( ++ b"HTTP/1.1 200 Ok\r\n" ++ b"Content-Length: %d\r\n" ++ b"\r\n" ++ b"Text" % size) ++ ++ thread = threading.Thread(target=run_server) ++ thread.start() ++ self.addCleanup(thread.join, 1.0) ++ ++ 
conn = client.HTTPConnection(*serv.getsockname()) ++ try: ++ for w in range(18, 65): ++ size = 1 << w ++ conn.request("GET", "/") ++ with conn.getresponse() as response: ++ self.assertRaises(client.IncompleteRead, response.read) ++ conn.close() ++ finally: ++ conn.close() ++ size = 0 ++ conn.request("GET", "/") ++ conn.close() ++ thread.join(1.0) ++ + def test_putrequest_override_domain_validation(self): + """ + It should be possible to override the default validation +diff --git a/Misc/NEWS.d/next/Security/2024-05-23-11-47-48.gh-issue-119451.qkJe9-.rst b/Misc/NEWS.d/next/Security/2024-05-23-11-47-48.gh-issue-119451.qkJe9-.rst +new file mode 100644 +index 0000000..6d6f25c +--- /dev/null ++++ b/Misc/NEWS.d/next/Security/2024-05-23-11-47-48.gh-issue-119451.qkJe9-.rst +@@ -0,0 +1,5 @@ ++Fix a potential memory denial of service in the :mod:`http.client` module. ++When connecting to a malicious server, it could cause ++an arbitrary amount of memory to be allocated. ++This could have led to symptoms including a :exc:`MemoryError`, swapping, out ++of memory (OOM) killed processes or containers, or even system crashes. +-- +2.45.4 + diff --git a/SPECS/python3/CVE-2025-13837.patch b/SPECS/python3/CVE-2025-13837.patch new file mode 100644 index 00000000000..860a3fbcdce --- /dev/null +++ b/SPECS/python3/CVE-2025-13837.patch @@ -0,0 +1,165 @@ +From 5881591933139f6b3d5791fda8c737d24a6de819 Mon Sep 17 00:00:00 2001 +From: Serhiy Storchaka +Date: Mon, 1 Dec 2025 17:28:15 +0200 +Subject: [PATCH] gh-119342: Fix a potential denial of service in plistlib + (GH-119343) + +Reading a specially prepared small Plist file could cause OOM because file's +read(n) preallocates a bytes object for reading the specified amount of +data. Now plistlib reads large data by chunks, therefore the upper limit of +consumed memory is proportional to the size of the input file. +(cherry picked from commit 694922cf40aa3a28f898b5f5ee08b71b4922df70) + +Co-authored-by: Serhiy Storchaka +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/python/cpython/pull/142149.patch +--- + Lib/plistlib.py | 31 ++++++++++------ + Lib/test/test_plistlib.py | 37 +++++++++++++++++-- + ...-05-21-22-11-31.gh-issue-119342.BTFj4Z.rst | 5 +++ + 3 files changed, 59 insertions(+), 14 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2024-05-21-22-11-31.gh-issue-119342.BTFj4Z.rst + +diff --git a/Lib/plistlib.py b/Lib/plistlib.py +index 3292c30..c5554ea 100644 +--- a/Lib/plistlib.py ++++ b/Lib/plistlib.py +@@ -73,6 +73,9 @@ from xml.parsers.expat import ParserCreate + PlistFormat = enum.Enum('PlistFormat', 'FMT_XML FMT_BINARY', module=__name__) + globals().update(PlistFormat.__members__) + ++# Data larger than this will be read in chunks, to prevent extreme ++# overallocation. 
++_MIN_READ_BUF_SIZE = 1 << 20 + + class UID: + def __init__(self, data): +@@ -499,12 +502,24 @@ class _BinaryPlistParser: + + return tokenL + ++ def _read(self, size): ++ cursize = min(size, _MIN_READ_BUF_SIZE) ++ data = self._fp.read(cursize) ++ while True: ++ if len(data) != cursize: ++ raise InvalidFileException ++ if cursize == size: ++ return data ++ delta = min(cursize, size - cursize) ++ data += self._fp.read(delta) ++ cursize += delta ++ + def _read_ints(self, n, size): +- data = self._fp.read(size * n) ++ data = self._read(size * n) + if size in _BINARY_FORMAT: + return struct.unpack(f'>{n}{_BINARY_FORMAT[size]}', data) + else: +- if not size or len(data) != size * n: ++ if not size: + raise InvalidFileException() + return tuple(int.from_bytes(data[i: i + size], 'big') + for i in range(0, size * n, size)) +@@ -561,22 +576,16 @@ class _BinaryPlistParser: + + elif tokenH == 0x40: # data + s = self._get_size(tokenL) +- result = self._fp.read(s) +- if len(result) != s: +- raise InvalidFileException() ++ result = self._read(s) + + elif tokenH == 0x50: # ascii string + s = self._get_size(tokenL) +- data = self._fp.read(s) +- if len(data) != s: +- raise InvalidFileException() ++ data = self._read(s) + result = data.decode('ascii') + + elif tokenH == 0x60: # unicode string + s = self._get_size(tokenL) * 2 +- data = self._fp.read(s) +- if len(data) != s: +- raise InvalidFileException() ++ data = self._read(s) + result = data.decode('utf-16be') + + elif tokenH == 0x80: # UID +diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py +index fa46050..229a5a2 100644 +--- a/Lib/test/test_plistlib.py ++++ b/Lib/test/test_plistlib.py +@@ -841,8 +841,7 @@ class TestPlistlib(unittest.TestCase): + + class TestBinaryPlistlib(unittest.TestCase): + +- @staticmethod +- def decode(*objects, offset_size=1, ref_size=1): ++ def build(self, *objects, offset_size=1, ref_size=1): + data = [b'bplist00'] + offset = 8 + offsets = [] +@@ -854,7 +853,11 @@ class TestBinaryPlistlib(unittest.TestCase): + len(objects), 0, offset) + data.extend(offsets) + data.append(tail) +- return plistlib.loads(b''.join(data), fmt=plistlib.FMT_BINARY) ++ return b''.join(data) ++ ++ def decode(self, *objects, offset_size=1, ref_size=1): ++ data = self.build(*objects, offset_size=offset_size, ref_size=ref_size) ++ return plistlib.loads(data, fmt=plistlib.FMT_BINARY) + + def test_nonstandard_refs_size(self): + # Issue #21538: Refs and offsets are 24-bit integers +@@ -963,6 +966,34 @@ class TestBinaryPlistlib(unittest.TestCase): + with self.assertRaises(plistlib.InvalidFileException): + plistlib.loads(b'bplist00' + data, fmt=plistlib.FMT_BINARY) + ++ def test_truncated_large_data(self): ++ self.addCleanup(os_helper.unlink, os_helper.TESTFN) ++ def check(data): ++ with open(os_helper.TESTFN, 'wb') as f: ++ f.write(data) ++ # buffered file ++ with open(os_helper.TESTFN, 'rb') as f: ++ with self.assertRaises(plistlib.InvalidFileException): ++ plistlib.load(f, fmt=plistlib.FMT_BINARY) ++ # unbuffered file ++ with open(os_helper.TESTFN, 'rb', buffering=0) as f: ++ with self.assertRaises(plistlib.InvalidFileException): ++ plistlib.load(f, fmt=plistlib.FMT_BINARY) ++ for w in range(20, 64): ++ s = 1 << w ++ # data ++ check(self.build(b'\x4f\x13' + s.to_bytes(8, 'big'))) ++ # ascii string ++ check(self.build(b'\x5f\x13' + s.to_bytes(8, 'big'))) ++ # unicode string ++ check(self.build(b'\x6f\x13' + s.to_bytes(8, 'big'))) ++ # array ++ check(self.build(b'\xaf\x13' + s.to_bytes(8, 'big'))) ++ # dict ++ check(self.build(b'\xdf\x13' + 
s.to_bytes(8, 'big'))) ++ # number of objects ++ check(b'bplist00' + struct.pack('>6xBBQQQ', 1, 1, s, 0, 8)) ++ + + class TestKeyedArchive(unittest.TestCase): + def test_keyed_archive_data(self): +diff --git a/Misc/NEWS.d/next/Security/2024-05-21-22-11-31.gh-issue-119342.BTFj4Z.rst b/Misc/NEWS.d/next/Security/2024-05-21-22-11-31.gh-issue-119342.BTFj4Z.rst +new file mode 100644 +index 0000000..04fd8fa +--- /dev/null ++++ b/Misc/NEWS.d/next/Security/2024-05-21-22-11-31.gh-issue-119342.BTFj4Z.rst +@@ -0,0 +1,5 @@ ++Fix a potential memory denial of service in the :mod:`plistlib` module. ++When reading a Plist file received from untrusted source, it could cause ++an arbitrary amount of memory to be allocated. ++This could have led to symptoms including a :exc:`MemoryError`, swapping, out ++of memory (OOM) killed processes or containers, or even system crashes. +-- +2.45.4 + diff --git a/SPECS/python3/CVE-2026-0672.patch b/SPECS/python3/CVE-2026-0672.patch new file mode 100644 index 00000000000..304e7d5dc92 --- /dev/null +++ b/SPECS/python3/CVE-2026-0672.patch @@ -0,0 +1,189 @@ +From 62498dced866fee86727379378acb20a541f3371 Mon Sep 17 00:00:00 2001 +From: Seth Michael Larson +Date: Tue, 20 Jan 2026 15:23:42 -0600 +Subject: [PATCH] gh-143919: Reject control characters in http cookies (cherry + picked from commit 95746b3a13a985787ef53b977129041971ed7f70) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Co-authored-by: Seth Michael Larson +Co-authored-by: Bartosz Sławecki +Co-authored-by: sobolevn +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/python/cpython/pull/144091.patch +--- + Doc/library/http.cookies.rst | 4 +- + Lib/http/cookies.py | 25 +++++++-- + Lib/test/test_http_cookies.py | 52 +++++++++++++++++-- + ...-01-16-11-13-15.gh-issue-143919.kchwZV.rst | 1 + + 4 files changed, 73 insertions(+), 9 deletions(-) + create mode 100644 Misc/NEWS.d/next/Security/2026-01-16-11-13-15.gh-issue-143919.kchwZV.rst + +diff --git a/Doc/library/http.cookies.rst b/Doc/library/http.cookies.rst +index ad37a0f..317a71a 100644 +--- a/Doc/library/http.cookies.rst ++++ b/Doc/library/http.cookies.rst +@@ -272,9 +272,9 @@ The following example demonstrates how to use the :mod:`http.cookies` module. + Set-Cookie: chips=ahoy + Set-Cookie: vienna=finger + >>> C = cookies.SimpleCookie() +- >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') ++ >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=;";') + >>> print(C) +- Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" ++ Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=;" + >>> C = cookies.SimpleCookie() + >>> C["oreo"] = "doublestuff" + >>> C["oreo"]["path"] = "/" +diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py +index 57791c6..d0a69cb 100644 +--- a/Lib/http/cookies.py ++++ b/Lib/http/cookies.py +@@ -87,9 +87,9 @@ within a string. Escaped quotation marks, nested semicolons, and other + such trickeries do not confuse it. + + >>> C = cookies.SimpleCookie() +- >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') ++ >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=;";') + >>> print(C) +- Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" ++ Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=;" + + Each element of the Cookie also supports all of the RFC 2109 + Cookie attributes. 
Here's an example which sets the Path +@@ -170,6 +170,15 @@ _Translator.update({ + }) + + _is_legal_key = re.compile('[%s]+' % re.escape(_LegalChars)).fullmatch ++_control_character_re = re.compile(r'[\x00-\x1F\x7F]') ++ ++ ++def _has_control_character(*val): ++ """Detects control characters within a value. ++ Supports any type, as header values can be any type. ++ """ ++ return any(_control_character_re.search(str(v)) for v in val) ++ + + def _quote(str): + r"""Quote a string for use in a cookie header. +@@ -292,12 +301,16 @@ class Morsel(dict): + K = K.lower() + if not K in self._reserved: + raise CookieError("Invalid attribute %r" % (K,)) ++ if _has_control_character(K, V): ++ raise CookieError(f"Control characters are not allowed in cookies {K!r} {V!r}") + dict.__setitem__(self, K, V) + + def setdefault(self, key, val=None): + key = key.lower() + if key not in self._reserved: + raise CookieError("Invalid attribute %r" % (key,)) ++ if _has_control_character(key, val): ++ raise CookieError("Control characters are not allowed in cookies %r %r" % (key, val,)) + return dict.setdefault(self, key, val) + + def __eq__(self, morsel): +@@ -333,6 +346,9 @@ class Morsel(dict): + raise CookieError('Attempt to set a reserved key %r' % (key,)) + if not _is_legal_key(key): + raise CookieError('Illegal key %r' % (key,)) ++ if _has_control_character(key, val, coded_val): ++ raise CookieError( ++ "Control characters are not allowed in cookies %r %r %r" % (key, val, coded_val,)) + + # It's a good key, so save it. + self._key = key +@@ -486,7 +502,10 @@ class BaseCookie(dict): + result = [] + items = sorted(self.items()) + for key, value in items: +- result.append(value.output(attrs, header)) ++ value_output = value.output(attrs, header) ++ if _has_control_character(value_output): ++ raise CookieError("Control characters are not allowed in cookies") ++ result.append(value_output) + return sep.join(result) + + __str__ = output +diff --git a/Lib/test/test_http_cookies.py b/Lib/test/test_http_cookies.py +index 7b3dc0f..f196bcc 100644 +--- a/Lib/test/test_http_cookies.py ++++ b/Lib/test/test_http_cookies.py +@@ -17,10 +17,10 @@ class CookieTests(unittest.TestCase): + 'repr': "", + 'output': 'Set-Cookie: chips=ahoy\nSet-Cookie: vienna=finger'}, + +- {'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"', +- 'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'}, +- 'repr': '''''', +- 'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'}, ++ {'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=;"', ++ 'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=;'}, ++ 'repr': '''''', ++ 'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=;"'}, + + # Check illegal cookies that have an '=' char in an unquoted value + {'data': 'keebler=E=mc2', +@@ -563,6 +563,50 @@ class MorselTests(unittest.TestCase): + r'Set-Cookie: key=coded_val; ' + r'expires=\w+, \d+ \w+ \d+ \d+:\d+:\d+ \w+') + ++ def test_control_characters(self): ++ for c0 in support.control_characters_c0(): ++ morsel = cookies.Morsel() ++ ++ # .__setitem__() ++ with self.assertRaises(cookies.CookieError): ++ morsel[c0] = "val" ++ with self.assertRaises(cookies.CookieError): ++ morsel["path"] = c0 ++ ++ # .setdefault() ++ with self.assertRaises(cookies.CookieError): ++ morsel.setdefault("path", c0) ++ with self.assertRaises(cookies.CookieError): ++ morsel.setdefault(c0, "val") ++ ++ # .set() ++ with self.assertRaises(cookies.CookieError): ++ morsel.set(c0, "val", "coded-value") ++ with self.assertRaises(cookies.CookieError): ++ morsel.set("path", c0, 
"coded-value") ++ with self.assertRaises(cookies.CookieError): ++ morsel.set("path", "val", c0) ++ ++ def test_control_characters_output(self): ++ # Tests that even if the internals of Morsel are modified ++ # that a call to .output() has control character safeguards. ++ for c0 in support.control_characters_c0(): ++ morsel = cookies.Morsel() ++ morsel.set("key", "value", "coded-value") ++ morsel._key = c0 # Override private variable. ++ cookie = cookies.SimpleCookie() ++ cookie["cookie"] = morsel ++ with self.assertRaises(cookies.CookieError): ++ cookie.output() ++ ++ morsel = cookies.Morsel() ++ morsel.set("key", "value", "coded-value") ++ morsel._coded_value = c0 # Override private variable. ++ cookie = cookies.SimpleCookie() ++ cookie["cookie"] = morsel ++ with self.assertRaises(cookies.CookieError): ++ cookie.output() ++ + + def load_tests(loader, tests, pattern): + tests.addTest(doctest.DocTestSuite(cookies)) +diff --git a/Misc/NEWS.d/next/Security/2026-01-16-11-13-15.gh-issue-143919.kchwZV.rst b/Misc/NEWS.d/next/Security/2026-01-16-11-13-15.gh-issue-143919.kchwZV.rst +new file mode 100644 +index 0000000..788c3e4 +--- /dev/null ++++ b/Misc/NEWS.d/next/Security/2026-01-16-11-13-15.gh-issue-143919.kchwZV.rst +@@ -0,0 +1 @@ ++Reject control characters in :class:`http.cookies.Morsel` fields and values. +-- +2.45.4 + diff --git a/SPECS/python3/CVE-2026-0865.patch b/SPECS/python3/CVE-2026-0865.patch new file mode 100644 index 00000000000..c19c80c2787 --- /dev/null +++ b/SPECS/python3/CVE-2026-0865.patch @@ -0,0 +1,102 @@ +From 3b17e0a6b6af91a844bd6b5725b7d6a806d36714 Mon Sep 17 00:00:00 2001 +From: "Gregory P. Smith" <68491+gpshead@users.noreply.github.com> +Date: Sat, 17 Jan 2026 10:23:57 -0800 +Subject: [PATCH] gh-143916: Reject control characters in + wsgiref.headers.Headers (GH-143917) (GH-143973) + +gh-143916: Reject control characters in wsgiref.headers.Headers (GH-143917) + +* Add 'test.support' fixture for C0 control characters +* gh-143916: Reject control characters in wsgiref.headers.Headers + +(cherry picked from commit f7fceed79ca1bceae8dbe5ba5bc8928564da7211) +(cherry picked from commit 22e4d55285cee52bc4dbe061324e5f30bd4dee58) + +Co-authored-by: Gregory P. Smith <68491+gpshead@users.noreply.github.com> +Co-authored-by: Seth Michael Larson +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/python/cpython/pull/143974.patch +--- + Lib/test/support/__init__.py | 7 +++++++ + Lib/test/test_wsgiref.py | 12 +++++++++++- + Lib/wsgiref/headers.py | 3 +++ + .../2026-01-16-11-07-36.gh-issue-143916.dpWeOD.rst | 2 ++ + 4 files changed, 23 insertions(+), 1 deletion(-) + create mode 100644 Misc/NEWS.d/next/Security/2026-01-16-11-07-36.gh-issue-143916.dpWeOD.rst + +diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py +index ba57eb3..abd2fea 100644 +--- a/Lib/test/support/__init__.py ++++ b/Lib/test/support/__init__.py +@@ -2551,3 +2551,10 @@ class BrokenIter: + if self.iter_raises: + 1/0 + return self ++ ++ ++def control_characters_c0() -> list[str]: ++ """Returns a list of C0 control characters as strings. ++ C0 control characters defined as the byte range 0x00-0x1F, and 0x7F. 
++ """ ++ return [chr(c) for c in range(0x00, 0x20)] + ["\x7F"] +diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py +index 9316d0e..28e3656 100644 +--- a/Lib/test/test_wsgiref.py ++++ b/Lib/test/test_wsgiref.py +@@ -1,6 +1,6 @@ + from unittest import mock + from test import support +-from test.support import socket_helper ++from test.support import socket_helper, control_characters_c0 + from test.test_httpservers import NoLogRequestHandler + from unittest import TestCase + from wsgiref.util import setup_testing_defaults +@@ -503,6 +503,16 @@ class HeaderTests(TestCase): + '\r\n' + ) + ++ def testRaisesControlCharacters(self): ++ headers = Headers() ++ for c0 in control_characters_c0(): ++ self.assertRaises(ValueError, headers.__setitem__, f"key{c0}", "val") ++ self.assertRaises(ValueError, headers.__setitem__, "key", f"val{c0}") ++ self.assertRaises(ValueError, headers.add_header, f"key{c0}", "val", param="param") ++ self.assertRaises(ValueError, headers.add_header, "key", f"val{c0}", param="param") ++ self.assertRaises(ValueError, headers.add_header, "key", "val", param=f"param{c0}") ++ ++ + class ErrorHandler(BaseCGIHandler): + """Simple handler subclass for testing BaseHandler""" + +diff --git a/Lib/wsgiref/headers.py b/Lib/wsgiref/headers.py +index fab851c..fd98e85 100644 +--- a/Lib/wsgiref/headers.py ++++ b/Lib/wsgiref/headers.py +@@ -9,6 +9,7 @@ written by Barry Warsaw. + # existence of which force quoting of the parameter value. + import re + tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') ++_control_chars_re = re.compile(r'[\x00-\x1F\x7F]') + + def _formatparam(param, value=None, quote=1): + """Convenience function to format and return a key=value pair. +@@ -41,6 +42,8 @@ class Headers: + def _convert_string_type(self, value): + """Convert/check value type.""" + if type(value) is str: ++ if _control_chars_re.search(value): ++ raise ValueError("Control characters not allowed in headers") + return value + raise AssertionError("Header names/values must be" + " of type str (got {0})".format(repr(value))) +diff --git a/Misc/NEWS.d/next/Security/2026-01-16-11-07-36.gh-issue-143916.dpWeOD.rst b/Misc/NEWS.d/next/Security/2026-01-16-11-07-36.gh-issue-143916.dpWeOD.rst +new file mode 100644 +index 0000000..44bd0b2 +--- /dev/null ++++ b/Misc/NEWS.d/next/Security/2026-01-16-11-07-36.gh-issue-143916.dpWeOD.rst +@@ -0,0 +1,2 @@ ++Reject C0 control characters within wsgiref.headers.Headers fields, values, ++and parameters. 
+-- +2.45.4 + diff --git a/SPECS/python3/python3.spec b/SPECS/python3/python3.spec index e66cb5274ad..c35f8f64b56 100644 --- a/SPECS/python3/python3.spec +++ b/SPECS/python3/python3.spec @@ -6,7 +6,7 @@ Summary: A high-level scripting language Name: python3 Version: 3.12.9 -Release: 6%{?dist} +Release: 8%{?dist} License: PSF Vendor: Microsoft Corporation Distribution: Azure Linux @@ -23,6 +23,12 @@ Patch3: CVE-2025-6069.patch Patch4: CVE-2025-8194.patch Patch5: CVE-2025-8291.patch Patch6: CVE-2025-6075.patch +Patch7: CVE-2025-12084.patch +Patch8: CVE-2025-13836.patch +Patch9: CVE-2025-13837.patch +Patch10: CVE-2025-11468.patch +Patch11: CVE-2026-0672.patch +Patch12: CVE-2026-0865.patch BuildRequires: bzip2-devel BuildRequires: expat-devel >= 2.1.0 @@ -245,6 +251,12 @@ rm -rf %{buildroot}%{_bindir}/__pycache__ %{_libdir}/python%{majmin}/test/* %changelog +* Wed Jan 28 2026 Azure Linux Security Servicing Account - 3.12.9-8 +- Patch for CVE-2026-0865, CVE-2025-11468, CVE-2026-0672 + +* Wed Dec 24 2025 Azure Linux Security Servicing Account - 3.12.9-7 +- Patch for CVE-2025-13837, CVE-2025-12084, CVE-2025-13836 + * Tue Nov 04 2025 Azure Linux Security Servicing Account - 3.12.9-6 - Patch for CVE-2025-6075 diff --git a/SPECS/pytorch/CVE-2025-3001.patch b/SPECS/pytorch/CVE-2025-3001.patch new file mode 100644 index 00000000000..e2430bf4ac1 --- /dev/null +++ b/SPECS/pytorch/CVE-2025-3001.patch @@ -0,0 +1,119 @@ +From 55c933d182200c684639f1e77a60eae196e31d84 Mon Sep 17 00:00:00 2001 +From: Yuxingwang-intel +Date: Fri, 19 Dec 2025 10:20:47 +0000 +Subject: [PATCH] Fix segmentation fault caused by invalid gate weight size in + lstm_cell (#168348) + +This PR adds parameter checks for LSTM weights to fix https://github.com/pytorch/pytorch/issues/149626 +Pull Request resolved: https://github.com/pytorch/pytorch/pull/168348 +Approved by: https://github.com/jiayisunx, https://github.com/mingfeima, https://github.com/albanD, https://github.com/cyyever + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/pytorch/pytorch/commit/999d94b5ede5f4ec111ba7dd144129e2c2725b03.patch +--- + aten/src/ATen/native/RNN.cpp | 15 ++++++++++++++- + test/test_nn.py | 33 +++++++++++++++++++++++++++++++++ + 2 files changed, 47 insertions(+), 1 deletion(-) + +diff --git a/aten/src/ATen/native/RNN.cpp b/aten/src/ATen/native/RNN.cpp +index 015e7797..4b3b2f89 100644 +--- a/aten/src/ATen/native/RNN.cpp ++++ b/aten/src/ATen/native/RNN.cpp +@@ -689,6 +689,15 @@ void check_rnn_cell_forward_hidden(const Tensor& input, const Tensor& hx, c10::S + "hidden", hidden_label, " has inconsistent hidden_size: got ", hx.sym_size(1), ", expected ", hidden_size); + } + ++template ++inline void check_rnn_cell_forward_weights(const Tensor& w_ih, const Tensor& w_hh, const c10::SymInt& hidden_size){ ++ TORCH_CHECK(w_ih.size(0) == gate_count * hidden_size, "weight_ih first dim must be ", gate_count, " * hidden_size = ", ++ gate_count * hidden_size, ", but got ", w_ih.size(0)); ++ TORCH_CHECK(w_hh.size(0) == gate_count * hidden_size, "weight_hh first dim must be ", gate_count, " * hidden_size = ", ++ gate_count * hidden_size, ", but got ", w_hh.size(0)); ++} ++ ++ + template + struct Cell { + using hidden_type = hidden_type_tmpl; +@@ -1536,8 +1545,9 @@ std::tuple lstm_cell( + const Tensor& b_hh = c10::value_or_else(b_hh_opt, [] {return Tensor();}); + + TORCH_CHECK(hx.size() == 2, "lstm_cell expects two hidden states"); +- check_rnn_cell_forward_input(input, w_ih.sym_size(1)); + auto hidden_size = 
w_hh.sym_size(1); ++ check_rnn_cell_forward_input(input, w_ih.sym_size(1)); ++ check_rnn_cell_forward_weights<4>(w_ih, w_hh, hidden_size); + check_rnn_cell_forward_hidden(input, hx[0], hidden_size, 0); + check_rnn_cell_forward_hidden(input, hx[1], std::move(hidden_size), 1); + static at::Tensor undefined; +@@ -1651,6 +1661,7 @@ Tensor gru_cell( + + check_rnn_cell_forward_input(input, w_ih.size(1)); + check_rnn_cell_forward_hidden(input, hx, w_hh.size(1), 0); ++ check_rnn_cell_forward_weights<3>(w_ih, w_hh, w_hh.size(1)); + static at::Tensor undefined; + return GRUCell{}(input, hx, CellParams{w_ih, w_hh, b_ih, b_hh, undefined}); + } +@@ -1664,6 +1675,7 @@ Tensor rnn_tanh_cell( + const Tensor& b_hh = c10::value_or_else(b_hh_opt, [] {return Tensor();}); + + static at::Tensor undefined; ++ check_rnn_cell_forward_weights<1>(w_ih, w_hh, w_hh.size(1)); + check_rnn_cell_forward_input(input, w_ih.size(1)); + check_rnn_cell_forward_hidden(input, hx, w_hh.size(1), 0); + return SimpleCell{}(input, hx, CellParams{w_ih, w_hh, b_ih, b_hh, undefined}); +@@ -1678,6 +1690,7 @@ Tensor rnn_relu_cell( + const Tensor& b_hh = c10::value_or_else(b_hh_opt, [] {return Tensor();}); + + static at::Tensor undefined; ++ check_rnn_cell_forward_weights<1>(w_ih, w_hh, w_hh.size(1)); + check_rnn_cell_forward_input(input, w_ih.size(1)); + check_rnn_cell_forward_hidden(input, hx, w_hh.size(1), 0); + return SimpleCell{}(input, hx, CellParams{w_ih, w_hh, b_ih, b_hh, undefined}); +diff --git a/test/test_nn.py b/test/test_nn.py +index 08200e9d..ea69f97d 100644 +--- a/test/test_nn.py ++++ b/test/test_nn.py +@@ -7335,6 +7335,39 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""") + with self.assertRaises(RuntimeError): + res = arg_class(*arg_4) + ++ def test_rnn_cell_gate_weights_size(self): ++ def test_rnn_cell(cell_fn, gate_count): ++ input_size = 8 ++ hidden_size = 16 ++ x = torch.randn(4, input_size) ++ hx = torch.randn(4, hidden_size) ++ cx = torch.randn(4, hidden_size) ++ ++ w_ih_invalid = torch.randn((gate_count * hidden_size) + 1, 8) ++ w_ih = torch.randn(gate_count * hidden_size, 8) ++ w_hh_invalid = torch.randn((gate_count * hidden_size) + 1, 16) ++ w_hh = torch.randn(gate_count * hidden_size, 16) ++ b_ih = torch.randn(gate_count * hidden_size) ++ b_hh = torch.randn(gate_count * hidden_size) ++ ++ if cell_fn is torch.lstm_cell: ++ state = (hx, cx) ++ else: ++ state = hx ++ ++ with self.assertRaisesRegex(RuntimeError, "weight_ih"): ++ cell_fn(x, state, w_ih_invalid, w_hh, b_ih, b_hh) ++ ++ with self.assertRaisesRegex(RuntimeError, "weight_hh"): ++ cell_fn(x, state, w_ih, w_hh_invalid, b_ih, b_hh) ++ for cell_fn, gate_count in [ ++ (torch.lstm_cell, 4), ++ (torch.gru_cell, 3), ++ (torch.rnn_relu_cell, 1), ++ (torch.rnn_tanh_cell, 1), ++ ]: ++ test_rnn_cell(cell_fn, gate_count) ++ + class TestFusionEval(TestCase): + @set_default_dtype(torch.double) + @given(X=hu.tensor(shapes=((5, 3, 5, 5),), dtype=np.double), +-- +2.45.4 + diff --git a/SPECS/pytorch/CVE-2026-24747.patch b/SPECS/pytorch/CVE-2026-24747.patch new file mode 100644 index 00000000000..a4b6201f34a --- /dev/null +++ b/SPECS/pytorch/CVE-2026-24747.patch @@ -0,0 +1,76 @@ +From cef4c4308729099f4d2fc11bfd5f5132cc6c0225 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Wed, 28 Jan 2026 18:21:32 +0000 +Subject: [PATCH] override SWALR.state_dict and load_state_dict; add + _set_anneal_func and use in __init__ + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of 
https://github.com/pytorch/pytorch/commit/167ad09be5af5c52666759412a3804068c6955d1.patch +--- + torch/optim/swa_utils.py | 39 +++++++++++++++++++++++++++++++++++---- + 1 file changed, 35 insertions(+), 4 deletions(-) + +diff --git a/torch/optim/swa_utils.py b/torch/optim/swa_utils.py +index 90b3f159..e792f727 100644 +--- a/torch/optim/swa_utils.py ++++ b/torch/optim/swa_utils.py +@@ -7,6 +7,8 @@ import torch + from torch.nn import Module + from torch.optim.lr_scheduler import LRScheduler + from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices ++from typing_extensions import override ++ + + __all__ = [ + 'AveragedModel', +@@ -326,10 +328,7 @@ class SWALR(LRScheduler): + if anneal_strategy not in ['cos', 'linear']: + raise ValueError("anneal_strategy must by one of 'cos' or 'linear', " + f"instead got {anneal_strategy}") +- elif anneal_strategy == 'cos': +- self.anneal_func = self._cosine_anneal +- elif anneal_strategy == 'linear': +- self.anneal_func = self._linear_anneal ++ self._set_anneal_func(anneal_strategy) + if not isinstance(anneal_epochs, int) or anneal_epochs < 0: + raise ValueError(f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}") + self.anneal_epochs = anneal_epochs +@@ -375,3 +374,35 @@ class SWALR(LRScheduler): + alpha = self.anneal_func(t) + return [group['swa_lr'] * alpha + lr * (1 - alpha) + for group, lr in zip(self.optimizer.param_groups, prev_lrs)] ++ ++ ++ def _set_anneal_func(self, anneal_strategy): ++ self._anneal_strategy = anneal_strategy ++ if anneal_strategy == 'cos': ++ self.anneal_func = self._cosine_anneal ++ else: ++ self.anneal_func = self._linear_anneal ++ ++ @override ++ def state_dict(self): ++ """Return the state of the scheduler as a :class:`dict`. ++ ++ It contains an entry for every variable in self.__dict__ which ++ is not the optimizer or anneal_func. ++ """ ++ return { ++ key: value ++ for key, value in self.__dict__.items() ++ if key not in ("optimizer", "anneal_func") ++ } ++ ++ @override ++ def load_state_dict(self, state_dict): ++ """Load the scheduler's state. ++ ++ Args: ++ state_dict (dict): scheduler state. Should be an object returned ++ from a call to :meth:`state_dict`. ++ """ ++ self.__dict__.update(state_dict) ++ self._set_anneal_func(self._anneal_strategy) +-- +2.45.4 + diff --git a/SPECS/pytorch/pytorch.spec b/SPECS/pytorch/pytorch.spec index b12aaf09c0c..63292eeeb1f 100644 --- a/SPECS/pytorch/pytorch.spec +++ b/SPECS/pytorch/pytorch.spec @@ -2,7 +2,7 @@ Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration. 
Name: pytorch Version: 2.2.2 -Release: 9%{?dist} +Release: 11%{?dist} License: BSD-3-Clause Vendor: Microsoft Corporation Distribution: Azure Linux @@ -35,6 +35,8 @@ Patch10: CVE-2025-2953.patch Patch11: CVE-2025-55552.patch Patch12: CVE-2025-55560.patch Patch13: CVE-2025-46152.patch +Patch14: CVE-2025-3001.patch +Patch15: CVE-2026-24747.patch %description PyTorch is a Python package that provides two high-level features: @@ -96,6 +98,12 @@ cp -arf docs %{buildroot}/%{_pkgdocdir} %{_docdir}/* %changelog +* Wed Jan 28 2026 Azure Linux Security Servicing Account - 2.2.2-11 +- Patch for CVE-2026-24747 + +* Thu Dec 25 2025 Azure Linux Security Servicing Account - 2.2.2-10 +- Patch for CVE-2025-3001 + * Thu Dec 04 2025 Azure Linux Security Servicing Account - 2.2.2-9 - Patch for CVE-2025-55560 & CVE-2025-46152 diff --git a/SPECS/qemu/qemu.spec b/SPECS/qemu/qemu.spec index 303f7dae300..91685146bc4 100644 --- a/SPECS/qemu/qemu.spec +++ b/SPECS/qemu/qemu.spec @@ -428,7 +428,7 @@ Obsoletes: sgabios-bin <= 1:0.20180715git-10.fc38 Summary: QEMU is a FAST! processor emulator Name: qemu Version: 8.2.0 -Release: 25%{?dist} +Release: 27%{?dist} License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND FSFAP AND GPL-1.0-or-later AND GPL-2.0-only AND GPL-2.0-or-later AND GPL-2.0-or-later WITH GCC-exception-2.0 AND LGPL-2.0-only AND LGPL-2.0-or-later AND LGPL-2.1-only AND LGPL-2.1-or-later AND MIT AND LicenseRef-Fedora-Public-Domain AND CC-BY-3.0 URL: http://www.qemu.org/ @@ -654,7 +654,7 @@ BuildRequires: rutabaga-gfx-ffi-devel %endif %if %{user_static} -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: glib2-static zlib-static BuildRequires: pcre2-static %endif @@ -3435,6 +3435,12 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ %changelog +* Thu Jan 22 2026 Kanishk Bansal - 8.2.0-27 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 8.2.0-26 +- Bump to rebuild with updated glibc + * Wed Nov 19 2025 Aditya Singh - 8.2.0-25 - Added Patch for CVE-2025-12464 diff --git a/SPECS/qtdeclarative/CVE-2025-12385.patch b/SPECS/qtdeclarative/CVE-2025-12385.patch new file mode 100644 index 00000000000..5bb15aa8c26 --- /dev/null +++ b/SPECS/qtdeclarative/CVE-2025-12385.patch @@ -0,0 +1,232 @@ +From 475ae118abf121046525a44d95a9524f780e8fa7 Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Mon, 8 Dec 2025 11:21:20 +0000 +Subject: [PATCH] Increase robustness of tag handling and rich text + object allocation + +- In qquicktextdocument.cpp, cap image width/height when rounding to avoid overflow in debug builds. +- In qquickstyledtext.cpp, guard against excessively large width/height using QQUICKSTYLEDPARSER_COORD_LIMIT and warn on invalid values. +- In qquicktextnodeengine.cpp, use QImageIOHandler::allocateImage to limit allocations for rich text objects. +- Add tests to cover out-of-bounds image sizes for styled and rich text. 
+ +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/qt/qtdeclarative/commit/4aaf9bf21f7cc69d73066785e254b664fcc82025.patch https://github.com/qt/qtdeclarative/commit/144ce34e846b3f732bdb003f99b1f9455425416f.patch +--- + src/quick/items/qquicktextdocument.cpp | 4 +- + src/quick/items/qquicktextnodeengine.cpp | 12 ++- + src/quick/util/qquickstyledtext.cpp | 19 +++- + .../auto/quick/qquicktext/tst_qquicktext.cpp | 100 ++++++++++++++++++ + 4 files changed, 126 insertions(+), 9 deletions(-) + +diff --git a/src/quick/items/qquicktextdocument.cpp b/src/quick/items/qquicktextdocument.cpp +index ff28abe6..fc466e10 100644 +--- a/src/quick/items/qquicktextdocument.cpp ++++ b/src/quick/items/qquicktextdocument.cpp +@@ -105,9 +105,9 @@ QSizeF QQuickTextDocumentWithImageResources::intrinsicSize( + if (format.isImageFormat()) { + QTextImageFormat imageFormat = format.toImageFormat(); + +- const int width = qRound(imageFormat.width()); ++ const int width = qRound(qBound(qreal(INT_MIN), imageFormat.width(), qreal(INT_MAX))); + const bool hasWidth = imageFormat.hasProperty(QTextFormat::ImageWidth) && width > 0; +- const int height = qRound(imageFormat.height()); ++ const int height = qRound(qBound(qreal(INT_MIN), imageFormat.height(), qreal(INT_MAX))); + const bool hasHeight = imageFormat.hasProperty(QTextFormat::ImageHeight) && height > 0; + + QSizeF size(width, height); +diff --git a/src/quick/items/qquicktextnodeengine.cpp b/src/quick/items/qquicktextnodeengine.cpp +index 64636288..e5f1ef3f 100644 +--- a/src/quick/items/qquicktextnodeengine.cpp ++++ b/src/quick/items/qquicktextnodeengine.cpp +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -441,11 +442,12 @@ void QQuickTextNodeEngine::addTextObject(const QTextBlock &block, const QPointF + } + + if (image.isNull()) { +- image = QImage(size.toSize(), QImage::Format_ARGB32_Premultiplied); +- image.fill(Qt::transparent); +- { +- QPainter painter(&image); +- handler->drawObject(&painter, image.rect(), textDocument, pos, format); ++ if (QImageIOHandler::allocateImage(size.toSize(), QImage::Format_ARGB32_Premultiplied, &image)) { ++ image.fill(Qt::transparent); ++ { ++ QPainter painter(&image); ++ handler->drawObject(&painter, image.rect(), textDocument, pos, format); ++ } + } + } + +diff --git a/src/quick/util/qquickstyledtext.cpp b/src/quick/util/qquickstyledtext.cpp +index 527b8dbf..4da211a7 100644 +--- a/src/quick/util/qquickstyledtext.cpp ++++ b/src/quick/util/qquickstyledtext.cpp +@@ -10,6 +10,11 @@ + #include "qquickstyledtext_p.h" + #include + #include ++#include ++ ++#ifndef QQUICKSTYLEDPARSER_COORD_LIMIT ++# define QQUICKSTYLEDPARSER_COORD_LIMIT QT_RASTER_COORD_LIMIT ++#endif + + Q_LOGGING_CATEGORY(lcStyledText, "qt.quick.styledtext") + +@@ -659,9 +664,19 @@ void QQuickStyledTextPrivate::parseImageAttributes(const QChar *&ch, const QStri + if (is_equal_ignoring_case(attr.first, QLatin1String("src"))) { + image->url = QUrl(attr.second.toString()); + } else if (is_equal_ignoring_case(attr.first, QLatin1String("width"))) { +- image->size.setWidth(attr.second.toString().toInt()); ++ bool ok; ++ int v = attr.second.toString().toInt(&ok); ++ if (ok && v <= QQUICKSTYLEDPARSER_COORD_LIMIT) ++ image->size.setWidth(v); ++ else ++ qCWarning(lcStyledText) << "Invalid width provided for "; + } else if (is_equal_ignoring_case(attr.first, QLatin1String("height"))) { +- image->size.setHeight(attr.second.toString().toInt()); ++ bool ok; ++ int v = 
attr.second.toString().toInt(&ok); ++ if (ok && v <= QQUICKSTYLEDPARSER_COORD_LIMIT) ++ image->size.setHeight(v); ++ else ++ qCWarning(lcStyledText) << "Invalid height provided for "; + } else if (is_equal_ignoring_case(attr.first, QLatin1String("align"))) { + if (is_equal_ignoring_case(attr.second, QLatin1String("top"))) { + image->align = QQuickStyledTextImgTag::Top; +diff --git a/tests/auto/quick/qquicktext/tst_qquicktext.cpp b/tests/auto/quick/qquicktext/tst_qquicktext.cpp +index 3cdfeeb0..580516ad 100644 +--- a/tests/auto/quick/qquicktext/tst_qquicktext.cpp ++++ b/tests/auto/quick/qquicktext/tst_qquicktext.cpp +@@ -116,6 +116,8 @@ private slots: + void imgTagsElide(); + void imgTagsUpdates(); + void imgTagsError(); ++ void imgSize_data(); ++ void imgSize(); + void fontSizeMode_data(); + void fontSizeMode(); + void fontSizeModeMultiline_data(); +@@ -3413,6 +3415,103 @@ void tst_qquicktext::imgTagsError() + + QVERIFY(textObject != nullptr); + delete textObject; ++void tst_qquicktext::imgSize_data() ++{ ++ QTest::addColumn("url"); ++ QTest::addColumn("width"); ++ QTest::addColumn("height"); ++ QTest::addColumn("format"); ++ ++ QTest::newRow("negative (styled text)") << QStringLiteral("images/starfish_2.png") ++ << qint64(-0x7FFFFF) ++ << qint64(-0x7FFFFF) ++ << QQuickText::StyledText; ++ QTest::newRow("negative (rich text)") << QStringLiteral("images/starfish_2.png") ++ << qint64(-0x7FFFFF) ++ << qint64(-0x7FFFFF) ++ << QQuickText::RichText; ++ QTest::newRow("large (styled text)") << QStringLiteral("images/starfish_2.png") ++ << qint64(0x7FFFFF) ++ << qint64(0x7FFFFF) ++ << QQuickText::StyledText; ++ QTest::newRow("large (right text)") << QStringLiteral("images/starfish_2.png") ++ << qint64(0x7FFFFF) ++ << qint64(0x7FFFFF) ++ << QQuickText::RichText; ++ QTest::newRow("medium (styled text)") << QStringLiteral("images/starfish_2.png") ++ << qint64(0x10000) ++ << qint64(0x10000) ++ << QQuickText::StyledText; ++ QTest::newRow("medium (right text)") << QStringLiteral("images/starfish_2.png") ++ << qint64(0x10000) ++ << qint64(0x10000) ++ << QQuickText::RichText; ++ QTest::newRow("large non-existent (styled text)") << QStringLiteral("a") ++ << qint64(0x7FFFFF) ++ << qint64(0x7FFFFF) ++ << QQuickText::StyledText; ++ QTest::newRow("medium non-existent (styled text)") << QStringLiteral("a") ++ << qint64(0x10000) ++ << qint64(0x10000) ++ << QQuickText::StyledText; ++ QTest::newRow("out-of-bounds non-existent (styled text)") << QStringLiteral("a") ++ << (qint64(INT_MAX) + 1) ++ << (qint64(INT_MAX) + 1) ++ << QQuickText::StyledText; ++ QTest::newRow("large non-existent (rich text)") << QStringLiteral("a") ++ << qint64(0x7FFFFF) ++ << qint64(0x7FFFFF) ++ << QQuickText::RichText; ++ QTest::newRow("medium non-existent (rich text)") << QStringLiteral("a") ++ << qint64(0x10000) ++ << qint64(0x10000) ++ << QQuickText::RichText; ++ ++ // Additional out-of-bounds tests ++ QTest::newRow("out-of-bounds (styled text)") << QStringLiteral("images/starfish_2.png") ++ << (qint64(INT_MAX) + 1) ++ << (qint64(INT_MAX) + 1) ++ << QQuickText::StyledText; ++ QTest::newRow("out-of-bounds (rich text)") << QStringLiteral("images/starfish_2.png") ++ << (qint64(INT_MAX) + 1) ++ << (qint64(INT_MAX) + 1) ++ << QQuickText::RichText; ++ QTest::newRow("negative out-of-bounds (styled text)") << QStringLiteral("images/starfish_2.png") ++ << (qint64(INT_MIN) - 1) ++ << (qint64(INT_MIN) - 1) ++ << QQuickText::StyledText; ++ QTest::newRow("negative out-of-bounds (rich text)") << QStringLiteral("images/starfish_2.png") ++ << 
(qint64(INT_MIN) - 1) ++ << (qint64(INT_MIN) - 1) ++ << QQuickText::RichText; ++} ++ ++void tst_qquicktext::imgSize() ++{ ++ QFETCH(QString, url); ++ QFETCH(qint64, width); ++ QFETCH(qint64, height); ++ QFETCH(QQuickText::TextFormat, format); ++ ++ // Reusing imgTagsUpdates.qml here, since it is just an empty Text component ++ QScopedPointer window(createView(testFile("imgTagsUpdates.qml"))); ++ window->show(); ++ QVERIFY(QTest::qWaitForWindowExposed(window.data())); ++ ++ QScopedPointer myText(window->rootObject()->findChild("myText")); ++ QVERIFY(myText); ++ ++ myText->setTextFormat(format); ++ ++ QString imgStr = QStringLiteral("") ++ .arg(url) ++ .arg(width) ++ .arg(height); ++ myText->setText(imgStr); ++ ++ QVERIFY(QQuickTest::qWaitForPolish(myText.data())); ++} ++ + } + + void tst_qquicktext::fontSizeMode_data() +@@ -3482,6 +3581,7 @@ void tst_qquicktext::fontSizeMode() + qreal verticalFitHeight = myText->contentHeight(); + QVERIFY(myText->contentWidth() > myText->width()); + QVERIFY(verticalFitHeight <= myText->height() + 2); ++ + QVERIFY(verticalFitHeight > originalHeight); + + // Elide won't affect the height of a single line with VerticalFit but will crop the width. +-- +2.45.4 + diff --git a/SPECS/qtdeclarative/qtdeclarative.spec b/SPECS/qtdeclarative/qtdeclarative.spec index f511a4a042e..66d29fb9a58 100644 --- a/SPECS/qtdeclarative/qtdeclarative.spec +++ b/SPECS/qtdeclarative/qtdeclarative.spec @@ -1,7 +1,7 @@ Summary: Qt6 - QtDeclarative component Name: qtdeclarative Version: 6.6.1 -Release: 1%{?dist} +Release: 2%{?dist} Vendor: Microsoft Corporation Distribution: Azure Linux @@ -10,6 +10,7 @@ License: LGPLv2 with exceptions or GPLv3 with exceptions Url: http://www.qt.io %global majmin %(echo %{version} | cut -d. -f1-2) Source0: https://download.qt.io/archive/qt/%{majmin}/%{version}/submodules/qtdeclarative-everywhere-src-%{version}.tar.xz +Patch0: CVE-2025-12385.patch ## upstream patches @@ -50,7 +51,7 @@ Requires: %{name}%{?_isa} = %{version}-%{release} %{summary}. %prep -%autosetup -n qtdeclarative-everywhere-src-%{version} +%autosetup -p1 -n qtdeclarative-everywhere-src-%{version} %build # HACK so calls to "python" get what we want @@ -219,6 +220,9 @@ popd %{_qt_libdir}/libQt6QmlXmlListModel.prl %changelog +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 6.6.1-2 +- Patch for CVE-2025-12385 + * Tue Jan 02 2024 Sam Meluch - 6.6.1-1 - Upgrade to version 6.6.1 diff --git a/SPECS/rshim/rshim.signatures.json b/SPECS/rshim/rshim.signatures.json index 89bdfe8312e..180a0229f42 100644 --- a/SPECS/rshim/rshim.signatures.json +++ b/SPECS/rshim/rshim.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "rshim-2.1.5.tar.gz": "a7776c66696dd7d1ff2983cfa536673f987394083f9e4c8434fb95f5bea15e8b" + "rshim-2.4.4.tar.gz": "5dc11517e3c4d993db775564313279487db5290c148da645ca99b08125b8fcab" } } \ No newline at end of file diff --git a/SPECS/rshim/rshim.spec b/SPECS/rshim/rshim.spec index a954745b57e..34f5232af75 100644 --- a/SPECS/rshim/rshim.spec +++ b/SPECS/rshim/rshim.spec @@ -3,15 +3,17 @@ # Name: rshim -Version: 2.1.5 +Version: 2.4.4 Release: 1%{?dist} Summary: User-space driver for Mellanox BlueField SoC License: GPLv2 Vendor: Microsoft Corporation Distribution: Azure Linux URL: https://github.com/mellanox/rshim-user-space -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/rshim-2.1.5.tar.gz#/%{name}-%{version}.tar.gz -ExclusiveArch: x86_64 +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. 
+# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz BuildRequires: gcc, autoconf, automake, pkgconfig, make BuildRequires: pkgconfig(libpci), pkgconfig(libusb-1.0) fuse3-devel fuse3-libs @@ -97,6 +99,10 @@ fi %{_mandir}/man8/bf-reg.8.gz %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 2.4.4-1 +- Upgrade version to 2.4.4. +- Update source path + * Tue Dec 17 2024 Binu Jose Philip - Initial Azure Linux import from NVIDIA (license: GPLv2) - License verified diff --git a/SPECS/rsyslog/rsyslog.spec b/SPECS/rsyslog/rsyslog.spec index 88878ac568a..19114214a31 100644 --- a/SPECS/rsyslog/rsyslog.spec +++ b/SPECS/rsyslog/rsyslog.spec @@ -3,7 +3,7 @@ Summary: Rocket-fast system for log processing Name: rsyslog Version: 8.2308.0 -Release: 4%{?dist} +Release: 5%{?dist} License: GPLv3+ AND ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -204,6 +204,9 @@ fi %{_libdir}/rsyslog/omsnmp.so %changelog +* Tue Jan 06 2026 Pawel Winogrodzki - 8.2308.0-5 +- Bumping release to rebuild with new 'net-snmp' libs. + * Tue Feb 04 2025 Andrew Phelps - 8.2308.0-4 - Add patch to fix upstream issue #5158 diff --git a/SPECS/ruby/CVE-2025-61594.patch b/SPECS/ruby/CVE-2025-61594.patch new file mode 100644 index 00000000000..238570a95c0 --- /dev/null +++ b/SPECS/ruby/CVE-2025-61594.patch @@ -0,0 +1,170 @@ +From 612a8aa90e3d4c3eb2f7e6f24d52eac8fd440df3 Mon Sep 17 00:00:00 2001 +From: Hiroshi SHIBATA +Date: Sat, 12 Jul 2025 11:51:31 +0900 +Subject: [PATCH] Clear user info totally at setting any of authority info + +CVE-2025-61594: URI Credential Leakage Bypass over CVE-2025-27221 + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/ruby/uri/commit/20157e3e29b125ff41f1d9662e2e3b1d066f5902 +--- + lib/uri/generic.rb | 29 +++++++++++++++++++++-------- + test/uri/test_generic.rb | 15 ++++++++++----- + 2 files changed, 31 insertions(+), 13 deletions(-) + +diff --git a/lib/uri/generic.rb b/lib/uri/generic.rb +index 2c0a88d..c893ed1 100644 +--- a/lib/uri/generic.rb ++++ b/lib/uri/generic.rb +@@ -186,18 +186,18 @@ module URI + + if arg_check + self.scheme = scheme +- self.userinfo = userinfo + self.hostname = host + self.port = port ++ self.userinfo = userinfo + self.path = path + self.query = query + self.opaque = opaque + self.fragment = fragment + else + self.set_scheme(scheme) +- self.set_userinfo(userinfo) + self.set_host(host) + self.set_port(port) ++ self.set_userinfo(userinfo) + self.set_path(path) + self.query = query + self.set_opaque(opaque) +@@ -511,7 +511,7 @@ module URI + user, password = split_userinfo(user) + end + @user = user +- @password = password if password ++ @password = password + + [@user, @password] + end +@@ -522,7 +522,7 @@ module URI + # See also URI::Generic.user=. + # + def set_user(v) +- set_userinfo(v, @password) ++ set_userinfo(v, nil) + v + end + protected :set_user +@@ -574,6 +574,12 @@ module URI + @password + end + ++ # Returns the authority info (array of user, password, host and ++ # port), if any is set. Or returns +nil+. ++ def authority ++ return @user, @password, @host, @port if @user || @password || @host || @port ++ end ++ + # Returns the user component after URI decoding. 
+ def decoded_user + URI.decode_uri_component(@user) if @user +@@ -615,6 +621,13 @@ module URI + end + protected :set_host + ++ # Protected setter for the authority info (+user+, +password+, +host+ ++ # and +port+). If +port+ is +nil+, +default_port+ will be set. ++ # ++ protected def set_authority(user, password, host, port = nil) ++ @user, @password, @host, @port = user, password, host, port || self.default_port ++ end ++ + # + # == Args + # +@@ -639,6 +652,7 @@ module URI + def host=(v) + check_host(v) + set_host(v) ++ set_userinfo(nil) + v + end + +@@ -729,6 +743,7 @@ module URI + def port=(v) + check_port(v) + set_port(v) ++ set_userinfo(nil) + port + end + +@@ -1121,7 +1136,7 @@ module URI + + base = self.dup + +- authority = rel.userinfo || rel.host || rel.port ++ authority = rel.authority + + # RFC2396, Section 5.2, 2) + if (rel.path.nil? || rel.path.empty?) && !authority && !rel.query +@@ -1134,9 +1149,7 @@ module URI + + # RFC2396, Section 5.2, 4) + if authority +- base.set_userinfo(rel.userinfo) +- base.set_host(rel.host) +- base.set_port(rel.port || base.default_port) ++ base.set_authority(*authority) + base.set_path(rel.path) + elsif base.path && rel.path + base.set_path(merge_path(base.path, rel.path)) +diff --git a/test/uri/test_generic.rb b/test/uri/test_generic.rb +index 1a70dd4..dc41b54 100644 +--- a/test/uri/test_generic.rb ++++ b/test/uri/test_generic.rb +@@ -272,6 +272,9 @@ class URI::TestGeneric < Test::Unit::TestCase + u0 = URI.parse('http://new.example.org/path') + u1 = u.merge('//new.example.org/path') + assert_equal(u0, u1) ++ u0 = URI.parse('http://other@example.net') ++ u1 = u.merge('//other@example.net') ++ assert_equal(u0, u1) + end + + def test_route +@@ -737,17 +740,18 @@ class URI::TestGeneric < Test::Unit::TestCase + def test_set_component + uri = URI.parse('http://foo:bar@baz') + assert_equal('oof', uri.user = 'oof') +- assert_equal('http://oof:bar@baz', uri.to_s) ++ assert_equal('http://oof@baz', uri.to_s) + assert_equal('rab', uri.password = 'rab') + assert_equal('http://oof:rab@baz', uri.to_s) + assert_equal('foo', uri.userinfo = 'foo') +- assert_equal('http://foo:rab@baz', uri.to_s) ++ assert_equal('http://foo@baz', uri.to_s) + assert_equal(['foo', 'bar'], uri.userinfo = ['foo', 'bar']) + assert_equal('http://foo:bar@baz', uri.to_s) + assert_equal(['foo'], uri.userinfo = ['foo']) +- assert_equal('http://foo:bar@baz', uri.to_s) ++ assert_equal('http://foo@baz', uri.to_s) + assert_equal('zab', uri.host = 'zab') +- assert_equal('http://foo:bar@zab', uri.to_s) ++ assert_equal('http://zab', uri.to_s) ++ uri.userinfo = ['foo', 'bar'] + uri.port = "" + assert_nil(uri.port) + uri.port = "80" +@@ -757,7 +761,8 @@ class URI::TestGeneric < Test::Unit::TestCase + uri.port = " 080 " + assert_equal(80, uri.port) + assert_equal(8080, uri.port = 8080) +- assert_equal('http://foo:bar@zab:8080', uri.to_s) ++ assert_equal('http://zab:8080', uri.to_s) ++ uri = URI.parse('http://foo:bar@zab:8080') + assert_equal('/', uri.path = '/') + assert_equal('http://foo:bar@zab:8080/', uri.to_s) + assert_equal('a=1', uri.query = 'a=1') +-- +2.45.4 + diff --git a/SPECS/ruby/ruby.spec b/SPECS/ruby/ruby.spec index 27c6fa92a47..f7f9261d42b 100644 --- a/SPECS/ruby/ruby.spec +++ b/SPECS/ruby/ruby.spec @@ -87,7 +87,7 @@ Name: ruby # provides should be versioned according to the ruby version. 
# More info: https://stdgems.org/ Version: %{ruby_version} -Release: 6%{?dist} +Release: 7%{?dist} License: (Ruby OR BSD) AND Public Domain AND MIT AND CC0 AND zlib AND UCD Vendor: Microsoft Corporation Distribution: Azure Linux @@ -112,6 +112,7 @@ Patch5: CVE-2025-27220.patch Patch6: CVE-2025-27221.patch Patch7: CVE-2025-6442.patch Patch8: CVE-2025-24294.patch +Patch9: CVE-2025-61594.patch BuildRequires: openssl-devel # Pkgconfig(yaml-0.1) is needed to build the 'psych' gem. BuildRequires: pkgconfig(yaml-0.1) @@ -416,6 +417,9 @@ sudo -u test make test TESTS="-v" %{_rpmconfigdir}/rubygems.con %changelog +* Mon Jan 05 2026 Azure Linux Security Servicing Account - 3.3.5-7 +- Patch for CVE-2025-61594 + * Fri Oct 17 2025 BinduSri Adabala - 3.3.5-6 - Bump release to build with new rubygem-rexml to fix CVE-2025-58767 diff --git a/SPECS/rust/CVE-2025-4574.patch b/SPECS/rust/CVE-2025-4574.patch index 39d9ea6143a..76e51c9e97f 100644 --- a/SPECS/rust/CVE-2025-4574.patch +++ b/SPECS/rust/CVE-2025-4574.patch @@ -1,46 +1,22 @@ -From 599103c7aeae04fe9fd25dd4b7254c4cf456693c Mon Sep 17 00:00:00 2001 -From: akhila-guruju -Date: Mon, 2 Jun 2025 09:39:09 +0000 -Subject: [PATCH] Address CVE-2025-4574 - +From c8035d278308c0043512b50634b583d26f9be9dd Mon Sep 17 00:00:00 2001 +From: Kavya Sree Kaitepalli +Date: Tue, 28 Oct 2025 11:51:40 +0000 +Subject: [PATCH] CVE-2025-4574 Upstream Patch reference: https://github.com/crossbeam-rs/crossbeam/commit/6ec74ecae896df5fc239518b45a1bfd258c9db68 - --- - vendor/crossbeam-channel-0.5.13/.cargo-checksum.json | 2 +- - vendor/crossbeam-channel-0.5.13/src/flavors/list.rs | 2 +- vendor/crossbeam-channel-0.5.14/.cargo-checksum.json | 2 +- vendor/crossbeam-channel-0.5.14/src/flavors/list.rs | 2 +- - 4 files changed, 4 insertions(+), 4 deletions(-) + 2 files changed, 2 insertions(+), 2 deletions(-) -diff --git a/vendor/crossbeam-channel-0.5.13/.cargo-checksum.json b/vendor/crossbeam-channel-0.5.13/.cargo-checksum.json -index 6784db9eb..b0731762f 100644 ---- a/vendor/crossbeam-channel-0.5.13/.cargo-checksum.json -+++ b/vendor/crossbeam-channel-0.5.13/.cargo-checksum.json -@@ -1 +1 @@ 
--{"files":{"CHANGELOG.md":"6b520b783f5e0c17c6caa975defb9ed6e0ae1254a6a41a9bcd03d249bc942289","Cargo.lock":"605ed4a922e22b42c8a7b75624dfd55d6f0bc96bf76bbf016b003a2c44ddc29a","Cargo.toml":"0f7a8020ede552c5370c101973e8b77cdf5ce6d41f4b6f7b1420b97491fd1e24","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"b16db96b93b1d7cf7bea533f572091ec6bca3234fbe0a83038be772ff391a44c","README.md":"5dfb91ebb498dec49948a440a53977109ec532388170e567c3c2a0339589aa4c","benches/crossbeam.rs":"96cb1abd23cac3ef8a7174a802e94609926b555bb02c9658c78723d433f1dd92","examples/fibonacci.rs":"4e88fa40048cdc31e9c7bb60347d46f92543d7ddf39cab3b52bfe44affdb6a02","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"d02121258f08d56f1eb7997e19bcb9bacb6836cfa0abbba90a9e59d8a50ae5cf","src/channel.rs":"13fbbe12d4ec361855af1c3587fc80aea5f537db8dc44dd4f66c9e2b4ae9f5c1","src/context.rs":"477cc2b7bac7502fd2459288a58cc76f015b1ec8e87b853cda77ccb1808c6334","src/counter.rs":"b8f1e48ec634a7dab8e04c485209161587ecbbd2d57b0825467164d4554c6249","src/err.rs":"44cb2024ee6b0cd6fd24996430e53720769f64b4ac35016bc3e05cb9db48681d","src/flavors/array.rs":"79bc219187c9f40b156b9fe551c1176b66bf73e6d48905b23a2d74c6366a2205","src/flavors/at.rs":"04e07861534f2f7d5b5f884f2f5bc9c008427e6d0afa1c8ad401e1d7e54b57eb","src/flavors/list.rs":"280f55b51cefe9351a52c8d2186de368b688ad06885d083efe7e831726846520","src/flavors/mod.rs":"3d9d43bc38b0adb18c96c995c2bd3421d8e33ab6c30b20c3c467d21d48e485dc","src/flavors/never.rs":"747da857aa1a7601641f23f4930e6ad00ebaf50456d9be5c7aa270e2ecc24dcb","src/flavors/tick.rs":"0916ca3faef30b8cc591137701c456d5fc5b5b49cb1edad1e3a80d35bae222bb","src/flavors/zero.rs":"f9cbc9e035fadce808a4af86a223cfded89990ba1e9acfe731fb17a7fe12b432","src/lib.rs":"5b1c406fd1ce6140feae9000be361858da2aabe7fc9fffd0eafcb88020d2b268","src/select.rs":"7aa8addb82427141b0a4afa16fa4d23a02becab115a0a5a6d6d327728fd0672f","src/select_macro.rs":"522cfc8155825c1f260922c17ea6ef8ae672cf94863750c1a6115db2cbc9fc18","src/utils.rs":"9bd81aeb385a81409a63f4b9edc35444c7fd1d2724725f9c34ad7ca39dd69a18","src/waker.rs":"017f87a120d945502701c0dba79062c7fe55d44e5907cc6f8605b4510c90d529","tests/after.rs":"0154a8e152880db17a20514ecdd49dabc361d3629858d119b9746b5e932c780c","tests/array.rs":"a57ae6264e676f573d7adb5c4b024994e98bc6811352516adb3444f880f7125e","tests/golang.rs":"7b2ef219ba8a21841c133512f3a540f8279a2458304e9bbed7da81d6091ecd82","tests/iter.rs":"25dc02135bbae9d47a30f9047661648e66bdc134e40ba78bc2fbacbb8b3819bc","tests/list.rs":"3d1a4ae23bb6b4767242b8109a8efda26f1d3b28c0f90da3368f8eb9ca0eee37","tests/mpsc.rs":"5fbb5342fa7c9e4bcda5545255e0979dc6b9ba638edee127acf75372c18c925f","tests/never.rs":"ee40c4fc4dd5af4983fae8de6927f52b81174d222c162f745b26c4a6c7108e4f","tests/ready.rs":"4361352fa94254041e6c73e97b13be032c2d51c741f2a50519efe3000cf4dc28","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"101ea8afd9a40d24c2d2aec29e5f2fdc4faac51aa1d7c9fe077b364f12edd206","tests/select_macro.rs":"4d6d52ad48f385c5b8f5023a590e00e7a4b632e80bd929b6fc89a53f5faee515","tests/thread_locals.rs":"f42fcddca959b3b44cd545b92949d65e33a54332b27f490ec92f9f29b7f8290c","tests/tick.rs":"5f697bd14c48505d932e82065b5302ef668e1cc19cac18e8ac22e0c83c221c1d","tests/zero.rs":"9c5af802d5efb2c711f8242b8905ed29cc2601e48dbd95e41c7e6fbfe2918398"},"package":"33480d6946193aa8033910124896c
a395333cae7e2d1113d1fef6c3272217df2"} -\ No newline at end of file -+{"files":{"CHANGELOG.md":"6b520b783f5e0c17c6caa975defb9ed6e0ae1254a6a41a9bcd03d249bc942289","Cargo.lock":"605ed4a922e22b42c8a7b75624dfd55d6f0bc96bf76bbf016b003a2c44ddc29a","Cargo.toml":"0f7a8020ede552c5370c101973e8b77cdf5ce6d41f4b6f7b1420b97491fd1e24","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"b16db96b93b1d7cf7bea533f572091ec6bca3234fbe0a83038be772ff391a44c","README.md":"5dfb91ebb498dec49948a440a53977109ec532388170e567c3c2a0339589aa4c","benches/crossbeam.rs":"96cb1abd23cac3ef8a7174a802e94609926b555bb02c9658c78723d433f1dd92","examples/fibonacci.rs":"4e88fa40048cdc31e9c7bb60347d46f92543d7ddf39cab3b52bfe44affdb6a02","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"d02121258f08d56f1eb7997e19bcb9bacb6836cfa0abbba90a9e59d8a50ae5cf","src/channel.rs":"13fbbe12d4ec361855af1c3587fc80aea5f537db8dc44dd4f66c9e2b4ae9f5c1","src/context.rs":"477cc2b7bac7502fd2459288a58cc76f015b1ec8e87b853cda77ccb1808c6334","src/counter.rs":"b8f1e48ec634a7dab8e04c485209161587ecbbd2d57b0825467164d4554c6249","src/err.rs":"44cb2024ee6b0cd6fd24996430e53720769f64b4ac35016bc3e05cb9db48681d","src/flavors/array.rs":"79bc219187c9f40b156b9fe551c1176b66bf73e6d48905b23a2d74c6366a2205","src/flavors/at.rs":"04e07861534f2f7d5b5f884f2f5bc9c008427e6d0afa1c8ad401e1d7e54b57eb","src/flavors/list.rs":"048e31bda49b8d2b7bdbe36cae07065745c69990b6adf73d283b52543429baad","src/flavors/mod.rs":"3d9d43bc38b0adb18c96c995c2bd3421d8e33ab6c30b20c3c467d21d48e485dc","src/flavors/never.rs":"747da857aa1a7601641f23f4930e6ad00ebaf50456d9be5c7aa270e2ecc24dcb","src/flavors/tick.rs":"0916ca3faef30b8cc591137701c456d5fc5b5b49cb1edad1e3a80d35bae222bb","src/flavors/zero.rs":"f9cbc9e035fadce808a4af86a223cfded89990ba1e9acfe731fb17a7fe12b432","src/lib.rs":"5b1c406fd1ce6140feae9000be361858da2aabe7fc9fffd0eafcb88020d2b268","src/select.rs":"7aa8addb82427141b0a4afa16fa4d23a02becab115a0a5a6d6d327728fd0672f","src/select_macro.rs":"522cfc8155825c1f260922c17ea6ef8ae672cf94863750c1a6115db2cbc9fc18","src/utils.rs":"9bd81aeb385a81409a63f4b9edc35444c7fd1d2724725f9c34ad7ca39dd69a18","src/waker.rs":"017f87a120d945502701c0dba79062c7fe55d44e5907cc6f8605b4510c90d529","tests/after.rs":"0154a8e152880db17a20514ecdd49dabc361d3629858d119b9746b5e932c780c","tests/array.rs":"a57ae6264e676f573d7adb5c4b024994e98bc6811352516adb3444f880f7125e","tests/golang.rs":"7b2ef219ba8a21841c133512f3a540f8279a2458304e9bbed7da81d6091ecd82","tests/iter.rs":"25dc02135bbae9d47a30f9047661648e66bdc134e40ba78bc2fbacbb8b3819bc","tests/list.rs":"3d1a4ae23bb6b4767242b8109a8efda26f1d3b28c0f90da3368f8eb9ca0eee37","tests/mpsc.rs":"5fbb5342fa7c9e4bcda5545255e0979dc6b9ba638edee127acf75372c18c925f","tests/never.rs":"ee40c4fc4dd5af4983fae8de6927f52b81174d222c162f745b26c4a6c7108e4f","tests/ready.rs":"4361352fa94254041e6c73e97b13be032c2d51c741f2a50519efe3000cf4dc28","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"101ea8afd9a40d24c2d2aec29e5f2fdc4faac51aa1d7c9fe077b364f12edd206","tests/select_macro.rs":"4d6d52ad48f385c5b8f5023a590e00e7a4b632e80bd929b6fc89a53f5faee515","tests/thread_locals.rs":"f42fcddca959b3b44cd545b92949d65e33a54332b27f490ec92f9f29b7f8290c","tests/tick.rs":"5f697bd14c48505d932e82065b5302ef668e1cc19cac18e8ac22e0c83c221c1d","tests/zero.rs":"9c5af802d5efb2c711f8242b8905ed29cc2601e4
8dbd95e41c7e6fbfe2918398"},"package":"33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"} -diff --git a/vendor/crossbeam-channel-0.5.13/src/flavors/list.rs b/vendor/crossbeam-channel-0.5.13/src/flavors/list.rs -index e7fb6150f..bad76e858 100644 ---- a/vendor/crossbeam-channel-0.5.13/src/flavors/list.rs -+++ b/vendor/crossbeam-channel-0.5.13/src/flavors/list.rs -@@ -596,7 +596,7 @@ impl Channel { - // In that case, just wait until it gets initialized. - while block.is_null() { - backoff.snooze(); -- block = self.head.block.load(Ordering::Acquire); -+ block = self.head.block.swap(ptr::null_mut(), Ordering::AcqRel); - } - } - diff --git a/vendor/crossbeam-channel-0.5.14/.cargo-checksum.json b/vendor/crossbeam-channel-0.5.14/.cargo-checksum.json -index b6da487d3..5cba471f5 100644 +index 6de68d6be..a8cb473bf 100644 --- a/vendor/crossbeam-channel-0.5.14/.cargo-checksum.json +++ b/vendor/crossbeam-channel-0.5.14/.cargo-checksum.json @@ -1 +1 @@ --{"files":{"CHANGELOG.md":"4a7e4bc790fa3e9acb9577c489964690aa3a9ef549571fefd9e15362022901c2","Cargo.lock":"a4cbda8f2355ee7e9543e1eb01fb67173c079ae0337146c12fa577a4df81fa83","Cargo.toml":"a61aa427c7e7b3d318db6130cb49e4d1a0a2677853a3f9b6774c0cba93106cf8","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"b16db96b93b1d7cf7bea533f572091ec6bca3234fbe0a83038be772ff391a44c","README.md":"5dfb91ebb498dec49948a440a53977109ec532388170e567c3c2a0339589aa4c","benches/crossbeam.rs":"96cb1abd23cac3ef8a7174a802e94609926b555bb02c9658c78723d433f1dd92","examples/fibonacci.rs":"4e88fa40048cdc31e9c7bb60347d46f92543d7ddf39cab3b52bfe44affdb6a02","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"d02121258f08d56f1eb7997e19bcb9bacb6836cfa0abbba90a9e59d8a50ae5cf","src/channel.rs":"13fbbe12d4ec361855af1c3587fc80aea5f537db8dc44dd4f66c9e2b4ae9f5c1","src/context.rs":"0c5f278572d3db33ed3dfba45f62c8db372c9153db0695a5cdecf700c2ba73a5","src/counter.rs":"b8f1e48ec634a7dab8e04c485209161587ecbbd2d57b0825467164d4554c6249","src/err.rs":"44cb2024ee6b0cd6fd24996430e53720769f64b4ac35016bc3e05cb9db48681d","src/flavors/array.rs":"79bc219187c9f40b156b9fe551c1176b66bf73e6d48905b23a2d74c6366a2205","src/flavors/at.rs":"04e07861534f2f7d5b5f884f2f5bc9c008427e6d0afa1c8ad401e1d7e54b57eb","src/flavors/list.rs":"a1269b2a9b83e688cbd4ba2f06f6ce02763ca5dcb3ed27214d0dc64a97de30f6","src/flavors/mod.rs":"3d9d43bc38b0adb18c96c995c2bd3421d8e33ab6c30b20c3c467d21d48e485dc","src/flavors/never.rs":"747da857aa1a7601641f23f4930e6ad00ebaf50456d9be5c7aa270e2ecc24dcb","src/flavors/tick.rs":"0916ca3faef30b8cc591137701c456d5fc5b5b49cb1edad1e3a80d35bae222bb","src/flavors/zero.rs":"f9cbc9e035fadce808a4af86a223cfded89990ba1e9acfe731fb17a7fe12b432","src/lib.rs":"5b1c406fd1ce6140feae9000be361858da2aabe7fc9fffd0eafcb88020d2b268","src/select.rs":"301c765751586204371bedb69162e23bcf7e094cbc37b72203698a18b889550f","src/select_macro.rs":"f30b726dff104b17c2dfbd67b271758d8c06d63ec4811ffab88b2e1dac43e3df","src/utils.rs":"9bd81aeb385a81409a63f4b9edc35444c7fd1d2724725f9c34ad7ca39dd69a18","src/waker.rs":"017f87a120d945502701c0dba79062c7fe55d44e5907cc6f8605b4510c90d529","tests/after.rs":"0154a8e152880db17a20514ecdd49dabc361d3629858d119b9746b5e932c780c","tests/array.rs":"a57ae6264e676f573d7adb5c4b024994e98bc6811352516adb3444f880f7125e","tests/golang.rs":"7b2ef219ba8a21841c133512f3a540f8279a2458304e9bbed7da81d6091ecd82","tests/iter.rs":
"25dc02135bbae9d47a30f9047661648e66bdc134e40ba78bc2fbacbb8b3819bc","tests/list.rs":"e71d34f790af290e463707c2336ff221f7841767e961b91747aa00e21df0ad32","tests/mpsc.rs":"5fbb5342fa7c9e4bcda5545255e0979dc6b9ba638edee127acf75372c18c925f","tests/never.rs":"ee40c4fc4dd5af4983fae8de6927f52b81174d222c162f745b26c4a6c7108e4f","tests/ready.rs":"4361352fa94254041e6c73e97b13be032c2d51c741f2a50519efe3000cf4dc28","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"101ea8afd9a40d24c2d2aec29e5f2fdc4faac51aa1d7c9fe077b364f12edd206","tests/select_macro.rs":"e83bd33b34c47d703abe06420a23868809468516943347bdbfb6af4db0cec65a","tests/thread_locals.rs":"f42fcddca959b3b44cd545b92949d65e33a54332b27f490ec92f9f29b7f8290c","tests/tick.rs":"5f697bd14c48505d932e82065b5302ef668e1cc19cac18e8ac22e0c83c221c1d","tests/zero.rs":"9c5af802d5efb2c711f8242b8905ed29cc2601e48dbd95e41c7e6fbfe2918398"},"package":"06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"} +-{"files":{".cargo_vcs_info.json":"3c8d4a7c01150c97d75f3c05bc1cfcb551350a61a52c9dbfa3e8faac9e6aeb21","CHANGELOG.md":"4a7e4bc790fa3e9acb9577c489964690aa3a9ef549571fefd9e15362022901c2","Cargo.lock":"a4cbda8f2355ee7e9543e1eb01fb67173c079ae0337146c12fa577a4df81fa83","Cargo.toml":"a61aa427c7e7b3d318db6130cb49e4d1a0a2677853a3f9b6774c0cba93106cf8","Cargo.toml.orig":"94298c51c4c10bc8462f17e3010bbf0d52af1c25278ed0fa1f5bc6832a972cb5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"b16db96b93b1d7cf7bea533f572091ec6bca3234fbe0a83038be772ff391a44c","README.md":"5dfb91ebb498dec49948a440a53977109ec532388170e567c3c2a0339589aa4c","benches/crossbeam.rs":"96cb1abd23cac3ef8a7174a802e94609926b555bb02c9658c78723d433f1dd92","examples/fibonacci.rs":"4e88fa40048cdc31e9c7bb60347d46f92543d7ddf39cab3b52bfe44affdb6a02","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"d02121258f08d56f1eb7997e19bcb9bacb6836cfa0abbba90a9e59d8a50ae5cf","src/channel.rs":"13fbbe12d4ec361855af1c3587fc80aea5f537db8dc44dd4f66c9e2b4ae9f5c1","src/context.rs":"0c5f278572d3db33ed3dfba45f62c8db372c9153db0695a5cdecf700c2ba73a5","src/counter.rs":"b8f1e48ec634a7dab8e04c485209161587ecbbd2d57b0825467164d4554c6249","src/err.rs":"44cb2024ee6b0cd6fd24996430e53720769f64b4ac35016bc3e05cb9db48681d","src/flavors/array.rs":"79bc219187c9f40b156b9fe551c1176b66bf73e6d48905b23a2d74c6366a2205","src/flavors/at.rs":"04e07861534f2f7d5b5f884f2f5bc9c008427e6d0afa1c8ad401e1d7e54b57eb","src/flavors/list.rs":"a1269b2a9b83e688cbd4ba2f06f6ce02763ca5dcb3ed27214d0dc64a97de30f6","src/flavors/mod.rs":"3d9d43bc38b0adb18c96c995c2bd3421d8e33ab6c30b20c3c467d21d48e485dc","src/flavors/never.rs":"747da857aa1a7601641f23f4930e6ad00ebaf50456d9be5c7aa270e2ecc24dcb","src/flavors/tick.rs":"0916ca3faef30b8cc591137701c456d5fc5b5b49cb1edad1e3a80d35bae222bb","src/flavors/zero.rs":"f9cbc9e035fadce808a4af86a223cfded89990ba1e9acfe731fb17a7fe12b432","src/lib.rs":"5b1c406fd1ce6140feae9000be361858da2aabe7fc9fffd0eafcb88020d2b268","src/select.rs":"301c765751586204371bedb69162e23bcf7e094cbc37b72203698a18b889550f","src/select_macro.rs":"f30b726dff104b17c2dfbd67b271758d8c06d63ec4811ffab88b2e1dac43e3df","src/utils.rs":"9bd81aeb385a81409a63f4b9edc35444c7fd1d2724725f9c34ad7ca39dd69a18","src/waker.rs":"017f87a120d945502701c0dba79062c7fe55d44e5907cc6f8605b4510c90d529","tests/after.rs":"0154a8e152880db17a20514ecdd49dabc361d3
629858d119b9746b5e932c780c","tests/array.rs":"a57ae6264e676f573d7adb5c4b024994e98bc6811352516adb3444f880f7125e","tests/golang.rs":"7b2ef219ba8a21841c133512f3a540f8279a2458304e9bbed7da81d6091ecd82","tests/iter.rs":"25dc02135bbae9d47a30f9047661648e66bdc134e40ba78bc2fbacbb8b3819bc","tests/list.rs":"e71d34f790af290e463707c2336ff221f7841767e961b91747aa00e21df0ad32","tests/mpsc.rs":"5fbb5342fa7c9e4bcda5545255e0979dc6b9ba638edee127acf75372c18c925f","tests/never.rs":"ee40c4fc4dd5af4983fae8de6927f52b81174d222c162f745b26c4a6c7108e4f","tests/ready.rs":"4361352fa94254041e6c73e97b13be032c2d51c741f2a50519efe3000cf4dc28","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"101ea8afd9a40d24c2d2aec29e5f2fdc4faac51aa1d7c9fe077b364f12edd206","tests/select_macro.rs":"e83bd33b34c47d703abe06420a23868809468516943347bdbfb6af4db0cec65a","tests/thread_locals.rs":"f42fcddca959b3b44cd545b92949d65e33a54332b27f490ec92f9f29b7f8290c","tests/tick.rs":"5f697bd14c48505d932e82065b5302ef668e1cc19cac18e8ac22e0c83c221c1d","tests/zero.rs":"9c5af802d5efb2c711f8242b8905ed29cc2601e48dbd95e41c7e6fbfe2918398"},"package":"06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"} +\ No newline at end of file ++{"files":{".cargo_vcs_info.json":"3c8d4a7c01150c97d75f3c05bc1cfcb551350a61a52c9dbfa3e8faac9e6aeb21","CHANGELOG.md":"4a7e4bc790fa3e9acb9577c489964690aa3a9ef549571fefd9e15362022901c2","Cargo.lock":"a4cbda8f2355ee7e9543e1eb01fb67173c079ae0337146c12fa577a4df81fa83","Cargo.toml":"a61aa427c7e7b3d318db6130cb49e4d1a0a2677853a3f9b6774c0cba93106cf8","Cargo.toml.orig":"94298c51c4c10bc8462f17e3010bbf0d52af1c25278ed0fa1f5bc6832a972cb5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"b16db96b93b1d7cf7bea533f572091ec6bca3234fbe0a83038be772ff391a44c","README.md":"5dfb91ebb498dec49948a440a53977109ec532388170e567c3c2a0339589aa4c","benches/crossbeam.rs":"96cb1abd23cac3ef8a7174a802e94609926b555bb02c9658c78723d433f1dd92","examples/fibonacci.rs":"4e88fa40048cdc31e9c7bb60347d46f92543d7ddf39cab3b52bfe44affdb6a02","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"d02121258f08d56f1eb7997e19bcb9bacb6836cfa0abbba90a9e59d8a50ae5cf","src/channel.rs":"13fbbe12d4ec361855af1c3587fc80aea5f537db8dc44dd4f66c9e2b4ae9f5c1","src/context.rs":"0c5f278572d3db33ed3dfba45f62c8db372c9153db0695a5cdecf700c2ba73a5","src/counter.rs":"b8f1e48ec634a7dab8e04c485209161587ecbbd2d57b0825467164d4554c6249","src/err.rs":"44cb2024ee6b0cd6fd24996430e53720769f64b4ac35016bc3e05cb9db48681d","src/flavors/array.rs":"79bc219187c9f40b156b9fe551c1176b66bf73e6d48905b23a2d74c6366a2205","src/flavors/at.rs":"04e07861534f2f7d5b5f884f2f5bc9c008427e6d0afa1c8ad401e1d7e54b57eb","src/flavors/list.rs":"03eda8e9e36022eb7f15b1d17e182efc56c8a1c4a7db5a60c0acd808012ceae8","src/flavors/mod.rs":"3d9d43bc38b0adb18c96c995c2bd3421d8e33ab6c30b20c3c467d21d48e485dc","src/flavors/never.rs":"747da857aa1a7601641f23f4930e6ad00ebaf50456d9be5c7aa270e2ecc24dcb","src/flavors/tick.rs":"0916ca3faef30b8cc591137701c456d5fc5b5b49cb1edad1e3a80d35bae222bb","src/flavors/zero.rs":"f9cbc9e035fadce808a4af86a223cfded89990ba1e9acfe731fb17a7fe12b432","src/lib.rs":"5b1c406fd1ce6140feae9000be361858da2aabe7fc9fffd0eafcb88020d2b268","src/select.rs":"301c765751586204371bedb69162e23bcf7e094cbc37b72203698a18b889550f","src/select_macro.rs":"f30b726dff104b17c2dfbd67b271758d8c06d63ec481
1ffab88b2e1dac43e3df","src/utils.rs":"9bd81aeb385a81409a63f4b9edc35444c7fd1d2724725f9c34ad7ca39dd69a18","src/waker.rs":"017f87a120d945502701c0dba79062c7fe55d44e5907cc6f8605b4510c90d529","tests/after.rs":"0154a8e152880db17a20514ecdd49dabc361d3629858d119b9746b5e932c780c","tests/array.rs":"a57ae6264e676f573d7adb5c4b024994e98bc6811352516adb3444f880f7125e","tests/golang.rs":"7b2ef219ba8a21841c133512f3a540f8279a2458304e9bbed7da81d6091ecd82","tests/iter.rs":"25dc02135bbae9d47a30f9047661648e66bdc134e40ba78bc2fbacbb8b3819bc","tests/list.rs":"e71d34f790af290e463707c2336ff221f7841767e961b91747aa00e21df0ad32","tests/mpsc.rs":"5fbb5342fa7c9e4bcda5545255e0979dc6b9ba638edee127acf75372c18c925f","tests/never.rs":"ee40c4fc4dd5af4983fae8de6927f52b81174d222c162f745b26c4a6c7108e4f","tests/ready.rs":"4361352fa94254041e6c73e97b13be032c2d51c741f2a50519efe3000cf4dc28","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"101ea8afd9a40d24c2d2aec29e5f2fdc4faac51aa1d7c9fe077b364f12edd206","tests/select_macro.rs":"e83bd33b34c47d703abe06420a23868809468516943347bdbfb6af4db0cec65a","tests/thread_locals.rs":"f42fcddca959b3b44cd545b92949d65e33a54332b27f490ec92f9f29b7f8290c","tests/tick.rs":"5f697bd14c48505d932e82065b5302ef668e1cc19cac18e8ac22e0c83c221c1d","tests/zero.rs":"9c5af802d5efb2c711f8242b8905ed29cc2601e48dbd95e41c7e6fbfe2918398"},"package":"06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"} \ No newline at end of file -+{"files":{"CHANGELOG.md":"4a7e4bc790fa3e9acb9577c489964690aa3a9ef549571fefd9e15362022901c2","Cargo.lock":"a4cbda8f2355ee7e9543e1eb01fb67173c079ae0337146c12fa577a4df81fa83","Cargo.toml":"a61aa427c7e7b3d318db6130cb49e4d1a0a2677853a3f9b6774c0cba93106cf8","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"b16db96b93b1d7cf7bea533f572091ec6bca3234fbe0a83038be772ff391a44c","README.md":"5dfb91ebb498dec49948a440a53977109ec532388170e567c3c2a0339589aa4c","benches/crossbeam.rs":"96cb1abd23cac3ef8a7174a802e94609926b555bb02c9658c78723d433f1dd92","examples/fibonacci.rs":"4e88fa40048cdc31e9c7bb60347d46f92543d7ddf39cab3b52bfe44affdb6a02","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"d02121258f08d56f1eb7997e19bcb9bacb6836cfa0abbba90a9e59d8a50ae5cf","src/channel.rs":"13fbbe12d4ec361855af1c3587fc80aea5f537db8dc44dd4f66c9e2b4ae9f5c1","src/context.rs":"0c5f278572d3db33ed3dfba45f62c8db372c9153db0695a5cdecf700c2ba73a5","src/counter.rs":"b8f1e48ec634a7dab8e04c485209161587ecbbd2d57b0825467164d4554c6249","src/err.rs":"44cb2024ee6b0cd6fd24996430e53720769f64b4ac35016bc3e05cb9db48681d","src/flavors/array.rs":"79bc219187c9f40b156b9fe551c1176b66bf73e6d48905b23a2d74c6366a2205","src/flavors/at.rs":"04e07861534f2f7d5b5f884f2f5bc9c008427e6d0afa1c8ad401e1d7e54b57eb","src/flavors/list.rs":"03eda8e9e36022eb7f15b1d17e182efc56c8a1c4a7db5a60c0acd808012ceae8","src/flavors/mod.rs":"3d9d43bc38b0adb18c96c995c2bd3421d8e33ab6c30b20c3c467d21d48e485dc","src/flavors/never.rs":"747da857aa1a7601641f23f4930e6ad00ebaf50456d9be5c7aa270e2ecc24dcb","src/flavors/tick.rs":"0916ca3faef30b8cc591137701c456d5fc5b5b49cb1edad1e3a80d35bae222bb","src/flavors/zero.rs":"f9cbc9e035fadce808a4af86a223cfded89990ba1e9acfe731fb17a7fe12b432","src/lib.rs":"5b1c406fd1ce6140feae9000be361858da2aabe7fc9fffd0eafcb88020d2b268","src/select.rs":"301c765751586204371bedb69162e23bcf7e094cbc37b72203698a18b889550f","
src/select_macro.rs":"f30b726dff104b17c2dfbd67b271758d8c06d63ec4811ffab88b2e1dac43e3df","src/utils.rs":"9bd81aeb385a81409a63f4b9edc35444c7fd1d2724725f9c34ad7ca39dd69a18","src/waker.rs":"017f87a120d945502701c0dba79062c7fe55d44e5907cc6f8605b4510c90d529","tests/after.rs":"0154a8e152880db17a20514ecdd49dabc361d3629858d119b9746b5e932c780c","tests/array.rs":"a57ae6264e676f573d7adb5c4b024994e98bc6811352516adb3444f880f7125e","tests/golang.rs":"7b2ef219ba8a21841c133512f3a540f8279a2458304e9bbed7da81d6091ecd82","tests/iter.rs":"25dc02135bbae9d47a30f9047661648e66bdc134e40ba78bc2fbacbb8b3819bc","tests/list.rs":"e71d34f790af290e463707c2336ff221f7841767e961b91747aa00e21df0ad32","tests/mpsc.rs":"5fbb5342fa7c9e4bcda5545255e0979dc6b9ba638edee127acf75372c18c925f","tests/never.rs":"ee40c4fc4dd5af4983fae8de6927f52b81174d222c162f745b26c4a6c7108e4f","tests/ready.rs":"4361352fa94254041e6c73e97b13be032c2d51c741f2a50519efe3000cf4dc28","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"101ea8afd9a40d24c2d2aec29e5f2fdc4faac51aa1d7c9fe077b364f12edd206","tests/select_macro.rs":"e83bd33b34c47d703abe06420a23868809468516943347bdbfb6af4db0cec65a","tests/thread_locals.rs":"f42fcddca959b3b44cd545b92949d65e33a54332b27f490ec92f9f29b7f8290c","tests/tick.rs":"5f697bd14c48505d932e82065b5302ef668e1cc19cac18e8ac22e0c83c221c1d","tests/zero.rs":"9c5af802d5efb2c711f8242b8905ed29cc2601e48dbd95e41c7e6fbfe2918398"},"package":"06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"} diff --git a/vendor/crossbeam-channel-0.5.14/src/flavors/list.rs b/vendor/crossbeam-channel-0.5.14/src/flavors/list.rs index 6c15991f9..8f1faaa8b 100644 --- a/vendor/crossbeam-channel-0.5.14/src/flavors/list.rs @@ -55,5 +31,5 @@ index 6c15991f9..8f1faaa8b 100644 } -- -2.45.2 +2.45.4 diff --git a/SPECS/rust/CVE-2025-53605.patch b/SPECS/rust/CVE-2025-53605.patch index bc13020a557..8d0bc44c7e9 100644 --- a/SPECS/rust/CVE-2025-53605.patch +++ b/SPECS/rust/CVE-2025-53605.patch @@ -1,8 +1,7 @@ -From f06992f46771c0a092593b9ebf7afd48740b3ed6 Mon Sep 17 00:00:00 2001 -From: esrauchg <140440793+esrauchg@users.noreply.github.com> -Date: Sun, 9 Mar 2025 17:23:01 -0400 -Subject: [PATCH] Apply depth limit to unknown groups (#756) - +From 2591e1c695bc67352524f20ea71a6546b333dfbd Mon Sep 17 00:00:00 2001 +From: Kavya Sree Kaitepalli +Date: Tue, 28 Oct 2025 12:06:28 +0000 +Subject: [PATCH] CVE-2025-53605 * Fix issue where a deeply nested unknown group could cause arbitrarily recursion depth. 
* Add drop(os) to fix tests @@ -20,13 +19,14 @@ Upstream Patch Reference: https://github.com/stepancheg/rust-protobuf/commit/f06 2 files changed, 60 insertions(+), 13 deletions(-) diff --git a/vendor/protobuf-3.7.1/.cargo-checksum.json b/vendor/protobuf-3.7.1/.cargo-checksum.json -index 4a85cefd8..305d4f167 100644 +index 1d8cd04f7..91c898c83 100644 --- a/vendor/protobuf-3.7.1/.cargo-checksum.json +++ b/vendor/protobuf-3.7.1/.cargo-checksum.json @@ -1 +1 @@ --{"files":{"Cargo.toml":"96cda32a56ae7f781b2142812f054e6f31435b30a2f0b2386346cbf277186920","LICENSE.txt":"7f2fa80a60e84f8dc0747abb0e42342f83bded04a20461a636b47c0331b92ddf","README.md":"14dbb3068c031afbd44010a4ff0c8146aa7e02f6051579177767f218fff9cd38","build.rs":"347d9aa6d4b102b6e83c65aeef89b1e1752536bd8ef80fc834a2c78d5cb2ff80","regenerate.sh":"df3bc8537c038fd356367e5af433c284dd5b76505c35f6f89deab0f099a3e3e0","src/byteorder.rs":"9e6b481be82182ac99ff9704468b4d64656fb922f90c54ca83a8d7ca1dfd2e3a","src/cached_size.rs":"895788d7356a1fcd0f2d7446996066f8c53a0f36032174a97273997e65440fa0","src/chars.rs":"816a0af0b830ecd43028e40295fe0bf9eb79263f00fee01678c75d4ac5b7e908","src/coded_input_stream/buf_read_iter.rs":"2cba31136e56dd63c9a17d1bf4627f430b2ed3ddb35abf4479f57bcb912cdb4e","src/coded_input_stream/buf_read_or_reader.rs":"2bf3befcfce8c97faae7563084a7d93931afc5b886419e491111c15b4139058e","src/coded_input_stream/input_buf.rs":"cf71e63d3aef90188c55b6d42aa7cad47bcba16b27e687e44a15bd45e65b8737","src/coded_input_stream/input_source.rs":"8ce41ec8d147d03adf2dbd27ae0fa0b6e33400b62a9c945ab7aa2224bf43a863","src/coded_input_stream/mod.rs":"ee6c11ddd3e224c6d91afe2574b3033525e9d164a15c8ec771cc8ea89de9ded6","src/coded_output_stream/buffer.rs":"cafbbb3f02df26c98a4e5556e99a5a2ce811ffea4c49ba982532a6d9a53ec7d8","src/coded_output_stream/mod.rs":"06289bfaa7971ef275b1017d8b5d4197b864fc881f83d0ed073a28fca894a0ba","src/coded_output_stream/output_target.rs":"ab28889b824b91688cb5c17cf79bdbec96aeeea59f22946b9f359b957cc40580","src/coded_output_stream/with.rs":"47541db9f4f51cacd406571f02d6abe8f4837413c7cecbe511fc0c24ef2384f2","src/descriptor.rs":"4b3f1a458d5e105c01c03671bce753b564fcddefabe36ab41f986ac070a33614","src/doctest_pb.rs":"74ffeba6811126ab8ed076e8d3debbb634f8f9bba3bd77f9c288e88d7937591a","src/enum_full.rs":"ca658951c42ef539ce6221d8f1b1e5005a1a14393460078c40611bb7091629d0","src/enum_or_unknown.rs":"3088b9d139f296284a44d4f9da9c75476dfa00168358328f46b1c52c73572521","src/enums.rs":"e0af03e21b48d3bb44d06a7971229e8e5ee6c8230323534f774f4874de3c9760","src/error.rs":"1839b319f456b56e2bb3c59f119f4c0bec53a02d52c92619b887bfdb1656183b","src/ext.rs":"b5c43e19ca56612e81560e520831da1746520be7944e506e44e07356b1415bbf","src/fixed.rs":"40b32b11dd53f0dc3de2c73f9003c6c0f762cf802e30e16beb5a22a18f8f2f50","src/lazy.rs":"b202a8cd252b11b013983f27c1ed5eac046674ed156a4e5e63357e15ead746df","src/lib.rs":"f22f0d3c3372cc68450071bb2abf8a1542d0f3348f5ec1133e9b785445494f56","src/message.rs":"a112b1d5c769d866a586a4c9af9245fa8029991563d2ff33c47e9d4c2b32fef7","src/message_dyn.rs":"c2d06029139a1ef09409227c0c501dd361b485ff1e4fcbf1d2b0cb579cd80fba","src/message_field.rs":"8456bcc3118a0f62d8eb3e5495be678ad75f5164e5dc67658667c7611e2099d9","src/message_full.rs":"4bbbb917a86aa3b14f63e93db41522c78223036b6e7d219769927059ff70205e","src/misc.rs":"1679b6f8b5c2b4375e71835fb7ca83a4de6db498f092abf5ab3a9f5eaccf0d5a","src/oneof.rs":"de5a694a56931308fc1a790678aaaf8b67d7c6b56c9f7b2fde36a14312863e05","src/oneof_full.rs":"b9d5d95db115b1ebf6e7c222373008d4f9f86e21347ddf50ef23f8cd11b8f777","src/owning_ref.rs":"1face1307d85ef9
8f5b9752eb45de47884c3ce68d31cec315ebfac6559ab501f","src/plugin.rs":"5bbb2aaecc59c590755e0fe972c4f231d506bbc1893d5f3e800d2e69ce805ec0","src/reflect/acc/mod.rs":"23500dd605f11f8def7d0f858a00cf1c85a7b95c293bc125ba0804a949b35162","src/reflect/acc/v2/map.rs":"46dab64d941e61fd61aa3794b2fab60bbd588a21ca9f1a378cdc022bbdfb60a1","src/reflect/acc/v2/mod.rs":"86639cfa45e3b2d08020c289001d87910fa972e9fb6a28a38880ccee51002a0c","src/reflect/acc/v2/repeated.rs":"07b62beb3bb81d1fa1de486c7cdce20ae2f4f46c2e93ed6f104b41d3a3a5beba","src/reflect/acc/v2/singular/mod.rs":"85bace3cf99fe0b05dce61bf19433077cf29506c6641b001935064fd37ab658f","src/reflect/acc/v2/singular/oneof.rs":"f70db73a0316185b8ae43b82cd29665d1140d920e7d28bb789a438eb06f9c62a","src/reflect/dynamic/map.rs":"565376a2db71cf607cb692b984acb41d16dfb04df59c9ad0eca8ba1fc85017cc","src/reflect/dynamic/mod.rs":"3ee7a82dbd5410d10eee44fdf3ae8b5f198185d7ff4a608f10a668eba6af3a73","src/reflect/dynamic/optional.rs":"db923e3d343f869c2bf4f157559686251ff9744dfd0560ba1d1b1b46ae1b81fd","src/reflect/dynamic/repeated.rs":"61a7c698b59280564a1deb7200884875a8844120058c3d69ea4d6aa5f6c4266e","src/reflect/enums/generated.rs":"44e5dbe08a1a15067744580c87c6d09f66dc364f2791fc1ecab919e1dacdec09","src/reflect/enums/mod.rs":"aed1b29e4e42f34b26476a6013e64b7ec8876cfa53633583a751c344fd3ab34c","src/reflect/error.rs":"532a9c117606e8597a40d60b3efebc9371c4f746919cc611aaaddf105cbb3608","src/reflect/field/dynamic.rs":"8e81f7b6f684ed58287eb2de20f3abb8dabb062601715421d3d1e4c041101602","src/reflect/field/index.rs":"4aeef75560e52bf865718f9323fc5b2b2318a6e4bb66fadc57f4875999cf15b3","src/reflect/field/mod.rs":"6c646e59e64e327a961e680f5b189cdb1d540b61b26cd345d623456a4f764a22","src/reflect/field/protobuf_field_type.rs":"6ec9ca95c25a9c9fe70fad51b1221676e9d3d5a6daac37d5633379471d8c4257","src/reflect/field/runtime_field_type.rs":"26166bb9d48caa0287dfe6235937e5bd647692ca75e8ee4855d0e198f9a79d73","src/reflect/file/building.rs":"53806efda5872c32f63f55582b767be8becff1d7bfb4ed0c11947c912ad55a75","src/reflect/file/dynamic.rs":"3e26271c31816dae043eb70c9990a6fc621c9b5241a555d968f731dfd4762489","src/reflect/file/fds.rs":"9a369eaea05a000710bf977cce28d3fad1463f8ffa42df35e4f5ac5de306f9e6","src/reflect/file/generated.rs":"88f3d88ddbcfa01812398ddc5c350c54cc42b15d99223a642574f55d4d6cdf69","src/reflect/file/index.rs":"3d53af11b39b164b488e3850c3a5be5ae4628c172b4475e4ae5b993225bdeae9","src/reflect/file/mod.rs":"b7aa1c4154677630b843c81d35c60f1374d00d44d3477e6e899e18cb7ae97db1","src/reflect/file/syntax.rs":"8d6d6c3a7bbf9217df3550410a9ba1eb9c08295aa410cc5d2e65efe1eec3ca3a","src/reflect/find_message_or_enum.rs":"e8b10159819cce4414da7681cb3ce0b4e62a45adf4e3e7933a3c1b4f8e97cfb8","src/reflect/map/empty.rs":"230cbcda25bfd3c6f348043eef032252b8a0d86a0c71d93d6206adc59d688732","src/reflect/map/generated.rs":"f1b332e97d267c3272b26be03bee80fe9420bb6fc203ae6f3f9dd3044d564778","src/reflect/map/mod.rs":"7648fa897f4a8acf1ab48b8bba8f165cb4b09a46125e645d600a7b9ced55e1a2","src/reflect/message/generated.rs":"c76f5e887534bc9648dd105718f79bb93465549d57b25c4a00957e603749721c","src/reflect/message/is_initialized_is_always_true.rs":"af716e9d0ce233fda9c7dee13814c24c188ea195cf907d81f74fb198ef2760ae","src/reflect/message/message_ref.rs":"80472f804a4dd3b91f6fec4451639ca356f2b33c502775e0fd6b2c3bfbe1be0a","src/reflect/message/mod.rs":"5ef7f5ecdc2de7c0789b8558711a976e2376fcaae67975a10d9f1bd4179703e5","src/reflect/mod.rs":"620cab65b696a13144ed54d589ca8c4176ecb8127b2ba2a294806f649c0fbd9f","src/reflect/name.rs":"0377dcf871ca5add5e168a3bff04d9f01fe5970db4dfb6627
2def6484dc7d54b","src/reflect/oneof/generated.rs":"c02b7cd7415f52366f6092559643869812db842bd1c383ce7d8759e519ab453a","src/reflect/oneof/mod.rs":"55c906888e89a7bfd1f8865cced5078905b512f3ce9af20d16614fdf5791c31d","src/reflect/optional/mod.rs":"5dada97750209aeddf1506eea0a59d709aeb3e44bae5443214e0c2950c870952","src/reflect/protobuf_type_box.rs":"5ed50bdefa5eebe8bf0547cb37def38d814653ac7a0d401eb4f05b8a72ebf509","src/reflect/reflect_eq.rs":"1352d0be16ff7dc2089d499b3fbcf40d501406e8648649092aa2cb21f207aac0","src/reflect/repeated/drain_iter.rs":"0f065e5ef884ee90189594b8a92d814c40a4b3ff80ed659f2f8a0ac56795011d","src/reflect/repeated/iter.rs":"f7f7bf56347850f567612feab9542c4658f251ce74b7b8fb7ed6612cb85584f0","src/reflect/repeated/mod.rs":"6084482af4936340e1bfd43ff8d06351c3d0316f26cb9f8b73bd8c16f3e9df98","src/reflect/repeated/transmute.rs":"ecd5b5b2772670b030a6d96b47b54bf8500ec0996920ef0db7d5f4b6f338c493","src/reflect/repeated/vec_downcast.rs":"7f4c2997d313f45bc46a097fad7c579d663c642cba425a7851f590282d58309d","src/reflect/rt/mod.rs":"4f0266be9bd092a6ee49a1f3453ff08eabfcebb65473b6c8552a260ac7a4817b","src/reflect/rt/v2.rs":"3faa866b4aa766875062071eb6db36c7c42a3d9145f66162a85aac91e200e354","src/reflect/runtime_type_box.rs":"6d8988ed25218f13da61db5dbbefa621df3fd7f1141669854c6ec9571b3eee6c","src/reflect/runtime_types.rs":"07b8eeac30f666c890ccac14c5076b77d010abf322b8f23883032e2ad003476e","src/reflect/service/index.rs":"4a41f90b6c8b3f5c8349075aec84fcbb90ab3028451d2ba40cb83257ff4d90c7","src/reflect/service/mod.rs":"1d0b5b3d9cd1968731971137ca320a91591ee9ca45828d3e4284da87397044f6","src/reflect/type_dynamic.rs":"76c9e764978c66444a4ffb5b558cbce241d1e1123c5dd6eb250f56b48b7b0a5c","src/reflect/types.rs":"fb6a18354a7a8fa7dc6a4db51793af8a5c41680bc49c1d157145a21a75f5f3e4","src/reflect/value/mod.rs":"56f7ff8c4541108fff20f83f7f12ef173ce398e642b482dc3a4cf92c9e1cea17","src/reflect/value/value_box.rs":"1037d01c52a4f0432e42a2c023f5c68ed458ed60b196597ca78f81b6207ecb83","src/reflect/value/value_ref.rs":"7a3490eb4918ee725ad59219b0fc5810b231eaf2ddf798ab75085b4acc145b2e","src/rt/map.rs":"c4bd4246181a43dc9cf1735ec5882955af595fba8ef839a06e0e1df399848520","src/rt/message.rs":"c9b9b3b8f25b6813b8ca2411f015ae80b2abba377d44f9f9b9c05cb45366229a","src/rt/mod.rs":"db610d871d8fb022ba4634199896534ecb3d6ad22c7e2cabbf4d7ad79e1c8c66","src/rt/packed.rs":"be2fae85812c39d815bcb0463e3ea67774770c25014b764b8712dd2b90d360c6","src/rt/repeated.rs":"213d08349efb21bc02fb5abd1d1c3f039ae1d4368f53f548cdf1999897c60f1c","src/rt/singular.rs":"2c982de7a686a8d0c430ce690297a524e892a70bca33d288c6e9b912d19e994c","src/rt/unknown_or_group.rs":"a0bf9af0bdb6ee4261bdc9d5136288e3d17f7de611b7f61943caf6d8eb38367d","src/rustproto.rs":"4a49fac5c9caaca991dd5505c154941e8f94708c254269119e64cf053f7aaea9","src/special.rs":"2f64cfbb0659249cf4a951cefb51b1a17ddf85785eb868b68af7546cd31a5101","src/text_format/mod.rs":"da0aeb839963afcba1923b68a06264185a927cef3f3075ca958c11fa1e780535","src/text_format/parse.rs":"c7be3464fa8f6624ed2001b450e999f93bea5f6118132b7f372110c7af5e5e71","src/text_format/print.rs":"7bd28696ce2a98f9520e2303b0f70fe1d46b045d550f55064a3b524b58f9dfab","src/timestamp.rs":"f0590e19fd7740bdc65a0dc6f9d73bf55559638515ca7849481022288a9bee43","src/unknown.rs":"fd6091ad04dadbde5793ea42af50fa51cf2e7737696029a9e0d1f001f0c1423d","src/varint/decode.rs":"5e9fdf9fb5fe82ddc223feaf5867c0a081bd67731635f88cb9a5b1faeeb79f82","src/varint/encode.rs":"bc0969a152aff774976216f9f2bdbc273a24da07d57b8e3ec96ebe691d4559c1","src/varint/generic.rs":"98e31da80c278cff672ddc8231241cc789ad6de138fa6ca6c04
83ff1783f4957","src/varint/mod.rs":"643b5b2443b4e103fc4eeac7844dcda4b9c6b2bab3cfe9fba00145ccea9a4505","src/well_known_types/any.rs":"7db9c4f0df3f0809821e09bb0bd2ddaa07ff4471be005fc02f2be9828a1aedd1","src/well_known_types/api.rs":"80bf5fe39c7263a440d5c1bec8bb6c5a0dd274f73c3f702c4e223cfdf02f74eb","src/well_known_types/duration.rs":"33c4039d594eb8df4a35f1bae1ad2a5dc36a5bf167369d99faf480cc7e1cb284","src/well_known_types/empty.rs":"47f56d10483e9c6c3e276e54d877e70aaf3b2a57c269a636dd9948d0e4ff419f","src/well_known_types/field_mask.rs":"7b4d883c03ec89d83b919271d03273def230c30caae36b7247cba1b325ccc551","src/well_known_types/mod.rs":"b141483834c860b221d0946a584389ebcefc2c5f7692ce1f95869c9f83ff2c16","src/well_known_types/source_context.rs":"fbec3ec4e1e59be865d0b7cb4d3b08aa197b46ca27fc3d90ed7da30514df6355","src/well_known_types/struct_.rs":"00bfebd64c851a7e0b0b26d3fc1319fd072975cb84169066b5aa00a4871ac6c8","src/well_known_types/timestamp.rs":"bc8b3a27f7b1ec134aa5a9d1187b63e02d5d2e72b153a9b6153e0b7a078c003e","src/well_known_types/type_.rs":"789fa7e0ec2fe7fc5f68a06636ade107fc305780b597c7c9687dbe3560252514","src/well_known_types/wrappers.rs":"56cbbf290be81ce7d62fd33b883015ef3de2abc1d5f8c683e38e96397f1d056d","src/well_known_types_util/any.rs":"2b2e5cdf1d413bc13485bfc78c84d8403168d6b1a6dbc10d585bf10326120c81","src/well_known_types_util/duration.rs":"e0d9de89f8c7c4b2075f23c2a4451dfec4ae1f28c9784ea39a626a8c3aa9e005","src/well_known_types_util/mod.rs":"81fb1c0721602ffe91c4587f727457b59c8697863e3f853cd9569db5cee973e9","src/well_known_types_util/timestamp.rs":"f55906fef3190fa1786ed736ded16f3ac6de2095cb974af5a476c2a2f91260b3","src/wire_format.rs":"f1d09b0bd1e4c5e4072b5c943e749f7b727737bd08a6d82f81d4f2a60e2ab94e","src/zigzag.rs":"0dcbdf54d4bc8141fdc64d074e6f6f7633bbb66cc782cd4bd6d343ce0569c3de"},"package":"a3a7c64d9bf75b1b8d981124c14c179074e8caa7dfe7b6a12e6222ddcd0c8f72"} 
+-{"files":{".cargo_vcs_info.json":"f0fe98431b6d748bab808544c162886f8f7457a57f571e43be4c9276c69bd823","Cargo.toml":"96cda32a56ae7f781b2142812f054e6f31435b30a2f0b2386346cbf277186920","Cargo.toml.orig":"d5de446b2f66bab383f5d81372e74c205bc4a2f11783f2acca9c5c9eef2b06f8","LICENSE.txt":"7f2fa80a60e84f8dc0747abb0e42342f83bded04a20461a636b47c0331b92ddf","README.md":"14dbb3068c031afbd44010a4ff0c8146aa7e02f6051579177767f218fff9cd38","build.rs":"347d9aa6d4b102b6e83c65aeef89b1e1752536bd8ef80fc834a2c78d5cb2ff80","regenerate.sh":"df3bc8537c038fd356367e5af433c284dd5b76505c35f6f89deab0f099a3e3e0","src/byteorder.rs":"9e6b481be82182ac99ff9704468b4d64656fb922f90c54ca83a8d7ca1dfd2e3a","src/cached_size.rs":"895788d7356a1fcd0f2d7446996066f8c53a0f36032174a97273997e65440fa0","src/chars.rs":"816a0af0b830ecd43028e40295fe0bf9eb79263f00fee01678c75d4ac5b7e908","src/coded_input_stream/buf_read_iter.rs":"2cba31136e56dd63c9a17d1bf4627f430b2ed3ddb35abf4479f57bcb912cdb4e","src/coded_input_stream/buf_read_or_reader.rs":"2bf3befcfce8c97faae7563084a7d93931afc5b886419e491111c15b4139058e","src/coded_input_stream/input_buf.rs":"cf71e63d3aef90188c55b6d42aa7cad47bcba16b27e687e44a15bd45e65b8737","src/coded_input_stream/input_source.rs":"8ce41ec8d147d03adf2dbd27ae0fa0b6e33400b62a9c945ab7aa2224bf43a863","src/coded_input_stream/mod.rs":"ee6c11ddd3e224c6d91afe2574b3033525e9d164a15c8ec771cc8ea89de9ded6","src/coded_output_stream/buffer.rs":"cafbbb3f02df26c98a4e5556e99a5a2ce811ffea4c49ba982532a6d9a53ec7d8","src/coded_output_stream/mod.rs":"06289bfaa7971ef275b1017d8b5d4197b864fc881f83d0ed073a28fca894a0ba","src/coded_output_stream/output_target.rs":"ab28889b824b91688cb5c17cf79bdbec96aeeea59f22946b9f359b957cc40580","src/coded_output_stream/with.rs":"47541db9f4f51cacd406571f02d6abe8f4837413c7cecbe511fc0c24ef2384f2","src/descriptor.rs":"4b3f1a458d5e105c01c03671bce753b564fcddefabe36ab41f986ac070a33614","src/doctest_pb.rs":"74ffeba6811126ab8ed076e8d3debbb634f8f9bba3bd77f9c288e88d7937591a","src/enum_full.rs":"ca658951c42ef539ce6221d8f1b1e5005a1a14393460078c40611bb7091629d0","src/enum_or_unknown.rs":"3088b9d139f296284a44d4f9da9c75476dfa00168358328f46b1c52c73572521","src/enums.rs":"e0af03e21b48d3bb44d06a7971229e8e5ee6c8230323534f774f4874de3c9760","src/error.rs":"1839b319f456b56e2bb3c59f119f4c0bec53a02d52c92619b887bfdb1656183b","src/ext.rs":"b5c43e19ca56612e81560e520831da1746520be7944e506e44e07356b1415bbf","src/fixed.rs":"40b32b11dd53f0dc3de2c73f9003c6c0f762cf802e30e16beb5a22a18f8f2f50","src/lazy.rs":"b202a8cd252b11b013983f27c1ed5eac046674ed156a4e5e63357e15ead746df","src/lib.rs":"f22f0d3c3372cc68450071bb2abf8a1542d0f3348f5ec1133e9b785445494f56","src/message.rs":"a112b1d5c769d866a586a4c9af9245fa8029991563d2ff33c47e9d4c2b32fef7","src/message_dyn.rs":"c2d06029139a1ef09409227c0c501dd361b485ff1e4fcbf1d2b0cb579cd80fba","src/message_field.rs":"8456bcc3118a0f62d8eb3e5495be678ad75f5164e5dc67658667c7611e2099d9","src/message_full.rs":"4bbbb917a86aa3b14f63e93db41522c78223036b6e7d219769927059ff70205e","src/misc.rs":"1679b6f8b5c2b4375e71835fb7ca83a4de6db498f092abf5ab3a9f5eaccf0d5a","src/oneof.rs":"de5a694a56931308fc1a790678aaaf8b67d7c6b56c9f7b2fde36a14312863e05","src/oneof_full.rs":"b9d5d95db115b1ebf6e7c222373008d4f9f86e21347ddf50ef23f8cd11b8f777","src/owning_ref.rs":"1face1307d85ef98f5b9752eb45de47884c3ce68d31cec315ebfac6559ab501f","src/plugin.rs":"5bbb2aaecc59c590755e0fe972c4f231d506bbc1893d5f3e800d2e69ce805ec0","src/reflect/acc/mod.rs":"23500dd605f11f8def7d0f858a00cf1c85a7b95c293bc125ba0804a949b35162","src/reflect/acc/v2/map.rs":"46dab64d941e61fd61aa3794b2fab60
bbd588a21ca9f1a378cdc022bbdfb60a1","src/reflect/acc/v2/mod.rs":"86639cfa45e3b2d08020c289001d87910fa972e9fb6a28a38880ccee51002a0c","src/reflect/acc/v2/repeated.rs":"07b62beb3bb81d1fa1de486c7cdce20ae2f4f46c2e93ed6f104b41d3a3a5beba","src/reflect/acc/v2/singular/mod.rs":"85bace3cf99fe0b05dce61bf19433077cf29506c6641b001935064fd37ab658f","src/reflect/acc/v2/singular/oneof.rs":"f70db73a0316185b8ae43b82cd29665d1140d920e7d28bb789a438eb06f9c62a","src/reflect/dynamic/map.rs":"565376a2db71cf607cb692b984acb41d16dfb04df59c9ad0eca8ba1fc85017cc","src/reflect/dynamic/mod.rs":"3ee7a82dbd5410d10eee44fdf3ae8b5f198185d7ff4a608f10a668eba6af3a73","src/reflect/dynamic/optional.rs":"db923e3d343f869c2bf4f157559686251ff9744dfd0560ba1d1b1b46ae1b81fd","src/reflect/dynamic/repeated.rs":"61a7c698b59280564a1deb7200884875a8844120058c3d69ea4d6aa5f6c4266e","src/reflect/enums/generated.rs":"44e5dbe08a1a15067744580c87c6d09f66dc364f2791fc1ecab919e1dacdec09","src/reflect/enums/mod.rs":"aed1b29e4e42f34b26476a6013e64b7ec8876cfa53633583a751c344fd3ab34c","src/reflect/error.rs":"532a9c117606e8597a40d60b3efebc9371c4f746919cc611aaaddf105cbb3608","src/reflect/field/dynamic.rs":"8e81f7b6f684ed58287eb2de20f3abb8dabb062601715421d3d1e4c041101602","src/reflect/field/index.rs":"4aeef75560e52bf865718f9323fc5b2b2318a6e4bb66fadc57f4875999cf15b3","src/reflect/field/mod.rs":"6c646e59e64e327a961e680f5b189cdb1d540b61b26cd345d623456a4f764a22","src/reflect/field/protobuf_field_type.rs":"6ec9ca95c25a9c9fe70fad51b1221676e9d3d5a6daac37d5633379471d8c4257","src/reflect/field/runtime_field_type.rs":"26166bb9d48caa0287dfe6235937e5bd647692ca75e8ee4855d0e198f9a79d73","src/reflect/file/building.rs":"53806efda5872c32f63f55582b767be8becff1d7bfb4ed0c11947c912ad55a75","src/reflect/file/dynamic.rs":"3e26271c31816dae043eb70c9990a6fc621c9b5241a555d968f731dfd4762489","src/reflect/file/fds.rs":"9a369eaea05a000710bf977cce28d3fad1463f8ffa42df35e4f5ac5de306f9e6","src/reflect/file/generated.rs":"88f3d88ddbcfa01812398ddc5c350c54cc42b15d99223a642574f55d4d6cdf69","src/reflect/file/index.rs":"3d53af11b39b164b488e3850c3a5be5ae4628c172b4475e4ae5b993225bdeae9","src/reflect/file/mod.rs":"b7aa1c4154677630b843c81d35c60f1374d00d44d3477e6e899e18cb7ae97db1","src/reflect/file/syntax.rs":"8d6d6c3a7bbf9217df3550410a9ba1eb9c08295aa410cc5d2e65efe1eec3ca3a","src/reflect/find_message_or_enum.rs":"e8b10159819cce4414da7681cb3ce0b4e62a45adf4e3e7933a3c1b4f8e97cfb8","src/reflect/map/empty.rs":"230cbcda25bfd3c6f348043eef032252b8a0d86a0c71d93d6206adc59d688732","src/reflect/map/generated.rs":"f1b332e97d267c3272b26be03bee80fe9420bb6fc203ae6f3f9dd3044d564778","src/reflect/map/mod.rs":"7648fa897f4a8acf1ab48b8bba8f165cb4b09a46125e645d600a7b9ced55e1a2","src/reflect/message/generated.rs":"c76f5e887534bc9648dd105718f79bb93465549d57b25c4a00957e603749721c","src/reflect/message/is_initialized_is_always_true.rs":"af716e9d0ce233fda9c7dee13814c24c188ea195cf907d81f74fb198ef2760ae","src/reflect/message/message_ref.rs":"80472f804a4dd3b91f6fec4451639ca356f2b33c502775e0fd6b2c3bfbe1be0a","src/reflect/message/mod.rs":"5ef7f5ecdc2de7c0789b8558711a976e2376fcaae67975a10d9f1bd4179703e5","src/reflect/mod.rs":"620cab65b696a13144ed54d589ca8c4176ecb8127b2ba2a294806f649c0fbd9f","src/reflect/name.rs":"0377dcf871ca5add5e168a3bff04d9f01fe5970db4dfb66272def6484dc7d54b","src/reflect/oneof/generated.rs":"c02b7cd7415f52366f6092559643869812db842bd1c383ce7d8759e519ab453a","src/reflect/oneof/mod.rs":"55c906888e89a7bfd1f8865cced5078905b512f3ce9af20d16614fdf5791c31d","src/reflect/optional/mod.rs":"5dada97750209aeddf1506eea0a59d709aeb3e44bae5
443214e0c2950c870952","src/reflect/protobuf_type_box.rs":"5ed50bdefa5eebe8bf0547cb37def38d814653ac7a0d401eb4f05b8a72ebf509","src/reflect/reflect_eq.rs":"1352d0be16ff7dc2089d499b3fbcf40d501406e8648649092aa2cb21f207aac0","src/reflect/repeated/drain_iter.rs":"0f065e5ef884ee90189594b8a92d814c40a4b3ff80ed659f2f8a0ac56795011d","src/reflect/repeated/iter.rs":"f7f7bf56347850f567612feab9542c4658f251ce74b7b8fb7ed6612cb85584f0","src/reflect/repeated/mod.rs":"6084482af4936340e1bfd43ff8d06351c3d0316f26cb9f8b73bd8c16f3e9df98","src/reflect/repeated/transmute.rs":"ecd5b5b2772670b030a6d96b47b54bf8500ec0996920ef0db7d5f4b6f338c493","src/reflect/repeated/vec_downcast.rs":"7f4c2997d313f45bc46a097fad7c579d663c642cba425a7851f590282d58309d","src/reflect/rt/mod.rs":"4f0266be9bd092a6ee49a1f3453ff08eabfcebb65473b6c8552a260ac7a4817b","src/reflect/rt/v2.rs":"3faa866b4aa766875062071eb6db36c7c42a3d9145f66162a85aac91e200e354","src/reflect/runtime_type_box.rs":"6d8988ed25218f13da61db5dbbefa621df3fd7f1141669854c6ec9571b3eee6c","src/reflect/runtime_types.rs":"07b8eeac30f666c890ccac14c5076b77d010abf322b8f23883032e2ad003476e","src/reflect/service/index.rs":"4a41f90b6c8b3f5c8349075aec84fcbb90ab3028451d2ba40cb83257ff4d90c7","src/reflect/service/mod.rs":"1d0b5b3d9cd1968731971137ca320a91591ee9ca45828d3e4284da87397044f6","src/reflect/type_dynamic.rs":"76c9e764978c66444a4ffb5b558cbce241d1e1123c5dd6eb250f56b48b7b0a5c","src/reflect/types.rs":"fb6a18354a7a8fa7dc6a4db51793af8a5c41680bc49c1d157145a21a75f5f3e4","src/reflect/value/mod.rs":"56f7ff8c4541108fff20f83f7f12ef173ce398e642b482dc3a4cf92c9e1cea17","src/reflect/value/value_box.rs":"1037d01c52a4f0432e42a2c023f5c68ed458ed60b196597ca78f81b6207ecb83","src/reflect/value/value_ref.rs":"7a3490eb4918ee725ad59219b0fc5810b231eaf2ddf798ab75085b4acc145b2e","src/rt/map.rs":"c4bd4246181a43dc9cf1735ec5882955af595fba8ef839a06e0e1df399848520","src/rt/message.rs":"c9b9b3b8f25b6813b8ca2411f015ae80b2abba377d44f9f9b9c05cb45366229a","src/rt/mod.rs":"db610d871d8fb022ba4634199896534ecb3d6ad22c7e2cabbf4d7ad79e1c8c66","src/rt/packed.rs":"be2fae85812c39d815bcb0463e3ea67774770c25014b764b8712dd2b90d360c6","src/rt/repeated.rs":"213d08349efb21bc02fb5abd1d1c3f039ae1d4368f53f548cdf1999897c60f1c","src/rt/singular.rs":"2c982de7a686a8d0c430ce690297a524e892a70bca33d288c6e9b912d19e994c","src/rt/unknown_or_group.rs":"a0bf9af0bdb6ee4261bdc9d5136288e3d17f7de611b7f61943caf6d8eb38367d","src/rustproto.rs":"4a49fac5c9caaca991dd5505c154941e8f94708c254269119e64cf053f7aaea9","src/special.rs":"2f64cfbb0659249cf4a951cefb51b1a17ddf85785eb868b68af7546cd31a5101","src/text_format/mod.rs":"da0aeb839963afcba1923b68a06264185a927cef3f3075ca958c11fa1e780535","src/text_format/parse.rs":"c7be3464fa8f6624ed2001b450e999f93bea5f6118132b7f372110c7af5e5e71","src/text_format/print.rs":"7bd28696ce2a98f9520e2303b0f70fe1d46b045d550f55064a3b524b58f9dfab","src/timestamp.rs":"f0590e19fd7740bdc65a0dc6f9d73bf55559638515ca7849481022288a9bee43","src/unknown.rs":"fd6091ad04dadbde5793ea42af50fa51cf2e7737696029a9e0d1f001f0c1423d","src/varint/decode.rs":"5e9fdf9fb5fe82ddc223feaf5867c0a081bd67731635f88cb9a5b1faeeb79f82","src/varint/encode.rs":"bc0969a152aff774976216f9f2bdbc273a24da07d57b8e3ec96ebe691d4559c1","src/varint/generic.rs":"98e31da80c278cff672ddc8231241cc789ad6de138fa6ca6c0483ff1783f4957","src/varint/mod.rs":"643b5b2443b4e103fc4eeac7844dcda4b9c6b2bab3cfe9fba00145ccea9a4505","src/well_known_types/any.rs":"7db9c4f0df3f0809821e09bb0bd2ddaa07ff4471be005fc02f2be9828a1aedd1","src/well_known_types/api.rs":"80bf5fe39c7263a440d5c1bec8bb6c5a0dd274f73c3f702c4e223cfd
f02f74eb","src/well_known_types/duration.rs":"33c4039d594eb8df4a35f1bae1ad2a5dc36a5bf167369d99faf480cc7e1cb284","src/well_known_types/empty.rs":"47f56d10483e9c6c3e276e54d877e70aaf3b2a57c269a636dd9948d0e4ff419f","src/well_known_types/field_mask.rs":"7b4d883c03ec89d83b919271d03273def230c30caae36b7247cba1b325ccc551","src/well_known_types/mod.rs":"b141483834c860b221d0946a584389ebcefc2c5f7692ce1f95869c9f83ff2c16","src/well_known_types/source_context.rs":"fbec3ec4e1e59be865d0b7cb4d3b08aa197b46ca27fc3d90ed7da30514df6355","src/well_known_types/struct_.rs":"00bfebd64c851a7e0b0b26d3fc1319fd072975cb84169066b5aa00a4871ac6c8","src/well_known_types/timestamp.rs":"bc8b3a27f7b1ec134aa5a9d1187b63e02d5d2e72b153a9b6153e0b7a078c003e","src/well_known_types/type_.rs":"789fa7e0ec2fe7fc5f68a06636ade107fc305780b597c7c9687dbe3560252514","src/well_known_types/wrappers.rs":"56cbbf290be81ce7d62fd33b883015ef3de2abc1d5f8c683e38e96397f1d056d","src/well_known_types_util/any.rs":"2b2e5cdf1d413bc13485bfc78c84d8403168d6b1a6dbc10d585bf10326120c81","src/well_known_types_util/duration.rs":"e0d9de89f8c7c4b2075f23c2a4451dfec4ae1f28c9784ea39a626a8c3aa9e005","src/well_known_types_util/mod.rs":"81fb1c0721602ffe91c4587f727457b59c8697863e3f853cd9569db5cee973e9","src/well_known_types_util/timestamp.rs":"f55906fef3190fa1786ed736ded16f3ac6de2095cb974af5a476c2a2f91260b3","src/wire_format.rs":"f1d09b0bd1e4c5e4072b5c943e749f7b727737bd08a6d82f81d4f2a60e2ab94e","src/zigzag.rs":"0dcbdf54d4bc8141fdc64d074e6f6f7633bbb66cc782cd4bd6d343ce0569c3de"},"package":"a3a7c64d9bf75b1b8d981124c14c179074e8caa7dfe7b6a12e6222ddcd0c8f72"} +\ No newline at end of file ++{"files":{".cargo_vcs_info.json":"f0fe98431b6d748bab808544c162886f8f7457a57f571e43be4c9276c69bd823","Cargo.toml":"96cda32a56ae7f781b2142812f054e6f31435b30a2f0b2386346cbf277186920","Cargo.toml.orig":"d5de446b2f66bab383f5d81372e74c205bc4a2f11783f2acca9c5c9eef2b06f8","LICENSE.txt":"7f2fa80a60e84f8dc0747abb0e42342f83bded04a20461a636b47c0331b92ddf","README.md":"14dbb3068c031afbd44010a4ff0c8146aa7e02f6051579177767f218fff9cd38","build.rs":"347d9aa6d4b102b6e83c65aeef89b1e1752536bd8ef80fc834a2c78d5cb2ff80","regenerate.sh":"df3bc8537c038fd356367e5af433c284dd5b76505c35f6f89deab0f099a3e3e0","src/byteorder.rs":"9e6b481be82182ac99ff9704468b4d64656fb922f90c54ca83a8d7ca1dfd2e3a","src/cached_size.rs":"895788d7356a1fcd0f2d7446996066f8c53a0f36032174a97273997e65440fa0","src/chars.rs":"816a0af0b830ecd43028e40295fe0bf9eb79263f00fee01678c75d4ac5b7e908","src/coded_input_stream/buf_read_iter.rs":"2cba31136e56dd63c9a17d1bf4627f430b2ed3ddb35abf4479f57bcb912cdb4e","src/coded_input_stream/buf_read_or_reader.rs":"2bf3befcfce8c97faae7563084a7d93931afc5b886419e491111c15b4139058e","src/coded_input_stream/input_buf.rs":"cf71e63d3aef90188c55b6d42aa7cad47bcba16b27e687e44a15bd45e65b8737","src/coded_input_stream/input_source.rs":"8ce41ec8d147d03adf2dbd27ae0fa0b6e33400b62a9c945ab7aa2224bf43a863","src/coded_input_stream/mod.rs":"9c9eef558aec08a5071303896703aae82a1fa1358d50784e836319e9dcdd2789","src/coded_output_stream/buffer.rs":"cafbbb3f02df26c98a4e5556e99a5a2ce811ffea4c49ba982532a6d9a53ec7d8","src/coded_output_stream/mod.rs":"06289bfaa7971ef275b1017d8b5d4197b864fc881f83d0ed073a28fca894a0ba","src/coded_output_stream/output_target.rs":"ab28889b824b91688cb5c17cf79bdbec96aeeea59f22946b9f359b957cc40580","src/coded_output_stream/with.rs":"47541db9f4f51cacd406571f02d6abe8f4837413c7cecbe511fc0c24ef2384f2","src/descriptor.rs":"4b3f1a458d5e105c01c03671bce753b564fcddefabe36ab41f986ac070a33614","src/doctest_pb.rs":"74ffeba6811126ab8ed076e8d3debbb
634f8f9bba3bd77f9c288e88d7937591a","src/enum_full.rs":"ca658951c42ef539ce6221d8f1b1e5005a1a14393460078c40611bb7091629d0","src/enum_or_unknown.rs":"3088b9d139f296284a44d4f9da9c75476dfa00168358328f46b1c52c73572521","src/enums.rs":"e0af03e21b48d3bb44d06a7971229e8e5ee6c8230323534f774f4874de3c9760","src/error.rs":"1839b319f456b56e2bb3c59f119f4c0bec53a02d52c92619b887bfdb1656183b","src/ext.rs":"b5c43e19ca56612e81560e520831da1746520be7944e506e44e07356b1415bbf","src/fixed.rs":"40b32b11dd53f0dc3de2c73f9003c6c0f762cf802e30e16beb5a22a18f8f2f50","src/lazy.rs":"b202a8cd252b11b013983f27c1ed5eac046674ed156a4e5e63357e15ead746df","src/lib.rs":"f22f0d3c3372cc68450071bb2abf8a1542d0f3348f5ec1133e9b785445494f56","src/message.rs":"a112b1d5c769d866a586a4c9af9245fa8029991563d2ff33c47e9d4c2b32fef7","src/message_dyn.rs":"c2d06029139a1ef09409227c0c501dd361b485ff1e4fcbf1d2b0cb579cd80fba","src/message_field.rs":"8456bcc3118a0f62d8eb3e5495be678ad75f5164e5dc67658667c7611e2099d9","src/message_full.rs":"4bbbb917a86aa3b14f63e93db41522c78223036b6e7d219769927059ff70205e","src/misc.rs":"1679b6f8b5c2b4375e71835fb7ca83a4de6db498f092abf5ab3a9f5eaccf0d5a","src/oneof.rs":"de5a694a56931308fc1a790678aaaf8b67d7c6b56c9f7b2fde36a14312863e05","src/oneof_full.rs":"b9d5d95db115b1ebf6e7c222373008d4f9f86e21347ddf50ef23f8cd11b8f777","src/owning_ref.rs":"1face1307d85ef98f5b9752eb45de47884c3ce68d31cec315ebfac6559ab501f","src/plugin.rs":"5bbb2aaecc59c590755e0fe972c4f231d506bbc1893d5f3e800d2e69ce805ec0","src/reflect/acc/mod.rs":"23500dd605f11f8def7d0f858a00cf1c85a7b95c293bc125ba0804a949b35162","src/reflect/acc/v2/map.rs":"46dab64d941e61fd61aa3794b2fab60bbd588a21ca9f1a378cdc022bbdfb60a1","src/reflect/acc/v2/mod.rs":"86639cfa45e3b2d08020c289001d87910fa972e9fb6a28a38880ccee51002a0c","src/reflect/acc/v2/repeated.rs":"07b62beb3bb81d1fa1de486c7cdce20ae2f4f46c2e93ed6f104b41d3a3a5beba","src/reflect/acc/v2/singular/mod.rs":"85bace3cf99fe0b05dce61bf19433077cf29506c6641b001935064fd37ab658f","src/reflect/acc/v2/singular/oneof.rs":"f70db73a0316185b8ae43b82cd29665d1140d920e7d28bb789a438eb06f9c62a","src/reflect/dynamic/map.rs":"565376a2db71cf607cb692b984acb41d16dfb04df59c9ad0eca8ba1fc85017cc","src/reflect/dynamic/mod.rs":"3ee7a82dbd5410d10eee44fdf3ae8b5f198185d7ff4a608f10a668eba6af3a73","src/reflect/dynamic/optional.rs":"db923e3d343f869c2bf4f157559686251ff9744dfd0560ba1d1b1b46ae1b81fd","src/reflect/dynamic/repeated.rs":"61a7c698b59280564a1deb7200884875a8844120058c3d69ea4d6aa5f6c4266e","src/reflect/enums/generated.rs":"44e5dbe08a1a15067744580c87c6d09f66dc364f2791fc1ecab919e1dacdec09","src/reflect/enums/mod.rs":"aed1b29e4e42f34b26476a6013e64b7ec8876cfa53633583a751c344fd3ab34c","src/reflect/error.rs":"532a9c117606e8597a40d60b3efebc9371c4f746919cc611aaaddf105cbb3608","src/reflect/field/dynamic.rs":"8e81f7b6f684ed58287eb2de20f3abb8dabb062601715421d3d1e4c041101602","src/reflect/field/index.rs":"4aeef75560e52bf865718f9323fc5b2b2318a6e4bb66fadc57f4875999cf15b3","src/reflect/field/mod.rs":"6c646e59e64e327a961e680f5b189cdb1d540b61b26cd345d623456a4f764a22","src/reflect/field/protobuf_field_type.rs":"6ec9ca95c25a9c9fe70fad51b1221676e9d3d5a6daac37d5633379471d8c4257","src/reflect/field/runtime_field_type.rs":"26166bb9d48caa0287dfe6235937e5bd647692ca75e8ee4855d0e198f9a79d73","src/reflect/file/building.rs":"53806efda5872c32f63f55582b767be8becff1d7bfb4ed0c11947c912ad55a75","src/reflect/file/dynamic.rs":"3e26271c31816dae043eb70c9990a6fc621c9b5241a555d968f731dfd4762489","src/reflect/file/fds.rs":"9a369eaea05a000710bf977cce28d3fad1463f8ffa42df35e4f5ac5de306f9e6","src/reflect/file/g
enerated.rs":"88f3d88ddbcfa01812398ddc5c350c54cc42b15d99223a642574f55d4d6cdf69","src/reflect/file/index.rs":"3d53af11b39b164b488e3850c3a5be5ae4628c172b4475e4ae5b993225bdeae9","src/reflect/file/mod.rs":"b7aa1c4154677630b843c81d35c60f1374d00d44d3477e6e899e18cb7ae97db1","src/reflect/file/syntax.rs":"8d6d6c3a7bbf9217df3550410a9ba1eb9c08295aa410cc5d2e65efe1eec3ca3a","src/reflect/find_message_or_enum.rs":"e8b10159819cce4414da7681cb3ce0b4e62a45adf4e3e7933a3c1b4f8e97cfb8","src/reflect/map/empty.rs":"230cbcda25bfd3c6f348043eef032252b8a0d86a0c71d93d6206adc59d688732","src/reflect/map/generated.rs":"f1b332e97d267c3272b26be03bee80fe9420bb6fc203ae6f3f9dd3044d564778","src/reflect/map/mod.rs":"7648fa897f4a8acf1ab48b8bba8f165cb4b09a46125e645d600a7b9ced55e1a2","src/reflect/message/generated.rs":"c76f5e887534bc9648dd105718f79bb93465549d57b25c4a00957e603749721c","src/reflect/message/is_initialized_is_always_true.rs":"af716e9d0ce233fda9c7dee13814c24c188ea195cf907d81f74fb198ef2760ae","src/reflect/message/message_ref.rs":"80472f804a4dd3b91f6fec4451639ca356f2b33c502775e0fd6b2c3bfbe1be0a","src/reflect/message/mod.rs":"5ef7f5ecdc2de7c0789b8558711a976e2376fcaae67975a10d9f1bd4179703e5","src/reflect/mod.rs":"620cab65b696a13144ed54d589ca8c4176ecb8127b2ba2a294806f649c0fbd9f","src/reflect/name.rs":"0377dcf871ca5add5e168a3bff04d9f01fe5970db4dfb66272def6484dc7d54b","src/reflect/oneof/generated.rs":"c02b7cd7415f52366f6092559643869812db842bd1c383ce7d8759e519ab453a","src/reflect/oneof/mod.rs":"55c906888e89a7bfd1f8865cced5078905b512f3ce9af20d16614fdf5791c31d","src/reflect/optional/mod.rs":"5dada97750209aeddf1506eea0a59d709aeb3e44bae5443214e0c2950c870952","src/reflect/protobuf_type_box.rs":"5ed50bdefa5eebe8bf0547cb37def38d814653ac7a0d401eb4f05b8a72ebf509","src/reflect/reflect_eq.rs":"1352d0be16ff7dc2089d499b3fbcf40d501406e8648649092aa2cb21f207aac0","src/reflect/repeated/drain_iter.rs":"0f065e5ef884ee90189594b8a92d814c40a4b3ff80ed659f2f8a0ac56795011d","src/reflect/repeated/iter.rs":"f7f7bf56347850f567612feab9542c4658f251ce74b7b8fb7ed6612cb85584f0","src/reflect/repeated/mod.rs":"6084482af4936340e1bfd43ff8d06351c3d0316f26cb9f8b73bd8c16f3e9df98","src/reflect/repeated/transmute.rs":"ecd5b5b2772670b030a6d96b47b54bf8500ec0996920ef0db7d5f4b6f338c493","src/reflect/repeated/vec_downcast.rs":"7f4c2997d313f45bc46a097fad7c579d663c642cba425a7851f590282d58309d","src/reflect/rt/mod.rs":"4f0266be9bd092a6ee49a1f3453ff08eabfcebb65473b6c8552a260ac7a4817b","src/reflect/rt/v2.rs":"3faa866b4aa766875062071eb6db36c7c42a3d9145f66162a85aac91e200e354","src/reflect/runtime_type_box.rs":"6d8988ed25218f13da61db5dbbefa621df3fd7f1141669854c6ec9571b3eee6c","src/reflect/runtime_types.rs":"07b8eeac30f666c890ccac14c5076b77d010abf322b8f23883032e2ad003476e","src/reflect/service/index.rs":"4a41f90b6c8b3f5c8349075aec84fcbb90ab3028451d2ba40cb83257ff4d90c7","src/reflect/service/mod.rs":"1d0b5b3d9cd1968731971137ca320a91591ee9ca45828d3e4284da87397044f6","src/reflect/type_dynamic.rs":"76c9e764978c66444a4ffb5b558cbce241d1e1123c5dd6eb250f56b48b7b0a5c","src/reflect/types.rs":"fb6a18354a7a8fa7dc6a4db51793af8a5c41680bc49c1d157145a21a75f5f3e4","src/reflect/value/mod.rs":"56f7ff8c4541108fff20f83f7f12ef173ce398e642b482dc3a4cf92c9e1cea17","src/reflect/value/value_box.rs":"1037d01c52a4f0432e42a2c023f5c68ed458ed60b196597ca78f81b6207ecb83","src/reflect/value/value_ref.rs":"7a3490eb4918ee725ad59219b0fc5810b231eaf2ddf798ab75085b4acc145b2e","src/rt/map.rs":"c4bd4246181a43dc9cf1735ec5882955af595fba8ef839a06e0e1df399848520","src/rt/message.rs":"c9b9b3b8f25b6813b8ca2411f015ae80b2abba377d44f9
f9b9c05cb45366229a","src/rt/mod.rs":"db610d871d8fb022ba4634199896534ecb3d6ad22c7e2cabbf4d7ad79e1c8c66","src/rt/packed.rs":"be2fae85812c39d815bcb0463e3ea67774770c25014b764b8712dd2b90d360c6","src/rt/repeated.rs":"213d08349efb21bc02fb5abd1d1c3f039ae1d4368f53f548cdf1999897c60f1c","src/rt/singular.rs":"2c982de7a686a8d0c430ce690297a524e892a70bca33d288c6e9b912d19e994c","src/rt/unknown_or_group.rs":"a0bf9af0bdb6ee4261bdc9d5136288e3d17f7de611b7f61943caf6d8eb38367d","src/rustproto.rs":"4a49fac5c9caaca991dd5505c154941e8f94708c254269119e64cf053f7aaea9","src/special.rs":"2f64cfbb0659249cf4a951cefb51b1a17ddf85785eb868b68af7546cd31a5101","src/text_format/mod.rs":"da0aeb839963afcba1923b68a06264185a927cef3f3075ca958c11fa1e780535","src/text_format/parse.rs":"c7be3464fa8f6624ed2001b450e999f93bea5f6118132b7f372110c7af5e5e71","src/text_format/print.rs":"7bd28696ce2a98f9520e2303b0f70fe1d46b045d550f55064a3b524b58f9dfab","src/timestamp.rs":"f0590e19fd7740bdc65a0dc6f9d73bf55559638515ca7849481022288a9bee43","src/unknown.rs":"fd6091ad04dadbde5793ea42af50fa51cf2e7737696029a9e0d1f001f0c1423d","src/varint/decode.rs":"5e9fdf9fb5fe82ddc223feaf5867c0a081bd67731635f88cb9a5b1faeeb79f82","src/varint/encode.rs":"bc0969a152aff774976216f9f2bdbc273a24da07d57b8e3ec96ebe691d4559c1","src/varint/generic.rs":"98e31da80c278cff672ddc8231241cc789ad6de138fa6ca6c0483ff1783f4957","src/varint/mod.rs":"643b5b2443b4e103fc4eeac7844dcda4b9c6b2bab3cfe9fba00145ccea9a4505","src/well_known_types/any.rs":"7db9c4f0df3f0809821e09bb0bd2ddaa07ff4471be005fc02f2be9828a1aedd1","src/well_known_types/api.rs":"80bf5fe39c7263a440d5c1bec8bb6c5a0dd274f73c3f702c4e223cfdf02f74eb","src/well_known_types/duration.rs":"33c4039d594eb8df4a35f1bae1ad2a5dc36a5bf167369d99faf480cc7e1cb284","src/well_known_types/empty.rs":"47f56d10483e9c6c3e276e54d877e70aaf3b2a57c269a636dd9948d0e4ff419f","src/well_known_types/field_mask.rs":"7b4d883c03ec89d83b919271d03273def230c30caae36b7247cba1b325ccc551","src/well_known_types/mod.rs":"b141483834c860b221d0946a584389ebcefc2c5f7692ce1f95869c9f83ff2c16","src/well_known_types/source_context.rs":"fbec3ec4e1e59be865d0b7cb4d3b08aa197b46ca27fc3d90ed7da30514df6355","src/well_known_types/struct_.rs":"00bfebd64c851a7e0b0b26d3fc1319fd072975cb84169066b5aa00a4871ac6c8","src/well_known_types/timestamp.rs":"bc8b3a27f7b1ec134aa5a9d1187b63e02d5d2e72b153a9b6153e0b7a078c003e","src/well_known_types/type_.rs":"789fa7e0ec2fe7fc5f68a06636ade107fc305780b597c7c9687dbe3560252514","src/well_known_types/wrappers.rs":"56cbbf290be81ce7d62fd33b883015ef3de2abc1d5f8c683e38e96397f1d056d","src/well_known_types_util/any.rs":"2b2e5cdf1d413bc13485bfc78c84d8403168d6b1a6dbc10d585bf10326120c81","src/well_known_types_util/duration.rs":"e0d9de89f8c7c4b2075f23c2a4451dfec4ae1f28c9784ea39a626a8c3aa9e005","src/well_known_types_util/mod.rs":"81fb1c0721602ffe91c4587f727457b59c8697863e3f853cd9569db5cee973e9","src/well_known_types_util/timestamp.rs":"f55906fef3190fa1786ed736ded16f3ac6de2095cb974af5a476c2a2f91260b3","src/wire_format.rs":"f1d09b0bd1e4c5e4072b5c943e749f7b727737bd08a6d82f81d4f2a60e2ab94e","src/zigzag.rs":"0dcbdf54d4bc8141fdc64d074e6f6f7633bbb66cc782cd4bd6d343ce0569c3de"},"package":"a3a7c64d9bf75b1b8d981124c14c179074e8caa7dfe7b6a12e6222ddcd0c8f72"} \ No newline at end of file 
-+{"files":{"Cargo.toml":"96cda32a56ae7f781b2142812f054e6f31435b30a2f0b2386346cbf277186920","LICENSE.txt":"7f2fa80a60e84f8dc0747abb0e42342f83bded04a20461a636b47c0331b92ddf","README.md":"14dbb3068c031afbd44010a4ff0c8146aa7e02f6051579177767f218fff9cd38","build.rs":"347d9aa6d4b102b6e83c65aeef89b1e1752536bd8ef80fc834a2c78d5cb2ff80","regenerate.sh":"df3bc8537c038fd356367e5af433c284dd5b76505c35f6f89deab0f099a3e3e0","src/byteorder.rs":"9e6b481be82182ac99ff9704468b4d64656fb922f90c54ca83a8d7ca1dfd2e3a","src/cached_size.rs":"895788d7356a1fcd0f2d7446996066f8c53a0f36032174a97273997e65440fa0","src/chars.rs":"816a0af0b830ecd43028e40295fe0bf9eb79263f00fee01678c75d4ac5b7e908","src/coded_input_stream/buf_read_iter.rs":"2cba31136e56dd63c9a17d1bf4627f430b2ed3ddb35abf4479f57bcb912cdb4e","src/coded_input_stream/buf_read_or_reader.rs":"2bf3befcfce8c97faae7563084a7d93931afc5b886419e491111c15b4139058e","src/coded_input_stream/input_buf.rs":"cf71e63d3aef90188c55b6d42aa7cad47bcba16b27e687e44a15bd45e65b8737","src/coded_input_stream/input_source.rs":"8ce41ec8d147d03adf2dbd27ae0fa0b6e33400b62a9c945ab7aa2224bf43a863","src/coded_input_stream/mod.rs":"9c9eef558aec08a5071303896703aae82a1fa1358d50784e836319e9dcdd2789","src/coded_output_stream/buffer.rs":"cafbbb3f02df26c98a4e5556e99a5a2ce811ffea4c49ba982532a6d9a53ec7d8","src/coded_output_stream/mod.rs":"06289bfaa7971ef275b1017d8b5d4197b864fc881f83d0ed073a28fca894a0ba","src/coded_output_stream/output_target.rs":"ab28889b824b91688cb5c17cf79bdbec96aeeea59f22946b9f359b957cc40580","src/coded_output_stream/with.rs":"47541db9f4f51cacd406571f02d6abe8f4837413c7cecbe511fc0c24ef2384f2","src/descriptor.rs":"4b3f1a458d5e105c01c03671bce753b564fcddefabe36ab41f986ac070a33614","src/doctest_pb.rs":"74ffeba6811126ab8ed076e8d3debbb634f8f9bba3bd77f9c288e88d7937591a","src/enum_full.rs":"ca658951c42ef539ce6221d8f1b1e5005a1a14393460078c40611bb7091629d0","src/enum_or_unknown.rs":"3088b9d139f296284a44d4f9da9c75476dfa00168358328f46b1c52c73572521","src/enums.rs":"e0af03e21b48d3bb44d06a7971229e8e5ee6c8230323534f774f4874de3c9760","src/error.rs":"1839b319f456b56e2bb3c59f119f4c0bec53a02d52c92619b887bfdb1656183b","src/ext.rs":"b5c43e19ca56612e81560e520831da1746520be7944e506e44e07356b1415bbf","src/fixed.rs":"40b32b11dd53f0dc3de2c73f9003c6c0f762cf802e30e16beb5a22a18f8f2f50","src/lazy.rs":"b202a8cd252b11b013983f27c1ed5eac046674ed156a4e5e63357e15ead746df","src/lib.rs":"f22f0d3c3372cc68450071bb2abf8a1542d0f3348f5ec1133e9b785445494f56","src/message.rs":"a112b1d5c769d866a586a4c9af9245fa8029991563d2ff33c47e9d4c2b32fef7","src/message_dyn.rs":"c2d06029139a1ef09409227c0c501dd361b485ff1e4fcbf1d2b0cb579cd80fba","src/message_field.rs":"8456bcc3118a0f62d8eb3e5495be678ad75f5164e5dc67658667c7611e2099d9","src/message_full.rs":"4bbbb917a86aa3b14f63e93db41522c78223036b6e7d219769927059ff70205e","src/misc.rs":"1679b6f8b5c2b4375e71835fb7ca83a4de6db498f092abf5ab3a9f5eaccf0d5a","src/oneof.rs":"de5a694a56931308fc1a790678aaaf8b67d7c6b56c9f7b2fde36a14312863e05","src/oneof_full.rs":"b9d5d95db115b1ebf6e7c222373008d4f9f86e21347ddf50ef23f8cd11b8f777","src/owning_ref.rs":"1face1307d85ef98f5b9752eb45de47884c3ce68d31cec315ebfac6559ab501f","src/plugin.rs":"5bbb2aaecc59c590755e0fe972c4f231d506bbc1893d5f3e800d2e69ce805ec0","src/reflect/acc/mod.rs":"23500dd605f11f8def7d0f858a00cf1c85a7b95c293bc125ba0804a949b35162","src/reflect/acc/v2/map.rs":"46dab64d941e61fd61aa3794b2fab60bbd588a21ca9f1a378cdc022bbdfb60a1","src/reflect/acc/v2/mod.rs":"86639cfa45e3b2d08020c289001d87910fa972e9fb6a28a38880ccee51002a0c","src/reflect/acc/v2/repeated.rs":"07b62beb3bb
81d1fa1de486c7cdce20ae2f4f46c2e93ed6f104b41d3a3a5beba","src/reflect/acc/v2/singular/mod.rs":"85bace3cf99fe0b05dce61bf19433077cf29506c6641b001935064fd37ab658f","src/reflect/acc/v2/singular/oneof.rs":"f70db73a0316185b8ae43b82cd29665d1140d920e7d28bb789a438eb06f9c62a","src/reflect/dynamic/map.rs":"565376a2db71cf607cb692b984acb41d16dfb04df59c9ad0eca8ba1fc85017cc","src/reflect/dynamic/mod.rs":"3ee7a82dbd5410d10eee44fdf3ae8b5f198185d7ff4a608f10a668eba6af3a73","src/reflect/dynamic/optional.rs":"db923e3d343f869c2bf4f157559686251ff9744dfd0560ba1d1b1b46ae1b81fd","src/reflect/dynamic/repeated.rs":"61a7c698b59280564a1deb7200884875a8844120058c3d69ea4d6aa5f6c4266e","src/reflect/enums/generated.rs":"44e5dbe08a1a15067744580c87c6d09f66dc364f2791fc1ecab919e1dacdec09","src/reflect/enums/mod.rs":"aed1b29e4e42f34b26476a6013e64b7ec8876cfa53633583a751c344fd3ab34c","src/reflect/error.rs":"532a9c117606e8597a40d60b3efebc9371c4f746919cc611aaaddf105cbb3608","src/reflect/field/dynamic.rs":"8e81f7b6f684ed58287eb2de20f3abb8dabb062601715421d3d1e4c041101602","src/reflect/field/index.rs":"4aeef75560e52bf865718f9323fc5b2b2318a6e4bb66fadc57f4875999cf15b3","src/reflect/field/mod.rs":"6c646e59e64e327a961e680f5b189cdb1d540b61b26cd345d623456a4f764a22","src/reflect/field/protobuf_field_type.rs":"6ec9ca95c25a9c9fe70fad51b1221676e9d3d5a6daac37d5633379471d8c4257","src/reflect/field/runtime_field_type.rs":"26166bb9d48caa0287dfe6235937e5bd647692ca75e8ee4855d0e198f9a79d73","src/reflect/file/building.rs":"53806efda5872c32f63f55582b767be8becff1d7bfb4ed0c11947c912ad55a75","src/reflect/file/dynamic.rs":"3e26271c31816dae043eb70c9990a6fc621c9b5241a555d968f731dfd4762489","src/reflect/file/fds.rs":"9a369eaea05a000710bf977cce28d3fad1463f8ffa42df35e4f5ac5de306f9e6","src/reflect/file/generated.rs":"88f3d88ddbcfa01812398ddc5c350c54cc42b15d99223a642574f55d4d6cdf69","src/reflect/file/index.rs":"3d53af11b39b164b488e3850c3a5be5ae4628c172b4475e4ae5b993225bdeae9","src/reflect/file/mod.rs":"b7aa1c4154677630b843c81d35c60f1374d00d44d3477e6e899e18cb7ae97db1","src/reflect/file/syntax.rs":"8d6d6c3a7bbf9217df3550410a9ba1eb9c08295aa410cc5d2e65efe1eec3ca3a","src/reflect/find_message_or_enum.rs":"e8b10159819cce4414da7681cb3ce0b4e62a45adf4e3e7933a3c1b4f8e97cfb8","src/reflect/map/empty.rs":"230cbcda25bfd3c6f348043eef032252b8a0d86a0c71d93d6206adc59d688732","src/reflect/map/generated.rs":"f1b332e97d267c3272b26be03bee80fe9420bb6fc203ae6f3f9dd3044d564778","src/reflect/map/mod.rs":"7648fa897f4a8acf1ab48b8bba8f165cb4b09a46125e645d600a7b9ced55e1a2","src/reflect/message/generated.rs":"c76f5e887534bc9648dd105718f79bb93465549d57b25c4a00957e603749721c","src/reflect/message/is_initialized_is_always_true.rs":"af716e9d0ce233fda9c7dee13814c24c188ea195cf907d81f74fb198ef2760ae","src/reflect/message/message_ref.rs":"80472f804a4dd3b91f6fec4451639ca356f2b33c502775e0fd6b2c3bfbe1be0a","src/reflect/message/mod.rs":"5ef7f5ecdc2de7c0789b8558711a976e2376fcaae67975a10d9f1bd4179703e5","src/reflect/mod.rs":"620cab65b696a13144ed54d589ca8c4176ecb8127b2ba2a294806f649c0fbd9f","src/reflect/name.rs":"0377dcf871ca5add5e168a3bff04d9f01fe5970db4dfb66272def6484dc7d54b","src/reflect/oneof/generated.rs":"c02b7cd7415f52366f6092559643869812db842bd1c383ce7d8759e519ab453a","src/reflect/oneof/mod.rs":"55c906888e89a7bfd1f8865cced5078905b512f3ce9af20d16614fdf5791c31d","src/reflect/optional/mod.rs":"5dada97750209aeddf1506eea0a59d709aeb3e44bae5443214e0c2950c870952","src/reflect/protobuf_type_box.rs":"5ed50bdefa5eebe8bf0547cb37def38d814653ac7a0d401eb4f05b8a72ebf509","src/reflect/reflect_eq.rs":"1352d0be16ff7dc2089d49
9b3fbcf40d501406e8648649092aa2cb21f207aac0","src/reflect/repeated/drain_iter.rs":"0f065e5ef884ee90189594b8a92d814c40a4b3ff80ed659f2f8a0ac56795011d","src/reflect/repeated/iter.rs":"f7f7bf56347850f567612feab9542c4658f251ce74b7b8fb7ed6612cb85584f0","src/reflect/repeated/mod.rs":"6084482af4936340e1bfd43ff8d06351c3d0316f26cb9f8b73bd8c16f3e9df98","src/reflect/repeated/transmute.rs":"ecd5b5b2772670b030a6d96b47b54bf8500ec0996920ef0db7d5f4b6f338c493","src/reflect/repeated/vec_downcast.rs":"7f4c2997d313f45bc46a097fad7c579d663c642cba425a7851f590282d58309d","src/reflect/rt/mod.rs":"4f0266be9bd092a6ee49a1f3453ff08eabfcebb65473b6c8552a260ac7a4817b","src/reflect/rt/v2.rs":"3faa866b4aa766875062071eb6db36c7c42a3d9145f66162a85aac91e200e354","src/reflect/runtime_type_box.rs":"6d8988ed25218f13da61db5dbbefa621df3fd7f1141669854c6ec9571b3eee6c","src/reflect/runtime_types.rs":"07b8eeac30f666c890ccac14c5076b77d010abf322b8f23883032e2ad003476e","src/reflect/service/index.rs":"4a41f90b6c8b3f5c8349075aec84fcbb90ab3028451d2ba40cb83257ff4d90c7","src/reflect/service/mod.rs":"1d0b5b3d9cd1968731971137ca320a91591ee9ca45828d3e4284da87397044f6","src/reflect/type_dynamic.rs":"76c9e764978c66444a4ffb5b558cbce241d1e1123c5dd6eb250f56b48b7b0a5c","src/reflect/types.rs":"fb6a18354a7a8fa7dc6a4db51793af8a5c41680bc49c1d157145a21a75f5f3e4","src/reflect/value/mod.rs":"56f7ff8c4541108fff20f83f7f12ef173ce398e642b482dc3a4cf92c9e1cea17","src/reflect/value/value_box.rs":"1037d01c52a4f0432e42a2c023f5c68ed458ed60b196597ca78f81b6207ecb83","src/reflect/value/value_ref.rs":"7a3490eb4918ee725ad59219b0fc5810b231eaf2ddf798ab75085b4acc145b2e","src/rt/map.rs":"c4bd4246181a43dc9cf1735ec5882955af595fba8ef839a06e0e1df399848520","src/rt/message.rs":"c9b9b3b8f25b6813b8ca2411f015ae80b2abba377d44f9f9b9c05cb45366229a","src/rt/mod.rs":"db610d871d8fb022ba4634199896534ecb3d6ad22c7e2cabbf4d7ad79e1c8c66","src/rt/packed.rs":"be2fae85812c39d815bcb0463e3ea67774770c25014b764b8712dd2b90d360c6","src/rt/repeated.rs":"213d08349efb21bc02fb5abd1d1c3f039ae1d4368f53f548cdf1999897c60f1c","src/rt/singular.rs":"2c982de7a686a8d0c430ce690297a524e892a70bca33d288c6e9b912d19e994c","src/rt/unknown_or_group.rs":"a0bf9af0bdb6ee4261bdc9d5136288e3d17f7de611b7f61943caf6d8eb38367d","src/rustproto.rs":"4a49fac5c9caaca991dd5505c154941e8f94708c254269119e64cf053f7aaea9","src/special.rs":"2f64cfbb0659249cf4a951cefb51b1a17ddf85785eb868b68af7546cd31a5101","src/text_format/mod.rs":"da0aeb839963afcba1923b68a06264185a927cef3f3075ca958c11fa1e780535","src/text_format/parse.rs":"c7be3464fa8f6624ed2001b450e999f93bea5f6118132b7f372110c7af5e5e71","src/text_format/print.rs":"7bd28696ce2a98f9520e2303b0f70fe1d46b045d550f55064a3b524b58f9dfab","src/timestamp.rs":"f0590e19fd7740bdc65a0dc6f9d73bf55559638515ca7849481022288a9bee43","src/unknown.rs":"fd6091ad04dadbde5793ea42af50fa51cf2e7737696029a9e0d1f001f0c1423d","src/varint/decode.rs":"5e9fdf9fb5fe82ddc223feaf5867c0a081bd67731635f88cb9a5b1faeeb79f82","src/varint/encode.rs":"bc0969a152aff774976216f9f2bdbc273a24da07d57b8e3ec96ebe691d4559c1","src/varint/generic.rs":"98e31da80c278cff672ddc8231241cc789ad6de138fa6ca6c0483ff1783f4957","src/varint/mod.rs":"643b5b2443b4e103fc4eeac7844dcda4b9c6b2bab3cfe9fba00145ccea9a4505","src/well_known_types/any.rs":"7db9c4f0df3f0809821e09bb0bd2ddaa07ff4471be005fc02f2be9828a1aedd1","src/well_known_types/api.rs":"80bf5fe39c7263a440d5c1bec8bb6c5a0dd274f73c3f702c4e223cfdf02f74eb","src/well_known_types/duration.rs":"33c4039d594eb8df4a35f1bae1ad2a5dc36a5bf167369d99faf480cc7e1cb284","src/well_known_types/empty.rs":"47f56d10483e9c6c3e276e54d877e7
0aaf3b2a57c269a636dd9948d0e4ff419f","src/well_known_types/field_mask.rs":"7b4d883c03ec89d83b919271d03273def230c30caae36b7247cba1b325ccc551","src/well_known_types/mod.rs":"b141483834c860b221d0946a584389ebcefc2c5f7692ce1f95869c9f83ff2c16","src/well_known_types/source_context.rs":"fbec3ec4e1e59be865d0b7cb4d3b08aa197b46ca27fc3d90ed7da30514df6355","src/well_known_types/struct_.rs":"00bfebd64c851a7e0b0b26d3fc1319fd072975cb84169066b5aa00a4871ac6c8","src/well_known_types/timestamp.rs":"bc8b3a27f7b1ec134aa5a9d1187b63e02d5d2e72b153a9b6153e0b7a078c003e","src/well_known_types/type_.rs":"789fa7e0ec2fe7fc5f68a06636ade107fc305780b597c7c9687dbe3560252514","src/well_known_types/wrappers.rs":"56cbbf290be81ce7d62fd33b883015ef3de2abc1d5f8c683e38e96397f1d056d","src/well_known_types_util/any.rs":"2b2e5cdf1d413bc13485bfc78c84d8403168d6b1a6dbc10d585bf10326120c81","src/well_known_types_util/duration.rs":"e0d9de89f8c7c4b2075f23c2a4451dfec4ae1f28c9784ea39a626a8c3aa9e005","src/well_known_types_util/mod.rs":"81fb1c0721602ffe91c4587f727457b59c8697863e3f853cd9569db5cee973e9","src/well_known_types_util/timestamp.rs":"f55906fef3190fa1786ed736ded16f3ac6de2095cb974af5a476c2a2f91260b3","src/wire_format.rs":"f1d09b0bd1e4c5e4072b5c943e749f7b727737bd08a6d82f81d4f2a60e2ab94e","src/zigzag.rs":"0dcbdf54d4bc8141fdc64d074e6f6f7633bbb66cc782cd4bd6d343ce0569c3de"},"package":"a3a7c64d9bf75b1b8d981124c14c179074e8caa7dfe7b6a12e6222ddcd0c8f72"} diff --git a/vendor/protobuf-3.7.1/src/coded_input_stream/mod.rs b/vendor/protobuf-3.7.1/src/coded_input_stream/mod.rs index a979df19c..dc8029c51 100644 --- a/vendor/protobuf-3.7.1/src/coded_input_stream/mod.rs @@ -123,5 +123,5 @@ index a979df19c..dc8029c51 100644 + } } -- -2.45.2 +2.45.4 diff --git a/SPECS/rust/rust-1.75.spec b/SPECS/rust/rust-1.75.spec index ca5d35f7c90..a4a657701bd 100644 --- a/SPECS/rust/rust-1.75.spec +++ b/SPECS/rust/rust-1.75.spec @@ -9,7 +9,7 @@ Summary: Rust Programming Language Name: rust Version: 1.75.0 -Release: 22%{?dist} +Release: 24%{?dist} License: (ASL 2.0 OR MIT) AND BSD AND CC-BY-3.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -64,7 +64,7 @@ BuildRequires: python3 # make sure rust depends on system zlib BuildRequires: zlib-devel %if 0%{?with_check} -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: sudo %endif # rustc uses a C compiler to invoke the linker, and links to glibc in most cases @@ -180,6 +180,12 @@ rm %{buildroot}%{_bindir}/*.old %{_mandir}/man1/* %changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.75.0-24 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.75.0-23 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 1.75.0-22 - Bump to rebuild with updated glibc diff --git a/SPECS/rust/rust.signatures.json b/SPECS/rust/rust.signatures.json index d88781170af..0538e0da355 100644 --- a/SPECS/rust/rust.signatures.json +++ b/SPECS/rust/rust.signatures.json @@ -1,12 +1,12 @@ { "Signatures": { - "cargo-1.85.0-aarch64-unknown-linux-gnu.tar.xz": "cdebe48b066d512d664c13441e8fae2d0f67106c2080aa44289d98b24192b8bc", - "cargo-1.85.0-x86_64-unknown-linux-gnu.tar.xz": "0aff33b57b0e0b102d762a2b53042846c1ca346cff4b7bd96b5c03c9e8e51d81", - "rust-std-1.85.0-aarch64-unknown-linux-gnu.tar.xz": "8af1d793f7820e9ad0ee23247a9123542c3ea23f8857a018651c7788af9bc5b7", - "rust-std-1.85.0-x86_64-unknown-linux-gnu.tar.xz": "285e105d25ebdf501341238d4c0594ecdda50ec9078f45095f793a736b1f1ac2", - "rustc-1.85.0-aarch64-unknown-linux-gnu.tar.xz": 
"e742b768f67303010b002b515f6613c639e69ffcc78cd0857d6fe7989e9880f6", - "rustc-1.85.0-x86_64-unknown-linux-gnu.tar.xz": "7436f13797475082cd87aa65547449e01659d6a810b4cd5f8aedc48bb9f89dfb", - "rustc-1.86.0-src-cargo.tar.gz": "65af8d68e71d9ee7849d680434bffc527125442da38bba9a14bb3c12abef0595", - "rustc-1.86.0-src.tar.xz": "d939eada065dc827a9d4dbb55bd48533ad14c16e7f0a42e70147029c82a7707b" + "cargo-1.89.0-aarch64-unknown-linux-gnu.tar.xz": "f9df3ee6d55a2387459b843477743fa386c3c0f126bd0be01691ee49309681b8", + "cargo-1.89.0-x86_64-unknown-linux-gnu.tar.xz": "99fc10be2aeedf2c23a484f217bfa76458494495a0eee33e280d3616bb08282d", + "rust-std-1.89.0-aarch64-unknown-linux-gnu.tar.xz": "abea0955dded88c68d731524ab9d29b162fae23bf5805b9f1dec063cba37c2aa", + "rust-std-1.89.0-x86_64-unknown-linux-gnu.tar.xz": "2719470dcd78b3f97d78b978c8f85a1a58d84ff11b62558294621c01bca34d49", + "rustc-1.89.0-aarch64-unknown-linux-gnu.tar.xz": "16ed8d8c7628a481c8501e7cd1022a123269b297bdedbb7f211f37a15e937e0e", + "rustc-1.89.0-x86_64-unknown-linux-gnu.tar.xz": "b42c254e1349df86bd40bc28fdf386172a1a46f2eeabe3c7a08a75cf1fb60e27", + "rustc-1.90.0-src-cargo.tar.gz": "72f6a52e8c4df6047b51bea1e6231faf7ff43a0cedddad6a91e14f9a6f924c17", + "rustc-1.90.0-src.tar.xz": "6bfeaddd90ffda2f063492b092bfed925c4b8c701579baf4b1316e021470daac" } } \ No newline at end of file diff --git a/SPECS/rust/rust.spec b/SPECS/rust/rust.spec index 7d438733090..2ce0a8da6cb 100644 --- a/SPECS/rust/rust.spec +++ b/SPECS/rust/rust.spec @@ -3,13 +3,13 @@ # Release date and version of stage 0 compiler can be found in "src/stage0" inside the extracted "Source0". # Look for "date:" and "rustc:". -%define release_date 2025-02-20 -%define stage0_version 1.85.0 +%define release_date 2025-08-07 +%define stage0_version 1.89.0 Summary: Rust Programming Language Name: rust -Version: 1.86.0 -Release: 10%{?dist} +Version: 1.90.0 +Release: 3%{?dist} License: (ASL 2.0 OR MIT) AND BSD AND CC-BY-3.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -41,7 +41,7 @@ Source4: https://static.rust-lang.org/dist/%{release_date}/rust-std-%{sta Source5: https://static.rust-lang.org/dist/%{release_date}/cargo-%{stage0_version}-aarch64-unknown-linux-gnu.tar.xz Source6: https://static.rust-lang.org/dist/%{release_date}/rustc-%{stage0_version}-aarch64-unknown-linux-gnu.tar.xz Source7: https://static.rust-lang.org/dist/%{release_date}/rust-std-%{stage0_version}-aarch64-unknown-linux-gnu.tar.xz -Patch0: CVE-2025-4574.patch +Patch0: CVE-2025-4574.patch Patch1: CVE-2025-53605.patch Patch2: CVE-2024-11738.patch BuildRequires: binutils @@ -61,7 +61,7 @@ BuildRequires: python3 # make sure rust depends on system zlib BuildRequires: zlib-devel %if 0%{?with_check} -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: sudo %endif # rustc uses a C compiler to invoke the linker, and links to glibc in most cases @@ -181,6 +181,15 @@ rm %{buildroot}%{_docdir}/docs/html/.lock %{_mandir}/man1/* %changelog +* Thu Jan 22 2026 Kanishk Bansal - 1.90.0-3 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 1.90.0-2 +- Bump to rebuild with updated glibc + +* Tue Oct 28 2025 Kavya Sree Kaitepalli - 1.90.0-1 +- Upgrade to 1.90.0 + * Mon Nov 10 2025 Andrew Phelps - 1.86.0-10 - Bump to rebuild with updated glibc diff --git a/SPECS/srp-hwe/srp-hwe.signatures.json b/SPECS/srp-hwe/srp-hwe.signatures.json index 226e0a58452..ee3b9bcb96b 100644 --- a/SPECS/srp-hwe/srp-hwe.signatures.json +++ b/SPECS/srp-hwe/srp-hwe.signatures.json @@ -1,5 
+1,5 @@ { "Signatures": { - "srp-24.10.tgz": "bc4897a8317fe2204109cffe935ae64aeb7dd5f09df3ac9a0317ddfb6ed71286" + "srp-25.07.tgz": "8f16e4b46f402fda960c63f603fc10c2db4a5cce86f9847cb50a4e720144ba65" } } diff --git a/SPECS/srp-hwe/srp-hwe.spec b/SPECS/srp-hwe/srp-hwe.spec index d3b9ea52797..98ddaf71656 100644 --- a/SPECS/srp-hwe/srp-hwe.spec +++ b/SPECS/srp-hwe/srp-hwe.spec @@ -29,7 +29,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -40,9 +40,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name srp-hwe} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-24%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-2%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -66,16 +66,18 @@ Summary: srp driver Name: srp-hwe -Version: 24.10 -Release: 24%{release_suffix}%{?dist} +Version: 25.07 +Release: 2%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/srp-24.10.tgz#/srp-%{version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/srp-%{version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux -ExclusiveArch: aarch64 BuildRequires: gcc BuildRequires: make @@ -255,6 +257,14 @@ fi %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 25.07-2_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 18 2025 Suresh Babu Chalamalasetty - 25.07-1_6.12.57.1.1 +- Upgrade version to 25.07. +- Enable build on x86_64 kernel hwe. 
+- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 24.10-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/srp/srp.signatures.json b/SPECS/srp/srp.signatures.json index 226e0a58452..ee3b9bcb96b 100644 --- a/SPECS/srp/srp.signatures.json +++ b/SPECS/srp/srp.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "srp-24.10.tgz": "bc4897a8317fe2204109cffe935ae64aeb7dd5f09df3ac9a0317ddfb6ed71286" + "srp-25.07.tgz": "8f16e4b46f402fda960c63f603fc10c2db4a5cce86f9847cb50a4e720144ba65" } } diff --git a/SPECS/srp/srp.spec b/SPECS/srp/srp.spec index 69fcc6496ab..7c235b802e1 100644 --- a/SPECS/srp/srp.spec +++ b/SPECS/srp/srp.spec @@ -39,9 +39,9 @@ %global K_SRC /lib/modules/%{target_kernel_version_full}/build %{!?_name: %define _name srp} -%{!?_version: %define _version 24.10} -%{!?_mofed_full_version: %define _mofed_full_version %{_version}-21%{release_suffix}%{?dist}} -%{!?_release: %define _release OFED.24.10.0.6.7.1} +%{!?_version: %define _version 25.07} +%{!?_mofed_full_version: %define _mofed_full_version %{_version}-1%{release_suffix}%{?dist}} +%{!?_release: %define _release OFED.25.07.0.9.7.1} # KMP is disabled by default %{!?KMP: %global KMP 0} @@ -65,12 +65,15 @@ Summary: srp driver Name: srp -Version: 24.10 -Release: 21%{release_suffix}%{?dist} +Version: 25.07 +Release: 1%{release_suffix}%{?dist} License: GPLv2 Url: http://www.mellanox.com Group: System Environment/Base -Source: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/srp-24.10.tgz#/%{name}-%{version}.tgz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tgz BuildRoot: /var/tmp/%{name}-%{version}-build Vendor: Microsoft Corporation Distribution: Azure Linux @@ -255,6 +258,10 @@ fi %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 25.07-1 +- Upgrade version to 25.07. +- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 24.10-21 - Bump mofed release number diff --git a/SPECS/strongswan/CVE-2025-62291.patch b/SPECS/strongswan/CVE-2025-62291.patch new file mode 100644 index 00000000000..2875e9f05cf --- /dev/null +++ b/SPECS/strongswan/CVE-2025-62291.patch @@ -0,0 +1,46 @@ +From ae75a582a276ca4b1f6b9b68fe602f41a6e93109 Mon Sep 17 00:00:00 2001 +From: Tobias Brunner +Date: Thu, 9 Oct 2025 11:33:45 +0200 +Subject: [PATCH] eap-mschapv2: Fix length check for Failure Request packets on + the client + +For message lengths between 6 and 8, subtracting HEADER_LEN (9) causes +`message_len` to become negative, which is then used in calls to malloc() +and memcpy() that both take size_t arguments, causing an integer +underflow. + +For 6 and 7, the huge size requested from malloc() will fail (it exceeds +PTRDIFF_MAX) and the returned NULL pointer will cause a segmentation +fault in memcpy(). + +However, for 8, the allocation is 0, which succeeds. But then the -1 +passed to memcpy() causes a heap-based buffer overflow (and possibly a +segmentation fault when attempting to read/write that much data). +Fortunately, if compiled with -D_FORTIFY_SOURCE=3 (the default on e.g. +Ubuntu), the compiler will use __memcpy_chk(), which prevents that buffer +overflow and causes the daemon to get aborted immediately instead. 
+ +Fixes: f98cdf7a4765 ("adding plugin for EAP-MS-CHAPv2") +Fixes: CVE-2025-62291 +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://download.strongswan.org/security/CVE-2025-62291/strongswan-4.4.0-6.0.2_eap_mschapv2_failure_request_len.patch +--- + src/libcharon/plugins/eap_mschapv2/eap_mschapv2.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/libcharon/plugins/eap_mschapv2/eap_mschapv2.c b/src/libcharon/plugins/eap_mschapv2/eap_mschapv2.c +index 1bb54c8..9ad509a 100644 +--- a/src/libcharon/plugins/eap_mschapv2/eap_mschapv2.c ++++ b/src/libcharon/plugins/eap_mschapv2/eap_mschapv2.c +@@ -974,7 +974,7 @@ static status_t process_peer_failure(private_eap_mschapv2_t *this, + data = in->get_data(in); + eap = (eap_mschapv2_header_t*)data.ptr; + +- if (data.len < 3) /* we want at least an error code: E=e */ ++ if (data.len < HEADER_LEN + 3) /* we want at least an error code: E=e */ + { + DBG1(DBG_IKE, "received invalid EAP-MS-CHAPv2 message: too short"); + return FAILED; +-- +2.45.4 + diff --git a/SPECS/strongswan/strongswan.spec b/SPECS/strongswan/strongswan.spec index 15c33fd16d9..d1184723268 100644 --- a/SPECS/strongswan/strongswan.spec +++ b/SPECS/strongswan/strongswan.spec @@ -12,7 +12,7 @@ Name: strongswan Version: 5.9.14 -Release: 7%{?dist} +Release: 8%{?dist} Summary: An OpenSource IPsec-based VPN and TNC solution # Automatically converted from old format: GPLv2+ - review is highly recommended. License: GPL-2.0-or-later @@ -31,6 +31,7 @@ Patch2: strongswan-6.0.0-gcc15.patch Patch3: strongswan-6.0.1-gcc15.patch Patch4: strongswan-fix-make-check.patch Patch5: 0001-Extending-timeout-for-test-cases-with-multiple-read-.patch +Patch6: CVE-2025-62291.patch BuildRequires: autoconf BuildRequires: automake @@ -425,6 +426,9 @@ install -D -m 0644 %{SOURCE3} %{buildroot}/%{_tmpfilesdir}/strongswan-starter.co %endif %changelog +* Mon Jan 19 2026 Azure Linux Security Servicing Account - 5.9.14-8 +- Patch for CVE-2025-62291 + * Fri May 23 2025 Mayank Singh - 5.9.14-7 - Initial Azure Linux import from Fedora 42 (license: MIT). 
- License verified diff --git a/SPECS/supermin/supermin.spec b/SPECS/supermin/supermin.spec index 40091c3aef4..6517836bde6 100644 --- a/SPECS/supermin/supermin.spec +++ b/SPECS/supermin/supermin.spec @@ -21,7 +21,7 @@ Summary: Tool for creating supermin appliances Name: supermin Version: 5.3.4 -Release: 11%{?dist} +Release: 13%{?dist} License: GPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -54,7 +54,7 @@ BuildRequires: systemd-udev %if %{with dietlibc} BuildRequires: dietlibc-devel %else -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} %endif %if 0%{?with_check} @@ -129,6 +129,12 @@ make check || { %{_rpmconfigdir}/supermin-find-requires %changelog +* Thu Jan 22 2026 Kanishk Bansal - 5.3.4-13 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 5.3.4-12 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 5.3.4-11 - Bump to rebuild with updated glibc diff --git a/SPECS/systemd/fix-stackoverflow-when-dropping-tclass-or-qdisc.patch b/SPECS/systemd/fix-stackoverflow-when-dropping-tclass-or-qdisc.patch new file mode 100644 index 00000000000..e9c237d3eb5 --- /dev/null +++ b/SPECS/systemd/fix-stackoverflow-when-dropping-tclass-or-qdisc.patch @@ -0,0 +1,201 @@ +From a5afac6ee6535b2bcb5214beb648c16b3fd3e99b Mon Sep 17 00:00:00 2001 +From: Rohit Rawat +Date: Tue, 25 Nov 2025 12:22:08 +0000 +Subject: [PATCH] network/tc: fix stack overflow when dropping tclass or qdisc + +This patch is combination of two patches that fixed the issue in v255-stable: +1. network/tc: fix stack overflow when dropping tclass or qdisc by Yu Watanabe at + https://github.com/systemd/systemd-stable/commit/af95833d49ee4a77511d5ce9a507f9c74352347c +2. network/tc: Avoid concurrent set modification in tclass_drop()/qdisc_drop() by Daan De Meyer at + https://github.com/systemd/systemd-stable/commit/b3fd8fa1cae7837043bfb1096c413a086f720a4b + +Note: This patch is needed only in systemd v255 and is already present in + later releases(v256 and above) + +--- + src/network/tc/qdisc.c | 52 ++++++++++++++++++++++++++++++----------- + src/network/tc/qdisc.h | 2 ++ + src/network/tc/tclass.c | 51 +++++++++++++++++++++++++++++----------- + src/network/tc/tclass.h | 2 ++ + 4 files changed, 81 insertions(+), 26 deletions(-) + +diff --git a/src/network/tc/qdisc.c b/src/network/tc/qdisc.c +index f20f410..43f5c73 100644 +--- a/src/network/tc/qdisc.c ++++ b/src/network/tc/qdisc.c +@@ -285,31 +285,57 @@ int link_find_qdisc(Link *link, uint32_t handle, const char *kind, QDisc **ret) + return -ENOENT; + } + +-QDisc* qdisc_drop(QDisc *qdisc) { ++void qdisc_mark_recursive(QDisc *qdisc) { + TClass *tclass; +- Link *link; + + assert(qdisc); ++ assert(qdisc->link); ++ ++ if (qdisc_is_marked(qdisc)) ++ return; + +- link = ASSERT_PTR(qdisc->link); ++ qdisc_mark(qdisc); + +- /* also drop all child classes assigned to the qdisc. */ +- SET_FOREACH(tclass, link->tclasses) { ++ /* also mark all child classes assigned to the qdisc. 
*/ ++ SET_FOREACH(tclass, qdisc->link->tclasses) { + if (TC_H_MAJ(tclass->classid) != qdisc->handle) + continue; + +- tclass_drop(tclass); ++ tclass_mark_recursive(tclass); + } ++} + +- qdisc_enter_removed(qdisc); ++void link_qdisc_drop_marked(Link *link) { ++ QDisc *qdisc; + +- if (qdisc->state == 0) { +- log_qdisc_debug(qdisc, link, "Forgetting"); +- qdisc = qdisc_free(qdisc); +- } else +- log_qdisc_debug(qdisc, link, "Removed"); ++ assert(link); ++ ++ SET_FOREACH(qdisc, link->qdiscs) { ++ if (!qdisc_is_marked(qdisc)) ++ continue; ++ ++ qdisc_unmark(qdisc); ++ qdisc_enter_removed(qdisc); ++ ++ if (qdisc->state == 0) { ++ log_qdisc_debug(qdisc, link, "Forgetting"); ++ qdisc_free(qdisc); ++ } else ++ log_qdisc_debug(qdisc, link, "Removed"); ++ } ++} ++ ++QDisc* qdisc_drop(QDisc *qdisc) { ++ assert(qdisc); ++ assert(qdisc->link); ++ ++ qdisc_mark_recursive(qdisc); ++ ++ /* link_qdisc_drop_marked() may invalidate qdisc, so run link_tclass_drop_marked() first. */ ++ link_tclass_drop_marked(qdisc->link); ++ link_qdisc_drop_marked(qdisc->link); + +- return qdisc; ++ return NULL; + } + + static int qdisc_handler(sd_netlink *rtnl, sd_netlink_message *m, Request *req, Link *link, QDisc *qdisc) { +diff --git a/src/network/tc/qdisc.h b/src/network/tc/qdisc.h +index a62b941..cbba1be 100644 +--- a/src/network/tc/qdisc.h ++++ b/src/network/tc/qdisc.h +@@ -77,7 +77,9 @@ DEFINE_NETWORK_CONFIG_STATE_FUNCTIONS(QDisc, qdisc); + QDisc* qdisc_free(QDisc *qdisc); + int qdisc_new_static(QDiscKind kind, Network *network, const char *filename, unsigned section_line, QDisc **ret); + ++void qdisc_mark_recursive(QDisc *qdisc); + QDisc* qdisc_drop(QDisc *qdisc); ++void link_qdisc_drop_marked(Link *link); + + int link_find_qdisc(Link *link, uint32_t handle, const char *kind, QDisc **qdisc); + +diff --git a/src/network/tc/tclass.c b/src/network/tc/tclass.c +index 0a5fec0..fc74c48 100644 +--- a/src/network/tc/tclass.c ++++ b/src/network/tc/tclass.c +@@ -252,31 +252,56 @@ static void log_tclass_debug(TClass *tclass, Link *link, const char *str) { + strna(tclass_get_tca_kind(tclass))); + } + +-TClass* tclass_drop(TClass *tclass) { ++void tclass_mark_recursive(TClass *tclass) { + QDisc *qdisc; +- Link *link; + + assert(tclass); ++ assert(tclass->link); ++ ++ if (tclass_is_marked(tclass)) ++ return; + +- link = ASSERT_PTR(tclass->link); ++ tclass_mark(tclass); + +- /* Also drop all child qdiscs assigned to the class. */ +- SET_FOREACH(qdisc, link->qdiscs) { ++ /* Also mark all child qdiscs assigned to the class. */ ++ SET_FOREACH(qdisc, tclass->link->qdiscs) { + if (qdisc->parent != tclass->classid) + continue; + +- qdisc_drop(qdisc); ++ qdisc_mark_recursive(qdisc); + } ++} + +- tclass_enter_removed(tclass); ++void link_tclass_drop_marked(Link *link) { ++ TClass *tclass; + +- if (tclass->state == 0) { +- log_tclass_debug(tclass, link, "Forgetting"); +- tclass = tclass_free(tclass); +- } else +- log_tclass_debug(tclass, link, "Removed"); ++ assert(link); ++ ++ SET_FOREACH(tclass, link->tclasses) { ++ if (!tclass_is_marked(tclass)) ++ continue; ++ ++ tclass_unmark(tclass); ++ tclass_enter_removed(tclass); ++ ++ if (tclass->state == 0) { ++ log_tclass_debug(tclass, link, "Forgetting"); ++ tclass_free(tclass); ++ } else ++ log_tclass_debug(tclass, link, "Removed"); ++ } ++} ++ ++TClass* tclass_drop(TClass *tclass) { ++ assert(tclass); ++ ++ tclass_mark_recursive(tclass); ++ ++ /* link_tclass_drop_marked() may invalidate tclass, so run link_qdisc_drop_marked() first. 
*/ ++ link_qdisc_drop_marked(tclass->link); ++ link_tclass_drop_marked(tclass->link); + +- return tclass; ++ return NULL; + } + + static int tclass_handler(sd_netlink *rtnl, sd_netlink_message *m, Request *req, Link *link, TClass *tclass) { +diff --git a/src/network/tc/tclass.h b/src/network/tc/tclass.h +index e73e23c..85df57d 100644 +--- a/src/network/tc/tclass.h ++++ b/src/network/tc/tclass.h +@@ -58,7 +58,9 @@ DEFINE_NETWORK_CONFIG_STATE_FUNCTIONS(TClass, tclass); + TClass* tclass_free(TClass *tclass); + int tclass_new_static(TClassKind kind, Network *network, const char *filename, unsigned section_line, TClass **ret); + ++void tclass_mark_recursive(TClass *tclass); + TClass* tclass_drop(TClass *tclass); ++void link_tclass_drop_marked(Link *link); + + int link_find_tclass(Link *link, uint32_t classid, TClass **ret); + +-- +2.45.4 + diff --git a/SPECS/systemd/systemd.spec b/SPECS/systemd/systemd.spec index cde8c65f3ba..a0360202f18 100644 --- a/SPECS/systemd/systemd.spec +++ b/SPECS/systemd/systemd.spec @@ -50,7 +50,7 @@ Version: 255 # determine the build information from local checkout Version: %(tools/meson-vcs-tag.sh . error | sed -r 's/-([0-9])/.^\1/; s/-g/_g/') %endif -Release: 24%{?dist} +Release: 25%{?dist} # FIXME - hardcode to 'stable' for now as that's what we have in our blobstore %global stable 1 @@ -144,6 +144,7 @@ Patch0900: do-not-test-openssl-sm3.patch Patch0901: networkd-default-use-domains.patch Patch0902: CVE-2023-7008.patch Patch0903: CVE-2025-4598.patch +Patch0904: fix-stackoverflow-when-dropping-tclass-or-qdisc.patch %ifarch %{ix86} x86_64 aarch64 %global want_bootloader 1 @@ -1229,6 +1230,9 @@ rm -f %{name}.lang # %autochangelog. So we need to continue manually maintaining the # changelog here. %changelog +* Tue Nov 25 2025 Rohit Rawat - 255-25 +- Add fix-stackoverflow-when-dropping-tclass-or-qdisc.patch + * Tue Sep 16 2025 Akhila Guruju - 255-24 - Patch CVE-2025-4598 diff --git a/SPECS/telegraf/CVE-2025-10543.patch b/SPECS/telegraf/CVE-2025-10543.patch new file mode 100644 index 00000000000..6afbba5e2c5 --- /dev/null +++ b/SPECS/telegraf/CVE-2025-10543.patch @@ -0,0 +1,33 @@ +From dc05ddc627c4d247ffa5e401b3763b6465a3446c Mon Sep 17 00:00:00 2001 +From: AllSpark +Date: Mon, 8 Dec 2025 13:24:03 +0000 +Subject: [PATCH] Fields over 65535 bytes not encoded correctly + +When encoding strings (1.5.3 in spec), and some other variable-length fields, if the user passed in more than 65535 bytes the output would not be as expected (due to the 16-bit length header there is a hard limit). This change truncates output to 65535 bytes. + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: AI Backport of https://github.com/eclipse-paho/paho.mqtt.golang/commit/3162447fa892038e82256e918b681dc0c63a21ff.patch +--- + .../github.com/eclipse/paho.mqtt.golang/packets/packets.go | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go +index b2d7ed1b..0f876c79 100644 +--- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go ++++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go +@@ -330,6 +330,12 @@ func decodeBytes(b io.Reader) ([]byte, error) { + } + + func encodeBytes(field []byte) []byte { ++ // Attempting to encode more than 65,535 bytes would lead to an unexpected 16-bit length and extra data written ++ // (which would be parsed as later parts of the message). The safest option is to truncate. 
++ if len(field) > 65535 { ++ field = field[0:65535] ++ } ++ + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, field...) +-- +2.45.4 + diff --git a/SPECS/telegraf/telegraf.spec b/SPECS/telegraf/telegraf.spec index a2bce2cf342..d9a8e024de2 100644 --- a/SPECS/telegraf/telegraf.spec +++ b/SPECS/telegraf/telegraf.spec @@ -1,7 +1,7 @@ Summary: agent for collecting, processing, aggregating, and writing metrics. Name: telegraf Version: 1.31.0 -Release: 11%{?dist} +Release: 12%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -24,6 +24,7 @@ Patch9: CVE-2025-27144.patch Patch10: CVE-2025-30215.patch Patch11: CVE-2025-22872.patch Patch12: CVE-2025-47913.patch +Patch13: CVE-2025-10543.patch BuildRequires: golang BuildRequires: systemd-devel @@ -88,6 +89,9 @@ fi %dir %{_sysconfdir}/%{name}/telegraf.d %changelog +* Mon Dec 08 2025 Azure Linux Security Servicing Account - 1.31.0-12 +- Patch for CVE-2025-10543 + * Tue Nov 18 2025 Azure Linux Security Servicing Account - 1.31.0-11 - Patch for CVE-2025-47913 diff --git a/SPECS/tensorflow/CVE-2026-21441.patch b/SPECS/tensorflow/CVE-2026-21441.patch new file mode 100644 index 00000000000..ef1888e623e --- /dev/null +++ b/SPECS/tensorflow/CVE-2026-21441.patch @@ -0,0 +1,32 @@ +From 8864ac407bba8607950025e0979c4c69bc7abc7b Mon Sep 17 00:00:00 2001 +From: Illia Volochii +Date: Wed, 7 Jan 2026 18:07:30 +0200 +Subject: [PATCH] Merge commit from fork + +* Stop decoding response content during redirects needlessly + +Upstream Patch Reference: https://github.com/urllib3/urllib3/commit/8864ac407bba8607950025e0979c4c69bc7abc7b.patch +--- + pypi_urllib3/site-packages/urllib3/response.py | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/pypi_urllib3/site-packages/urllib3/response.py b/pypi_urllib3/site-packages/urllib3/response.py +index 47e75fd1..0c0e20ff 100644 +--- a/pypi_urllib3/site-packages/urllib3/response.py ++++ b/pypi_urllib3/site-packages/urllib3/response.py +@@ -635,7 +635,11 @@ class HTTPResponse(BaseHTTPResponse): + Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. + """ + try: +- self.read() ++ self.read( ++ # Do not spend resources decoding the content unless ++ # decoding has already been initiated. ++ decode_content=self._has_decoded_content, ++ ) + except (HTTPError, OSError, BaseSSLError, HTTPException): + pass + +-- +2.45.4 + diff --git a/SPECS/tensorflow/tensorflow.spec b/SPECS/tensorflow/tensorflow.spec index ff5a5649ef1..1dac0140536 100644 --- a/SPECS/tensorflow/tensorflow.spec +++ b/SPECS/tensorflow/tensorflow.spec @@ -1,7 +1,7 @@ Summary: TensorFlow is an open source machine learning framework for everyone. 
Name: tensorflow Version: 2.16.1 -Release: 9%{?dist} +Release: 10%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -14,8 +14,9 @@ Patch1: CVE-2024-6232.patch Patch2: CVE-2024-8088.patch Patch3: CVE-2024-3651.patch Patch4: CVE-2024-35195.patch -Patch5: CVE-2024-5569.patch -Patch6: CVE-2024-6923.patch +Patch5: CVE-2024-5569.patch +Patch6: CVE-2024-6923.patch +Patch7: CVE-2026-21441.patch BuildRequires: bazel BuildRequires: binutils BuildRequires: build-essential @@ -96,6 +97,7 @@ popd pushd /root/.cache/bazel/_bazel_$USER/$MD5_HASH/external/ patch -p1 < %{PATCH4} patch -p1 < %{PATCH5} +patch -p1 < %{PATCH7} pushd python_x86_64-unknown-linux-gnu/lib/python3.12/email/ patch -p1 < %{PATCH6} @@ -132,6 +134,9 @@ bazel --batch build //tensorflow/tools/pip_package:build_pip_package %{_bindir}/toco_from_protos %changelog +* Tue Jan 13 2026 Aditya Singh - 2.16.1-10 +- Patch for CVE-2026-21441 + * Tue Jan 28 2025 Kavya Sree Kaitepalli - 2.16.1-9 - Patch CVE-2024-5569 , CVE-2023-45803 and CVE-2024-6923 @@ -165,7 +170,7 @@ bazel --batch build //tensorflow/tools/pip_package:build_pip_package * Tue Aug 01 2023 Riken Maharjan - 2.11.0-4 - Remove .bazelversion file. -* Thu Jan 03 2022 Riken Maharjan - 2.11.0-3 +* Tue Jan 03 2023 Riken Maharjan - 2.11.0-3 - Add tf-nightly subpackage. * Thu Dec 08 2022 Riken Maharjan - 2.11.0-2 diff --git a/SPECS/tini/tini.spec b/SPECS/tini/tini.spec index 757c3f023fd..173bdf1706d 100644 --- a/SPECS/tini/tini.spec +++ b/SPECS/tini/tini.spec @@ -1,7 +1,7 @@ Summary: A tiny but valid init for containers Name: tini Version: 0.19.0 -Release: 26%{?dist} +Release: 28%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Azure Linux @@ -13,7 +13,7 @@ BuildRequires: diffutils BuildRequires: file BuildRequires: gcc BuildRequires: glibc-devel -BuildRequires: glibc-static >= 2.38-16%{?dist} +BuildRequires: glibc-static >= 2.38-18%{?dist} BuildRequires: kernel-headers BuildRequires: make BuildRequires: sed @@ -66,6 +66,12 @@ ln -s %{_bindir}/tini-static %{buildroot}%{_bindir}/docker-init %{_bindir}/docker-init %changelog +* Thu Jan 22 2026 Kanishk Bansal - 0.19.0-28 +- Bump to rebuild with updated glibc + +* Mon Jan 19 2026 Kanishk Bansal - 0.19.0-27 +- Bump to rebuild with updated glibc + * Mon Nov 10 2025 Andrew Phelps - 0.19.0-26 - Bump to rebuild with updated glibc diff --git a/SPECS/tzdata/tzdata.signatures.json b/SPECS/tzdata/tzdata.signatures.json index b75ead4481c..8101a98aafe 100644 --- a/SPECS/tzdata/tzdata.signatures.json +++ b/SPECS/tzdata/tzdata.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "tzdata2025b.tar.gz": "11810413345fc7805017e27ea9fa4885fd74cd61b2911711ad038f5d28d71474" + "tzdata2025c.tar.gz": "4aa79e4effee53fc4029ffe5f6ebe97937282ebcdf386d5d2da91ce84142f957" } } diff --git a/SPECS/tzdata/tzdata.spec b/SPECS/tzdata/tzdata.spec index 29a4a8a9520..4c4f5958bab 100644 --- a/SPECS/tzdata/tzdata.spec +++ b/SPECS/tzdata/tzdata.spec @@ -1,6 +1,6 @@ Summary: Time zone data Name: tzdata -Version: 2025b +Version: 2025c Release: 1%{?dist} License: Public Domain Vendor: Microsoft Corporation @@ -45,6 +45,9 @@ ln -svf %{_datarootdir}/zoneinfo/UTC %{buildroot}%{_sysconfdir}/localtime %{_datadir}/* %changelog +* Thu Dec 11 2025 CBL-Mariner Servicing Account - 2025c-1 +- Auto-upgrade to 2025c - upgrade to version 2025c + * Mon Nov 24 2025 CBL-Mariner Servicing Account - 2025b-1 - Auto-upgrade to 2025b - upgrade to version 2025b diff --git a/SPECS/util-linux/CVE-2025-14104.patch b/SPECS/util-linux/CVE-2025-14104.patch new file mode 
100644 index 00000000000..ddad2112bc2 --- /dev/null +++ b/SPECS/util-linux/CVE-2025-14104.patch @@ -0,0 +1,64 @@ +From f8dd915487ea2bc8ed683d4a332b5651c2c0e638 Mon Sep 17 00:00:00 2001 +From: Mohamed Maatallah +Date: Sat, 24 May 2025 03:16:09 +0100 +Subject: [PATCH 1/2] Update setpwnam.c + +--- + login-utils/setpwnam.c | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/login-utils/setpwnam.c b/login-utils/setpwnam.c +index 3e3c1ab..95e470b 100644 +--- a/login-utils/setpwnam.c ++++ b/login-utils/setpwnam.c +@@ -126,10 +126,12 @@ int setpwnam(struct passwd *pwd, const char *prefix) + } + + /* Is this the username we were sent to change? */ +- if (!found && linebuf[namelen] == ':' && +- !strncmp(linebuf, pwd->pw_name, namelen)) { +- /* Yes! So go forth in the name of the Lord and +- * change it! */ ++ if (!found && ++ strncmp(linebuf, pwd->pw_name, namelen) == 0 && ++ strlen(linebuf) > namelen && ++ linebuf[namelen] == ':') { ++ /* Yes! But this time let’s not walk past the end of the buffer ++ * in the name of the Lord, SUID, or anything else. */ + if (putpwent(pwd, fp) < 0) + goto fail; + found = 1; +-- +2.45.4 + + +From d3ff9b834096645a07af58a4f2e78e39ed2e87ce Mon Sep 17 00:00:00 2001 +From: Mohamed Maatallah +Date: Mon, 26 May 2025 10:06:02 +0100 +Subject: [PATCH 2/2] Update bufflen + +Update buflen + +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://github.com/util-linux/util-linux/pull/3586.patch +--- + login-utils/setpwnam.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/login-utils/setpwnam.c b/login-utils/setpwnam.c +index 95e470b..7778e98 100644 +--- a/login-utils/setpwnam.c ++++ b/login-utils/setpwnam.c +@@ -99,7 +99,8 @@ int setpwnam(struct passwd *pwd, const char *prefix) + goto fail; + + namelen = strlen(pwd->pw_name); +- ++ if (namelen > buflen) ++ buflen += namelen; + linebuf = malloc(buflen); + if (!linebuf) + goto fail; +-- +2.45.4 + diff --git a/SPECS/util-linux/util-linux.spec b/SPECS/util-linux/util-linux.spec index 349c9d6263a..95ae14022c9 100644 --- a/SPECS/util-linux/util-linux.spec +++ b/SPECS/util-linux/util-linux.spec @@ -1,8 +1,11 @@ +%global pypkg python3 +%global pyver 3 + %define majminorver %(echo %{version} | cut -d. -f1-2) Summary: Utilities for file systems, consoles, partitions, and messages Name: util-linux Version: 2.40.2 -Release: 1%{?dist} +Release: 3%{?dist} License: GPLv2+ Vendor: Microsoft Corporation Distribution: Azure Linux @@ -13,6 +16,7 @@ Source1: runuser Source2: runuser-l Source3: su Source4: su-l +Patch0: CVE-2025-14104.patch BuildRequires: audit-devel BuildRequires: libcap-ng-devel BuildRequires: libselinux-devel @@ -26,6 +30,7 @@ Provides: hardlink = 1.3-9 Provides: uuidd = %{version}-%{release} %if 0%{?with_check} BuildRequires: ncurses-term +BuildRequires: sudo %endif %description @@ -63,6 +68,17 @@ Group: Development/Libraries %description libs These are library files of util-linux. +%package -n %{pypkg}-libmount +Summary: Python bindings for the libmount library +Requires: %{name}-libs = %{version}-%{release} +License: LGPL-2.1-or-later + +%description -n %{pypkg}-libmount +The libmount-python package contains a module that permits applications +written in the Python programming language to use the interface +supplied by the libmount library to work with mount tables (fstab, +mountinfo, etc) and mount filesystems. + %prep %autosetup -p1 sed -i -e 's@etc/adjtime@var/lib/hwclock/adjtime@g' $(grep -rl '%{_sysconfdir}/adjtime' .) 
@@ -78,7 +94,7 @@ autoreconf -fi --disable-static \ --disable-use-tty-group \ --disable-liblastlog2 \ - --without-python \ + --with-python=%{pyver} \ --with-selinux \ --with-audit make %{?_smp_mflags} @@ -103,7 +119,7 @@ install -vm644 %{SOURCE4} %{buildroot}%{_sysconfdir}/pam.d/ %check chown -Rv nobody . -sudo -u nobody -s /bin/bash -c "PATH=$PATH make -k check" +sudo -u nobody -s /bin/bash -c "PATH=$PATH make -k check" || exit 1 rm -rf %{buildroot}/lib/systemd/system %post -p /sbin/ldconfig @@ -143,6 +159,10 @@ rm -rf %{buildroot}/lib/systemd/system /lib/libsmartcols.so.* /lib/libfdisk.so.* +%files -n %{pypkg}-libmount +%license Documentation/licenses/COPYING.LGPL-2.1-or-later +%{_libdir}/python*/site-packages/libmount/ + %files devel %defattr(-,root,root) %license Documentation/licenses/COPYING.LGPL-2.1-or-later libsmartcols/COPYING @@ -152,6 +172,13 @@ rm -rf %{buildroot}/lib/systemd/system %{_mandir}/man3/* %changelog +* Tue Dec 30 2025 Sandeep Karambelkar - 2.40.2-3 +- Compiled with python +- Added the package python3-libmount + +* Wed Dec 17 2025 Azure Linux Security Servicing Account - 2.40.2-2 +- Patch for CVE-2025-14104 + * Wed Sep 18 2024 Vince Perri - 2.40.2-1 - Upgrade to 2.40.2: - Added --disable-liblastlog2 to avoid building new liblastlog2 libraries diff --git a/SPECS/wget/CVE-2025-69194.patch b/SPECS/wget/CVE-2025-69194.patch new file mode 100644 index 00000000000..b53cc8dba5d --- /dev/null +++ b/SPECS/wget/CVE-2025-69194.patch @@ -0,0 +1,118 @@ +From 123ff418b984d84bdabb7fb7002719d4ebe19d72 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Tim=20R=C3=BChsen?= +Date: Fri, 26 Dec 2025 19:03:35 +0100 +Subject: [PATCH] Fix file overwrite issue with metalink +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Fix a remotely triggered arbitrary file write/overwrite abusing +metalink path traversal. + +Reported-by: Arkadi +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://gitlab.com/gnuwget/wget2/-/commit/684be4785280fbe6b8666080bbdd87e7e5299ac5.patch +--- + libwget/metalink.c | 23 +++++++++++++++++++++-- + src/wget.c | 25 ++++++++++++++++++------- + 2 files changed, 39 insertions(+), 9 deletions(-) + +diff --git a/libwget/metalink.c b/libwget/metalink.c +index ecac46c..8d35065 100644 +--- a/libwget/metalink.c ++++ b/libwget/metalink.c +@@ -167,6 +167,25 @@ static void add_mirror(metalink_context *ctx, const char *value) + ctx->priority = 999999; + } + ++static const char *sanitized_filename(const char *in) ++{ ++ // RFC 5854: ++ // The path MUST NOT contain any directory traversal ++ // directives or information. The path MUST be relative. The path ++ // MUST NOT begin with a "/", "./", or "../"; contain "/../"; or end ++ // with "/..". 
++ if (*in == '/' ++ || !strncmp(in, "./", 2) ++ || !strncmp(in, "../", 3) ++ || strstr(in, "/../") ++ || wget_match_tail(in, "/../")) ++ { ++ return NULL; ++ } ++ ++ return wget_strdup(in); ++} ++ + static void metalink_parse(void *context, int flags, const char *dir, const char *attr, const char *val, size_t len, size_t pos WGET_GCC_UNUSED) + { + metalink_context *ctx = context; +@@ -192,7 +211,7 @@ static void metalink_parse(void *context, int flags, const char *dir, const char + if (attr) { + if (*dir == 0) { // /metalink/file + if (!ctx->metalink->name && !wget_strcasecmp_ascii(attr, "name")) { +- ctx->metalink->name = wget_strdup(value); ++ ctx->metalink->name = sanitized_filename(value); + } + } else if (!wget_strcasecmp_ascii(dir, "/verification/pieces")) { + if (!wget_strcasecmp_ascii(attr, "type")) { +@@ -237,7 +256,7 @@ static void metalink_parse(void *context, int flags, const char *dir, const char + if (attr) { + if (*dir == 0) { // /metalink/file + if (!ctx->metalink->name && !wget_strcasecmp_ascii(attr, "name")) { +- ctx->metalink->name = wget_strdup(value); ++ ctx->metalink->name = sanitized_filename(value); + } + } else if (!wget_strcasecmp_ascii(dir, "/pieces")) { + if (!wget_strcasecmp_ascii(attr, "type")) { +diff --git a/src/wget.c b/src/wget.c +index b5e0f14..bbf9583 100644 +--- a/src/wget.c ++++ b/src/wget.c +@@ -2085,18 +2085,26 @@ static void process_response(wget_http_response *resp) + error_printf(_("File length %llu - remove job\n"), (unsigned long long)job->metalink->size); + } else if (!job->metalink->mirrors) { + error_printf(_("No download mirrors found - remove job\n")); ++ } else if (!job->metalink->name || !*job->metalink->name) { ++ error_printf(_("Metalink file name is invalid, missing or empty - remove job\n")); + } else { + // just loaded a metalink description, create parts and sort mirrors + + // start or resume downloading + if (!job_validate_file(job)) { +- // sort mirrors by priority to download from highest priority first +- wget_metalink_sort_mirrors(job->metalink); +- +- // wake up sleeping workers +- wget_thread_cond_signal(worker_cond); +- +- job->done = 0; // do not remove this job from queue yet ++ // Account for retries ++ if (config.tries && ++job->failures > config.tries) { ++ error_printf(_("Metalink validation failed: max tries reached - remove job\n")); ++ job->done = 1; ++ } else { ++ // sort mirrors by priority to download from highest priority first ++ wget_metalink_sort_mirrors(job->metalink); ++ ++ // wake up sleeping workers ++ wget_thread_cond_signal(worker_cond); ++ ++ job->done = 0; // do not remove this job from queue yet ++ } + } // else file already downloaded and checksum ok + } + return; +@@ -2981,6 +2989,9 @@ void metalink_parse_localfile(const char *fname) + } else if (!metalink->mirrors) { + error_printf(_("No download mirrors found\n")); + wget_metalink_free(&metalink); ++ } else if (!metalink->name || !*metalink->name) { ++ error_printf(_("Metalink file name is missing or empty\n")); ++ wget_metalink_free(&metalink); + } else { + // create parts and sort mirrors + JOB job = { .metalink = metalink }; +-- +2.45.4 + diff --git a/SPECS/wget/CVE-2025-69195.patch b/SPECS/wget/CVE-2025-69195.patch new file mode 100644 index 00000000000..5d27e7a12da --- /dev/null +++ b/SPECS/wget/CVE-2025-69195.patch @@ -0,0 +1,37 @@ +From 0b49beb33a0f870b3677a5a4cff772f4428e867d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Tim=20R=C3=BChsen?= +Date: Fri, 26 Dec 2025 18:27:24 +0100 +Subject: [PATCH] Fix remote buffer overflow in 
get_local_filename_real() + +In src/blacklist.c:get_local_filename_real(), the stack/heap selection for +fname_esc was inverted, so the code used `char tmp[1024]` exactly when the +required size is >1024, leading to stack corruption when +wget_restrict_file_name() writes into it. + +This was introduced by commit 3dc30f5f0c6f8feae97f866c537324f821ea05d. + +Reported-by: Arkadi +Signed-off-by: Azure Linux Security Servicing Account +Upstream-reference: https://gitlab.com/gnuwget/wget2/-/commit/fc7fcbc00e0a2c8606d44ab216195afb3f08cc98.patch +--- + src/blacklist.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/blacklist.c b/src/blacklist.c +index ca8d18e..71a74c3 100644 +--- a/src/blacklist.c ++++ b/src/blacklist.c +@@ -135,8 +135,8 @@ static char * get_local_filename_real(const wget_iri *iri) + char tmp[1024]; + + char *fname_esc = (sizeof(tmp) < buf.length * 3 + 1) +- ? tmp +- : wget_malloc(buf.length * 3 + 1); ++ ? wget_malloc(buf.length * 3 + 1) ++ : tmp; + + if (wget_restrict_file_name(fname, fname_esc, config.restrict_file_names) != fname) { + // escaping was really done, replace fname +-- +2.45.4 + diff --git a/SPECS/wget/wget.spec b/SPECS/wget/wget.spec index 4b4a5eb4015..b8a4ee57715 100644 --- a/SPECS/wget/wget.spec +++ b/SPECS/wget/wget.spec @@ -3,7 +3,7 @@ Summary: An advanced file and recursive website downloader Name: wget Version: 2.1.0 -Release: 6%{?dist} +Release: 7%{?dist} License: GPL-3.0-or-later AND LGPL-3.0-or-later AND GFDL-1.3-or-later URL: https://gitlab.com/gnuwget/wget2 Group: System Environment/NetworkingPrograms @@ -31,6 +31,8 @@ Patch0006: 0006-Disable-TCP-Fast-Open-by-default.patch Patch0007: fix-ssl-read-and-write-error-check.patch # https://github.com/rockdaboot/wget2/issues/344 Patch0008: set-debug_skip_body-for-OCSP-requests-in-openssl-tls-provider.patch +Patch9: CVE-2025-69194.patch +Patch10: CVE-2025-69195.patch BuildRequires: autoconf BuildRequires: automake @@ -163,6 +165,9 @@ echo ".so man1/%{name}.1" > %{buildroot}%{_mandir}/man1/wget.1 %{_mandir}/man3/libwget*.3* %changelog +* Mon Jan 12 2026 Azure Linux Security Servicing Account - 2.1.0-7 +- Patch for CVE-2025-69194, CVE-2025-69195 + * Mon Feb 24 2025 Sam Meluch - 2.1.0-6 - Add %check section from Fedora upstream. 
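The CVE-2025-69195 note above comes down to a buffer-selection idiom: take the small stack buffer only when the worst-case escaped length is guaranteed to fit, fall back to the heap otherwise, and free the allocation only when it was actually taken. As a reading aid, here is a minimal, self-contained C sketch of that corrected pattern, assuming hypothetical names (escape_filename, the demo input) that are not wget2's actual API.

```c
/*
 * Sketch of the stack/heap buffer selection fixed by CVE-2025-69195.
 * Hypothetical example, not wget2 code: escape_filename() and the demo
 * input below are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Percent-escape control characters, '%', and DEL; caller supplies a
 * buffer of at least strlen(in) * 3 + 1 bytes (worst case: "%XX" per byte). */
static char *escape_filename(const char *in, char *out)
{
	static const char hex[] = "0123456789ABCDEF";
	char *p = out;

	for (; *in; in++) {
		unsigned char c = (unsigned char)*in;
		if (c < 0x20 || c == '%' || c == 0x7F) {
			*p++ = '%';
			*p++ = hex[c >> 4];
			*p++ = hex[c & 0x0F];
		} else {
			*p++ = (char)c;
		}
	}
	*p = '\0';
	return out;
}

int main(void)
{
	const char *fname = "index.html?q=a%b";
	size_t need = strlen(fname) * 3 + 1;

	char tmp[1024];
	/* Correct selection: heap only when the stack buffer is too small. */
	char *fname_esc = (need > sizeof(tmp)) ? malloc(need) : tmp;
	if (!fname_esc)
		return 1;

	printf("escaped: %s\n", escape_filename(fname, fname_esc));

	if (fname_esc != tmp)
		free(fname_esc);
	return 0;
}
```

The point of keeping a stack path at all is that typical file names fit comfortably in 1024 bytes, so the common case avoids a heap allocation; the flaw described above was only that the two branches of the selection were swapped.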
diff --git a/SPECS/xpmem-hwe/xpmem-hwe.spec b/SPECS/xpmem-hwe/xpmem-hwe.spec index c9836ff827e..5f2ad042d01 100644 --- a/SPECS/xpmem-hwe/xpmem-hwe.spec +++ b/SPECS/xpmem-hwe/xpmem-hwe.spec @@ -3,7 +3,7 @@ %if 0%{azl} # hard code versions due to ADO bug:58993948 %global target_azl_build_kernel_version 6.12.57.1 -%global target_kernel_release 1 +%global target_kernel_release 2 %global target_kernel_version_full %{target_azl_build_kernel_version}-%{target_kernel_release}%{?dist} %global release_suffix _%{target_azl_build_kernel_version}.%{target_kernel_release} %else @@ -13,7 +13,7 @@ %global KVERSION %{target_kernel_version_full} %global K_SRC /lib/modules/%{target_kernel_version_full}/build -%{!?_mofed_full_version: %define _mofed_full_version 24.10-24%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-2%{release_suffix}%{?dist}} # %{!?KVERSION: %global KVERSION %(uname -r)} %{!?KVERSION: %global KVERSION %{target_kernel_version_full}} @@ -22,7 +22,7 @@ %{!?K_SRC: %global K_SRC /lib/modules/%{KVERSION}/build} # A separate variable _release is required because of the odd way the # script append_number_to_package_release.sh works: -%global _release 1.2410068 +%global _release 1.2507097 %bcond_without kernel_only @@ -43,14 +43,17 @@ Summary: Cross-partition memory Name: xpmem-hwe Version: 2.7.4 -Release: 24%{release_suffix}%{?dist} +Release: 26%{release_suffix}%{?dist} License: GPLv2 and LGPLv2.1 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux BuildRequires: automake autoconf URL: https://github.com/openucx/xpmem -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/xpmem-2.7.4.tar.gz#/xpmem-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/xpmem-%{version}.tar.gz # name gets a different value in subpackages %global kernel_suffix hwe @@ -101,7 +104,6 @@ EOF) # munge the release version here as well: Summary: XPMEM: kernel modules Group: System Environment/Libraries -ExclusiveArch: aarch64 Requires: mlnx-ofa_kernel Requires: mlnx-ofa_kernel-hwe-modules = %{_mofed_full_version} @@ -204,6 +206,14 @@ fi %endif %changelog +* Mon Jan 19 2026 Suresh Babu Chalamalasetty - 2.7.4-26_6.12.57.1.2 +- Bump to match kernel-hwe. + +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 2.7.4-25_6.12.57.1.1 +- Build with OFED 25.07.0.9.7.1. +- Enable build on x86_64 kernel hwe. +- Update source path + * Wed Nov 05 2025 Siddharth Chintamaneni - 2.7.4-24_6.12.57.1.1 - Bump to match kernel-hwe diff --git a/SPECS/xpmem-lib/xpmem-lib.spec b/SPECS/xpmem-lib/xpmem-lib.spec index ab962c9e8b2..bd5c2a57732 100644 --- a/SPECS/xpmem-lib/xpmem-lib.spec +++ b/SPECS/xpmem-lib/xpmem-lib.spec @@ -5,13 +5,15 @@ Summary: XPMEM: Cross-partition memory Name: xpmem-lib Version: 2.7 -Release: 1%{?dist} +Release: 2%{?dist} License: GPLv2 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/xpmem-lib-2.7.tar.gz#/%{name}-%{version}.tar.gz -ExclusiveArch: x86_64 +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. 
+# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz BuildRequires: automake BuildRequires: autoconf @@ -83,6 +85,10 @@ rm -rf ${RPM_BUILD_ROOT}/etc # /etc/.version , udev rules %{_libdir}/pkgconfig/cray-xpmem.pc %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 2.7-2 +- Build with MOFED 25.07-0.9.7.1. +- Update source path + * Tue Dec 17 2024 Binu Jose Philip - Initial Azure Linux import from NVIDIA (license: GPLv2) - License verified diff --git a/SPECS/xpmem/xpmem.spec b/SPECS/xpmem/xpmem.spec index 15d7f4e303c..ed3d70569f8 100644 --- a/SPECS/xpmem/xpmem.spec +++ b/SPECS/xpmem/xpmem.spec @@ -12,7 +12,7 @@ %global KVERSION %{target_kernel_version_full} %global K_SRC /lib/modules/%{target_kernel_version_full}/build -%{!?_mofed_full_version: %define _mofed_full_version 24.10-21%{release_suffix}%{?dist}} +%{!?_mofed_full_version: %define _mofed_full_version 25.07-1%{release_suffix}%{?dist}} # %{!?KVERSION: %global KVERSION %(uname -r)} %{!?KVERSION: %global KVERSION %{target_kernel_version_full}} @@ -21,7 +21,7 @@ %{!?K_SRC: %global K_SRC /lib/modules/%{KVERSION}/build} # A separate variable _release is required because of the odd way the # script append_number_to_package_release.sh works: -%global _release 1.2410068 +%global _release 1.2507097 %bcond_with kernel_only @@ -42,14 +42,17 @@ Summary: Cross-partition memory Name: xpmem Version: 2.7.4 -Release: 21%{release_suffix}%{?dist} +Release: 22%{release_suffix}%{?dist} License: GPLv2 and LGPLv2.1 Group: System Environment/Libraries Vendor: Microsoft Corporation Distribution: Azure Linux BuildRequires: automake autoconf URL: https://github.com/openucx/xpmem -Source0: https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/xpmem-2.7.4.tar.gz#/%{name}-%{version}.tar.gz +# DOCA OFED feature sources come from the following MLNX_OFED_SRC tgz. +# This archive contains the SRPMs for each feature and each SRPM includes the source tarball and the SPEC file. +# https://linux.mellanox.com/public/repo/doca/3.1.0/SOURCES/mlnx_ofed/MLNX_OFED_SRC-25.07-0.9.7.0.tgz +Source0: %{_distro_sources_url}/%{name}-%{version}.tar.gz # name gets a different value in subpackages %global _name %{name} @@ -271,6 +274,10 @@ fi %endif %changelog +* Tue Nov 04 2025 Suresh Babu Chalamalasetty - 2.7.4-22 +- Build with OFED 25.07.0.9.7.1. +- Update source path + * Fri Oct 10 2025 Pawel Winogrodzki - 2.7.4-21 - Adjusted package dependencies on user space components. - Align %%post* scripts with other kmod packages. 
diff --git a/cgmanifest.json b/cgmanifest.json index 4b91db445df..ae958f34d4b 100644 --- a/cgmanifest.json +++ b/cgmanifest.json @@ -905,8 +905,8 @@ "type": "other", "other": { "name": "azurelinux-image-tools", - "version": "1.0.0", - "downloadUrl": "https://github.com/microsoft/azure-linux-image-tools/archive/refs/tags/v1.0.0.tar.gz" + "version": "1.1.0", + "downloadUrl": "https://github.com/microsoft/azure-linux-image-tools/archive/refs/tags/v1.1.0.tar.gz" } } }, @@ -1107,8 +1107,8 @@ "type": "other", "other": { "name": "bind", - "version": "9.20.15", - "downloadUrl": "https://ftp.isc.org/isc/bind9/9.20.15/bind-9.20.15.tar.xz" + "version": "9.20.18", + "downloadUrl": "https://ftp.isc.org/isc/bind9/9.20.18/bind-9.20.18.tar.xz" } } }, @@ -1227,8 +1227,8 @@ "type": "other", "other": { "name": "booth", - "version": "1.0", - "downloadUrl": "https://github.com/ClusterLabs/booth/archive/5d837d2b5bf1c240a5f1c5efe4e8d79f55727cca/booth-5d837d2.tar.gz" + "version": "1.2", + "downloadUrl": "https://github.com/ClusterLabs/booth/releases/download/v1.2/booth-1.2.tar.gz" } } }, @@ -1797,8 +1797,8 @@ "type": "other", "other": { "name": "cloud-hypervisor", - "version": "41.0.139", - "downloadUrl": "https://github.com/microsoft/cloud-hypervisor/archive/refs/tags/msft/v41.0.139.tar.gz" + "version": "48.0.246", + "downloadUrl": "https://github.com/microsoft/cloud-hypervisor/archive/refs/tags/msft/v48.0.246.tar.gz" } } }, @@ -2057,8 +2057,8 @@ "type": "other", "other": { "name": "containerized-data-importer", - "version": "1.57.0", - "downloadUrl": "https://github.com/kubevirt/containerized-data-importer/archive/refs/tags/v1.57.0.tar.gz" + "version": "1.62.0", + "downloadUrl": "https://github.com/kubevirt/containerized-data-importer/archive/refs/tags/v1.62.0.tar.gz" } } }, @@ -2237,8 +2237,8 @@ "type": "other", "other": { "name": "crash", - "version": "8.0.4", - "downloadUrl": "https://github.com/crash-utility/crash/archive/8.0.4.tar.gz" + "version": "9.0.0", + "downloadUrl": "https://github.com/crash-utility/crash/archive/9.0.0.tar.gz" } } }, @@ -2297,8 +2297,8 @@ "type": "other", "other": { "name": "criu", - "version": "3.15", - "downloadUrl": "http://download.openvz.org/criu/criu-3.15.tar.bz2" + "version": "4.1.1", + "downloadUrl": "https://github.com/checkpoint-restore/criu/archive/v4.1.1/criu-4.1.1.tar.gz" } } }, @@ -2322,6 +2322,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "crun", + "version": "1.24", + "downloadUrl": "https://github.com/containers/crun/releases/download/1.24/crun-1.24.tar.gz" + } + } + }, { "component": { "type": "other", @@ -3163,6 +3173,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "ed25519-java", + "version": "0.3.0", + "downloadUrl": "https://github.com/str4d/ed25519-java/archive/v0.3.0/ed25519-java-0.3.0.tar.gz" + } + } + }, { "component": { "type": "other", @@ -4025,26 +4045,6 @@ } } }, - { - "component": { - "type": "other", - "other": { - "name": "fwctl", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/fwctl-24.10.tgz" - } - } - }, - { - "component": { - "type": "other", - "other": { - "name": "fwctl-hwe", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/fwctl-24.10.tgz" - } - } - }, { "component": { "type": "other", @@ -4520,8 +4520,8 @@ "type": "other", "other": { "name": "gnome-desktop-testing", - "version": "2018.1", - "downloadUrl": 
"https://gitlab.gnome.org/GNOME/gnome-desktop-testing/-/archive/v2018.1/gnome-desktop-testing-v2018.1.tar.gz" + "version": "2021.1", + "downloadUrl": "https://gitlab.gnome.org/GNOME/gnome-desktop-testing/-/archive/v2021.1/gnome-desktop-testing-v2021.1.tar.gz" } } }, @@ -4610,8 +4610,8 @@ "type": "other", "other": { "name": "gnupg2", - "version": "2.4.7", - "downloadUrl": "https://gnupg.org/ftp/gcrypt/gnupg/gnupg-2.4.7.tar.bz2" + "version": "2.4.9", + "downloadUrl": "https://gnupg.org/ftp/gcrypt/gnupg/gnupg-2.4.9.tar.bz2" } } }, @@ -4680,8 +4680,8 @@ "type": "other", "other": { "name": "golang", - "version": "1.25.5", - "downloadUrl": "https://github.com/microsoft/go/releases/download/v1.25.5-1/go1.25.5-20251202.2.src.tar.gz" + "version": "1.24.12", + "downloadUrl": "https://github.com/microsoft/go/releases/download/v1.24.12-1/go1.24.12-20260116.10.src.tar.gz" } } }, @@ -4690,8 +4690,8 @@ "type": "other", "other": { "name": "golang", - "version": "1.24.11", - "downloadUrl": "https://github.com/microsoft/go/releases/download/v1.24.11-1/go1.24.11-20251202.3.src.tar.gz" + "version": "1.25.6", + "downloadUrl": "https://github.com/microsoft/go/releases/download/v1.25.6-1/go1.25.6-20260116.7.src.tar.gz" } } }, @@ -5270,8 +5270,8 @@ "type": "other", "other": { "name": "hdf5", - "version": "1.14.4.3", - "downloadUrl": "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.14/hdf5-1.14.4/src/hdf5-1.14.4-3.tar.gz" + "version": "1.14.6", + "downloadUrl": "https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_6/downloads/hdf5-1.14.6.tar.gz" } } }, @@ -5340,8 +5340,8 @@ "type": "other", "other": { "name": "highlight", - "version": "3.54", - "downloadUrl": "http://www.andre-simon.de/zip/highlight-3.54.tar.bz2" + "version": "4.18", + "downloadUrl": "https://gitlab.com/saalen/highlight/-/archive/v4.18/highlight-v4.18.tar.bz2" } } }, @@ -5430,8 +5430,8 @@ "type": "other", "other": { "name": "httpd", - "version": "2.4.65", - "downloadUrl": "https://archive.apache.org/dist/httpd/httpd-2.4.65.tar.bz2" + "version": "2.4.66", + "downloadUrl": "https://archive.apache.org/dist/httpd/httpd-2.4.66.tar.bz2" } } }, @@ -6600,8 +6600,8 @@ "type": "other", "other": { "name": "hyperv-daemons", - "version": "6.6.117.1", - "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.117.1.tar.gz" + "version": "6.6.121.1", + "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.121.1.tar.gz" } } }, @@ -7060,8 +7060,8 @@ "type": "other", "other": { "name": "ibarr", - "version": "0.1.3", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/ibarr-0.1.3.tar.gz" + "version": "0.1.5", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/ibarr-0.1.5.tar.gz" } } }, @@ -7070,8 +7070,8 @@ "type": "other", "other": { "name": "ibsim", - "version": "0.12", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/ibsim-0.12.tar.gz" + "version": "0.12.1", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/ibsim-0.12.1.tar.gz" } } }, @@ -7120,8 +7120,8 @@ "type": "other", "other": { "name": "ibus-libzhuyin", - "version": "1.9.1", - "downloadUrl": "http://downloads.sourceforge.net/libzhuyin/ibus-libzhuyin/ibus-libzhuyin-1.9.1.tar.gz" + "version": "1.10.4", + "downloadUrl": "https://downloads.sourceforge.net/libzhuyin/ibus-libzhuyin/ibus-libzhuyin-1.10.4.tar.gz" } } }, @@ -7160,8 +7160,8 @@ "type": "other", "other": { "name": 
"ibus-table", - "version": "1.12.4", - "downloadUrl": "https://github.com/mike-fabian/ibus-table/archive/refs/tags/1.12.4.tar.gz" + "version": "1.17.16", + "downloadUrl": "https://github.com/mike-fabian/ibus-table/releases/download/1.17.16/ibus-table-1.17.16.tar.gz" } } }, @@ -7601,8 +7601,8 @@ "type": "other", "other": { "name": "iser", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/iser-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/iser-25.07.tgz" } } }, @@ -7611,8 +7611,8 @@ "type": "other", "other": { "name": "iser-hwe", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/iser-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/iser-25.07.tgz" } } }, @@ -7621,8 +7621,8 @@ "type": "other", "other": { "name": "isert", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/isert-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/isert-25.07.tgz" } } }, @@ -7631,8 +7631,8 @@ "type": "other", "other": { "name": "isert-hwe", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/isert-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/isert-25.07.tgz" } } }, @@ -7856,6 +7856,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "jbcrypt", + "version": "1.0.2", + "downloadUrl": "https://github.com/kruton/jbcrypt/archive/refs/tags/1.0.2.tar.gz" + } + } + }, { "component": { "type": "other", @@ -8101,8 +8111,8 @@ "type": "other", "other": { "name": "jtidy", - "version": "8.0", - "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/jtidy-r813.tar.bz2" + "version": "1.0.4", + "downloadUrl": "https://github.com/jtidy/jtidy/archive/refs/tags/jtidy-1.0.4.tar.gz" } } }, @@ -8251,8 +8261,8 @@ "type": "other", "other": { "name": "kernel", - "version": "6.6.117.1", - "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.117.1.tar.gz" + "version": "6.6.121.1", + "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.121.1.tar.gz" } } }, @@ -8261,8 +8271,8 @@ "type": "other", "other": { "name": "kernel-64k", - "version": "6.6.117.1", - "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.117.1.tar.gz" + "version": "6.6.121.1", + "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.121.1.tar.gz" } } }, @@ -8270,9 +8280,9 @@ "component": { "type": "other", "other": { - "name": "kernel-hwe", - "version": "6.12.57.1", - "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/hwe/6.12.57.1.tar.gz" + "name": "kernel-headers", + "version": "6.6.121.1", + "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.121.1.tar.gz" } } }, @@ -8280,9 +8290,9 @@ "component": { "type": "other", "other": { - "name": "kernel-headers", - "version": "6.6.117.1", - "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.117.1.tar.gz" + "name": "kernel-hwe", + "version": 
"6.12.57.1", + "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/hwe/6.12.57.1.tar.gz" } } }, @@ -8301,8 +8311,8 @@ "type": "other", "other": { "name": "kernel-ipe", - "version": "6.6.117.1", - "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.117.1.tar.gz" + "version": "6.6.121.1", + "downloadUrl": "https://github.com/microsoft/CBL-Mariner-Linux-Kernel/archive/rolling-lts/mariner-3/6.6.121.1.tar.gz" } } }, @@ -8452,7 +8462,7 @@ "other": { "name": "knem", "version": "1.1.4.90mlnx3", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/knem-1.1.4.90mlnx3.tar.gz" + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/knem-1.1.4.90mlnx3.tar.gz" } } }, @@ -8462,7 +8472,7 @@ "other": { "name": "knem-hwe", "version": "1.1.4.90mlnx3", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/knem-1.1.4.90mlnx3.tar.gz" + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/knem-1.1.4.90mlnx3.tar.gz" } } }, @@ -8561,8 +8571,8 @@ "type": "other", "other": { "name": "kubevirt", - "version": "1.5.3", - "downloadUrl": "https://github.com/kubevirt/kubevirt/archive/refs/tags/v1.5.3.tar.gz" + "version": "1.6.3", + "downloadUrl": "https://github.com/kubevirt/kubevirt/archive/refs/tags/v1.6.3.tar.gz" } } }, @@ -8641,8 +8651,8 @@ "type": "other", "other": { "name": "lasso", - "version": "2.8.0", - "downloadUrl": "http://dev.entrouvert.org/lasso/lasso-2.8.0.tar.gz" + "version": "2.9.0", + "downloadUrl": "https://git.entrouvert.org/entrouvert/lasso/archive/v2.9.0.tar.gz" } } }, @@ -10741,8 +10751,8 @@ "type": "other", "other": { "name": "libpcap", - "version": "1.10.5", - "downloadUrl": "https://github.com/the-tcpdump-group/libpcap/archive/libpcap-1.10.5.tar.gz" + "version": "1.10.6", + "downloadUrl": "https://github.com/the-tcpdump-group/libpcap/archive/libpcap-1.10.6.tar.gz" } } }, @@ -10781,8 +10791,8 @@ "type": "other", "other": { "name": "libpinyin", - "version": "2.9.92", - "downloadUrl": "https://downloads.sourceforge.net/libpinyin/libpinyin/libpinyin-2.9.92.tar.gz" + "version": "2.10.3", + "downloadUrl": "https://downloads.sourceforge.net/libpinyin/libpinyin/libpinyin-2.10.3.tar.gz" } } }, @@ -10821,8 +10831,8 @@ "type": "other", "other": { "name": "libpng", - "version": "1.6.52", - "downloadUrl": "https://downloads.sourceforge.net/libpng/libpng-1.6.52.tar.xz" + "version": "1.6.54", + "downloadUrl": "https://downloads.sourceforge.net/libpng/libpng-1.6.54.tar.xz" } } }, @@ -10991,8 +11001,8 @@ "type": "other", "other": { "name": "libreport", - "version": "2.13.1", - "downloadUrl": "https://github.com/abrt/libreport/archive/2.13.1/libreport-2.13.1.tar.gz" + "version": "2.17.15", + "downloadUrl": "https://github.com/abrt/libreport/archive/2.17.15/libreport-2.17.15.tar.gz" } } }, @@ -11222,7 +11232,7 @@ "other": { "name": "libsodium", "version": "1.0.19", - "downloadUrl": "https://download.libsodium.org/libsodium/releases/libsodium-1.0.19.tar.gz" + "downloadUrl": "https://github.com/jedisct1/libsodium/archive/refs/tags/1.0.19-FINAL.tar.gz" } } }, @@ -11736,6 +11746,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "libvma", + "version": "9.8.72", + "downloadUrl": "https://github.com/Mellanox/libvma/archive/9.8.72/libvma-9.8.72.tar.gz" + } + } + }, { "component": { "type": "other", @@ -12346,6 +12366,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": 
"llhttp", + "version": "9.3.0", + "downloadUrl": "https://github.com/nodejs/llhttp/archive/refs/tags/release/v9.3.0/llhttp-release-v9.3.0.tar.gz" + } + } + }, { "component": { "type": "other", @@ -12961,8 +12991,8 @@ "type": "other", "other": { "name": "mariadb", - "version": "10.11.11", - "downloadUrl": "https://downloads.mariadb.org/interstitial/mariadb-10.11.11/source/mariadb-10.11.11.tar.gz" + "version": "10.11.15", + "downloadUrl": "https://downloads.mariadb.org/interstitial/mariadb-10.11.15/source/mariadb-10.11.15.tar.gz" } } }, @@ -13271,8 +13301,8 @@ "type": "other", "other": { "name": "mft_kernel", - "version": "4.30.0", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/kernel-mft-4.30.0.tgz" + "version": "4.33.0", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/kernel-mft-4.33.0.tgz" } } }, @@ -13281,8 +13311,8 @@ "type": "other", "other": { "name": "mft_kernel-hwe", - "version": "4.30.0", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/kernel-mft-4.30.0.tgz" + "version": "4.33.0", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/kernel-mft-4.33.0.tgz" } } }, @@ -13372,8 +13402,8 @@ "type": "other", "other": { "name": "mlnx-ethtool", - "version": "6.9", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-ethtool-6.9.tar.gz" + "version": "6.14", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-ethtool-6.14.tar.gz" } } }, @@ -13382,8 +13412,8 @@ "type": "other", "other": { "name": "mlnx-iproute2", - "version": "6.10.0", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-iproute2-6.10.0.tar.gz" + "version": "6.15.0", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-iproute2-6.15.0.tar.gz" } } }, @@ -13392,8 +13422,8 @@ "type": "other", "other": { "name": "mlnx-nfsrdma", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-nfsrdma-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-nfsrdma-25.07.tgz" } } }, @@ -13402,8 +13432,8 @@ "type": "other", "other": { "name": "mlnx-nfsrdma-hwe", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-nfsrdma-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-nfsrdma-25.07.tgz" } } }, @@ -13412,8 +13442,8 @@ "type": "other", "other": { "name": "mlnx-ofa_kernel", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-ofa_kernel-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-ofa_kernel-25.07.tgz" } } }, @@ -13422,8 +13452,8 @@ "type": "other", "other": { "name": "mlnx-ofa_kernel-hwe", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-ofa_kernel-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-ofa_kernel-25.07.tgz" } } }, @@ -13432,8 +13462,8 @@ "type": "other", "other": { "name": "mlnx-tools", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/mlnx-tools-24.10.tar.gz" + "version": 
"25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/mlnx-tools-25.07.tar.gz" } } }, @@ -13852,8 +13882,8 @@ "type": "other", "other": { "name": "mysql", - "version": "8.0.44", - "downloadUrl": "https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-boost-8.0.44.tar.gz" + "version": "8.0.45", + "downloadUrl": "https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-boost-8.0.45.tar.gz" } } }, @@ -14192,8 +14222,8 @@ "type": "other", "other": { "name": "net-snmp", - "version": "5.9.4", - "downloadUrl": "https://sourceforge.net/projects/net-snmp/files/net-snmp/5.9.4/net-snmp-5.9.4.tar.gz" + "version": "5.9.5.2", + "downloadUrl": "https://sourceforge.net/projects/net-snmp/files/net-snmp/5.9.5.2/net-snmp-5.9.5.2.tar.gz" } } }, @@ -14352,8 +14382,8 @@ "type": "other", "other": { "name": "nginx", - "version": "1.25.4", - "downloadUrl": "https://nginx.org/download/nginx-1.25.4.tar.gz" + "version": "1.28.1", + "downloadUrl": "https://nginx.org/download/nginx-1.28.1.tar.gz" } } }, @@ -14437,6 +14467,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "nodejs24", + "version": "24.13.0", + "downloadUrl": "https://nodejs.org/download/release/v24.13.0/node-v24.13.0.tar.xz" + } + } + }, { "component": { "type": "other", @@ -15283,8 +15323,8 @@ "type": "other", "other": { "name": "ofed-docs", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/ofed-docs-24.10.tar.gz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/ofed-docs-25.07.tar.gz" } } }, @@ -15293,8 +15333,8 @@ "type": "other", "other": { "name": "ofed-scripts", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/ofed-scripts-24.10.tar.gz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/ofed-scripts-25.07.tar.gz" } } }, @@ -15723,8 +15763,8 @@ "type": "other", "other": { "name": "osgi-core", - "version": "7.0.0", - "downloadUrl": "https://repo1.maven.org/maven2/org/osgi/osgi.core/7.0.0/osgi.core-7.0.0-sources.jar" + "version": "8.0.0", + "downloadUrl": "https://repo1.maven.org/maven2/org/osgi/osgi.core/8.0.0/osgi.core-8.0.0-sources.jar" } } }, @@ -15793,8 +15833,8 @@ "type": "other", "other": { "name": "pacemaker", - "version": "2.1.5", - "downloadUrl": "https://github.com/ClusterLabs/pacemaker/archive/refs/tags/Pacemaker-2.1.5.tar.gz" + "version": "3.0.1", + "downloadUrl": "https://github.com/ClusterLabs/pacemaker/archive/refs/tags/Pacemaker-3.0.1.tar.gz" } } }, @@ -16053,8 +16093,8 @@ "type": "other", "other": { "name": "perftest", - "version": "24.10.0", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/perftest-24.10.0-0.65.g9093bae.tar.gz" + "version": "25.07.0", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/perftest-25.07.0-0.104.g0c03534.tar.gz" } } }, @@ -16113,8 +16153,8 @@ "type": "other", "other": { "name": "perl-Alien-pkgconf", - "version": "0.20", - "downloadUrl": "https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/Alien-pkgconf-0.20.tar.gz" + "version": "0.21", + "downloadUrl": "https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/Alien-pkgconf-0.21.tar.gz" } } }, @@ -17583,8 +17623,8 @@ "type": "other", "other": { "name": "perl-FFI-CheckLib", - "version": "0.26", - "downloadUrl": "https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/FFI-CheckLib-0.26.tar.gz" + "version": "0.31", + 
"downloadUrl": "https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/FFI-CheckLib-0.31.tar.gz" } } }, @@ -19303,8 +19343,8 @@ "type": "other", "other": { "name": "perl-Params-ValidationCompiler", - "version": "0.30", - "downloadUrl": "https://cpan.metacpan.org/modules/by-module/Params/Params-ValidationCompiler-0.30.tar.gz" + "version": "0.31", + "downloadUrl": "https://cpan.metacpan.org/modules/by-module/Params/Params-ValidationCompiler-0.31.tar.gz" } } }, @@ -19673,8 +19713,8 @@ "type": "other", "other": { "name": "perl-Return-MultiLevel", - "version": "0.05", - "downloadUrl": "https://cpan.metacpan.org/authors/id/M/MA/MAUKE/Return-MultiLevel-0.05.tar.gz" + "version": "0.08", + "downloadUrl": "https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/Return-MultiLevel-0.08.tar.gz" } } }, @@ -21153,8 +21193,8 @@ "type": "other", "other": { "name": "pgbouncer", - "version": "1.24.1", - "downloadUrl": "https://pgbouncer.github.io/downloads/files/1.24.1/pgbouncer-1.24.1.tar.gz" + "version": "1.25.1", + "downloadUrl": "https://pgbouncer.github.io/downloads/files/1.25.1/pgbouncer-1.25.1.tar.gz" } } }, @@ -21173,8 +21213,8 @@ "type": "other", "other": { "name": "php", - "version": "8.3.23", - "downloadUrl": "https://www.php.net/distributions/php-8.3.23.tar.xz" + "version": "8.3.29", + "downloadUrl": "https://www.php.net/distributions/php-8.3.29.tar.xz" } } }, @@ -22603,8 +22643,8 @@ "type": "other", "other": { "name": "python-debtcollector", - "version": "1.22.0", - "downloadUrl": "https://tarballs.openstack.org/debtcollector/debtcollector-1.22.0.tar.gz" + "version": "3.0.0", + "downloadUrl": "https://tarballs.openstack.org/debtcollector/debtcollector-3.0.0.tar.gz" } } }, @@ -22833,8 +22873,8 @@ "type": "other", "other": { "name": "python-filelock", - "version": "3.14.0", - "downloadUrl": "https://files.pythonhosted.org/packages/source/f/filelock/filelock-3.14.0.tar.gz" + "version": "3.20.3", + "downloadUrl": "https://files.pythonhosted.org/packages/source/f/filelock/filelock-3.20.3.tar.gz" } } }, @@ -22853,8 +22893,8 @@ "type": "other", "other": { "name": "python-flake8", - "version": "3.7.7", - "downloadUrl": "https://files.pythonhosted.org/packages/source/f/flake8/flake8-3.7.7.tar.gz" + "version": "7.3.0", + "downloadUrl": "https://github.com/PyCQA/flake8/archive/refs/tags/7.3.0.tar.gz" } } }, @@ -23913,8 +23953,8 @@ "type": "other", "other": { "name": "python-oslo-i18n", - "version": "5.1.0", - "downloadUrl": "https://tarballs.openstack.org/oslo.i18n/oslo.i18n-5.1.0.tar.gz" + "version": "6.7.1", + "downloadUrl": "https://files.pythonhosted.org/packages/source/o/oslo_i18n/oslo_i18n-6.7.1.tar.gz" } } }, @@ -24168,16 +24208,6 @@ } } }, - { - "component": { - "type": "other", - "other": { - "name": "python3-pycares", - "version": "4.5.0", - "downloadUrl": "https://github.com/saghul/pycares/archive/refs/tags/v4.5.0.tar.gz" - } - } - }, { "component": { "type": "other", @@ -24373,8 +24403,8 @@ "type": "other", "other": { "name": "python-pytest-flake8", - "version": "1.0.4", - "downloadUrl": "https://files.pythonhosted.org/packages/source/p/pytest-flake8/pytest-flake8-1.0.4.tar.gz" + "version": "1.3.0", + "downloadUrl": "https://github.com/coherent-oss/pytest-flake8/archive/refs/tags/v1.3.0.tar.gz" } } }, @@ -24768,6 +24798,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "python-scikit-build-core", + "version": "0.11.5", + "downloadUrl": "https://files.pythonhosted.org/packages/source/s/scikit_build_core/scikit_build_core-0.11.5.tar.gz" + } + } + }, { "component": { "type": 
"other", @@ -25228,6 +25268,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "python-toml", + "version": "0.10.2", + "downloadUrl": "https://files.pythonhosted.org/packages/source/t/toml/toml-0.10.2.tar.gz" + } + } + }, { "component": { "type": "other", @@ -25523,8 +25573,8 @@ "type": "other", "other": { "name": "python-wheel", - "version": "0.43.0", - "downloadUrl": "https://github.com/pypa/wheel/archive/0.43.0/wheel-0.43.0.tar.gz" + "version": "0.46.3", + "downloadUrl": "https://github.com/pypa/wheel/archive/0.46.3/wheel-0.46.3.tar.gz" } } }, @@ -25623,8 +25673,8 @@ "type": "other", "other": { "name": "python-zmq", - "version": "18.1.0", - "downloadUrl": "https://github.com/zeromq/pyzmq/archive/v18.1.0.tar.gz" + "version": "27.1.0", + "downloadUrl": "https://github.com/zeromq/pyzmq/archive/refs/tags/v27.1.0.tar.gz" } } }, @@ -25688,6 +25738,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "python3-pycares", + "version": "4.5.0", + "downloadUrl": "https://github.com/saghul/pycares/archive/refs/tags/v4.5.0.tar.gz" + } + } + }, { "component": { "type": "other", @@ -25832,9 +25892,9 @@ "component": { "type": "other", "other": { - "name": "qt5-qtconnectivity", - "version": "5.14.2", - "downloadUrl": "https://download.qt.io/official_releases/qt/5.14/5.14.2/submodules/qtconnectivity-everywhere-src-5.14.2.tar.xz" + "name": "qt6-qtconnectivity", + "version": "6.5.7", + "downloadUrl": "https://download.qt.io/official_releases/qt/6.5/6.5.7/src/submodules/qtconnectivity-everywhere-opensource-src-6.5.7.tar.xz" } } }, @@ -25842,9 +25902,9 @@ "component": { "type": "other", "other": { - "name": "qt5-qtsensors", - "version": "5.14.2", - "downloadUrl": "https://download.qt.io/official_releases/qt/5.14/5.14.2/submodules/qtsensors-everywhere-src-5.14.2.tar.xz" + "name": "qt6-qtsensors", + "version": "6.5.7", + "downloadUrl": "https://download.qt.io/official_releases/qt/6.5/6.5.7/src/submodules/qtsensors-everywhere-opensource-src-6.5.7.tar.xz" } } }, @@ -25852,9 +25912,9 @@ "component": { "type": "other", "other": { - "name": "qt5-qtserialport", - "version": "5.15.9", - "downloadUrl": "https://download.qt.io/official_releases/qt/5.15/5.15.9/submodules/qtserialport-everywhere-opensource-src-5.15.9.tar.xz" + "name": "qt6-qtserialport", + "version": "6.5.7", + "downloadUrl": "https://download.qt.io/official_releases/qt/6.5/6.5.7/src/submodules/qtserialport-everywhere-opensource-src-6.5.7.tar.xz" } } }, @@ -25904,7 +25964,7 @@ "other": { "name": "quagga", "version": "1.2.4", - "downloadUrl": "http://download.savannah.gnu.org/releases/quagga/quagga-1.2.4.tar.gz" + "downloadUrl": "https://github.com/quagga/quagga/releases/download/quagga-1.2.4/quagga-1.2.4.tar.gz" } } }, @@ -26043,8 +26103,8 @@ "type": "other", "other": { "name": "rdma-core", - "version": "55.0", - "downloadUrl": "https://github.com/linux-rdma/rdma-core/releases/download/v55.0/rdma-core-55.0.tar.gz" + "version": "59.0", + "downloadUrl": "https://github.com/linux-rdma/rdma-core/releases/download/v59.0/rdma-core-59.0.tar.gz" } } }, @@ -26194,8 +26254,8 @@ "type": "other", "other": { "name": "rhino", - "version": "1.7.7.1", - "downloadUrl": "https://github.com/mozilla/rhino/archive/Rhino1_7_7_1_RELEASE.tar.gz" + "version": "1.7.15.1", + "downloadUrl": "https://github.com/mozilla/rhino/archive/Rhino1_7_15_1_Release.tar.gz" } } }, @@ -26354,8 +26414,8 @@ "type": "other", "other": { "name": "rshim", - "version": "2.1.5", - "downloadUrl": 
"https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/rshim-2.1.5.tar.gz" + "version": "2.4.4", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/rshim-2.4.4.tar.gz" } } }, @@ -27474,8 +27534,8 @@ "type": "other", "other": { "name": "rust", - "version": "1.86.0", - "downloadUrl": "https://static.rust-lang.org/dist/rustc-1.86.0-src.tar.xz" + "version": "1.90.0", + "downloadUrl": "https://static.rust-lang.org/dist/rustc-1.90.0-src.tar.xz" } } }, @@ -27544,8 +27604,8 @@ "type": "other", "other": { "name": "satyr", - "version": "0.30", - "downloadUrl": "https://github.com/abrt/satyr/archive/0.30/satyr-0.30.tar.gz" + "version": "0.43", + "downloadUrl": "https://github.com/abrt/satyr/releases/download/0.43/satyr-0.43.tar.gz" } } }, @@ -28476,8 +28536,8 @@ "type": "other", "other": { "name": "srp", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/srp-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/srp-25.07.tgz" } } }, @@ -28486,8 +28546,8 @@ "type": "other", "other": { "name": "srp-hwe", - "version": "24.10", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/srp-24.10.tgz" + "version": "25.07", + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/srp-25.07.tgz" } } }, @@ -28626,8 +28686,8 @@ "type": "other", "other": { "name": "suitesparse", - "version": "5.4.0", - "downloadUrl": "https://faculty.cse.tamu.edu/davis/SuiteSparse/SuiteSparse-5.4.0.tar.gz" + "version": "7.11.0", + "downloadUrl": "https://github.com/DrTimothyAldenDavis/SuiteSparse/archive/v7.11.0/suitesparse-7.11.0.tar.gz" } } }, @@ -28696,8 +28756,8 @@ "type": "other", "other": { "name": "SymCrypt-OpenSSL", - "version": "1.9.3", - "downloadUrl": "https://github.com/microsoft/SymCrypt-OpenSSL/archive/v1.9.3.tar.gz" + "version": "1.9.4", + "downloadUrl": "https://github.com/microsoft/SymCrypt-OpenSSL/archive/v1.9.4.tar.gz" } } }, @@ -29276,8 +29336,8 @@ "type": "other", "other": { "name": "trilead-ssh2", - "version": "217.8", - "downloadUrl": "https://github.com/jenkinsci/trilead-ssh2/archive/trilead-ssh2-build217-jenkins-8.tar.gz" + "version": "217.371.vc1d30dc5a_b_32", + "downloadUrl": "https://github.com/jenkinsci/trilead-ssh2/archive/refs/tags/build-217-jenkins-371.vc1d30dc5a_b_32.tar.gz" } } }, @@ -29346,8 +29406,8 @@ "type": "other", "other": { "name": "tzdata", - "version": "2025b", - "downloadUrl": "https://data.iana.org/time-zones/releases/tzdata2025b.tar.gz" + "version": "2025c", + "downloadUrl": "https://data.iana.org/time-zones/releases/tzdata2025c.tar.gz" } } }, @@ -29406,8 +29466,8 @@ "type": "other", "other": { "name": "ucx", - "version": "1.18.0", - "downloadUrl": "https://github.com/openucx/ucx/releases/download/v1.18.0-rc3/ucx-1.18.0.tar.gz" + "version": "1.19.0", + "downloadUrl": "https://github.com/openucx/ucx/releases/download/v1.19.0/ucx-1.19.0.tar.gz" } } }, @@ -30247,8 +30307,8 @@ "type": "other", "other": { "name": "xbean", - "version": "4.18", - "downloadUrl": "http://repo2.maven.org/maven2/org/apache/xbean/xbean/4.18/xbean-4.18-source-release.zip" + "version": "4.24", + "downloadUrl": "https://repo1.maven.org/maven2/org/apache/xbean/xbean/4.24/xbean-4.24-source-release.zip" } } }, @@ -30717,8 +30777,8 @@ "type": "other", "other": { "name": "xmldb-api", - "version": "0.1", - "downloadUrl": 
"https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/xmldb-xapi-20041010-src.tar.bz2" + "version": "1.7.0", + "downloadUrl": "https://github.com/xmldb-org/xmldb-api/archive/xmldb-api-1.7.tar.gz" } } }, @@ -30919,7 +30979,7 @@ "other": { "name": "xpmem", "version": "2.7.4", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/xpmem-2.7.4.tar.gz" + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/xpmem-2.7.4.tar.gz" } } }, @@ -30929,7 +30989,7 @@ "other": { "name": "xpmem-hwe", "version": "2.7.4", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/xpmem-2.7.4.tar.gz" + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/xpmem-2.7.4.tar.gz" } } }, @@ -30939,7 +30999,7 @@ "other": { "name": "xpmem-lib", "version": "2.7", - "downloadUrl": "https://linux.mellanox.com/public/repo/mlnx_ofed/24.10-0.7.0.0/SRPMS/xpmem-lib-2.7.tar.gz" + "downloadUrl": "https://azurelinuxsrcstorage.blob.core.windows.net/sources/core/xpmem-lib-2.7.tar.gz" } } }, diff --git a/toolkit/Makefile b/toolkit/Makefile index 05da21b5119..f65636fe78d 100644 --- a/toolkit/Makefile +++ b/toolkit/Makefile @@ -164,8 +164,13 @@ else VALIDATE_TOOLCHAIN_GPG ?= y endif endif +##help:var:VALIDATE_IMAGE_GPG:{y,n}=Enable RPM GPG signature verification during package fetching and image builds. When enabled, all packages must be signed - this validates that packages have completed the signing process. Default is 'n' for local development with unsigned packages. Production builds use a multi-step workflow (build packages -> sign packages -> build images) and should set 'y' for the final image build step to enforce that all packages are signed. Keys used for validation can be modified with the IMAGE_GPG_VALIDATION_KEYS variable. +VALIDATE_IMAGE_GPG ?= n -TOOLCHAIN_GPG_VALIDATION_KEYS ?= $(wildcard $(PROJECT_ROOT)/SPECS/azurelinux-repos/MICROSOFT-*-GPG-KEY) $(wildcard $(toolkit_root)/repos/MICROSOFT-*-GPG-KEY) +# Default GPG keys for package GPG validation, used with VALIDATE_TOOLCHAIN_GPG and VALIDATE_IMAGE_GPG +default_gpg_keys := $(wildcard $(PROJECT_ROOT)/SPECS/azurelinux-repos/MICROSOFT-*-GPG-KEY) $(wildcard $(toolkit_root)/repos/MICROSOFT-*-GPG-KEY) +TOOLCHAIN_GPG_VALIDATION_KEYS ?= $(default_gpg_keys) +IMAGE_GPG_VALIDATION_KEYS ?= $(default_gpg_keys) ######## COMMON MAKEFILE UTILITIES ######## diff --git a/toolkit/docs/building/building.md b/toolkit/docs/building/building.md index 2469cba2889..951dd2f4e6b 100644 --- a/toolkit/docs/building/building.md +++ b/toolkit/docs/building/building.md @@ -865,6 +865,10 @@ Authentication mode for downloading source files for SRPM packing. Valid options | INCREMENTAL_TOOLCHAIN | n | Only build toolchain RPM packages if they are not already present | RUN_CHECK | n | Run the %check sections when compiling packages | ALLOW_TOOLCHAIN_REBUILDS | n | Do not treat rebuilds of toolchain packages during regular package build phase as errors. +| VALIDATE_TOOLCHAIN_GPG | (auto - based on toolchain build mode) | Enable RPM GPG signature verification for toolchain packages. Automatically set to `y` when downloading pre-built toolchain packages (`REBUILD_TOOLCHAIN=n`), and `n` when rebuilding locally or using `DAILY_BUILD_ID`. Packages are validated against keys specified in `TOOLCHAIN_GPG_VALIDATION_KEYS`. 
+| TOOLCHAIN_GPG_VALIDATION_KEYS | `$(PROJECT_ROOT)/SPECS/azurelinux-repos/MICROSOFT-*-GPG-KEY $(toolkit_root)/repos/MICROSOFT-*-GPG-KEY` | Space separated list of GPG key files used to validate RPM signatures when `VALIDATE_TOOLCHAIN_GPG=y`. +| VALIDATE_IMAGE_GPG | n | Enable RPM GPG signature verification during image builds. When set to `y`, all packages fetched for image generation must have valid GPG signatures. Packages are validated against keys specified in `IMAGE_GPG_VALIDATION_KEYS`. Production builds should enable this to ensure all packages have completed the signing process. +| IMAGE_GPG_VALIDATION_KEYS | `$(PROJECT_ROOT)/SPECS/azurelinux-repos/MICROSOFT-*-GPG-KEY $(toolkit_root)/repos/MICROSOFT-*-GPG-KEY` | Space separated list of GPG key files used to validate RPM signatures when `VALIDATE_IMAGE_GPG=y`. | PACKAGE_BUILD_RETRIES | 1 | Number of build retries for each package | CHECK_BUILD_RETRIES | 1 | Minimum number of check section retries for each package if RUN_CHECK=y and tests fail. | MAX_CASCADING_REBUILDS | | When a package rebuilds, how many additional layers of dependent packages will be forced to rebuild (leave unset for unbounded, i.e., all downstream packages will rebuild) diff --git a/toolkit/docs/security/intro.md b/toolkit/docs/security/intro.md index 12d19f97d0b..75e1e11777a 100644 --- a/toolkit/docs/security/intro.md +++ b/toolkit/docs/security/intro.md @@ -4,3 +4,5 @@ Below topics are dedicated to security-related details of the operating system. ## 1. [Security features](security-features.md) ## 2. [SSL CA certificates management](ca-certificates.md) +## 3. [Verifying ISO images](iso-image-verification.md) +## 4. [Production build recommendations](production-builds.md) diff --git a/toolkit/docs/security/production-builds.md b/toolkit/docs/security/production-builds.md new file mode 100644 index 00000000000..dab654e063c --- /dev/null +++ b/toolkit/docs/security/production-builds.md @@ -0,0 +1,30 @@ +# Production Build Recommendations + +When building images or ISOs for production deployment, enable explicit GPG signature verification to ensure all packages have completed the signing process: + +```bash +sudo make image VALIDATE_IMAGE_GPG=y CONFIG_FILE= +``` + +This validates that all RPM packages fetched during image generation have valid GPG signatures from the expected signing keys. + +## Build Workflow + +A typical production workflow separates package building from image generation: + +1. **Build packages** - Compile packages from source +2. **Sign packages** - Sign built packages with your GPG key +3. **Build images** - Generate images with `VALIDATE_IMAGE_GPG=y` to enforce all packages are signed + +This separation ensures unsigned or improperly signed packages cannot be included in final images. + +## Related Variables + +| Variable | Description | +|:---------|:------------| +| `VALIDATE_IMAGE_GPG` | Set to `y` to require valid GPG signatures on all image packages | +| `IMAGE_GPG_VALIDATION_KEYS` | GPG key files for signature validation | +| `VALIDATE_TOOLCHAIN_GPG` | Automatically enabled when downloading pre-built toolchain | +| `TOOLCHAIN_GPG_VALIDATION_KEYS` | GPG key files for toolchain validation | + +See [build variables](../building/building.md#all-build-variables) for full details. 
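To make the build/sign/image separation described in production-builds.md concrete, here is a hedged end-to-end sketch. The `build-packages` and `image` targets follow the toolkit documentation, but the output path, key file, signing command, and image config below are illustrative assumptions rather than prescribed values:

```bash
# Illustrative only: production pipelines typically use a dedicated signing
# service; rpmsign is shown here just to mark where signing happens.

# 1. Build packages from source.
sudo make build-packages

# 2. Sign the built RPMs with your release key (output path assumed).
rpmsign --addsign ../out/RPMS/*/*.rpm

# 3. Build the image, requiring valid GPG signatures on every fetched package.
sudo make image VALIDATE_IMAGE_GPG=y \
    IMAGE_GPG_VALIDATION_KEYS="/path/to/MY-RELEASE-GPG-KEY" \
    CONFIG_FILE=./imageconfigs/core-efi.json
```

Because `VALIDATE_IMAGE_GPG=y` is only set for the final step, unsigned packages can still be produced and tested locally, but they are rejected when the production image is assembled.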
diff --git a/toolkit/imageconfigs/marketplace-gen1-kernel-hwe.json b/toolkit/imageconfigs/marketplace-gen1-kernel-hwe.json new file mode 100644 index 00000000000..10b810058d6 --- /dev/null +++ b/toolkit/imageconfigs/marketplace-gen1-kernel-hwe.json @@ -0,0 +1,81 @@ +{ + "Disks": [ + { + "PartitionTableType": "gpt", + "MaxSize": 5000, + "Artifacts": [ + { + "Name": "cblmariner-gen1-kernel-hwe", + "Type": "vhd" + } + ], + "Partitions": [ + { + "ID": "reserved", + "Flags": [ + "grub" + ], + "Start": 1, + "End": 9, + "FsType": "fat32" + }, + { + "ID": "boot", + "Start": 9, + "End": 509, + "FsType": "ext4" + }, + { + "ID": "rootfs", + "Start": 509, + "End": 0, + "FsType": "ext4" + } + ] + } + ], + "SystemConfigs": [ + { + "Name": "Standard", + "BootType": "legacy", + "PartitionSettings": [ + { + "ID": "reserved", + "MountPoint": "" + }, + { + "ID": "boot", + "MountPoint": "/boot" + }, + { + "ID": "rootfs", + "MountPoint": "/" + } + ], + "PackageLists": [ + "packagelists/core-packages-image.json", + "packagelists/marketplace-tools-packages.json", + "packagelists/azurevm-packages.json", + "packagelists/hyperv-packages.json" + ], + "AdditionalFiles": { + "additionalconfigs/cloud-init.cfg": "/etc/cloud/cloud.cfg", + "additionalconfigs/chrony.cfg": "/etc/chrony.conf", + "additionalconfigs/wait-for-ptp-hyperv.conf": "/etc/systemd/system/chronyd.service.d/wait-for-ptp-hyperv.conf", + "additionalconfigs/51-ptp-hyperv.rules": "/etc/udev/rules.d/51-ptp-hyperv.rules" + }, + "PostInstallScripts": [ + { + "Path": "additionalconfigs/configure-systemd-networkd.sh" + } + ], + "KernelOptions": { + "default": "kernel-hwe" + }, + "KernelCommandLine": { + "ExtraCommandLine": "console=ttyS0" + }, + "Hostname": "azurelinux" + } + ] +} diff --git a/toolkit/imageconfigs/osguard-amd64.yaml b/toolkit/imageconfigs/osguard-amd64.yaml index 2d8460715d4..fb624145527 100644 --- a/toolkit/imageconfigs/osguard-amd64.yaml +++ b/toolkit/imageconfigs/osguard-amd64.yaml @@ -5,7 +5,7 @@ storage: bootType: efi disks: - partitionTableType: gpt - maxSize: 40G + maxSize: 30G partitions: - id: esp type: esp diff --git a/toolkit/imageconfigs/osguard-ci-amd64.yaml b/toolkit/imageconfigs/osguard-ci-amd64.yaml index 0b8a5ef8889..e241b80fa29 100644 --- a/toolkit/imageconfigs/osguard-ci-amd64.yaml +++ b/toolkit/imageconfigs/osguard-ci-amd64.yaml @@ -5,7 +5,7 @@ storage: bootType: efi disks: - partitionTableType: gpt - maxSize: 40G + maxSize: 30G partitions: - id: esp type: esp diff --git a/toolkit/imageconfigs/templates/osguard-base.yaml b/toolkit/imageconfigs/templates/osguard-base.yaml index da7a145df72..8238323f853 100644 --- a/toolkit/imageconfigs/templates/osguard-base.yaml +++ b/toolkit/imageconfigs/templates/osguard-base.yaml @@ -3,7 +3,7 @@ storage: disks: - partitionTableType: gpt - maxSize: 40G + maxSize: 30G partitions: - id: esp type: esp diff --git a/toolkit/resources/manifests/package/pkggen_core_aarch64.txt b/toolkit/resources/manifests/package/pkggen_core_aarch64.txt index 3fce2c3be63..1a2d32c8094 100644 --- a/toolkit/resources/manifests/package/pkggen_core_aarch64.txt +++ b/toolkit/resources/manifests/package/pkggen_core_aarch64.txt @@ -1,13 +1,13 @@ filesystem-1.1-21.azl3.aarch64.rpm -kernel-headers-6.6.117.1-1.azl3.noarch.rpm -glibc-2.38-16.azl3.aarch64.rpm -glibc-devel-2.38-16.azl3.aarch64.rpm -glibc-i18n-2.38-16.azl3.aarch64.rpm -glibc-iconv-2.38-16.azl3.aarch64.rpm -glibc-lang-2.38-16.azl3.aarch64.rpm -glibc-locales-all-2.38-16.azl3.aarch64.rpm -glibc-nscd-2.38-16.azl3.aarch64.rpm -glibc-tools-2.38-16.azl3.aarch64.rpm 
+kernel-headers-6.6.121.1-1.azl3.noarch.rpm +glibc-2.38-18.azl3.aarch64.rpm +glibc-devel-2.38-18.azl3.aarch64.rpm +glibc-i18n-2.38-18.azl3.aarch64.rpm +glibc-iconv-2.38-18.azl3.aarch64.rpm +glibc-lang-2.38-18.azl3.aarch64.rpm +glibc-locales-all-2.38-18.azl3.aarch64.rpm +glibc-nscd-2.38-18.azl3.aarch64.rpm +glibc-tools-2.38-18.azl3.aarch64.rpm zlib-1.3.1-1.azl3.aarch64.rpm zlib-devel-1.3.1-1.azl3.aarch64.rpm file-5.45-1.azl3.aarch64.rpm @@ -70,9 +70,9 @@ make-4.4.1-2.azl3.aarch64.rpm patch-2.7.6-9.azl3.aarch64.rpm libcap-ng-0.8.4-1.azl3.aarch64.rpm libcap-ng-devel-0.8.4-1.azl3.aarch64.rpm -util-linux-2.40.2-1.azl3.aarch64.rpm -util-linux-devel-2.40.2-1.azl3.aarch64.rpm -util-linux-libs-2.40.2-1.azl3.aarch64.rpm +util-linux-2.40.2-3.azl3.aarch64.rpm +util-linux-devel-2.40.2-3.azl3.aarch64.rpm +util-linux-libs-2.40.2-3.azl3.aarch64.rpm tar-1.35-2.azl3.aarch64.rpm xz-5.4.4-2.azl3.aarch64.rpm xz-devel-5.4.4-2.azl3.aarch64.rpm @@ -99,9 +99,9 @@ elfutils-libelf-0.189-6.azl3.aarch64.rpm elfutils-libelf-devel-0.189-6.azl3.aarch64.rpm elfutils-libelf-devel-static-0.189-6.azl3.aarch64.rpm elfutils-libelf-lang-0.189-6.azl3.aarch64.rpm -expat-2.6.4-2.azl3.aarch64.rpm -expat-devel-2.6.4-2.azl3.aarch64.rpm -expat-libs-2.6.4-2.azl3.aarch64.rpm +expat-2.6.4-4.azl3.aarch64.rpm +expat-devel-2.6.4-4.azl3.aarch64.rpm +expat-libs-2.6.4-4.azl3.aarch64.rpm libpipeline-1.5.7-1.azl3.aarch64.rpm libpipeline-devel-1.5.7-1.azl3.aarch64.rpm gdbm-1.23-1.azl3.aarch64.rpm @@ -170,16 +170,16 @@ gtk-doc-1.33.2-1.azl3.noarch.rpm autoconf-2.72-2.azl3.noarch.rpm automake-1.16.5-2.azl3.noarch.rpm ocaml-srpm-macros-9-4.azl3.noarch.rpm -openssl-3.3.5-1.azl3.aarch64.rpm -openssl-devel-3.3.5-1.azl3.aarch64.rpm -openssl-libs-3.3.5-1.azl3.aarch64.rpm -openssl-perl-3.3.5-1.azl3.aarch64.rpm -openssl-static-3.3.5-1.azl3.aarch64.rpm -libcap-2.69-10.azl3.aarch64.rpm -libcap-devel-2.69-10.azl3.aarch64.rpm +openssl-3.3.5-3.azl3.aarch64.rpm +openssl-devel-3.3.5-3.azl3.aarch64.rpm +openssl-libs-3.3.5-3.azl3.aarch64.rpm +openssl-perl-3.3.5-3.azl3.aarch64.rpm +openssl-static-3.3.5-3.azl3.aarch64.rpm +libcap-2.69-12.azl3.aarch64.rpm +libcap-devel-2.69-12.azl3.aarch64.rpm debugedit-5.0-2.azl3.aarch64.rpm -libarchive-3.7.7-3.azl3.aarch64.rpm -libarchive-devel-3.7.7-3.azl3.aarch64.rpm +libarchive-3.7.7-4.azl3.aarch64.rpm +libarchive-devel-3.7.7-4.azl3.aarch64.rpm rpm-4.18.2-1.azl3.aarch64.rpm rpm-build-4.18.2-1.azl3.aarch64.rpm rpm-build-libs-4.18.2-1.azl3.aarch64.rpm @@ -195,20 +195,20 @@ libsolv-0.7.28-3.azl3.aarch64.rpm libsolv-devel-0.7.28-3.azl3.aarch64.rpm libssh2-1.11.1-1.azl3.aarch64.rpm libssh2-devel-1.11.1-1.azl3.aarch64.rpm -krb5-1.21.3-2.azl3.aarch64.rpm -krb5-devel-1.21.3-2.azl3.aarch64.rpm +krb5-1.21.3-3.azl3.aarch64.rpm +krb5-devel-1.21.3-3.azl3.aarch64.rpm nghttp2-1.61.0-2.azl3.aarch64.rpm nghttp2-devel-1.61.0-2.azl3.aarch64.rpm -curl-8.11.1-4.azl3.aarch64.rpm -curl-devel-8.11.1-4.azl3.aarch64.rpm -curl-libs-8.11.1-4.azl3.aarch64.rpm +curl-8.11.1-5.azl3.aarch64.rpm +curl-devel-8.11.1-5.azl3.aarch64.rpm +curl-libs-8.11.1-5.azl3.aarch64.rpm createrepo_c-1.0.3-1.azl3.aarch64.rpm -libxml2-2.11.5-7.azl3.aarch64.rpm -libxml2-devel-2.11.5-7.azl3.aarch64.rpm +libxml2-2.11.5-8.azl3.aarch64.rpm +libxml2-devel-2.11.5-8.azl3.aarch64.rpm docbook-dtd-xml-4.5-11.azl3.noarch.rpm docbook-style-xsl-1.79.1-14.azl3.noarch.rpm libsepol-3.6-2.azl3.aarch64.rpm -glib-2.78.6-5.azl3.aarch64.rpm +glib-2.78.6-7.azl3.aarch64.rpm libltdl-2.4.7-1.azl3.aarch64.rpm libltdl-devel-2.4.7-1.azl3.aarch64.rpm lua-5.4.6-1.azl3.aarch64.rpm @@ -228,14 +228,14 @@ 
libksba-devel-1.6.4-1.azl3.aarch64.rpm libxslt-1.1.43-3.azl3.aarch64.rpm npth-1.6-4.azl3.aarch64.rpm pinentry-1.2.1-1.azl3.aarch64.rpm -gnupg2-2.4.7-1.azl3.aarch64.rpm -gnupg2-lang-2.4.7-1.azl3.aarch64.rpm +gnupg2-2.4.9-2.azl3.aarch64.rpm +gnupg2-lang-2.4.9-2.azl3.aarch64.rpm gpgme-1.23.2-2.azl3.aarch64.rpm azurelinux-repos-shared-3.0-5.azl3.noarch.rpm azurelinux-repos-3.0-5.azl3.noarch.rpm libffi-3.4.4-1.azl3.aarch64.rpm libffi-devel-3.4.4-1.azl3.aarch64.rpm -libtasn1-4.19.0-2.azl3.aarch64.rpm +libtasn1-4.19.0-3.azl3.aarch64.rpm p11-kit-0.25.0-1.azl3.aarch64.rpm p11-kit-trust-0.25.0-1.azl3.aarch64.rpm ca-certificates-shared-3.0.0-14.azl3.noarch.rpm @@ -244,9 +244,9 @@ ca-certificates-base-3.0.0-14.azl3.noarch.rpm ca-certificates-3.0.0-14.azl3.noarch.rpm dwz-0.14-2.azl3.aarch64.rpm unzip-6.0-22.azl3.aarch64.rpm -python3-3.12.9-6.azl3.aarch64.rpm -python3-devel-3.12.9-6.azl3.aarch64.rpm -python3-libs-3.12.9-6.azl3.aarch64.rpm +python3-3.12.9-8.azl3.aarch64.rpm +python3-devel-3.12.9-8.azl3.aarch64.rpm +python3-libs-3.12.9-8.azl3.aarch64.rpm python3-setuptools-69.0.3-5.azl3.noarch.rpm python3-pygments-2.7.4-2.azl3.noarch.rpm which-2.21-8.azl3.aarch64.rpm diff --git a/toolkit/resources/manifests/package/pkggen_core_x86_64.txt b/toolkit/resources/manifests/package/pkggen_core_x86_64.txt index b5d4a4c0266..b5a0aa00789 100644 --- a/toolkit/resources/manifests/package/pkggen_core_x86_64.txt +++ b/toolkit/resources/manifests/package/pkggen_core_x86_64.txt @@ -1,13 +1,13 @@ filesystem-1.1-21.azl3.x86_64.rpm -kernel-headers-6.6.117.1-1.azl3.noarch.rpm -glibc-2.38-16.azl3.x86_64.rpm -glibc-devel-2.38-16.azl3.x86_64.rpm -glibc-i18n-2.38-16.azl3.x86_64.rpm -glibc-iconv-2.38-16.azl3.x86_64.rpm -glibc-lang-2.38-16.azl3.x86_64.rpm -glibc-locales-all-2.38-16.azl3.x86_64.rpm -glibc-nscd-2.38-16.azl3.x86_64.rpm -glibc-tools-2.38-16.azl3.x86_64.rpm +kernel-headers-6.6.121.1-1.azl3.noarch.rpm +glibc-2.38-18.azl3.x86_64.rpm +glibc-devel-2.38-18.azl3.x86_64.rpm +glibc-i18n-2.38-18.azl3.x86_64.rpm +glibc-iconv-2.38-18.azl3.x86_64.rpm +glibc-lang-2.38-18.azl3.x86_64.rpm +glibc-locales-all-2.38-18.azl3.x86_64.rpm +glibc-nscd-2.38-18.azl3.x86_64.rpm +glibc-tools-2.38-18.azl3.x86_64.rpm zlib-1.3.1-1.azl3.x86_64.rpm zlib-devel-1.3.1-1.azl3.x86_64.rpm file-5.45-1.azl3.x86_64.rpm @@ -70,9 +70,9 @@ make-4.4.1-2.azl3.x86_64.rpm patch-2.7.6-9.azl3.x86_64.rpm libcap-ng-0.8.4-1.azl3.x86_64.rpm libcap-ng-devel-0.8.4-1.azl3.x86_64.rpm -util-linux-2.40.2-1.azl3.x86_64.rpm -util-linux-devel-2.40.2-1.azl3.x86_64.rpm -util-linux-libs-2.40.2-1.azl3.x86_64.rpm +util-linux-2.40.2-3.azl3.x86_64.rpm +util-linux-devel-2.40.2-3.azl3.x86_64.rpm +util-linux-libs-2.40.2-3.azl3.x86_64.rpm tar-1.35-2.azl3.x86_64.rpm xz-5.4.4-2.azl3.x86_64.rpm xz-devel-5.4.4-2.azl3.x86_64.rpm @@ -99,9 +99,9 @@ elfutils-libelf-0.189-6.azl3.x86_64.rpm elfutils-libelf-devel-0.189-6.azl3.x86_64.rpm elfutils-libelf-devel-static-0.189-6.azl3.x86_64.rpm elfutils-libelf-lang-0.189-6.azl3.x86_64.rpm -expat-2.6.4-2.azl3.x86_64.rpm -expat-devel-2.6.4-2.azl3.x86_64.rpm -expat-libs-2.6.4-2.azl3.x86_64.rpm +expat-2.6.4-4.azl3.x86_64.rpm +expat-devel-2.6.4-4.azl3.x86_64.rpm +expat-libs-2.6.4-4.azl3.x86_64.rpm libpipeline-1.5.7-1.azl3.x86_64.rpm libpipeline-devel-1.5.7-1.azl3.x86_64.rpm gdbm-1.23-1.azl3.x86_64.rpm @@ -170,16 +170,16 @@ gtk-doc-1.33.2-1.azl3.noarch.rpm autoconf-2.72-2.azl3.noarch.rpm automake-1.16.5-2.azl3.noarch.rpm ocaml-srpm-macros-9-4.azl3.noarch.rpm -openssl-3.3.5-1.azl3.x86_64.rpm -openssl-devel-3.3.5-1.azl3.x86_64.rpm 
-openssl-libs-3.3.5-1.azl3.x86_64.rpm -openssl-perl-3.3.5-1.azl3.x86_64.rpm -openssl-static-3.3.5-1.azl3.x86_64.rpm -libcap-2.69-10.azl3.x86_64.rpm -libcap-devel-2.69-10.azl3.x86_64.rpm +openssl-3.3.5-3.azl3.x86_64.rpm +openssl-devel-3.3.5-3.azl3.x86_64.rpm +openssl-libs-3.3.5-3.azl3.x86_64.rpm +openssl-perl-3.3.5-3.azl3.x86_64.rpm +openssl-static-3.3.5-3.azl3.x86_64.rpm +libcap-2.69-12.azl3.x86_64.rpm +libcap-devel-2.69-12.azl3.x86_64.rpm debugedit-5.0-2.azl3.x86_64.rpm -libarchive-3.7.7-3.azl3.x86_64.rpm -libarchive-devel-3.7.7-3.azl3.x86_64.rpm +libarchive-3.7.7-4.azl3.x86_64.rpm +libarchive-devel-3.7.7-4.azl3.x86_64.rpm rpm-4.18.2-1.azl3.x86_64.rpm rpm-build-4.18.2-1.azl3.x86_64.rpm rpm-build-libs-4.18.2-1.azl3.x86_64.rpm @@ -195,20 +195,20 @@ libsolv-0.7.28-3.azl3.x86_64.rpm libsolv-devel-0.7.28-3.azl3.x86_64.rpm libssh2-1.11.1-1.azl3.x86_64.rpm libssh2-devel-1.11.1-1.azl3.x86_64.rpm -krb5-1.21.3-2.azl3.x86_64.rpm -krb5-devel-1.21.3-2.azl3.x86_64.rpm +krb5-1.21.3-3.azl3.x86_64.rpm +krb5-devel-1.21.3-3.azl3.x86_64.rpm nghttp2-1.61.0-2.azl3.x86_64.rpm nghttp2-devel-1.61.0-2.azl3.x86_64.rpm -curl-8.11.1-4.azl3.x86_64.rpm -curl-devel-8.11.1-4.azl3.x86_64.rpm -curl-libs-8.11.1-4.azl3.x86_64.rpm +curl-8.11.1-5.azl3.x86_64.rpm +curl-devel-8.11.1-5.azl3.x86_64.rpm +curl-libs-8.11.1-5.azl3.x86_64.rpm createrepo_c-1.0.3-1.azl3.x86_64.rpm -libxml2-2.11.5-7.azl3.x86_64.rpm -libxml2-devel-2.11.5-7.azl3.x86_64.rpm +libxml2-2.11.5-8.azl3.x86_64.rpm +libxml2-devel-2.11.5-8.azl3.x86_64.rpm docbook-dtd-xml-4.5-11.azl3.noarch.rpm docbook-style-xsl-1.79.1-14.azl3.noarch.rpm libsepol-3.6-2.azl3.x86_64.rpm -glib-2.78.6-5.azl3.x86_64.rpm +glib-2.78.6-7.azl3.x86_64.rpm libltdl-2.4.7-1.azl3.x86_64.rpm libltdl-devel-2.4.7-1.azl3.x86_64.rpm lua-5.4.6-1.azl3.x86_64.rpm @@ -228,14 +228,14 @@ libksba-devel-1.6.4-1.azl3.x86_64.rpm libxslt-1.1.43-3.azl3.x86_64.rpm npth-1.6-4.azl3.x86_64.rpm pinentry-1.2.1-1.azl3.x86_64.rpm -gnupg2-2.4.7-1.azl3.x86_64.rpm -gnupg2-lang-2.4.7-1.azl3.x86_64.rpm +gnupg2-2.4.9-2.azl3.x86_64.rpm +gnupg2-lang-2.4.9-2.azl3.x86_64.rpm gpgme-1.23.2-2.azl3.x86_64.rpm azurelinux-repos-shared-3.0-5.azl3.noarch.rpm azurelinux-repos-3.0-5.azl3.noarch.rpm libffi-3.4.4-1.azl3.x86_64.rpm libffi-devel-3.4.4-1.azl3.x86_64.rpm -libtasn1-4.19.0-2.azl3.x86_64.rpm +libtasn1-4.19.0-3.azl3.x86_64.rpm p11-kit-0.25.0-1.azl3.x86_64.rpm p11-kit-trust-0.25.0-1.azl3.x86_64.rpm ca-certificates-shared-3.0.0-14.azl3.noarch.rpm @@ -244,9 +244,9 @@ ca-certificates-base-3.0.0-14.azl3.noarch.rpm ca-certificates-3.0.0-14.azl3.noarch.rpm dwz-0.14-2.azl3.x86_64.rpm unzip-6.0-22.azl3.x86_64.rpm -python3-3.12.9-6.azl3.x86_64.rpm -python3-devel-3.12.9-6.azl3.x86_64.rpm -python3-libs-3.12.9-6.azl3.x86_64.rpm +python3-3.12.9-8.azl3.x86_64.rpm +python3-devel-3.12.9-8.azl3.x86_64.rpm +python3-libs-3.12.9-8.azl3.x86_64.rpm python3-setuptools-69.0.3-5.azl3.noarch.rpm python3-pygments-2.7.4-2.azl3.noarch.rpm which-2.21-8.azl3.x86_64.rpm diff --git a/toolkit/resources/manifests/package/toolchain_aarch64.txt b/toolkit/resources/manifests/package/toolchain_aarch64.txt index 7ea1b70f3e5..5c571938f8c 100644 --- a/toolkit/resources/manifests/package/toolchain_aarch64.txt +++ b/toolkit/resources/manifests/package/toolchain_aarch64.txt @@ -51,8 +51,8 @@ check-debuginfo-0.15.2-1.azl3.aarch64.rpm chkconfig-1.25-1.azl3.aarch64.rpm chkconfig-debuginfo-1.25-1.azl3.aarch64.rpm chkconfig-lang-1.25-1.azl3.aarch64.rpm -cmake-3.30.3-10.azl3.aarch64.rpm -cmake-debuginfo-3.30.3-10.azl3.aarch64.rpm +cmake-3.30.3-11.azl3.aarch64.rpm 
+cmake-debuginfo-3.30.3-11.azl3.aarch64.rpm coreutils-9.4-6.azl3.aarch64.rpm coreutils-debuginfo-9.4-6.azl3.aarch64.rpm coreutils-lang-9.4-6.azl3.aarch64.rpm @@ -67,10 +67,10 @@ cracklib-lang-2.9.11-1.azl3.aarch64.rpm createrepo_c-1.0.3-1.azl3.aarch64.rpm createrepo_c-debuginfo-1.0.3-1.azl3.aarch64.rpm createrepo_c-devel-1.0.3-1.azl3.aarch64.rpm -curl-8.11.1-4.azl3.aarch64.rpm -curl-debuginfo-8.11.1-4.azl3.aarch64.rpm -curl-devel-8.11.1-4.azl3.aarch64.rpm -curl-libs-8.11.1-4.azl3.aarch64.rpm +curl-8.11.1-5.azl3.aarch64.rpm +curl-debuginfo-8.11.1-5.azl3.aarch64.rpm +curl-devel-8.11.1-5.azl3.aarch64.rpm +curl-libs-8.11.1-5.azl3.aarch64.rpm Cython-debuginfo-3.0.5-2.azl3.aarch64.rpm debugedit-5.0-2.azl3.aarch64.rpm debugedit-debuginfo-5.0-2.azl3.aarch64.rpm @@ -94,10 +94,10 @@ elfutils-libelf-0.189-6.azl3.aarch64.rpm elfutils-libelf-devel-0.189-6.azl3.aarch64.rpm elfutils-libelf-devel-static-0.189-6.azl3.aarch64.rpm elfutils-libelf-lang-0.189-6.azl3.aarch64.rpm -expat-2.6.4-2.azl3.aarch64.rpm -expat-debuginfo-2.6.4-2.azl3.aarch64.rpm -expat-devel-2.6.4-2.azl3.aarch64.rpm -expat-libs-2.6.4-2.azl3.aarch64.rpm +expat-2.6.4-4.azl3.aarch64.rpm +expat-debuginfo-2.6.4-4.azl3.aarch64.rpm +expat-devel-2.6.4-4.azl3.aarch64.rpm +expat-libs-2.6.4-4.azl3.aarch64.rpm file-5.45-1.azl3.aarch64.rpm file-debuginfo-5.45-1.azl3.aarch64.rpm file-devel-5.45-1.azl3.aarch64.rpm @@ -122,27 +122,27 @@ gdbm-lang-1.23-1.azl3.aarch64.rpm gettext-0.22-1.azl3.aarch64.rpm gettext-debuginfo-0.22-1.azl3.aarch64.rpm gfortran-13.2.0-7.azl3.aarch64.rpm -glib-2.78.6-5.azl3.aarch64.rpm -glib-debuginfo-2.78.6-5.azl3.aarch64.rpm -glib-devel-2.78.6-5.azl3.aarch64.rpm -glib-doc-2.78.6-5.azl3.noarch.rpm -glib-schemas-2.78.6-5.azl3.aarch64.rpm -glibc-2.38-16.azl3.aarch64.rpm -glibc-debuginfo-2.38-16.azl3.aarch64.rpm -glibc-devel-2.38-16.azl3.aarch64.rpm -glibc-i18n-2.38-16.azl3.aarch64.rpm -glibc-iconv-2.38-16.azl3.aarch64.rpm -glibc-lang-2.38-16.azl3.aarch64.rpm -glibc-locales-all-2.38-16.azl3.aarch64.rpm -glibc-nscd-2.38-16.azl3.aarch64.rpm -glibc-static-2.38-16.azl3.aarch64.rpm -glibc-tools-2.38-16.azl3.aarch64.rpm +glib-2.78.6-7.azl3.aarch64.rpm +glib-debuginfo-2.78.6-7.azl3.aarch64.rpm +glib-devel-2.78.6-7.azl3.aarch64.rpm +glib-doc-2.78.6-7.azl3.noarch.rpm +glib-schemas-2.78.6-7.azl3.aarch64.rpm +glibc-2.38-18.azl3.aarch64.rpm +glibc-debuginfo-2.38-18.azl3.aarch64.rpm +glibc-devel-2.38-18.azl3.aarch64.rpm +glibc-i18n-2.38-18.azl3.aarch64.rpm +glibc-iconv-2.38-18.azl3.aarch64.rpm +glibc-lang-2.38-18.azl3.aarch64.rpm +glibc-locales-all-2.38-18.azl3.aarch64.rpm +glibc-nscd-2.38-18.azl3.aarch64.rpm +glibc-static-2.38-18.azl3.aarch64.rpm +glibc-tools-2.38-18.azl3.aarch64.rpm gmp-6.3.0-1.azl3.aarch64.rpm gmp-debuginfo-6.3.0-1.azl3.aarch64.rpm gmp-devel-6.3.0-1.azl3.aarch64.rpm -gnupg2-2.4.7-1.azl3.aarch64.rpm -gnupg2-debuginfo-2.4.7-1.azl3.aarch64.rpm -gnupg2-lang-2.4.7-1.azl3.aarch64.rpm +gnupg2-2.4.9-2.azl3.aarch64.rpm +gnupg2-debuginfo-2.4.9-2.azl3.aarch64.rpm +gnupg2-lang-2.4.9-2.azl3.aarch64.rpm gperf-3.1-5.azl3.aarch64.rpm gperf-debuginfo-3.1-5.azl3.aarch64.rpm gpgme-1.23.2-2.azl3.aarch64.rpm @@ -158,28 +158,28 @@ intltool-0.51.0-7.azl3.noarch.rpm itstool-2.0.7-1.azl3.noarch.rpm kbd-2.2.0-2.azl3.aarch64.rpm kbd-debuginfo-2.2.0-2.azl3.aarch64.rpm -kernel-headers-6.6.117.1-1.azl3.noarch.rpm +kernel-headers-6.6.121.1-1.azl3.noarch.rpm kmod-30-1.azl3.aarch64.rpm kmod-debuginfo-30-1.azl3.aarch64.rpm kmod-devel-30-1.azl3.aarch64.rpm -krb5-1.21.3-2.azl3.aarch64.rpm -krb5-debuginfo-1.21.3-2.azl3.aarch64.rpm 
-krb5-devel-1.21.3-2.azl3.aarch64.rpm -krb5-lang-1.21.3-2.azl3.aarch64.rpm +krb5-1.21.3-3.azl3.aarch64.rpm +krb5-debuginfo-1.21.3-3.azl3.aarch64.rpm +krb5-devel-1.21.3-3.azl3.aarch64.rpm +krb5-lang-1.21.3-3.azl3.aarch64.rpm libacl-2.3.1-2.azl3.aarch64.rpm libacl-devel-2.3.1-2.azl3.aarch64.rpm -libarchive-3.7.7-3.azl3.aarch64.rpm -libarchive-debuginfo-3.7.7-3.azl3.aarch64.rpm -libarchive-devel-3.7.7-3.azl3.aarch64.rpm +libarchive-3.7.7-4.azl3.aarch64.rpm +libarchive-debuginfo-3.7.7-4.azl3.aarch64.rpm +libarchive-devel-3.7.7-4.azl3.aarch64.rpm libassuan-2.5.6-1.azl3.aarch64.rpm libassuan-debuginfo-2.5.6-1.azl3.aarch64.rpm libassuan-devel-2.5.6-1.azl3.aarch64.rpm libattr-2.5.2-1.azl3.aarch64.rpm libattr-devel-2.5.2-1.azl3.aarch64.rpm libbacktrace-static-13.2.0-7.azl3.aarch64.rpm -libcap-2.69-10.azl3.aarch64.rpm -libcap-debuginfo-2.69-10.azl3.aarch64.rpm -libcap-devel-2.69-10.azl3.aarch64.rpm +libcap-2.69-12.azl3.aarch64.rpm +libcap-debuginfo-2.69-12.azl3.aarch64.rpm +libcap-devel-2.69-12.azl3.aarch64.rpm libcap-ng-0.8.4-1.azl3.aarch64.rpm libcap-ng-debuginfo-0.8.4-1.azl3.aarch64.rpm libcap-ng-devel-0.8.4-1.azl3.aarch64.rpm @@ -234,17 +234,17 @@ libssh2-debuginfo-1.11.1-1.azl3.aarch64.rpm libssh2-devel-1.11.1-1.azl3.aarch64.rpm libstdc++-13.2.0-7.azl3.aarch64.rpm libstdc++-devel-13.2.0-7.azl3.aarch64.rpm -libtasn1-4.19.0-2.azl3.aarch64.rpm -libtasn1-debuginfo-4.19.0-2.azl3.aarch64.rpm -libtasn1-devel-4.19.0-2.azl3.aarch64.rpm +libtasn1-4.19.0-3.azl3.aarch64.rpm +libtasn1-debuginfo-4.19.0-3.azl3.aarch64.rpm +libtasn1-devel-4.19.0-3.azl3.aarch64.rpm libtool-2.4.7-1.azl3.aarch64.rpm libtool-debuginfo-2.4.7-1.azl3.aarch64.rpm libxcrypt-4.4.36-2.azl3.aarch64.rpm libxcrypt-debuginfo-4.4.36-2.azl3.aarch64.rpm libxcrypt-devel-4.4.36-2.azl3.aarch64.rpm -libxml2-2.11.5-7.azl3.aarch64.rpm -libxml2-debuginfo-2.11.5-7.azl3.aarch64.rpm -libxml2-devel-2.11.5-7.azl3.aarch64.rpm +libxml2-2.11.5-8.azl3.aarch64.rpm +libxml2-debuginfo-2.11.5-8.azl3.aarch64.rpm +libxml2-devel-2.11.5-8.azl3.aarch64.rpm libxslt-1.1.43-3.azl3.aarch64.rpm libxslt-debuginfo-1.1.43-3.azl3.aarch64.rpm libxslt-devel-1.1.43-3.azl3.aarch64.rpm @@ -287,12 +287,12 @@ npth-debuginfo-1.6-4.azl3.aarch64.rpm npth-devel-1.6-4.azl3.aarch64.rpm ntsysv-1.25-1.azl3.aarch64.rpm ocaml-srpm-macros-9-4.azl3.noarch.rpm -openssl-3.3.5-1.azl3.aarch64.rpm -openssl-debuginfo-3.3.5-1.azl3.aarch64.rpm -openssl-devel-3.3.5-1.azl3.aarch64.rpm -openssl-libs-3.3.5-1.azl3.aarch64.rpm -openssl-perl-3.3.5-1.azl3.aarch64.rpm -openssl-static-3.3.5-1.azl3.aarch64.rpm +openssl-3.3.5-3.azl3.aarch64.rpm +openssl-debuginfo-3.3.5-3.azl3.aarch64.rpm +openssl-devel-3.3.5-3.azl3.aarch64.rpm +openssl-libs-3.3.5-3.azl3.aarch64.rpm +openssl-perl-3.3.5-3.azl3.aarch64.rpm +openssl-static-3.3.5-3.azl3.aarch64.rpm p11-kit-0.25.0-1.azl3.aarch64.rpm p11-kit-debuginfo-0.25.0-1.azl3.aarch64.rpm p11-kit-devel-0.25.0-1.azl3.aarch64.rpm @@ -530,20 +530,21 @@ procps-ng-lang-4.0.4-1.azl3.aarch64.rpm pyproject-rpm-macros-1.12.0-2.azl3.noarch.rpm pyproject-srpm-macros-1.12.0-2.azl3.noarch.rpm python-markupsafe-debuginfo-2.1.3-1.azl3.aarch64.rpm -python-wheel-wheel-0.43.0-1.azl3.noarch.rpm -python3-3.12.9-6.azl3.aarch64.rpm +python-wheel-wheel-0.46.3-1.azl3.noarch.rpm +python3-3.12.9-8.azl3.aarch64.rpm python3-audit-3.1.2-1.azl3.aarch64.rpm python3-cracklib-2.9.11-1.azl3.aarch64.rpm -python3-curses-3.12.9-6.azl3.aarch64.rpm +python3-curses-3.12.9-8.azl3.aarch64.rpm python3-Cython-3.0.5-2.azl3.aarch64.rpm -python3-debuginfo-3.12.9-6.azl3.aarch64.rpm -python3-devel-3.12.9-6.azl3.aarch64.rpm 
+python3-debuginfo-3.12.9-8.azl3.aarch64.rpm +python3-devel-3.12.9-8.azl3.aarch64.rpm python3-flit-core-3.9.0-1.azl3.noarch.rpm python3-gpg-1.23.2-2.azl3.aarch64.rpm python3-jinja2-3.1.2-3.azl3.noarch.rpm python3-libcap-ng-0.8.4-1.azl3.aarch64.rpm -python3-libs-3.12.9-6.azl3.aarch64.rpm -python3-libxml2-2.11.5-7.azl3.aarch64.rpm +python3-libmount-2.40.2-3.azl3.aarch64.rpm +python3-libs-3.12.9-8.azl3.aarch64.rpm +python3-libxml2-2.11.5-8.azl3.aarch64.rpm python3-lxml-4.9.3-1.azl3.aarch64.rpm python3-magic-5.45-1.azl3.noarch.rpm python3-markupsafe-2.1.3-1.azl3.aarch64.rpm @@ -554,9 +555,9 @@ python3-pygments-2.7.4-2.azl3.noarch.rpm python3-rpm-4.18.2-1.azl3.aarch64.rpm python3-rpm-generators-14-11.azl3.noarch.rpm python3-setuptools-69.0.3-5.azl3.noarch.rpm -python3-test-3.12.9-6.azl3.aarch64.rpm -python3-tools-3.12.9-6.azl3.aarch64.rpm -python3-wheel-0.43.0-1.azl3.noarch.rpm +python3-test-3.12.9-8.azl3.aarch64.rpm +python3-tools-3.12.9-8.azl3.aarch64.rpm +python3-wheel-0.46.3-1.azl3.noarch.rpm readline-8.2-2.azl3.aarch64.rpm readline-debuginfo-8.2-2.azl3.aarch64.rpm readline-devel-8.2-2.azl3.aarch64.rpm @@ -598,11 +599,11 @@ texinfo-7.0.3-1.azl3.aarch64.rpm texinfo-debuginfo-7.0.3-1.azl3.aarch64.rpm unzip-6.0-22.azl3.aarch64.rpm unzip-debuginfo-6.0-22.azl3.aarch64.rpm -util-linux-2.40.2-1.azl3.aarch64.rpm -util-linux-debuginfo-2.40.2-1.azl3.aarch64.rpm -util-linux-devel-2.40.2-1.azl3.aarch64.rpm -util-linux-lang-2.40.2-1.azl3.aarch64.rpm -util-linux-libs-2.40.2-1.azl3.aarch64.rpm +util-linux-2.40.2-3.azl3.aarch64.rpm +util-linux-debuginfo-2.40.2-3.azl3.aarch64.rpm +util-linux-devel-2.40.2-3.azl3.aarch64.rpm +util-linux-lang-2.40.2-3.azl3.aarch64.rpm +util-linux-libs-2.40.2-3.azl3.aarch64.rpm which-2.21-8.azl3.aarch64.rpm which-debuginfo-2.21-8.azl3.aarch64.rpm xz-5.4.4-2.azl3.aarch64.rpm diff --git a/toolkit/resources/manifests/package/toolchain_x86_64.txt b/toolkit/resources/manifests/package/toolchain_x86_64.txt index a529d95a088..0861a676436 100644 --- a/toolkit/resources/manifests/package/toolchain_x86_64.txt +++ b/toolkit/resources/manifests/package/toolchain_x86_64.txt @@ -54,8 +54,8 @@ check-debuginfo-0.15.2-1.azl3.x86_64.rpm chkconfig-1.25-1.azl3.x86_64.rpm chkconfig-debuginfo-1.25-1.azl3.x86_64.rpm chkconfig-lang-1.25-1.azl3.x86_64.rpm -cmake-3.30.3-10.azl3.x86_64.rpm -cmake-debuginfo-3.30.3-10.azl3.x86_64.rpm +cmake-3.30.3-11.azl3.x86_64.rpm +cmake-debuginfo-3.30.3-11.azl3.x86_64.rpm coreutils-9.4-6.azl3.x86_64.rpm coreutils-debuginfo-9.4-6.azl3.x86_64.rpm coreutils-lang-9.4-6.azl3.x86_64.rpm @@ -72,10 +72,10 @@ createrepo_c-debuginfo-1.0.3-1.azl3.x86_64.rpm createrepo_c-devel-1.0.3-1.azl3.x86_64.rpm cross-binutils-common-2.41-10.azl3.noarch.rpm cross-gcc-common-13.2.0-7.azl3.noarch.rpm -curl-8.11.1-4.azl3.x86_64.rpm -curl-debuginfo-8.11.1-4.azl3.x86_64.rpm -curl-devel-8.11.1-4.azl3.x86_64.rpm -curl-libs-8.11.1-4.azl3.x86_64.rpm +curl-8.11.1-5.azl3.x86_64.rpm +curl-debuginfo-8.11.1-5.azl3.x86_64.rpm +curl-devel-8.11.1-5.azl3.x86_64.rpm +curl-libs-8.11.1-5.azl3.x86_64.rpm Cython-debuginfo-3.0.5-2.azl3.x86_64.rpm debugedit-5.0-2.azl3.x86_64.rpm debugedit-debuginfo-5.0-2.azl3.x86_64.rpm @@ -99,10 +99,10 @@ elfutils-libelf-0.189-6.azl3.x86_64.rpm elfutils-libelf-devel-0.189-6.azl3.x86_64.rpm elfutils-libelf-devel-static-0.189-6.azl3.x86_64.rpm elfutils-libelf-lang-0.189-6.azl3.x86_64.rpm -expat-2.6.4-2.azl3.x86_64.rpm -expat-debuginfo-2.6.4-2.azl3.x86_64.rpm -expat-devel-2.6.4-2.azl3.x86_64.rpm -expat-libs-2.6.4-2.azl3.x86_64.rpm +expat-2.6.4-4.azl3.x86_64.rpm 
+expat-debuginfo-2.6.4-4.azl3.x86_64.rpm +expat-devel-2.6.4-4.azl3.x86_64.rpm +expat-libs-2.6.4-4.azl3.x86_64.rpm file-5.45-1.azl3.x86_64.rpm file-debuginfo-5.45-1.azl3.x86_64.rpm file-devel-5.45-1.azl3.x86_64.rpm @@ -129,27 +129,27 @@ gdbm-lang-1.23-1.azl3.x86_64.rpm gettext-0.22-1.azl3.x86_64.rpm gettext-debuginfo-0.22-1.azl3.x86_64.rpm gfortran-13.2.0-7.azl3.x86_64.rpm -glib-2.78.6-5.azl3.x86_64.rpm -glib-debuginfo-2.78.6-5.azl3.x86_64.rpm -glib-devel-2.78.6-5.azl3.x86_64.rpm -glib-doc-2.78.6-5.azl3.noarch.rpm -glib-schemas-2.78.6-5.azl3.x86_64.rpm -glibc-2.38-16.azl3.x86_64.rpm -glibc-debuginfo-2.38-16.azl3.x86_64.rpm -glibc-devel-2.38-16.azl3.x86_64.rpm -glibc-i18n-2.38-16.azl3.x86_64.rpm -glibc-iconv-2.38-16.azl3.x86_64.rpm -glibc-lang-2.38-16.azl3.x86_64.rpm -glibc-locales-all-2.38-16.azl3.x86_64.rpm -glibc-nscd-2.38-16.azl3.x86_64.rpm -glibc-static-2.38-16.azl3.x86_64.rpm -glibc-tools-2.38-16.azl3.x86_64.rpm +glib-2.78.6-7.azl3.x86_64.rpm +glib-debuginfo-2.78.6-7.azl3.x86_64.rpm +glib-devel-2.78.6-7.azl3.x86_64.rpm +glib-doc-2.78.6-7.azl3.noarch.rpm +glib-schemas-2.78.6-7.azl3.x86_64.rpm +glibc-2.38-18.azl3.x86_64.rpm +glibc-debuginfo-2.38-18.azl3.x86_64.rpm +glibc-devel-2.38-18.azl3.x86_64.rpm +glibc-i18n-2.38-18.azl3.x86_64.rpm +glibc-iconv-2.38-18.azl3.x86_64.rpm +glibc-lang-2.38-18.azl3.x86_64.rpm +glibc-locales-all-2.38-18.azl3.x86_64.rpm +glibc-nscd-2.38-18.azl3.x86_64.rpm +glibc-static-2.38-18.azl3.x86_64.rpm +glibc-tools-2.38-18.azl3.x86_64.rpm gmp-6.3.0-1.azl3.x86_64.rpm gmp-debuginfo-6.3.0-1.azl3.x86_64.rpm gmp-devel-6.3.0-1.azl3.x86_64.rpm -gnupg2-2.4.7-1.azl3.x86_64.rpm -gnupg2-debuginfo-2.4.7-1.azl3.x86_64.rpm -gnupg2-lang-2.4.7-1.azl3.x86_64.rpm +gnupg2-2.4.9-2.azl3.x86_64.rpm +gnupg2-debuginfo-2.4.9-2.azl3.x86_64.rpm +gnupg2-lang-2.4.9-2.azl3.x86_64.rpm gperf-3.1-5.azl3.x86_64.rpm gperf-debuginfo-3.1-5.azl3.x86_64.rpm gpgme-1.23.2-2.azl3.x86_64.rpm @@ -165,29 +165,29 @@ intltool-0.51.0-7.azl3.noarch.rpm itstool-2.0.7-1.azl3.noarch.rpm kbd-2.2.0-2.azl3.x86_64.rpm kbd-debuginfo-2.2.0-2.azl3.x86_64.rpm -kernel-cross-headers-6.6.117.1-1.azl3.noarch.rpm -kernel-headers-6.6.117.1-1.azl3.noarch.rpm +kernel-cross-headers-6.6.121.1-1.azl3.noarch.rpm +kernel-headers-6.6.121.1-1.azl3.noarch.rpm kmod-30-1.azl3.x86_64.rpm kmod-debuginfo-30-1.azl3.x86_64.rpm kmod-devel-30-1.azl3.x86_64.rpm -krb5-1.21.3-2.azl3.x86_64.rpm -krb5-debuginfo-1.21.3-2.azl3.x86_64.rpm -krb5-devel-1.21.3-2.azl3.x86_64.rpm -krb5-lang-1.21.3-2.azl3.x86_64.rpm +krb5-1.21.3-3.azl3.x86_64.rpm +krb5-debuginfo-1.21.3-3.azl3.x86_64.rpm +krb5-devel-1.21.3-3.azl3.x86_64.rpm +krb5-lang-1.21.3-3.azl3.x86_64.rpm libacl-2.3.1-2.azl3.x86_64.rpm libacl-devel-2.3.1-2.azl3.x86_64.rpm -libarchive-3.7.7-3.azl3.x86_64.rpm -libarchive-debuginfo-3.7.7-3.azl3.x86_64.rpm -libarchive-devel-3.7.7-3.azl3.x86_64.rpm +libarchive-3.7.7-4.azl3.x86_64.rpm +libarchive-debuginfo-3.7.7-4.azl3.x86_64.rpm +libarchive-devel-3.7.7-4.azl3.x86_64.rpm libassuan-2.5.6-1.azl3.x86_64.rpm libassuan-debuginfo-2.5.6-1.azl3.x86_64.rpm libassuan-devel-2.5.6-1.azl3.x86_64.rpm libattr-2.5.2-1.azl3.x86_64.rpm libattr-devel-2.5.2-1.azl3.x86_64.rpm libbacktrace-static-13.2.0-7.azl3.x86_64.rpm -libcap-2.69-10.azl3.x86_64.rpm -libcap-debuginfo-2.69-10.azl3.x86_64.rpm -libcap-devel-2.69-10.azl3.x86_64.rpm +libcap-2.69-12.azl3.x86_64.rpm +libcap-debuginfo-2.69-12.azl3.x86_64.rpm +libcap-devel-2.69-12.azl3.x86_64.rpm libcap-ng-0.8.4-1.azl3.x86_64.rpm libcap-ng-debuginfo-0.8.4-1.azl3.x86_64.rpm libcap-ng-devel-0.8.4-1.azl3.x86_64.rpm @@ -242,14 +242,14 @@ 
libssh2-debuginfo-1.11.1-1.azl3.x86_64.rpm libssh2-devel-1.11.1-1.azl3.x86_64.rpm libstdc++-13.2.0-7.azl3.x86_64.rpm libstdc++-devel-13.2.0-7.azl3.x86_64.rpm -libtasn1-4.19.0-2.azl3.x86_64.rpm -libtasn1-debuginfo-4.19.0-2.azl3.x86_64.rpm -libtasn1-devel-4.19.0-2.azl3.x86_64.rpm +libtasn1-4.19.0-3.azl3.x86_64.rpm +libtasn1-debuginfo-4.19.0-3.azl3.x86_64.rpm +libtasn1-devel-4.19.0-3.azl3.x86_64.rpm libtool-2.4.7-1.azl3.x86_64.rpm libtool-debuginfo-2.4.7-1.azl3.x86_64.rpm -libxml2-2.11.5-7.azl3.x86_64.rpm -libxml2-debuginfo-2.11.5-7.azl3.x86_64.rpm -libxml2-devel-2.11.5-7.azl3.x86_64.rpm +libxml2-2.11.5-8.azl3.x86_64.rpm +libxml2-debuginfo-2.11.5-8.azl3.x86_64.rpm +libxml2-devel-2.11.5-8.azl3.x86_64.rpm libxcrypt-4.4.36-2.azl3.x86_64.rpm libxcrypt-debuginfo-4.4.36-2.azl3.x86_64.rpm libxcrypt-devel-4.4.36-2.azl3.x86_64.rpm @@ -295,12 +295,12 @@ npth-debuginfo-1.6-4.azl3.x86_64.rpm npth-devel-1.6-4.azl3.x86_64.rpm ntsysv-1.25-1.azl3.x86_64.rpm ocaml-srpm-macros-9-4.azl3.noarch.rpm -openssl-3.3.5-1.azl3.x86_64.rpm -openssl-debuginfo-3.3.5-1.azl3.x86_64.rpm -openssl-devel-3.3.5-1.azl3.x86_64.rpm -openssl-libs-3.3.5-1.azl3.x86_64.rpm -openssl-perl-3.3.5-1.azl3.x86_64.rpm -openssl-static-3.3.5-1.azl3.x86_64.rpm +openssl-3.3.5-3.azl3.x86_64.rpm +openssl-debuginfo-3.3.5-3.azl3.x86_64.rpm +openssl-devel-3.3.5-3.azl3.x86_64.rpm +openssl-libs-3.3.5-3.azl3.x86_64.rpm +openssl-perl-3.3.5-3.azl3.x86_64.rpm +openssl-static-3.3.5-3.azl3.x86_64.rpm p11-kit-0.25.0-1.azl3.x86_64.rpm p11-kit-debuginfo-0.25.0-1.azl3.x86_64.rpm p11-kit-devel-0.25.0-1.azl3.x86_64.rpm @@ -538,20 +538,21 @@ procps-ng-lang-4.0.4-1.azl3.x86_64.rpm pyproject-rpm-macros-1.12.0-2.azl3.noarch.rpm pyproject-srpm-macros-1.12.0-2.azl3.noarch.rpm python-markupsafe-debuginfo-2.1.3-1.azl3.x86_64.rpm -python-wheel-wheel-0.43.0-1.azl3.noarch.rpm -python3-3.12.9-6.azl3.x86_64.rpm +python-wheel-wheel-0.46.3-1.azl3.noarch.rpm +python3-3.12.9-8.azl3.x86_64.rpm python3-audit-3.1.2-1.azl3.x86_64.rpm python3-cracklib-2.9.11-1.azl3.x86_64.rpm -python3-curses-3.12.9-6.azl3.x86_64.rpm +python3-curses-3.12.9-8.azl3.x86_64.rpm python3-Cython-3.0.5-2.azl3.x86_64.rpm -python3-debuginfo-3.12.9-6.azl3.x86_64.rpm -python3-devel-3.12.9-6.azl3.x86_64.rpm +python3-debuginfo-3.12.9-8.azl3.x86_64.rpm +python3-devel-3.12.9-8.azl3.x86_64.rpm python3-flit-core-3.9.0-1.azl3.noarch.rpm python3-gpg-1.23.2-2.azl3.x86_64.rpm python3-jinja2-3.1.2-3.azl3.noarch.rpm python3-libcap-ng-0.8.4-1.azl3.x86_64.rpm -python3-libs-3.12.9-6.azl3.x86_64.rpm -python3-libxml2-2.11.5-7.azl3.x86_64.rpm +python3-libmount-2.40.2-3.azl3.x86_64.rpm +python3-libs-3.12.9-8.azl3.x86_64.rpm +python3-libxml2-2.11.5-8.azl3.x86_64.rpm python3-lxml-4.9.3-1.azl3.x86_64.rpm python3-magic-5.45-1.azl3.noarch.rpm python3-markupsafe-2.1.3-1.azl3.x86_64.rpm @@ -562,9 +563,9 @@ python3-pygments-2.7.4-2.azl3.noarch.rpm python3-rpm-4.18.2-1.azl3.x86_64.rpm python3-rpm-generators-14-11.azl3.noarch.rpm python3-setuptools-69.0.3-5.azl3.noarch.rpm -python3-test-3.12.9-6.azl3.x86_64.rpm -python3-tools-3.12.9-6.azl3.x86_64.rpm -python3-wheel-0.43.0-1.azl3.noarch.rpm +python3-test-3.12.9-8.azl3.x86_64.rpm +python3-tools-3.12.9-8.azl3.x86_64.rpm +python3-wheel-0.46.3-1.azl3.noarch.rpm readline-8.2-2.azl3.x86_64.rpm readline-debuginfo-8.2-2.azl3.x86_64.rpm readline-devel-8.2-2.azl3.x86_64.rpm @@ -606,11 +607,11 @@ texinfo-7.0.3-1.azl3.x86_64.rpm texinfo-debuginfo-7.0.3-1.azl3.x86_64.rpm unzip-6.0-22.azl3.x86_64.rpm unzip-debuginfo-6.0-22.azl3.x86_64.rpm -util-linux-2.40.2-1.azl3.x86_64.rpm 
-util-linux-debuginfo-2.40.2-1.azl3.x86_64.rpm -util-linux-devel-2.40.2-1.azl3.x86_64.rpm -util-linux-lang-2.40.2-1.azl3.x86_64.rpm -util-linux-libs-2.40.2-1.azl3.x86_64.rpm +util-linux-2.40.2-3.azl3.x86_64.rpm +util-linux-debuginfo-2.40.2-3.azl3.x86_64.rpm +util-linux-devel-2.40.2-3.azl3.x86_64.rpm +util-linux-lang-2.40.2-3.azl3.x86_64.rpm +util-linux-libs-2.40.2-3.azl3.x86_64.rpm which-2.21-8.azl3.x86_64.rpm which-debuginfo-2.21-8.azl3.x86_64.rpm xz-5.4.4-2.azl3.x86_64.rpm diff --git a/toolkit/scripts/check_entangled_specs.py b/toolkit/scripts/check_entangled_specs.py index 44252e4bc07..6a6e941266d 100755 --- a/toolkit/scripts/check_entangled_specs.py +++ b/toolkit/scripts/check_entangled_specs.py @@ -64,14 +64,6 @@ "SPECS/shim-unsigned-x64/shim-unsigned-x64.spec", "SPECS/shim-unsigned-aarch64/shim-unsigned-aarch64.spec" ]), - frozenset([ - "SPECS-SIGNED/fwctl-signed/fwctl-signed.spec", - "SPECS/fwctl/fwctl.spec" - ]), - frozenset([ - "SPECS-SIGNED/fwctl-hwe-signed/fwctl-hwe-signed.spec", - "SPECS/fwctl-hwe/fwctl-hwe.spec" - ]), frozenset([ "SPECS-SIGNED/iser-signed/iser-signed.spec", "SPECS/iser/iser.spec" diff --git a/toolkit/scripts/get_config_deps.sh b/toolkit/scripts/get_config_deps.sh index 1e724d03d0a..564f9ae46fc 100755 --- a/toolkit/scripts/get_config_deps.sh +++ b/toolkit/scripts/get_config_deps.sh @@ -44,6 +44,8 @@ do then echo "$filename" else - echo $(realpath "$config_base_dir/$filename") + # Use -m to canonicalize paths even if they don't exist + # This allows the Makefile to detect missing files and provide a helpful error + echo $(realpath -m "$config_base_dir/$filename") fi done diff --git a/toolkit/scripts/imggen.mk b/toolkit/scripts/imggen.mk index 881f30d6437..f0f1d2458e5 100644 --- a/toolkit/scripts/imggen.mk +++ b/toolkit/scripts/imggen.mk @@ -94,6 +94,20 @@ fetch-external-image-packages: $(image_external_package_cache_summary) # Validate the selected config file if any changes occur in the image config base directory. # Changes to files located outside the base directory will not be detected. 
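A quick illustration of the realpath change in get_config_deps.sh above, assuming /tmp/no-such-dir does not exist (all paths here are placeholders):

    realpath /tmp/no-such-dir/postinstall.sh      # fails: the parent directory does not exist
    realpath -m /tmp/no-such-dir/postinstall.sh   # prints the canonicalized path and exits 0,
                                                  # so the imggen.mk guard below can report the missing file itself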
 validate-image-config: $(validate-config)
+
+# Validate that all config dependencies exist before Make tries to process them as prerequisites
+# If we don't do this, Make will error out with a less-than-helpful message about having no rule to make
+# the validation flag (since it's a pattern match and if a dependency is missing, it can't match the pattern)
+# Skip this check for printvar targets so users can still debug with the suggested command
+ifneq ($(CONFIG_FILE),)
+  ifeq ($(filter printvar-%,$(MAKECMDGOALS)),)
+    config_missing_files = $(filter-out $(wildcard $(config_other_files)),$(config_other_files))
+    ifneq ($(config_missing_files),)
+      $(error $(newline)$(newline)ERROR: Image configuration '$(CONFIG_FILE)' missing files:$(newline)$(newline)$(foreach file,$(config_missing_files), - $(file)$(newline))$(newline)Run this command to see all expected files:$(newline) make printvar-config_other_files CONFIG_FILE=$(CONFIG_FILE) --quiet$(newline))
+    endif
+  endif
+endif
+
 $(STATUS_FLAGS_DIR)/validate-image-config%.flag: $(go-imageconfigvalidator) $(depend_CONFIG_FILE) $(CONFIG_FILE) $(config_other_files)
 	$(if $(CONFIG_FILE),,$(error Must set CONFIG_FILE=))
 	$(go-imageconfigvalidator) \
@@ -126,7 +140,12 @@ ifneq ($(REPO_SNAPSHOT_TIME),)
 imagepkgfetcher_extra_flags += --repo-snapshot-time=$(REPO_SNAPSHOT_TIME)
 endif
 
-$(image_package_cache_summary): $(go-imagepkgfetcher) $(chroot_worker) $(toolchain_rpms) $(imggen_local_repo) $(depend_REPO_LIST) $(REPO_LIST) $(depend_CONFIG_FILE) $(CONFIG_FILE) $(validate-config) $(RPMS_DIR) $(imggen_rpms) $(depend_REPO_SNAPSHOT_TIME) $(STATUS_FLAGS_DIR)/imagegen_cleanup.flag
+ifeq ($(VALIDATE_IMAGE_GPG),y)
+imagepkgfetcher_extra_flags += --enable-gpg-check
+imagepkgfetcher_extra_flags += $(foreach key,$(IMAGE_GPG_VALIDATION_KEYS),--gpg-key=$(key))
+endif
+
+$(image_package_cache_summary): $(go-imagepkgfetcher) $(chroot_worker) $(toolchain_rpms) $(imggen_local_repo) $(depend_REPO_LIST) $(REPO_LIST) $(depend_CONFIG_FILE) $(CONFIG_FILE) $(validate-config) $(RPMS_DIR) $(imggen_rpms) $(depend_REPO_SNAPSHOT_TIME) $(depend_VALIDATE_IMAGE_GPG) $(depend_IMAGE_GPG_VALIDATION_KEYS) $(IMAGE_GPG_VALIDATION_KEYS) $(STATUS_FLAGS_DIR)/imagegen_cleanup.flag
 	$(if $(CONFIG_FILE),,$(error Must set CONFIG_FILE=))
 	$(go-imagepkgfetcher) \
 		--input=$(CONFIG_FILE) \
diff --git a/toolkit/scripts/toolchain.mk b/toolkit/scripts/toolchain.mk
index e0856b56543..c193717d128 100644
--- a/toolkit/scripts/toolchain.mk
+++ b/toolkit/scripts/toolchain.mk
@@ -309,7 +309,7 @@ $(toolchain_rpms): $(TOOLCHAIN_MANIFEST) $(STATUS_FLAGS_DIR)/toolchain_local_tem
 # No archive was selected, so download from online package server instead. All packages must be available for this step to succeed.
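A usage sketch for the imggen.mk additions above; the config and key paths are placeholders, and passing the keys as a space-separated list is an assumption based on the $(foreach ...) expansion:

    # Enable GPG verification of fetched image packages; each listed key becomes a --gpg-key flag
    # handed to imagepkgfetcher through imagepkgfetcher_extra_flags.
    sudo make image CONFIG_FILE=./imageconfigs/core-efi.json \
        VALIDATE_IMAGE_GPG=y IMAGE_GPG_VALIDATION_KEYS="/path/to/key1 /path/to/key2"

    # If the config references files that do not exist, the new guard fails fast and suggests:
    make printvar-config_other_files CONFIG_FILE=./imageconfigs/core-efi.json --quiet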
else -$(toolchain_rpms): $(TOOLCHAIN_MANIFEST) $(STATUS_FLAGS_DIR)/toolchain_auto_cleanup.flag $(depend_REBUILD_TOOLCHAIN) $(go-downloader) $(SCRIPTS_DIR)/toolchain/download_toolchain_rpm.sh $(TOOLCHAIN_GPG_VALIDATION_KEYS) +$(toolchain_rpms): $(TOOLCHAIN_MANIFEST) $(STATUS_FLAGS_DIR)/toolchain_auto_cleanup.flag $(depend_REBUILD_TOOLCHAIN) $(go-downloader) $(SCRIPTS_DIR)/toolchain/download_toolchain_rpm.sh $(depend_TOOLCHAIN_GPG_VALIDATION_KEYS) $(TOOLCHAIN_GPG_VALIDATION_KEYS) @log_file="$(toolchain_downloads_logs_dir)/$(notdir $@).log" && \ rm -f "$$log_file" && \ $(SCRIPTS_DIR)/toolchain/download_toolchain_rpm.sh \ diff --git a/toolkit/scripts/toolchain/container/toolchain-sha256sums b/toolkit/scripts/toolchain/container/toolchain-sha256sums index 1ce65592377..13f3d612f05 100644 --- a/toolkit/scripts/toolchain/container/toolchain-sha256sums +++ b/toolkit/scripts/toolchain/container/toolchain-sha256sums @@ -28,7 +28,7 @@ a3c2b80201b89e68616f4ad30bc66aee4927c3ce50e33929ca819d5c43538898 gmp-6.3.0.tar. 1db2aedde89d0dea42b16d9528f894c8d15dae4e190b59aecc78f5a951276eab grep-3.11.tar.xz 6b9757f592b7518b4902eb6af7e54570bdccba37a871fddb2d30ae3863511c13 groff-1.23.0.tar.gz 7454eb6935db17c6655576c2e1b0fabefd38b4d0936e0f87f48cd062ce91a057 gzip-1.13.tar.xz -bfbbeba626396e2bab9bd520a46943e68d228a91e8f11cd662bf4fb3996443d3 kernel-6.6.117.1.tar.gz +aa5721db931ce7b5a7a2c9a554c78e399dbe76e823356d36f860308cfa9c5e12 kernel-6.6.121.1.tar.gz 5d24e40819768f74daf846b99837fc53a3a9dcdf3ce1c2003fe0596db850f0f0 libarchive-3.7.1.tar.gz f311f8f3dad84699d0566d1d6f7ec943a9298b28f714cae3c931dfd57492d7eb libcap-2.69.tar.xz b8b45194989022a79ec1317f64a2a75b1551b2a55bea06f67704cb2a2e4690b0 libpipeline-1.5.7.tar.gz diff --git a/toolkit/scripts/toolchain/container/toolchain_build_temp_tools.sh b/toolkit/scripts/toolchain/container/toolchain_build_temp_tools.sh index a54ef9acf92..6a8092a8859 100755 --- a/toolkit/scripts/toolchain/container/toolchain_build_temp_tools.sh +++ b/toolkit/scripts/toolchain/container/toolchain_build_temp_tools.sh @@ -86,7 +86,7 @@ rm -rf gcc-13.2.0 touch $LFS/logs/temptoolchain/status_gcc_pass1_complete -KERNEL_VERSION="6.6.117.1" +KERNEL_VERSION="6.6.121.1" echo Linux-${KERNEL_VERSION} API Headers tar xf kernel-${KERNEL_VERSION}.tar.gz pushd CBL-Mariner-Linux-Kernel-rolling-lts-mariner-3-${KERNEL_VERSION} diff --git a/toolkit/scripts/utils.mk b/toolkit/scripts/utils.mk index e4565110809..b00b75e1299 100644 --- a/toolkit/scripts/utils.mk +++ b/toolkit/scripts/utils.mk @@ -15,6 +15,12 @@ build_arch := $(shell uname -m) no_repo_acl = $(STATUS_FLAGS_DIR)/no_repo_acl.flag +# Define newline for use in error messages and output formatting +define newline + + +endef + ######## MISC. MAKEFILE Functions ######## # Creates a folder if it doesn't exist. Also sets the timestamp to 0 if it is @@ -60,10 +66,11 @@ endef ######## VARIABLE DEPENDENCY TRACKING ######## # List of variables to watch for changes. 
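The utils.mk change below adds the GPG-related variables to watch_vars, so changing one of them between runs dirties its depend_* flag file and re-triggers any target that lists that flag as a prerequisite, such as the toolchain RPM download rule above. An illustrative pair of invocations (key paths are placeholders):

    sudo make toolchain VALIDATE_TOOLCHAIN_GPG=y TOOLCHAIN_GPG_VALIDATION_KEYS=/path/to/keyA
    # A later run with a different key list invalidates depend_TOOLCHAIN_GPG_VALIDATION_KEYS,
    # so the downloaded toolchain RPMs are validated again instead of being treated as up to date.
    sudo make toolchain VALIDATE_TOOLCHAIN_GPG=y TOOLCHAIN_GPG_VALIDATION_KEYS=/path/to/keyB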
-watch_vars=PACKAGE_BUILD_LIST PACKAGE_REBUILD_LIST PACKAGE_IGNORE_LIST REPO_LIST CONFIG_FILE STOP_ON_PKG_FAIL TOOLCHAIN_ARCHIVE REBUILD_TOOLCHAIN SRPM_PACK_LIST SPECS_DIR MAX_CASCADING_REBUILDS RUN_CHECK TEST_RUN_LIST TEST_RERUN_LIST TEST_IGNORE_LIST EXTRA_BUILD_LAYERS LICENSE_CHECK_MODE VALIDATE_TOOLCHAIN_GPG REPO_SNAPSHOT_TIME PACKAGE_CACHE_SUMMARY +watch_vars=PACKAGE_BUILD_LIST PACKAGE_REBUILD_LIST PACKAGE_IGNORE_LIST REPO_LIST CONFIG_FILE STOP_ON_PKG_FAIL TOOLCHAIN_ARCHIVE REBUILD_TOOLCHAIN SRPM_PACK_LIST SPECS_DIR MAX_CASCADING_REBUILDS RUN_CHECK TEST_RUN_LIST TEST_RERUN_LIST TEST_IGNORE_LIST EXTRA_BUILD_LAYERS LICENSE_CHECK_MODE VALIDATE_TOOLCHAIN_GPG TOOLCHAIN_GPG_VALIDATION_KEYS VALIDATE_IMAGE_GPG IMAGE_GPG_VALIDATION_KEYS REPO_SNAPSHOT_TIME PACKAGE_CACHE_SUMMARY # Current list: $(depend_PACKAGE_BUILD_LIST) $(depend_PACKAGE_REBUILD_LIST) $(depend_PACKAGE_IGNORE_LIST) $(depend_REPO_LIST) $(depend_CONFIG_FILE) $(depend_STOP_ON_PKG_FAIL) # $(depend_TOOLCHAIN_ARCHIVE) $(depend_REBUILD_TOOLCHAIN) $(depend_SRPM_PACK_LIST) $(depend_SPECS_DIR) $(depend_EXTRA_BUILD_LAYERS) $(depend_MAX_CASCADING_REBUILDS) $(depend_RUN_CHECK) $(depend_TEST_RUN_LIST) -# $(depend_TEST_RERUN_LIST) $(depend_TEST_IGNORE_LIST) $(depend_LICENSE_CHECK_MODE) $(depend_VALIDATE_TOOLCHAIN_GPG) $(depend_REPO_SNAPSHOT_TIME) $(depend_PACKAGE_CACHE_SUMMARY) +# $(depend_TEST_RERUN_LIST) $(depend_TEST_IGNORE_LIST) $(depend_LICENSE_CHECK_MODE) $(depend_VALIDATE_TOOLCHAIN_GPG) $(depend_TOOLCHAIN_GPG_VALIDATION_KEYS) $(depend_VALIDATE_IMAGE_GPG) +# $(depend_IMAGE_GPG_VALIDATION_KEYS) $(depend_REPO_SNAPSHOT_TIME) $(depend_PACKAGE_CACHE_SUMMARY) .PHONY: variable_depends_on_phony clean-variable_depends_on_phony setfacl_always_run_phony clean: clean-variable_depends_on_phony diff --git a/toolkit/tools/imagegen/installutils/installutils.go b/toolkit/tools/imagegen/installutils/installutils.go index 0ee732ef2a9..2c183b73329 100644 --- a/toolkit/tools/imagegen/installutils/installutils.go +++ b/toolkit/tools/imagegen/installutils/installutils.go @@ -766,7 +766,9 @@ func TdnfInstallWithProgress(packageName, installRoot string, currentPackagesIns return } - // TDNF 3.x uses repositories from installchroot instead of host. Passing setopt for repo files directory to use local repo for installroot installation + // TDNF 3.x uses repositories from installchroot instead of host. Passing setopt for repo files directory to use local repo for installroot installation. + // Note: --nogpgcheck is used here because GPG signature validation is performed earlier during package fetching (imagepkgfetcher) + // when VALIDATE_IMAGE_GPG=y is set. Packages in the local repo have already been verified. err = shell.NewExecBuilder("tdnf", "-v", "install", packageName, "--installroot", installRoot, "--nogpgcheck", "--assumeyes", "--setopt", "reposdir=/etc/yum.repos.d/", releaseverCliArg). StdoutCallback(onStdout). @@ -830,7 +832,9 @@ func calculateTotalPackages(packages []string, installRoot string) (installedPac stderr string ) - // Issue an install request but stop right before actually performing the install (assumeno) + // Issue an install request but stop right before actually performing the install (assumeno). + // Note: --nogpgcheck is safe here because this is a dry-run (--assumeno) and packages are validated + // during fetching when VALIDATE_IMAGE_GPG=y is set. 
stdout, stderr, err = shell.Execute("tdnf", "install", releaseverCliArg, "--assumeno", "--nogpgcheck", pkg, "--installroot", installRoot) if err != nil { // tdnf aborts the process when it detects an install with --assumeno. @@ -1815,11 +1819,8 @@ func ProvisionUserSSHCerts(installChroot safechroot.ChrootInterface, username st ) (err error) { var ( pubKeyData []string - exists bool ) const squashErrors = false - const authorizedKeysTempFilePerms = 0644 - const authorizedKeysTempFile = "/tmp/authorized_keys" const sshDirectoryPermission = "0700" // Skip user SSH directory generation when not provided with public keys @@ -1835,27 +1836,21 @@ func ProvisionUserSSHCerts(installChroot safechroot.ChrootInterface, username st authorizedKeysFile := filepath.Join(userSSHKeyDir, userutils.SSHAuthorizedKeysFileName) - exists, err = file.PathExists(authorizedKeysTempFile) + // Create a guaranteed unique temporary file for authorized_keys as a staging file which we will copy + // into the chroot. + tmpFile, err := os.CreateTemp("", "authorized_keys_*") if err != nil { - logger.Log.Warnf("Error accessing %s file : %v", authorizedKeysTempFile, err) + logger.Log.Warnf("Failed to create temporary authorized_keys file: %v", err) return } - if !exists { - logger.Log.Debugf("File %s does not exist. Creating file...", authorizedKeysTempFile) - err = file.Create(authorizedKeysTempFile, authorizedKeysTempFilePerms) - if err != nil { - logger.Log.Warnf("Failed to create %s file : %v", authorizedKeysTempFile, err) - return - } - } else { - err = os.Truncate(authorizedKeysTempFile, 0) - if err != nil { - logger.Log.Warnf("Failed to truncate %s file : %v", authorizedKeysTempFile, err) - return - } - } + authorizedKeysTempFile := tmpFile.Name() defer os.Remove(authorizedKeysTempFile) + if err = tmpFile.Close(); err != nil { + logger.Log.Warnf("Failed to close temporary authorized_keys file: %v", err) + return + } + allSSHKeys := []string(nil) if includeExistingKeys { diff --git a/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go b/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go index 3b70a6c9db6..8ca24933ba6 100644 --- a/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go +++ b/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go @@ -16,6 +16,7 @@ import ( "github.com/microsoft/azurelinux/toolkit/tools/internal/packagerepo/repoutils" "github.com/microsoft/azurelinux/toolkit/tools/internal/pkggraph" "github.com/microsoft/azurelinux/toolkit/tools/internal/pkgjson" + "github.com/microsoft/azurelinux/toolkit/tools/internal/rpm" "github.com/microsoft/azurelinux/toolkit/tools/internal/timestamp" "github.com/microsoft/azurelinux/toolkit/tools/pkg/profile" @@ -49,6 +50,9 @@ var ( inputSummaryFile = app.Flag("input-summary-file", "Path to a file with the summary of packages cloned to be restored").String() outputSummaryFile = app.Flag("output-summary-file", "Path to save the summary of packages cloned").String() + enableGpgCheck = app.Flag("enable-gpg-check", "Enable RPM GPG signature verification for all repositories during package fetching.").Bool() + gpgKeyPaths = app.Flag("gpg-key", "Path to a GPG key file for signature validation. May be specified multiple times. 
Required if enable-gpg-check is set.").ExistingFiles()
+
 	logFlags      = exe.SetupLogFlags(app)
 	profFlags     = exe.SetupProfileFlags(app)
 	timestampFile = app.Flag("timestamp-file", "File that stores timestamps for this program.").String()
@@ -73,6 +77,10 @@ func main() {
 		logger.Log.Fatal("input-graph must be provided if external-only is set.")
 	}
 
+	if *enableGpgCheck && len(*gpgKeyPaths) == 0 {
+		logger.Log.Fatal("--enable-gpg-check requires at least one --gpg-key path")
+	}
+
 	timestamp.StartEvent("initialize and configure cloner", nil)
 
 	cloner, err := rpmrepocloner.ConstructCloner(*outDir, *tmpDir, *workertar, *existingRpmDir, *existingToolchainRpmDir, *tlsClientCert, *tlsClientKey, *repoFiles, *repoSnapshotTime)
@@ -110,6 +118,14 @@ func main() {
 		logger.Log.Panicf("Failed to clone RPM repo. Error: %s", err)
 	}
 
+	// Validate GPG signatures of downloaded packages if enabled
+	if *enableGpgCheck {
+		err = rpm.ValidateDirectoryRPMSignatures(cloner.CloneDirectory(), *gpgKeyPaths)
+		if err != nil {
+			logger.Log.Panicf("Failed to validate RPM signatures. Error: %s", err)
+		}
+	}
+
 	timestamp.StartEvent("finalize cloned packages", nil)
 
 	err = cloner.ConvertDownloadedPackagesIntoRepo()
diff --git a/toolkit/tools/internal/rpm/rpm.go b/toolkit/tools/internal/rpm/rpm.go
index 8ef77dffe56..96f817b8557 100644
--- a/toolkit/tools/internal/rpm/rpm.go
+++ b/toolkit/tools/internal/rpm/rpm.go
@@ -5,6 +5,8 @@ package rpm
 
 import (
 	"fmt"
+	"os"
+	"os/exec"
 	"path/filepath"
 	"regexp"
 	"runtime"
@@ -501,6 +503,91 @@ func InstallRPM(rpmFile string) (err error) {
 	return
 }
 
+const rpmKeysProgram = "rpmkeys"
+
+// importGPGKeysToRPMDb imports GPG keys into an RPM database for signature verification.
+// - rpmDbRoot: path to a directory to use as the RPM database root (will be created if it doesn't exist)
+// - gpgKeyPaths: paths to GPG key files to import into the RPM database
+// This should be called once before validating multiple RPMs with checkRPMSignature.
+func importGPGKeysToRPMDb(rpmDbRoot string, gpgKeyPaths []string) (err error) {
+	if _, err := exec.LookPath(rpmKeysProgram); err != nil {
+		return fmt.Errorf("%s command not found - explicit GPG signature enforcement requires this tool:\n%w", rpmKeysProgram, err)
+	}
+	for _, keyPath := range gpgKeyPaths {
+		_, stderr, importErr := shell.Execute(rpmKeysProgram, "--root", rpmDbRoot, "--import", keyPath)
+		if importErr != nil {
+			return fmt.Errorf("failed to import GPG key (%s) into RPM database: %v:\n%w", keyPath, stderr, importErr)
+		}
+	}
+	return nil
+}
+
+// checkRPMSignature validates the GPG signature of an RPM file.
+// - rpmFile: path to the RPM file to validate
+// - rpmDbRoot: path to a directory used as the RPM database root (must have GPG keys already imported via importGPGKeysToRPMDb)
+// Returns an error if the RPM signature is missing or invalid.
+func checkRPMSignature(rpmFile string, rpmDbRoot string) (err error) {
+	_, stderr, err := shell.Execute(rpmKeysProgram, "--root", rpmDbRoot, "--checksig", rpmFile, "-D", "%_pkgverify_level signature")
+	if err != nil {
+		return fmt.Errorf("RPM signature validation failed for (%s): %v\n%w", rpmFile, stderr, err)
+	}
+	return nil
+}
+
+// ValidateDirectoryRPMSignatures validates the GPG signatures of all RPM files in a directory.
+// It creates an isolated RPM database, imports the provided GPG keys, and validates each RPM.
+// Returns an error if any RPM has a missing or invalid signature.
+func ValidateDirectoryRPMSignatures(rpmDir string, gpgKeyPaths []string) (err error) {
+	logger.Log.Info("Validating GPG signatures of downloaded packages")
+
+	// Create a temporary directory for the isolated RPM database
+	rpmDbRoot, err := os.MkdirTemp("", "rpm-gpg-check-*")
+	if err != nil {
+		return fmt.Errorf("failed to create temporary directory for RPM database:\n%w", err)
+	}
+	defer os.RemoveAll(rpmDbRoot)
+
+	// Import GPG keys once before validating all RPMs
+	err = importGPGKeysToRPMDb(rpmDbRoot, gpgKeyPaths)
+	if err != nil {
+		return err
+	}
+
+	// Find all RPM files in the directory (recursively)
+	var rpmFiles []string
+	err = filepath.WalkDir(rpmDir, func(path string, d os.DirEntry, walkErr error) error {
+		if walkErr != nil {
+			return walkErr
+		}
+		if !d.IsDir() && filepath.Ext(path) == ".rpm" {
+			rpmFiles = append(rpmFiles, path)
+		}
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed to find RPM files in (%s):\n%w", rpmDir, err)
+	}
+
+	if len(rpmFiles) == 0 {
+		logger.Log.Debug("No RPM files found to validate")
+		return nil
+	}
+
+	logger.Log.Infof("Validating %d RPM files", len(rpmFiles))
+
+	// Validate each RPM
+	for _, rpmFile := range rpmFiles {
+		logger.Log.Debugf("Validating signature of: %s", filepath.Base(rpmFile))
+		err = checkRPMSignature(rpmFile, rpmDbRoot)
+		if err != nil {
+			return fmt.Errorf("GPG signature validation failed:\n%w", err)
+		}
+	}
+
+	logger.Log.Info("All downloaded RPMs have valid GPG signatures")
+	return nil
+}
+
 // QueryRPMProvides returns what an RPM file provides.
 // This includes any provides made by a generator and files provided by the rpm.
 func QueryRPMProvides(rpmFile string) (provides []string, err error) {
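For reference, the per-package check performed by ValidateDirectoryRPMSignatures above is roughly this rpmkeys sequence against a throwaway database root (key and package paths are placeholders):

    rpmdb_root=$(mktemp -d)
    rpmkeys --root "$rpmdb_root" --import /path/to/MICROSOFT-RPM-GPG-KEY
    # %_pkgverify_level signature makes a missing signature a hard failure, not just a digest check.
    rpmkeys --root "$rpmdb_root" --checksig -D "%_pkgverify_level signature" ./downloaded/example-1.0-1.azl3.x86_64.rpm
    rm -rf "$rpmdb_root"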