#!/usr/bin/python
import sys
import re
import string

sys.path.insert(0, "/usr/share/rhn/")
sys.path.insert(1, "/usr/share/rhn/up2date_client")

import StringIO
import packageList
import rpm
import rpmUtils
import pprint

from rhpl.translate import _, N_

import headers
import up2dateUtils
import up2dateErrors
import up2dateLog
import rpcServer
import rhnPackageInfo
import rhnChannel
import repoDirector
import config
import fnmatch
from repoBackends import genericSolveDep
#x2repos = repoDirector.initRepoDirector()

def showSkipListHits(skiplist):
    errmsg = _("""
    To solve all dependencies for the RPMs you have selected, The following
    packages you have marked to exclude would have to be added to the set:
    """)
    errmsg = errmsg + _("\n %-30.30s\t%-30.30s\n") % (
        _("Package Name"), _("Reason For Skipping"))
    errmsg = errmsg + " " * 4 + "=" * 70 + "\n"
    for i in skiplist:
        package = "%s-%s-%s" % (i[0][0], i[0][1], i[0][2])
        errmsg = errmsg + " %-30.30s\t%-30.30s\n" % (package, i[1])
    return errmsg
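
# Note on the skiplist structure (inferred from the formatting above, not
# stated elsewhere in this file): each entry is expected to look like
# ((name, version, release, ...), reason), since the loop prints
# "name-version-release" built from i[0] next to the reason string in i[1].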

class SolveDep:
    def __init__(self):
        self.log = up2dateLog.initLog()
        self.metainfo = {}
        self.source_list = []

        # FIXME: the solve dep code needs to be part of the
        # repo modules
        repos = repoDirector.initRepoDirector()
        repoSolveDeps = repos.getDepSolveHandlers()

        for repoSolveDep in repoSolveDeps.keys():
            self.addSourceInstance({'object': repoSolveDeps[repoSolveDep],
                                    'name': repoSolveDep})

    def orderSourceInstance(self, namelist):
        # sort the sources in the order of repo types specified
        # FIXME: plug this into the sourcesConfig or something so
        # we can theoretically have an order
        self.new_source_list = []
        self.new_metainfo = {}
        for name in namelist:
            self.new_source_list.append(name)
            self.new_metainfo[name] = self.metainfo[name]

    def addSourceInstance(self, metainfo):
        source = metainfo
        name = source['name']
        self.log.log_debug("add instance class name", name)
        self.source_list.append(name)
        self.metainfo[name] = source['object']
        # self.metainfo[name]['name'] = name

    # FIXME: need a way to "hint" the depsolve in the
    # right direction
    def solveDep(self, unknowns, availList,
                 msgCallback = None,
                 progressCallback = None,
                 refreshCallback = None):
        for source_key in self.source_list:
            source = self.metainfo[source_key]
            (ret, depToPkg) = source.solveDep(unknowns, availList,
                                              refreshCallback = refreshCallback,
                                              progressCallback = progressCallback,
                                              msgCallback = msgCallback)
            if ret != []:
                self.log.log_debug("Dep %s Fetched via: %s" % (unknowns, ret))
                # self.fetchType[pkg] = source['name']
                return (ret, depToPkg)
        return ([], {})
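
# Note (illustrative, not from the original source): solveDep() returns a
# (packages, depToPkg) pair.  Based on how DependencySolver consumes it below,
# depToPkg maps each unresolved dep string to the list of package tuples that
# satisfy it, where a package tuple is (name, version, release, epoch, arch),
# e.g. {"libbar.so.1": [("bar", "1.0", "1", "", "i386")]}.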

class DependencySolver:
    """This class is the engine of the dependency solver so that the
    external interface to up2date.dryRun remains readable"""

    def __init__(self, selected, availPkgs):
        self.log = up2dateLog.initLog()
        self.cfg = config.initUp2dateConfig()
        self.selected = selected
        available = availPkgs
        obsoletes = rhnPackageInfo.obsoletesList()
        self.availableList = available

        # exclude the already selected ones from the available list
        xlist = map(lambda a: a[0], self.selected)
        self.available = {}
        self.allAvailable = {}
        self.debug = 0

        self.obsoletes = {}
        self.kernel_list = []
        for p in obsoletes:
            key = p[5]
            if not self.obsoletes.has_key(key):
                self.obsoletes[key] = []
            self.obsoletes[key].append(p)

        self.solve_dep = SolveDep()

        # a list of excludes that might break our dependency chains
        self.excludes = []

        self.pkgToDep = {}
        self.pkgToReq = {}

        # make sure the ones that are skipped are not in here
        available, junk = self.__skip(available)
        for i in range(len(available)):
            pkgName = available[i][0]
            if not pkgName in xlist:
                if not self.available.has_key(pkgName):
                    self.available[pkgName] = available[i]
                if self.allAvailable.has_key(pkgName):
                    self.allAvailable[pkgName].append(available[i])
                else:
                    self.allAvailable[pkgName] = [available[i]]

        # placeholder for the database and transaction set
        self.ts = None
        # null callbacks for now
        self.msgCallback = self.progressCallback = self.refreshCallback = None
        # get around cached dependencies. Gross ugly hack.

    # set the callbacks
    def set_callback(self, name, function):
        if name == "msg":
            self.msgCallback = function
        elif name == "progress":
            self.progressCallback = function
        elif name == "refresh":
            self.refreshCallback = function

    # get ready to rumble
    def setup(self):
        # prepare for the transaction list
        self.ts = rpm.TransactionSet()
        self.ts.setVSFlags(-1)
        self.ts.setProbFilter(rpm.RPMTRANS_FLAG_BUILD_PROBS|rpm.RPMPROB_FILTER_IGNOREOS)
        # self.ts.setFlags(rpm.RPMPROB_FILTER_IGNOREOS|rpm.RPMPROB)

        if self.msgCallback != None:
            self.msgCallback(_("Testing package set / solving RPM "\
                               "inter-dependencies"))

        pkgset = self.selected
        self.selected = []
        # the __add will fill it back as it does ts.add
        self.__add(pkgset, check = 0)
        # JBJ: possibly dont need ordering here
        # self.ts.order()

    # refresh the screen or something
    def __refresh(self):
        if self.refreshCallback:
            self.refreshCallback()

    # sanitize a list of packages for the ones we have to skip
    def __skip(self, plist):
        # make sure the new packages don't include any packages
        # with package excludes
        plist.sort()
        xlist = packageList.removeSkipPackagesFromList(plist)
        for x in xlist:
            # remove the package
            self.log.log_debug("Removing package", x)
            pkgName = x[0][0]
            if self.available.has_key(pkgName):
                # make it unavailable too
                del self.available[pkgName]
            self.excludes.append(x)
        return plist, xlist

    def __isKernel(self, h):
        provides = h['provides']
        if "kernel" in provides:
            return 1
        return 0

    # add packages to a transaction set
    def __add(self, plist, check = 0):
        self.log.log_debug("Candidates for the selected list:", plist)
        # get the headers
        headerList = headers.initHeaderList()
        # h_list = getHeaders(plist, refreshCallback = self.refreshCallback)
        h_list = []
        for p in plist:
            hdr = headerList[p]
            if hdr:
                h_list.append(hdr)
            else:
                self.log.log_debug("Could not obtain header for item:", p)

        added = []

        # what packages do we add?
        # add them one by one
        counter = -1
        for h in h_list:
            # TODO KABI
            if self.__isKernel(h):
                #print "adding %s-%s-%s.%s to kernel_list" % (h['name'], h['version'], h['release'], h['arch'])
                self.kernel_list.append(h)
            counter = counter + 1
            if not h:
                continue
            if check:
                xlist = rpmUtils.checkHeaderForFileConfigExcludes(
                    h, plist[counter], self.ts)
                if xlist:
                    # those packages have to be skipped too
                    self.log.log_debug("Got exclude list from config file check:",
                                       xlist)
                    for x in xlist:
                        self.log.log_debug("Skipping package %s because of "\
                                           "config file change" % (x,))
                        pkgName = x[0][0]
                        if self.available.has_key(pkgName):
                            del self.available[pkgName]
                        self.excludes.append(x)
                    continue

            newp = plist[counter]
            # never add a package that is already obsoleted to
            # the transaction set
            if newp not in self.selected:
                if self.obsoletes.has_key(newp[0]):
                    # if the package name is obsoleted, verify this version is obsolete
                    for obs in self.obsoletes[newp[0]]:
                        if up2dateUtils.isObsoleted(obs, newp):
                            # since we're iterating over the packages that the obsoletes pull
                            # in, we need to check to see if it's in self.selected again
                            if newp not in self.selected:
                                self.selected.append(newp)
                                self.log.log_debug("Adding to transaction set", newp)
                                install = 0
                                pkgsToInstallNotUpdate = self.cfg['pkgsToInstallNotUpdate']
                                if type(pkgsToInstallNotUpdate) == type(""):
                                    pkgsToInstallNotUpdate = [pkgsToInstallNotUpdate]
                                for providename in h['Providename']:
                                    if providename in pkgsToInstallNotUpdate:
                                        install = 1
                                if install:
                                    self.ts.addInstall(h, newp, "i")
                                else:
                                    self.ts.addInstall(h, newp, "u")
                                added.append(newp)
                else:
                    # not obsoleted by anything installed, add it
                    self.selected.append(newp)
                    self.log.log_debug("Adding to transaction set", newp)
                    install = 0
                    # this is supposed to be a list, but try to treat it like
                    # a string as well if only one item  bz:116985
                    pkgsToInstallNotUpdate = self.cfg['pkgsToInstallNotUpdate']
                    if type(pkgsToInstallNotUpdate) == type(""):
                        pkgsToInstallNotUpdate = [pkgsToInstallNotUpdate]
                    for providename in h['Providename']:
                        if providename in pkgsToInstallNotUpdate:
                            install = 1
                    if install:
                        self.ts.addInstall(h, newp, "i")
                    else:
                        self.ts.addInstall(h, newp, "u")
                    added.append(newp)

            if self.available.has_key(newp[0]):
                # no longer available
                del self.available[newp[0]]
            self.__refresh()

        return added

    # Solve dependencies
    def __dependencies(self, dependencies):
        if not dependencies:
            return 0
        changed = 0
        self.log.log_debug("Dependencies:", dependencies)
        solved = []

        # dont catch exceptions here, since we use them to figure out
        # if the server cant solve a dep
        if len(dependencies):
            (ret, depToPkg) = self.solve_dep.solveDep(
                map(lambda a: a[1], dependencies),
                self.availableList,
                refreshCallback = self.refreshCallback,
                progressCallback = self.progressCallback,
                msgCallback = self.msgCallback)
            solved = ret

            for dep in depToPkg.keys():
                pkgs = depToPkg[dep]
                for pkg in pkgs:
                    if not self.pkgToDep.has_key(tuple(pkg)):
                        self.pkgToDep[tuple(pkg)] = [dep]
                    else:
                        self.pkgToDep[tuple(pkg)].append(dep)

        self.log.log_debug("Got back response:", solved)

        # If we got a response back, take out the packages that have been
        # already selected so we don't loop without a purpose
        self.selected.sort()
        for s in solved[:]:
            if s in self.selected:
                self.log.log_debug("Already selected:", s)
                solved.remove(s)

        # try to see if anything in the solved set is obsoleted by
        # something installed
        for pkg in solved[:]:
            if self.obsoletes.has_key(pkg[0]):
                # looks like some installed package at least
                # obsoletes a package with the same name as this one
                obs = self.obsoletes[pkg[0]]
                for ob in obs:
                    # print "isObsoleted(%s, %s): %s" % (ob, pkg, up2dateUtils.isObsoleted(ob, pkg))
                    if up2dateUtils.isObsoleted(ob, pkg):
                        self.log.log_debug("Package %s is obsoleted by %s" % (pkg, ob))
                        # it's obsoleted, dont install it
                        solved.remove(pkg)
                        # changed = 1

        # if we did not get a solution package back, try looking at the
        # problem packages that are throwing these dependencies.
        if not solved:
            solved = []
            snames = map(lambda a: a[0], dependencies)
            for s in snames:
                if self.available.has_key(s):
                    self.log.log_debug("Package %s raised invalid dependency. "\
                                       "Adding %s to set" % (s, self.available[s]))
                    solved.append(self.available[s])
                    changed = 1
                # look in obsoletes as well
                if self.obsoletes.has_key(s):
                    self.log.log_debug("Package %s raised a dep, but it is "\
                                       "obsoleted by: %s" % (s, self.obsoletes[s]))
                    # for every package that obsoletes this package,
                    # add it to the solve list if it's available
                    for p in self.obsoletes[s]:
                        if self.available.has_key(p[0]):
                            solved.append(self.available[p[0]])
                            changed = 1

        # sanitize this list
        solved, skiplist = self.__skip(solved)
        added = self.__add(solved, check = 1)
        if added:
            changed = 1
        else:
            # NOTE: THIS IS A TOTAL KLUGE.
            # It's ugly, and misplaced, and ugly. But it fixes a potentially
            # mis-solved dep case, so here we go... At some point in the
            # future the depsolve will have to be rewritten to make
            # self.available keyed off at least package name _and_ arch, and
            # the dep information returned from the dep check needs to
            # include the arch of the package raising the dep as well. But
            # in the meantime, this is a workaround...
            #
            # Try upgrading everything in sight again... This can happen if
            # one arch of an installed package needs to be updated to solve
            # the dep. Since we always get a solution back, we never fall
            # into the "not solved" case the first time, so if we get here,
            # take a look at upgrading it anyway. Worst case is attempting
            # this twice on a transaction that is pretty close to failing
            # anyway, so one last-ditch attempt shouldn't hurt.

            # since we did not get a usable solution added, try looking at
            # the problem packages that are throwing these dependencies.
            solved = []
            snames = map(lambda a: a[0], dependencies)
            for s in snames:
                if self.allAvailable.has_key(s):
                    for i in self.allAvailable[s]:
                        # we can add multiple arches here; really we need to know
                        # the installed packages as well, and to respect
                        # cfg['forcedArch']
                        self.log.log_debug("Package %s raised invalid dependency. "\
                                           "Adding %s to set" % (s, i))
                        solved.append(i)
                        changed = 1
                # look in obsoletes as well
                if self.obsoletes.has_key(s):
                    self.log.log_debug("Package %s raised a dep, but it is "\
                                       "obsoleted by: %s" % (s, self.obsoletes[s]))
                    # for every package that obsoletes this package,
                    # add it to the solve list if it's available
                    for p in self.obsoletes[s]:
                        if self.available.has_key(p[0]):
                            solved.append(self.available[p[0]])
                            changed = 1

            added = self.__add(solved, check = 1)
            if added:
                changed = 1
            else:
                changed = 0

        return changed

    # Solve conflicts
    def __conflicts(self, conflicts):
        if not conflicts:
            return 0
        changed = 0
        self.log.log_debug("Conflicts:", conflicts)
        solved = []
        for c in conflicts:
            # c should be a pair (name1, name2)
            # if one of these packages is still available, grab it
            for conflname in c:
                if self.available.has_key(conflname):
                    solved.append(self.available[conflname])
                    changed = 1
                if self.obsoletes.has_key(conflname):
                    self.log.log_debug("Package %s raised a dep, but it is "\
                                       "obsoleted by: %s" % (
                        conflname, self.obsoletes[conflname]))
                    # for every package that obsoletes this package, add
                    # it to the solve list if it's available
                    for p in self.obsoletes[conflname]:
                        if self.available.has_key(p[0]):
                            solved.append(self.available[p[0]])
                            changed = 1

        if not solved and not changed:
            raise up2dateErrors.ConflictError(_(
                "Could not satisfy conflict dependencies."), conflicts)

        solved, skiplist = self.__skip(solved)
        added = self.__add(solved, check = 1)
        if added:
            changed = 1
        return changed

    def showDepPackages(self, depPkgData, pkgReqData, unsolveddeps, verbose=None):
        # we need to filter out the unsolved deps from this list...
        pkgs = depPkgData.keys()
        pkgs.sort()
        # pkgs is the list of all pkgs that had deps...
        verbose = 1
        reqPkgData = {}
        # flip the pkg->reqs mapping into req->pkgs;
        # we need this so we can see what package solved a given dep
        for pkg in pkgReqData.keys():
            reqs = pkgReqData[pkg]
            for req in reqs:
                if not reqPkgData.has_key(req):
                    reqPkgData[req] = []
                reqPkgData[req].append(pkg)

        msg = ""
        msg = msg + _("The following packages were added to your selection to satisfy dependencies:\n")
        msg = msg + _("""Package                                 Required by\n""")
        msg = msg + """----------------------------------------------------------------------------\n"""

        # loop over all the packages that solve a dep
        missingdeps = []
        unsolveddeps = map(lambda a: a[1][0], unsolveddeps)
        for pkg in pkgs:
            pkgStr = "%s-%s-%s.%s" % (pkg[0], pkg[1], pkg[2], pkg[4])
            # for each dep that package solves, uniq'ed
            deps = depPkgData[pkg]
            u = {}
            for i in deps:
                u[i] = None
            deps = u.keys()
            for dep in deps:
                if dep in unsolveddeps:
                    missingdeps.append(dep)
                    continue
                # see what packages raised that dep
                reqPkgs = reqPkgData[dep]
                # multiple pkgs can raise multiple deps, so uniquify
                d = {}
                for i in reqPkgs:
                    d[tuple(i)] = None
                reqPkgs = d.keys()
                for reqPkg in reqPkgs:
                    if verbose:
                        msg = msg + "%-40s%-40s%-40s\n" % (
                            pkgStr,
                            "%s-%s-%s" % (reqPkg[0], reqPkg[1], reqPkg[2]),
                            dep)
                    else:
                        msg = msg + "%-40s%-40s\n" % (
                            pkgStr,
                            "%s-%s-%s" % (reqPkg[0], reqPkg[1], reqPkg[2]))

        # if len(missingdeps):
        #     msg = msg + "\nThe following deps were unsolved\n"
        #     msg = msg + "----------------------------------\n"
        #     msg = msg + "%-40s%-40s%-40s\n" % ("Required", "by", "attempted to solve with")
        #     for dep in missingdeps:
        #         msg = msg + "dep: %s" % dep
        #         pkg = depPkgData[dep]
        #         reqPkg = reqPkgData[dep]
        #         msg = msg + "%-40s%-40s%-40s\n" % (dep, "%s-%s-%s" % (pkg[0], pkg[1], pkg[2]),
        #                                            "%s-%s-%s" % (reqPkg[0], reqPkg[1], reqPkg[2]))

        return msg
    def __dep_error(self, deps):
        s = StringIO.StringIO()
        for dep in deps:
            ((name, version, release),
             (needsName, needsVersion),
             flags, suggested, sense) = dep
            if sense == 1:
                blurb = "conflicts with"
            else:
                blurb = "requires"
            if needsVersion:
                s.write(_("%-40.40s %s %s \n") % (
                    "%s-%s-%s" % (name, version, release),
                    blurb,
                    "%s %s %s" % (needsName,
                                  up2dateUtils.rpmFlagsToOperator(flags),
                                  needsVersion)))
            else:
                s.write(_("%-40.40s %s %s\n") % (
                    "%s %s %s-%s" % (name,
                                     up2dateUtils.rpmFlagsToOperator(flags),
                                     version, release),
                    blurb,
                    needsName))
        ret = s.getvalue()
        s.close()
        return ret
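
    # Illustrative only (not from the original source): given a dep such as
    # (("foo", "1.2", "3"), ("libbar.so.1", "2.0"), flags, None, 0), the
    # formatting above would produce a line roughly like
    #   foo-1.2-3                                requires libbar.so.1 >= 2.0
    # assuming rpmFlagsToOperator(flags) maps the RPM sense flags to ">=".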

    def __filter_excludes_for_deps(self, deps):
        """This just filters the exclude list down to the entries that are
        actually applicable to the failed deps"""
        # FIXME: this is probably fragile
        # FIXME: return the applicable and the non-applicable entries
        # in separate lists so we can display them as well?
        depnames = []
        for dep in deps:
            ((name, version, release),
             (needsName, needsVersion),
             flags, suggested, sense) = dep
            depnames.append(name)
            depnames.append(needsName)

        new_excludes = []
        for exclude in self.excludes:
            if exclude[0][0] in depnames:
                new_excludes.append(exclude)

        return new_excludes

    # generic dependency handler
    def process_deps(self, deps):
        if not deps:
            return 0
        conflicts = []
        dependencies = []

        last_excludes = self.excludes
        self.excludes = []

        for dep in deps:
            self.log.log_debug("Processing dependency", dep)
            ((name, version, release),
             (needsName, needsVersion),
             flags, suggested, sense) = dep

            tup = (name, version, release)
            if not self.pkgToReq.has_key(tup):
                self.pkgToReq[tup] = []
            self.pkgToReq[tup].append(needsName)

            if sense == rpm.RPMDEP_SENSE_REQUIRES:
                dependencies.append((name, needsName))
            elif sense == rpm.RPMDEP_SENSE_CONFLICTS:
                # print "((%s,%s,%s), (%s, %s), %s, %s, %s)" % (name, version, release,
                #                                               needsName, needsVersion,
                #                                               flags, suggest, sense)
                conflicts.append((name, needsName))
            else:
                raise up2dateErrors.DependencySenseError(_(
                    "Don't know how to handle dependency sense \"%s\"") % sense,
                    sense)

        self.__refresh()
        # now solve the dependencies...
        changed = self.__dependencies(dependencies)
        self.__refresh()
        # and the conflicts...
        changed = changed + self.__conflicts(conflicts)
        self.__refresh()

        if not changed:
            # blow up
            if not self.excludes:
                # get the last round instead
                self.excludes = last_excludes
            errmsg = ""
            applicable_excludes = self.__filter_excludes_for_deps(deps)
            if len(applicable_excludes):
                errmsg = showSkipListHits(self.excludes)

            dep_error = self.__dep_error(deps)
            blip = self.showDepPackages(self.pkgToDep, self.pkgToReq, deps)
            dep_error = dep_error + "\n\n" + blip

            raise up2dateErrors.DependencyError(_("%s\nUnresolvable chain "\
                                                  "of dependencies:\n%s") % (
                errmsg, dep_error), deps)
        return 1
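
    # Note (descriptive, inferred from the code above): process_deps expects
    # each dep in the format produced by rpm's TransactionSet.check(), i.e.
    #   ((name, version, release), (needsName, needsVersion), flags, suggested, sense)
    # where sense is rpm.RPMDEP_SENSE_REQUIRES or rpm.RPMDEP_SENSE_CONFLICTS.
    # The synthetic kernel-module deps built in __findKernelModuleDeps below
    # follow the same shape so they can be fed through this path unchanged.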

    def findKernelKabis(self):
        match_re = re.compile(".*\((.*)\).*")
        self.kabi_dict = {}
        for ker in self.kernel_list:
            for provide in ker['provides']:
                if provide[:5] == "kABI(":
                    m = match_re.search(provide)
                    if m:
                        kabi = m.groups()[0]
                        self.kabi_dict[kabi] = ker
        if len(self.kabi_dict):
            return 1
        return 0
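
    # Illustrative only (not from the original source): a kernel header whose
    # provides list contains e.g. "kABI(5.0.x86_64)" would end up in
    # self.kabi_dict as {"5.0.x86_64": <that kernel header>}, since the
    # regular expression above captures whatever sits between the parentheses.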

    def __mungeModuleNameArch(self, pkg_name, kabi):
        # kabi = "5.0.x86_64"
        # pkg[0] == "kernel-module-e1000-kabi-4.0"
        kabi_bits = kabi.split('.')
        kabi_str = string.join(kabi_bits[:-1], '.')
        arch = kabi_bits[-1]

        name_bits = pkg_name.split('-')
        name_str = string.join(name_bits[:-1], '-')
        name = "%s-%s" % (name_str, kabi_str)

        return (name, arch)
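
    # Worked example (illustrative, not from the original source): with
    # pkg_name = "kernel-module-e1000-kabi-4.0" and kabi = "5.0.x86_64",
    # kabi splits into kabi_str = "5.0" and arch = "x86_64", the trailing
    # "-4.0" is dropped from the package name, and the method returns
    # ("kernel-module-e1000-kabi-5.0", "x86_64").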

    def __findKernelModuleDeps(self):
        deps = []
        if len(self.kernel_list):
            if self.findKernelKabis():
                # we've got kernels with a kABI of some sort,
                # so see what modules need those kABIs
                # FIXME: this code has a bug in it...
                # namely, it is stupid about the arch
                for kabi in self.kabi_dict.keys():
                    # construct an appropriate requires string
                    req_str = "kABI(*)"
                    pkgs = rpmUtils.installedHeaderByKeyword(requirename=req_str,
                                                             provides="kernel-module")

                    # we need to munge the name/arch of the old package to generate
                    # a dep name for the new package
                    for pkg in pkgs:
                        (dep_name, arch) = self.__mungeModuleNameArch(pkg['name'], kabi)

                        alreadySelected = None
                        epoch = pkg['epoch']
                        if epoch == None:
                            epoch = ""

                        # detect if we've already added that package to the dep set;
                        # if so, don't raise a fake dep since it can't be solved at
                        # that point
                        for selPkg in self.selected:
                            bar = [dep_name, pkg['version'], pkg['release'], epoch, arch]
                            if selPkg[:5] == bar:
                                alreadySelected = 1
                        if alreadySelected:
                            continue

                        # the tuple mimics the dep format returned by ts.check()
                        foo = (("kernel-module-KABI-virtual-dep", "1.0", "1"),
                               (dep_name, None), 0, None, 0)
                        deps.append(foo)

        return deps

    # run a dependency iteration
    def solvedep(self):
        self.log.log_debug("Checking for dependencies")
        self.__refresh()

        deps = self.ts.check()

        # in addition to the deps rpm tells us about, we
        # also need to see if there are new kernels installed,
        # and if so, we need to make sure the right modules
        # get added to the deps list
        virtDeps = self.__findKernelModuleDeps()
        deps = deps + virtDeps

        if deps:
            self.log.log_debug("RPM returned %d deps." % len(deps))
            # make the deps list unique:
            deps.sort()
            last = deps[-1]
            for i in range(len(deps) - 2, -1, -1):
                if last == deps[i]:
                    del deps[i]
                else:
                    last = deps[i]

        ret = self.process_deps(deps)
        return ret

    # run the transaction set now to check for any other problems
    def run(self):
        self.log.log_debug("Running transaction (final step)...")
        self.ts.order()
        self.__refresh()

        self.ts.setFlags(rpm.RPMTRANS_FLAG_BUILD_PROBS)
        # FIXME: uh, wtf. Apt strips the "os" header out of their headers, so
        # any transaction using those headers is going to fail... kind of lame...
        # the REPLACEOLDFILES is to ignore some wacky apt header issues,
        # see bug #119111
        self.ts.setProbFilter(rpm.RPMTRANS_FLAG_BUILD_PROBS |
                              rpm.RPMPROB_FILTER_IGNOREOS |
                              rpm.RPMPROB_FILTER_REPLACEOLDFILES)

        ret = self.ts.run(rpmUtils.rpmCallback, self.progressCallback)
        self.__refresh()
        return (ret, self.pkgToDep, self.pkgToReq)