Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -145,10 +145,10 @@ protected List<StoragePool> reorderPoolsByCapacity(DeploymentPlan plan, List<Sto
storageType = "shared";
}

logger.debug(String.format(
"Filtering storage pools by capacity type [%s] as the first storage pool of the list, with name [%s] and ID [%s], is a [%s] storage.",
logger.info(
"Filtering storage pools by capacity type [{}] as the first storage pool of the list, with name [{}] and ID [{}], is a [{}] storage.",
capacityType, storagePool.getName(), storagePool.getUuid(), storageType
));
);

Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
List<Long> poolIdsByCapacity = result.first();
Expand Down Expand Up @@ -185,7 +185,7 @@ protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, L
Long clusterId = plan.getClusterId();

List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount));
logger.debug("List of pools in ascending order of number of volumes for account [{}] is [{}].", account, poolIdsByVolCount);

// now filter the given list of Pools by this ordered list
Map<Long, StoragePool> poolMap = new HashMap<>();
Expand All @@ -206,16 +206,11 @@ protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, L

@Override
public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) {
if (logger.isTraceEnabled()) {
logger.trace("reordering pools");
}
if (pools == null) {
logger.trace("There are no pools to reorder; returning null.");
logger.info("There are no pools to reorder.");
return null;
}
if (logger.isTraceEnabled()) {
logger.trace(String.format("reordering %d pools", pools.size()));
}
logger.info("Reordering [{}] pools", pools.size());
Comment on lines 212 to +213
Copy link

Copilot AI Jan 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These log statements (and similar ones added below) have been raised to INFO level, which means every pool reordering attempt will now be emitted at INFO instead of DEBUG/TRACE and can significantly increase log volume in busy environments. This also goes beyond the PR description (which mentioned adding DEBUG logs and promoting TRACE to DEBUG), so please confirm that INFO is the intended level for these high-frequency paths or consider keeping them at DEBUG while still adopting the new Log4j2 placeholder syntax.

Copilot uses AI. Check for mistakes.
Account account = null;
if (vmProfile.getVirtualMachine() != null) {
account = vmProfile.getOwner();
Expand All @@ -224,9 +219,7 @@ public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachinePro
pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);

if (vmProfile.getVirtualMachine() == null) {
if (logger.isTraceEnabled()) {
logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
}
logger.info("The VM is null, skipping pool reordering by disk provisioning type.");
return pools;
}

Expand All @@ -240,14 +233,10 @@ public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachinePro

List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
String volumeAllocationAlgorithm = VolumeOrchestrationService.VolumeAllocationAlgorithm.value();
logger.debug("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
logger.info("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
if (volumeAllocationAlgorithm.equals("random") || volumeAllocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
reorderRandomPools(pools);
} else if (StringUtils.equalsAny(volumeAllocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
if (logger.isTraceEnabled()) {
logger.trace("Using reordering algorithm {}", volumeAllocationAlgorithm);
}

if (volumeAllocationAlgorithm.equals("userdispersing")) {
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
} else {
Expand All @@ -259,16 +248,15 @@ List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, D

/**
 * Shuffles the given list of storage pools in place so that pools are not always
 * probed in the same order when the "random" allocation algorithm is in effect
 * (or when no account is available to drive a deterministic ordering).
 *
 * @param pools mutable list of candidate storage pools; shuffled in place.
 */
void reorderRandomPools(List<StoragePool> pools) {
    StorageUtil.traceLogStoragePools(pools, logger, "pools to choose from: ");
    logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
    logger.debug("Pools to shuffle: [{}]", pools);
    // secureRandom gives an unpredictable permutation; Collections.shuffle mutates the list in place.
    Collections.shuffle(pools, secureRandom);
    logger.debug("Shuffled list of pools to choose from: [{}]", pools);
}

private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> pools, DiskProfile diskProfile) {
if (diskProfile != null && diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) {
logger.info("Reordering [{}] pools by disk provisioning type [{}].", pools.size(), diskProfile.getProvisioningType());
List<StoragePool> reorderedPools = new ArrayList<>();
int preferredIndex = 0;
for (StoragePool pool : pools) {
Expand All @@ -282,22 +270,28 @@ private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> p
reorderedPools.add(preferredIndex++, pool);
}
}
logger.debug("Reordered list of pools by disk provisioning type [{}]: [{}]", diskProfile.getProvisioningType(), reorderedPools);
return reorderedPools;
} else {
if (diskProfile == null) {
logger.info("Reordering pools by disk provisioning type wasn't necessary, since no disk profile was found.");
} else {
logger.debug("Reordering pools by disk provisioning type wasn't necessary, since the provisioning type is [{}].", diskProfile.getProvisioningType());
}
return pools;
}
}

protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh));
logger.debug("Checking if storage pool [{}] is suitable to disk [{}].", pool, dskCh);
if (avoid.shouldAvoid(pool)) {
logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh));
logger.debug("StoragePool [{}] is in avoid set, skipping this pool to allocation of disk [{}].", pool, dskCh);
return false;
}

if (dskCh.requiresEncryption() && !pool.getPoolType().supportsEncryption()) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType()));
logger.debug("Storage pool type '[{}]' doesn't support encryption required for volume, skipping this pool", pool.getPoolType());
}
return false;
}
Expand All @@ -319,8 +313,8 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
}

if (!checkDiskProvisioningSupport(dskCh, pool)) {
logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType")));
logger.debug("Storage pool [{}] does not have support to disk provisioning of disk [{}].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType"));
return false;
}

Expand All @@ -332,7 +326,7 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
HostVO plannedHost = hostDao.findById(plan.getHostId());
if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost));
logger.debug("StoragePool [{}] and host [{}] does not have matching storage access groups", pool, plannedHost);
}
return false;
}
Expand All @@ -343,13 +337,13 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
if (!isTempVolume) {
volume = volumeDao.findById(dskCh.getVolumeId());
if (!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) {
logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume));
logger.debug("Pool [{}] is not compatible with volume [{}], skipping it.", pool, volume);
return false;
}
}

if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) {
logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume));
logger.debug("Cannot allocate pool [{}] to volume [{}] because the max number of managed clustered filesystems has been exceeded.", pool, volume);
return false;
}

Expand All @@ -358,13 +352,13 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh));
if (dskCh.getHypervisorType() == HypervisorType.VMware) {
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is a parent datastore cluster.", pool, volume);
return false;
}
if (pool.getParent() != 0L) {
StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent());
if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not in [{}] state.", datastoreCluster, volume, StoragePoolStatus.Up);
return false;
}
}
Expand All @@ -374,11 +368,11 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
storageMgr.isStoragePoolCompliantWithStoragePolicy(dskCh.getDiskOfferingId(), pool) :
storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool);
if (!isStoragePoolStoragePolicyCompliance) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not compliant with the storage policy required by the volume.", pool, volume);
return false;
}
} catch (StorageUnavailableException e) {
logger.warn(String.format("Could not verify storage policy compliance against storage pool %s due to exception %s", pool.getUuid(), e.getMessage()));
logger.warn("Could not verify storage policy compliance against storage pool [{}] due to exception [{}]", pool.getUuid(), e.getMessage());
return false;
}
}
Expand Down Expand Up @@ -427,19 +421,19 @@ private boolean checkHypervisorCompatibility(HypervisorType hyperType, Volume.Ty
/**
 * Logs (at TRACE) the storage pools in the given scope that are being ignored
 * because they are in a disabled state. Purely informational; has no effect on
 * allocation.
 *
 * @param dcId      data center (zone) id to search in.
 * @param podId     pod id, may be null depending on scope.
 * @param clusterId cluster id, may be null depending on scope.
 * @param scope     scope type used to look up disabled pools.
 */
protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) {
    List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope);
    if (disabledPools != null && !disabledPools.isEmpty()) {
        // Log4j2 lazily formats the {} placeholder, so the reflective dump is only built when TRACE is enabled.
        logger.trace("Ignoring pools [{}] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools));
    }
}

/**
 * Logs (at TRACE) the start of a storage-pool search, recording the allocator
 * subclass performing the search and the inputs that constrain it.
 *
 * @param dskCh                  disk profile the pools must match.
 * @param vmProfile              virtual machine profile driving the search.
 * @param plan                   deployment plan (zone/pod/cluster constraints).
 * @param returnUpTo             maximum number of pools to return.
 * @param bypassStorageTypeCheck whether the storage-type check is being skipped.
 */
protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo,
        boolean bypassStorageTypeCheck){
    logger.trace("[{}] is looking for storage pools that match the VM's disk profile [{}], virtual machine profile [{}] and "
            + "deployment plan [{}]. Returning up to [{}] and bypassStorageTypeCheck [{}].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
}

/**
 * Logs (at DEBUG) the result of a storage-pool search: how many suitable pools
 * the allocator subclass is returning, and which ones.
 *
 * @param storagePoolList suitable pools found; expected non-null by this method
 *                        (callers log/return before reaching here when the search fails).
 */
protected void logEndOfSearch(List<StoragePool> storagePoolList) {
    logger.debug("[{}] is returning [{}] suitable storage pools [{}].", this.getClass().getSimpleName(), storagePoolList.size(),
            Arrays.toString(storagePoolList.toArray()));
}

}
Loading