Wednesday, July 8, 2015

vio disk mapping - end the head scratching

I was tormented trying to resolve client LPAR disks to the actual SAN LUNs behind them, so I wrote a script to do the whole damn thing. Auto-documentation is the only way to go.

I still need to work out the differences between a hardware HMC and IVM on a blade....nearly there.....

Much of the below was written by my colleague Felician Moldovan (thanks to him!).

$ cat get_luns.info.ksh
#!/usr/bin/ksh
#
#
timestamp=`date +"%s"`
lpar=$1
#
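# Rough flow (commentary added for readability):
#   get_lpar_data      - collect hdisk / vscsi slot / LUN info on this client LPAR and work out the HMC
#   get_hmc_data       - ask the HMC which VIO server and remote slot each client vscsi slot maps to
#   get_vio_data       - pull lsmap / lspath / mpio_get_config / dlnkmgr data from each VIO server over ssh
#   merge_* functions  - join the client, HMC and VIO views and write diskmap.<hostname>
#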
get_vio_data() {
vio=$1
CTRLR1=$2
CTRLR2=$3
/usr/bin/ssh -qn uxadmin@${vio} -i key "lsdev -Cc disk -F 'name;status;description'" > ${vio}.${timestamp}.disks
/usr/bin/ssh -qn uxadmin@${vio} -i key "lspv" > ${vio}.${timestamp}.pvs
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lsmap -all|grep vhost" >${vio}.${timestamp}.maps
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lspath | grep hdisk" > ${vio}.${timestamp}.paths
/usr/bin/ssh -qn uxadmin@${vio} -i key "sudo /usr/bin/mpio_get_config -Av" > ${vio}.${timestamp}.mpio
/usr/bin/ssh -qn uxadmin@${vio} -i key "sudo /usr/DynamicLinkManager/bin/dlnkmgr view -path -srt lu" > ${vio}.${timestamp}.hit
#
# determine which vhosts are interesting to this lpar and get map info for these vhosts
#
VHOST1=$(grep "C$CTRLR1" ${vio}.${timestamp}.maps | awk '{print $1}' )
#echo $CTRLR1 $VHOST1
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lsmap -vadapter $VHOST1" >${vio}.${VHOST1}.${timestamp}.maps
if [ -n "$CTRLR2" ]
then
VHOST2=$(grep "C$CTRLR2" ${vio}.${timestamp}.maps | awk '{print $1}')
#echo $CTRLR2 $VHOST2
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lsmap -vadapter $VHOST2" >${vio}.${VHOST2}.${timestamp}.maps
fi
}
#
get_hmc_data() # $1=HMC $2=Part of serial Number $3=LPAR_NO
{
# hmcs provide the mapping from vio host scsi controller to client lpar vscsi controller i.e. linking vscsi in client lpar to vhost in VIO server
# once we know the controller and lun on both sides and the vio-client controller mapping we can definitively state the mapping even if there is no pvid on the disk
#echo "Logging into hmc as hscroot. you will be prompted TWICE for hscroot password"
SEARCH_STRING=$2
LPAR_NO=$3
FRAME_NAME=$( /usr/bin/ssh -q hscroot@$1 -i key2 "lssyscfg -r sys -F name" | grep $2)
/usr/bin/ssh -q hscroot@$1 -i key2 "lshwres -r virtualio --rsubtype scsi -m $FRAME_NAME --filter lpar_ids=" $3 " -F slot_num,remote_lpar_name,remote_slot_num" > ${HMC}.${timestamp}.ctrlr
}
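# ${HMC}.${timestamp}.ctrlr now holds one line per client vscsi adapter in the form
# slot_num,remote_lpar_name,remote_slot_num - e.g. (hypothetical values) 4,vios1,21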
#
get_lpar_data()
{
PART_SERIAL=$(uname -m | cut -c4-8)
HMC=$(host $(echo $(lsrsrc IBM.ManagementServer | grep Hostname | head -1 | awk '{print $NF}'| sed -e 's/\"//g'))| awk '{print $1 }')
PARTITION=$(uname -L | awk '{print $1}')
HOST=$(uname -L | awk '{print $2}')
#
for DISK in $(lspv | awk '{print $1}')
do
echo $DISK $(lspv| grep "$DISK "| awk '{print $2}') $(lscfg -l $DISK | awk '{print $2}' | cut -d"-" -f5 | cut -d"L" -f2) \
$(for VPATH in $(lspath -l $DISK |awk '{print $3}')
do
echo $VPATH $(lscfg -l $VPATH|awk '{print $2}' | sed -e 's/-C/ /g'|sed -e 's/-T/ /g' | awk '{print $2" "$4}') $(lspath -l $DISK | grep $VPATH | awk '{print $1}')
done)>>${HOST}.${timestamp}.disks
done
#cat ${HOST}.${timestamp}.disks
}
#
merge_vio_ctrlr_info()
{
# $1 is lpar diskfile $2 is output of get_hmc_data function
while read LINE
do
VIO=$(echo $LINE | cut -d"," -f2)
REM_CTRLR=$(echo $LINE | cut -d"," -f3)
VHOST=$(grep "C$REM_CTRLR" ${vio}.${timestamp}.maps | awk '{print $1}' )
CTRLR=$(echo $LINE | cut -d"," -f1)
sed -e "s/ $CTRLR / $CTRLR $VIO $REM_CTRLR $VHOST /g" $1 >$1.tmp
mv $1.tmp $1
done<$2
#cat $1
}
#
get_SAN_info()
{
# $1 is the hdisk on the vio server to gather SAN info for
# get LUN info for each disk
if grep "$1 " $VIO.${timestamp}.mpio >/dev/null 2>&1
then
line=$(grep -w $1 $VIO.${timestamp}.mpio)
san=`grep "Subsystem Name" $VIO.${timestamp}.mpio | cut -d\' -f2`
lun=`echo ${line} | awk '{print $2}'`
else
line=$(grep -w $1 $VIO.${timestamp}.hit)
san=`echo ${line} | awk '{print $5}' | cut -d. -f2`
lun=`echo ${line} | awk '{print $6}'`
fi
echo "$san $lun"
}
merge_fc_disk_info()
{
for VIO in $(cat ${HMC}.${timestamp}.ctrlr | cut -d"," -f2 | sort | uniq)
do
#echo $VIO
>$1.tmp
while read LINE
do
HDISK=$(echo $LINE | awk '{print $1}')
LPAR=$(hostname)
LUN=$(echo $LINE | awk '{print $3}')
VHOST=$(echo $LINE | awk '{print $8}')
#echo $VHOST
VHDS=$(grep -p $LUN $VIO.$VHOST.${timestamp}.maps | grep -E 'VTD'| awk '{print $NF}')
VHOST_FC_DISK=$(grep -p $LUN $VIO.$VHOST.${timestamp}.maps | grep -E 'Backing'| awk '{print $NF}')
# echo $VHOST_FC_DISK
SAN_INFO=$(get_SAN_info $VHOST_FC_DISK)
FIBRE_PATHS=$(grep "$VHOST_FC_DISK " $VIO.${timestamp}.paths | awk '{print $3" "$1" "}'| xargs echo)
echo $LINE $VIO $VHDS $VHOST_FC_DISK $SAN_INFO $FIBRE_PATHS >> $1.tmp
done<$1
mv $1.tmp $1
done
}
#
get_lpar_data
get_hmc_data $HMC $PART_SERIAL $PARTITION
for VIO in $(cat ${HMC}.${timestamp}.ctrlr | cut -d"," -f2 | sort | uniq)
do
# echo getting vio data from $VIO
get_vio_data $VIO $(echo $(cat ${HMC}.${timestamp}.ctrlr | grep $VIO | cut -d"," -f3 | sort | uniq))
done
merge_vio_ctrlr_info ${HOST}.${timestamp}.disks ${HMC}.${timestamp}.ctrlr
merge_fc_disk_info ${HOST}.${timestamp}.disks
mv ${HOST}.${timestamp}.disks diskmap.${HOST}
rm *${timestamp}*
while read LINE
do
echo $(hostname) $LINE
done<diskmap.${HOST} >diskmap.${HOST}.$(date +%Y%m%d_%H%M%S)
cat diskmap.${HOST}.$(date +%Y%m%d_%H%M%S)
rm diskmap.${HOST}.$(date +%Y%m%d_%H%M%S)
rm diskmap.${HOST}
exit 0
$

Monday, February 4, 2013

Using Hitachi disks with native AIX MPIO (no HDLM)

1.  Check MPIO filesets available:
lslpp -l devices.common.IBM.mpio.rte
2. Get the Hitachi-specific MPIO ODM updates (base and update packages). The list below is current as of 2 Feb 2013:
https://tuf.hds.com/wiki/pub/Main/AIXODMUpdates/aix_odm_5400.zip
https://tuf.hds.com/wiki/pub/Main/AIXODMUpdates/aix_odm_5401U.zip
https://tuf.hds.com/wiki/pub/Main/AIXODMUpdates/aix_odm_5402U.zip
https://tuf.hds.com/wiki/pub/Main/AIXODMUpdates/aix_odm_5403U.zip
https://tuf.hds.com/wiki/pub/Main/AIXODMUpdates/aix_odm_5404U.zip
 
1)      Unzip all files and move *.tar and *U to your target server in path /usr/sys/inst.images. 
2)      cd /usr/sys/inst.images
3)      extract tar file
tar xvf *tar
4)       inutoc .
5)      Remove the packages Hitachi.aix.support.rte and its dependent package Hitachi.hacmp.support.rte, which relate to the non-MPIO drivers (e.g. HDLM or PowerPath). These are incompatible with the native MPIO driver for HDS disks. Make sure to change "remove dependent software" to yes or the action will fail.
6)       smitty install: Select and install the software in /usr/sys/inst.images
7)      Check the installed filesets
8)  If any Hitachi disks have been detected already, remove them with rmdev -dRl hdiskX
9)  Reboot the server at the soonest opportunity
10)  Check for detected MPIO disks: "lsdev -Cc disk"
11)  Check that 2 or more paths are seen to each Hitachi disk: "lspath"
12)  You can also check paths and disk sizes with:
echo "No hdiskxx size-mb No-paths"
p="/usr/sbin/lspath";for i in `$p| awk ' !/Missing/ {print $2}'|sort|uniq `;do echo "$i; `getconf DISK_SIZE /dev/$i` mb; `$p| awk ' !/Missing/ &&/'$i' / {print $2}'|wc -l|sed 's/ //g'`" ; done|cat -n
13)  The default multipath setup is failover, a single_path reserve policy and queue_depth=2. You can check this with lsattr -El hdisk4, for example.
14)  To configure round-robin with a queue depth of 8 we need to change each disk as follows (a loop to apply this to every Hitachi disk is sketched after this list):
              chdev -l hdisk4 -a reserve_policy=no_reserve -a algorithm=round_robin -a queue_depth=8
This will also have to be done for any new disks that get added.
15)  Validate that the new settings are applied using lsattr -El hdiskX
16)  That's it. You can build your volume groups and filesystems as needed.
17)  you can check to ensure data is going down both fibre adapters by running "iostat -a | grep fcs"
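To apply the settings from step 14 to every Hitachi disk in one pass, here is a minimal ksh sketch. It assumes the Hitachi LUNs can be picked out by matching "hitachi" in the lsdev description; check the output of "lsdev -Cc disk" on your box first and adjust the grep to suit (and use chdev -P plus a reboot if the disks are already open).

lsdev -Cc disk | grep -i hitachi | awk '{print $1}' | while read DISK
do
chdev -l $DISK -a reserve_policy=no_reserve -a algorithm=round_robin -a queue_depth=8
lsattr -El $DISK -a reserve_policy -a algorithm -a queue_depth
done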
 

Monday, February 6, 2012

VIO - resolve client lpar disk to san luns

This had my head wrecked for 2 days.....

90+ dLPARs across 2 x p560 frames, 6 x JS23 blades and a PS701 blade, all hanging off a DS3400 boot-disk SAN and a Hitachi USP-V data-disk SAN.

Thanks to Felician Moldovan for much of the below script.

Basically, to resolve the mapping you have to (a rough command-level sketch of these steps follows the list):
1. get the disk data in the client LPAR
2. work out the HMC and VIO servers
3. get the client LPAR vscsi to VIO server vhost mapping (from the HMC)
4. get the disk data from the VIO servers
5. parse the whole damn lot into something presentable.
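In command terms the chain looks roughly like this (a rough sketch rather than the script itself; frame names, slot numbers and LUN ids are placeholders):

# on the client LPAR: which vscsi slot and LUN id each hdisk sits behind
lscfg -l hdisk0        # location code ...-Cn-T1-Lxx... gives the slot (Cn) and the LUN id
# on the HMC: which VIO server and remote slot that client slot maps to
lshwres -r virtualio --rsubtype scsi -m <frame> --filter lpar_ids=<lpar_id> -F slot_num,remote_lpar_name,remote_slot_num
# on the VIO server: which vhost sits in that remote slot and what backing device / LUN it exports
/usr/ios/cli/ioscli lsmap -all | grep vhost
/usr/ios/cli/ioscli lsmap -vadapter vhostN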

I still have not resolved all the issues for IVM. I need to work out how to convert the HMC commands run as hscroot into something that the IVM/VIO server in the blade can eat.

The script is below for your viewing pleasure. Apologies for the lack of commentary within the script.

$ cat get_luns.info.ksh
#!/usr/bin/ksh
#
#
timestamp=`date +"%s"`
lpar=$1
#
get_vio_data() {
vio=$1
CTRLR1=$2
CTRLR2=$3
/usr/bin/ssh -qn uxadmin@${vio} -i key "lsdev -Cc disk -F 'name;status;description'" > ${vio}.${timestamp}.disks
/usr/bin/ssh -qn uxadmin@${vio} -i key "lspv" > ${vio}.${timestamp}.pvs
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lsmap -all|grep vhost" >${vio}.${timestamp}.maps
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lspath | grep hdisk" > ${vio}.${timestamp}.paths
/usr/bin/ssh -qn uxadmin@${vio} -i key "sudo /usr/bin/mpio_get_config -Av" > ${vio}.${timestamp}.mpio
/usr/bin/ssh -qn uxadmin@${vio} -i key "sudo /usr/DynamicLinkManager/bin/dlnkmgr view -path -srt lu" > ${vio}.${timestamp}.hit
#
# determine which vhosts are interesting to this lpar and get map info for these vhosts
#
VHOST1=$(grep "C$CTRLR1" ${vio}.${timestamp}.maps | awk '{print $1}' )
#echo $CTRLR1 $VHOST1
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lsmap -vadapter $VHOST1" >${vio}.${VHOST1}.${timestamp}.maps
if [ -n "$CTRLR2" ]
then
VHOST2=$(grep "C$CTRLR2" ${vio}.${timestamp}.maps | awk '{print $1}')
#echo $CTRLR2 $VHOST2
/usr/bin/ssh -qn uxadmin@${vio} -i key "/usr/ios/cli/ioscli lsmap -vadapter $VHOST2" >${vio}.${VHOST2}.${timestamp}.maps
fi
}
#
get_hmc_data() # $1=HMC $2=Part of serial Number $3=LPAR_NO
{
# hmcs provide the mapping from vio host scsi controller to client lpar vscsi controller i.e. linking vscsi in client lpar to vhost in VIO server
# once we know the controller and lun on both sides and the vio-client controller mapping we can definitively state the mapping even if there is no pvid on the disk
#echo "Logging into hmc as hscroot. you will be prompted TWICE for hscroot password"
SEARCH_STRING=$2
LPAR_NO=$3
FRAME_NAME=$( /usr/bin/ssh -q hscroot@$1 -i key2 "lssyscfg -r sys -F name" | grep $2)
/usr/bin/ssh -q hscroot@$1 -i key2 "lshwres -r virtualio --rsubtype scsi -m $FRAME_NAME --filter lpar_ids=" $3 " -F slot_num,remote_lpar_name,remote_slot_num" > ${HMC}.${timestamp}.ctrlr
}
#
get_lpar_data()
{
PART_SERIAL=$(uname -m | cut -c4-8)
HMC=$(host $(echo $(lsrsrc IBM.ManagementServer | grep Hostname | head -1 | awk '{print $NF}'| sed -e 's/\"//g'))| awk '{print $1 }')
PARTITION=$(uname -L | awk '{print $1}')
HOST=$(uname -L | awk '{print $2}')
#
for DISK in $(lspv | awk '{print $1}')
do
echo $DISK $(lspv| grep "$DISK "| awk '{print $2}') $(lscfg -l $DISK | awk '{print $2}' | cut -d"-" -f5 | cut -d"L" -f2) \
$(for VPATH in $(lspath -l $DISK |awk '{print $3}')
do
echo $VPATH $(lscfg -l $VPATH|awk '{print $2}' | sed -e 's/-C/ /g'|sed -e 's/-T/ /g' | awk '{print $2" "$4}') $(lspath -l $DISK | grep $VPATH | awk '{print $1}')
done)>>${HOST}.${timestamp}.disks
done
#cat ${HOST}.${timestamp}.disks
}
#
merge_vio_ctrlr_info()
{
# $1 is lpar diskfile $2 is output of get_hmc_data function
while read LINE
do
VIO=$(echo $LINE | cut -d"," -f2)
REM_CTRLR=$(echo $LINE | cut -d"," -f3)
VHOST=$(grep "C$REM_CTRLR" ${vio}.${timestamp}.maps | awk '{print $1}' )
CTRLR=$(echo $LINE | cut -d"," -f1)
sed -e "s/ $CTRLR / $CTRLR $VIO $REM_CTRLR $VHOST /g" $1 >$1.tmp
mv $1.tmp $1
done<$2
#cat $1
}
#
get_SAN_info()
{
# $1 is the hdisk on the vio server to gather SAN info for
# get LUN info for each disk
if grep "$1 " $VIO.${timestamp}.mpio >/dev/null 2>&1
then
line=$(grep -w $1 $VIO.${timestamp}.mpio)
san=`grep "Subsystem Name" $VIO.${timestamp}.mpio | cut -d\' -f2`
lun=`echo ${line} | awk '{print $2}'`
else
line=$(grep -w $1 $VIO.${timestamp}.hit)
san=`echo ${line} | awk '{print $5}' | cut -d. -f2`
lun=`echo ${line} | awk '{print $6}'`
fi
echo "$san $lun"
}
merge_fc_disk_info()
{
for VIO in $(cat ${HMC}.${timestamp}.ctrlr | cut -d"," -f2 | sort | uniq)
do
#echo $VIO
>$1.tmp
while read LINE
do
HDISK=$(echo $LINE | awk '{print $1}')
LPAR=$(hostname)
LUN=$(echo $LINE | awk '{print $3}')
VHOST=$(echo $LINE | awk '{print $8}')
#echo $VHOST
VHDS=$(grep -p $LUN $VIO.$VHOST.${timestamp}.maps | grep -E 'VTD'| awk '{print $NF}')
VHOST_FC_DISK=$(grep -p $LUN $VIO.$VHOST.${timestamp}.maps | grep -E 'Backing'| awk '{print $NF}')
# echo $VHOST_FC_DISK
SAN_INFO=$(get_SAN_info $VHOST_FC_DISK)
FIBRE_PATHS=$(grep "$VHOST_FC_DISK " $VIO.${timestamp}.paths | awk '{print $3" "$1" "}'| xargs echo)
echo $LINE $VIO $VHDS $VHOST_FC_DISK $SAN_INFO $FIBRE_PATHS >> $1.tmp
done<$1
mv $1.tmp $1
done
}
#
get_lpar_data
get_hmc_data $HMC $PART_SERIAL $PARTITION
for VIO in $(cat ${HMC}.${timestamp}.ctrlr | cut -d"," -f2 | sort | uniq)
do
# echo getting vio data from $VIO
get_vio_data $VIO $(echo $(cat ${HMC}.${timestamp}.ctrlr | grep $VIO | cut -d"," -f3 | sort | uniq))
done
merge_vio_ctrlr_info ${HOST}.${timestamp}.disks ${HMC}.${timestamp}.ctrlr
merge_fc_disk_info ${HOST}.${timestamp}.disks
mv ${HOST}.${timestamp}.disks diskmap.${HOST}
rm *${timestamp}*
while read LINE
do
echo $(hostname) $LINE
done<diskmap.${HOST} >diskmap.${HOST}.$(date +%Y%m%d_%H%M%S)
cat diskmap.${HOST}.$(date +%Y%m%d_%H%M%S)
rm diskmap.${HOST}.$(date +%Y%m%d_%H%M%S)
rm diskmap.${HOST}
exit 0
$

A really useful site for sysadmins:

http://www.tablespace.net/quicksheet/

Friday, December 3, 2010

Solaris 11 Express available

Just downloaded Oracle Solaris 11 Express.
Running it on VirtualBox.

Good tutorial from Jim Laurent on his blog: http://blogs.sun.com/jimlaurent/

Anyway, the blurb saying it boots really fast is true, although I do have a new i7 / 8GB laptop so that may have something to do with it.

I will do a speed comparison with the Solaris 10 virtual machine I have as soon as I can.

It seems to stick/crash now and again. I have downloaded the latest VirtualBox and will update.

GNOME-only desktop. ZFS root by default. Basically it looks like OpenSolaris 2010.11?

However, no major complaints as yet. I need to get my head around the packaging / patching, but ZFS root is very convenient for snapshot-based live update. Hopefully extended downtime for OS patching is now a thing of the past. Hopefully. More later when I am bothered.

Thursday, June 18, 2009

How I got sendmail to work......

As is well documented, sendmail is tricky to get to work the way you want it to.....
So when I finally got it to work as required, I documented the process for my colleagues.....

The biggest issue was how to make root mail look like it comes from an address with a valid reverse-lookup DNS entry on the public network. Each machine has an address like <sitecode>ux123.<sitecode>.company.com, which is fine. We masquerade all outbound mail as if it comes from user@company.com; again, this is fine, no problems. However, root is specifically excluded from the masquerade and still comes out as root@<sitecode>ux123.<sitecode>.company.com. This makes sense, as I do want to know which machine a root mail comes from. The problem is that many (soon most) external mail gateways and recipient machines will do a reverse DNS lookup, as a spam-prevention measure, to ensure that the sender is not being spoofed.

We do have company.com with valid forward, reverse and MX DNS entries on the public internet. Most of our machines are in private IP space (10.x.x.x), so these have to be NAT'd, which is fine, but without a valid reverse-lookup DNS entry on the internet, valid recipients of mail from root (or any service started under the root user) can be blocked.

So, in short, we need to masquerade root as something other than root@company.com.

To achieve this we need to use sendmail's genericstable. It is like an aliases table for outbound mail (aliases only takes care of inbound). Just like aliases, it needs to be compiled into a db format.

Here is the procedure for Solaris and AIX (I will come back to the HP-UX version).






Solaris 10



1. Create an .mc file in /etc/mail/cf/cf, e.g. sendmail.mc, and insert the following text:

VERSIONID(`@(#)sendmail.mc 1.11 (Sun) 06/21/04')
OSTYPE(`solaris8')dnl
DOMAIN(`solaris-generic')dnl
define(`confFALLBACK_SMARTHOST', `mailhost$?m.$m$.')dnl
define(`SMART_HOST', `smtprelay.site.company.com')dnl
FEATURE(`masquerade_envelope')dnl
MASQUERADE_AS(`company.com')dnl
FEATURE(genericstable)dnl
GENERICS_DOMAIN(`host.site.company.com')dnl
FEATURE(generics_entire_domain)
MAILER(`local')dnl
MAILER(`smtp')dnl


2. Compile the sendmail.mc file into a usable sendmail.cf file:
/usr/ccs/bin/m4 ../m4/cf.m4 sendmail.mc >sendmail.cf

3. Back up the existing sendmail.cf and replace it with the new cf file:
cp /etc/mail/sendmail.cf /etc/mail/sendmail.cf.<datestring>
cp /etc/mail/cf/cf/sendmail.cf /etc/mail/sendmail.cf

4. Create /etc/mail/genericstable with the list of userids to translate to other names, e.g.
root superuser.host@company.com
jdoe dada@anothercompany.com

5. Change permissions on /etc/mail/genericstable to 600
6. Compile /etc/mail/genericstable into /etc/mail/genericstable.db
cd /etc/mail
makemap hash genericstable <genericstable
7. Change permissions on /etc/mail/genericstable.db to 600
8. restart sendmail service
svcadm restart svc:/network/smtp:sendmail
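A quick sanity check (a hedged example; the recipient address is a placeholder, and the mail log location assumes the default Solaris syslog.conf) is to send a test mail as root and confirm the sender has been rewritten:

echo "genericstable test" | mailx -s "genericstable test" someone@company.com
grep sendmail /var/log/syslog | tail    # the from= field should now show superuser.host@company.com rather than root@<hostname>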



AIX Setup

1. Create an .mc file in /usr/samples/tcpip/sendmail/cf, e.g. sendmail.mc, and insert the following text:

include(`/usr/samples/tcpip/sendmail/m4/cf.m4')
OSTYPE(`aixsample')dnl
DOMAIN(`generic')dnl
define(`confFALLBACK_SMARTHOST', `mailhost$?m.$m$.')dnl
define(`SMART_HOST', `smtprelay.site.company.com')dnl
FEATURE(`masquerade_envelope')dnl
MASQUERADE_AS(`company.com')dnl
FEATURE(genericstable)dnl
GENERICS_DOMAIN(`host.site.company.com')dnl
FEATURE(generics_entire_domain)
MAILER(`local')dnl
MAILER(`smtp')dnl


2. Compile the sendmail.mc file into a usable sendmail.cf file:
/usr/bin/m4 sendmail.mc >sendmail.cf

3. Back up the existing sendmail.cf and replace it with the new cf file:
cp /etc/mail/sendmail.cf /etc/mail/sendmail.cf.<datestring>
cp /usr/samples/tcpip/sendmail/cf/sendmail.cf /etc/mail/sendmail.cf

4. Create /etc/mail/genericstable with the list of userids to translate to other names, e.g.
root hostname.superuser@company.com
myusern first.last@anothercompany.com
5. touch /etc/mail/local-host-names
6. Change permissions on /etc/mail/genericstable to 600
7. Compile /etc/mail/genericstable into /etc/mail/genericstable.db
cd /etc/mail
makemap hash genericstable < genericstable

8. Change permissions on /etc/mail/genericstable.db to 600
9. restart sendmail service
stopsrc -s sendmail
startsrc -s sendmail -a "-bd -q30m"

Saturday, June 6, 2009

Why SAN is rubbish...or how to make a Solaris host see new stuff WITHOUT having to reboot

I have noticed that SAN admins like to say "just reboot the host and it will see the new WWNs and devices".......

Well, guess what: Unix hosts (and Linux too, I guess) should need to be rebooted about once a decade if they are set up right. (Microsoft are making efforts in this area too.....)

I should not need to reboot for something as trivial as adding a new bit of storage. This is supposed to be enterprise equipment after all.

I get pretty cut up about SAN fibre channel's lack of what I would consider pretty basic functionality.

Basically, every SAN device has a unique ID, right, just like Ethernet. SAN is a packet-based network for storage......so why can't it be used like Ethernet?

Imagine if your server vendors told you that every time you added another device to the IP network, you had to reboot every machine you wanted to have talk to it?

I am not talking two-bit SAN here either. I have the same problems with HDS USP-V, EMC Symmetrix, EMC CLARiiON, Cisco MDS 9513 Directors (Director sounds so much classier than switch, let's double the price...). Multi-million dollar equipment.

OK, rant over......here comes the useful bit.....apologies if my tech terms are a bit off....only the SAN guys really care about front-end ports, back-end directors, WWNs etc. Most Unix admins just want to know how to get to the point where the host sees the disk so they can start building volumes and filesystems.

Option 3 below has worked for me every time; I have yet to find a Solaris 10 box that it didn't work on.

I came to the conclusion that a reboot should not be necessary; all I needed to do was find out what the server did when initialising the HBA (host bus adapter, again just a classy name for a SAN network card, which costs about $4k).

So here are some Solaris 10 commands that I have used to detect new SAN equipment that is zoned to the host. (I have been mostly working on Solaris for the last year, not because of preference but because of commercial issues, where my employer decided to use Sun rather than another vendor.) I guess there should be similar functionality on AIX and HP-UX but I haven't yet had a need to find out.....

I assume you are root for all of the below. If you don't know what that means, then I can't help you much.....Reboot the server (gently) like the SAN folks told you.
Also, I use ksh, and am too lazy to port to other shells.

There are 3 basic scenarios:

1. Adding a LUN from the same SAN, front-end ports (WWNs) and SCSI target that are already hosting LUNs you are using on the Solaris server:

In this case you have already established the HBA<->SAN communication and you are just registering a new LUN.

Just running "devfsadm" should be enough to allow you to see the new LUN(s).
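To confirm the new LUN(s) actually arrived (a quick, hedged check; nothing here is destructive):

devfsadm
echo | format      # the new cXtYdZ device(s) should now appear in the disk list
luxadm probe       # should also report the logical path(s) for the new LUN(s)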

2. Adding new LUNs from the same SAN and front-end ports but a different SCSI target:

Here you need to probe the controllers to see if they can register new devices:

#! /bin/ksh
fcinfo hba-port | grep "^HBA"| awk '{print $4 }' | while read ln
do
fcinfo remote-port -p $ln -s >/dev/null 2>&1
done

devfsadm


3. Lastly, for new SAN arrays you have to find all the HBAs and force them to do a link initialisation protocol so that they start talking to whatever is out there. This is what the host does on reboot. I have done this on production hosts with no ill effects, but it is possible it could cause some interruption to existing data flow, so use it with care and only if you MUST. From experience there is a medium risk of filesystem corruption.

Find which controllers are present:
cfgadm -la | grep fc-fabric
Force the Link Initialisation Protocol for each controller you find:
luxadm -e forcelip /dev/cfg/c2
****Stop, wait 5 mins, get a coffee; it can take time for the HBA to come back up and for mpathadm to recover the paths via the controller you reset. If you don't wait you will likely lose all good paths to the LUN, which gives an IO error and likely corrupts your filesystem. Very BAD****
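One way to confirm the paths have recovered before you touch the second controller (a hedged sketch; the device name is a placeholder and this assumes MPxIO is enabled):

mpathadm list lu                                      # Operational Path Count should match Total Path Count again
mpathadm show lu /dev/rdsk/cXtXXXXXXXXXXXXXXXXd0s2    # each Path State should be back to OK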
luxadm -e forcelip /dev/cfg/c4
Check what the HBAs can see (if you want to):
luxadm -e dump_map /dev/cfg/c2
luxadm -e dump_map /dev/cfg/c4

Go check the newly discovered WWNs for LUNs:
#! /bin/ksh
fcinfo hba-port | grep "^HBA"| awk '{print $4 }' | while read ln
do
fcinfo remote-port -p $ln -s >/dev/null 2>&1
done

Configure the LUNs onto your server:
cfgadm -c configure c2
cfgadm -c configure c4


Check if you got them:

cfgadm -la | grep c2
cfgadm -la | grep c4


Install the disk devices into Solaris:
devfsadm

You can then do what you need via format / Solaris Volume Manager / Veritas or whatever.