+ return () unless ($cache_p);
+
+ my $dd = "$ENV{HOME}/Library/Caches"; # MacOS location
+ if (-d $dd) {
+ $cache_file_name = "$dd/org.jwz.xscreensaver.getimage.cache";
+ } elsif (-d "$ENV{HOME}/.cache") { # Gnome "FreeDesktop XDG" location
+ $dd = "$ENV{HOME}/.cache/xscreensaver";
+ if (! -d $dd) { mkdir ($dd) || error ("mkdir $dd: $!"); }
+ $cache_file_name = "$dd/xscreensaver-getimage.cache"
+ } elsif (-d "$ENV{HOME}/tmp") { # If ~/tmp/ exists, use it.
+ $cache_file_name = "$ENV{HOME}/tmp/.xscreensaver-getimage.cache";
+ } else {
+ $cache_file_name = "$ENV{HOME}/.xscreensaver-getimage.cache";
+ }
+
+ print STDERR "$progname: awaiting lock: $cache_file_name\n"
+ if ($verbose > 1);
+
+ my $file = $cache_file_name;
+ open ($cache_fd, '+>>', $file) || error ("unable to write $file: $!");
+ flock ($cache_fd, LOCK_EX) || error ("unable to lock $file: $!");
+ seek ($cache_fd, 0, 0) || error ("unable to rewind $file: $!");
+
+ my $mtime = (stat($cache_fd))[9];
+
+ if ($mtime + $cache_max_age < time) {
+ print STDERR "$progname: cache is too old\n" if ($verbose);
+ return ();
+ }
+
+ my $odir = <$cache_fd>;
+ $odir =~ s/[\r\n]+$//s if defined ($odir);
+ if (!defined ($odir) || ($dir ne $odir)) {
+ print STDERR "$progname: cache is for $odir, not $dir\n"
+ if ($verbose && $odir);
+ return ();
+ }
+
+ my @files = ();
+ while (<$cache_fd>) {
+ s/[\r\n]+$//s;
+ push @files, "$odir/$_";
+ }
+
+ print STDERR "$progname: " . ($#files+1) . " files in cache\n"
+ if ($verbose);
+
+ $read_cache_p = 1;
+ return @files;
+}
+
+
+# Write the directory listing cache file, then unlock and close it.
+#
+# Expects that read_cache() has already opened and exclusively flock()ed
+# the global $cache_fd.  Uses the globals $cache_p, $read_cache_p,
+# $cache_file_name, @all_files, $progname and $verbose.
+#
+sub write_cache($) {
+ my ($dir) = @_;
+
+ # Caching disabled entirely.
+ return unless ($cache_p);
+
+ # If we read the cache, just close it without rewriting it.
+ # If we didn't read it, then write it now.
+
+ if (! $read_cache_p) {
+
+ # We still hold the exclusive lock taken in read_cache(), so it is
+ # safe to clobber the file contents in place.
+ truncate ($cache_fd, 0) ||
+ error ("unable to truncate $cache_file_name: $!");
+ seek ($cache_fd, 0, 0) ||
+ error ("unable to rewind $cache_file_name: $!");
+
+ if ($#all_files >= 0) {
+ # File format: first line is the directory, then one file per line,
+ # stored relative to that directory.
+ print $cache_fd "$dir\n";
+ foreach (@all_files) {
+ my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
+ # Every cached file must live under $dir, or the cache is corrupt.
+ $f =~ s@^\Q$dir/@@so || die; # remove $dir from front
+ print $cache_fd "$f\n";
+ }
+ }
+
+ print STDERR "$progname: cached " . ($#all_files+1) . " files\n"
+ if ($verbose);
+ }
+
+ # Release the lock taken in read_cache() and invalidate the handle.
+ flock ($cache_fd, LOCK_UN) ||
+ error ("unable to unlock $cache_file_name: $!");
+ close ($cache_fd);
+ $cache_fd = undef;
+}
+
+
+# Expand the small set of HTML/XML character entities that occur in
+# RSS/Atom feeds.  Handles the five named XML entities plus decimal
+# ("&#65;") and hex ("&#x41;") numeric references; anything unrecognized
+# is left untouched.
+#
+sub html_unquote($) {
+  my ($h) = @_;
+
+  # This only needs to handle entities that occur in RSS, not full HTML.
+  my %ent = ( 'amp' => '&', 'lt' => '<', 'gt' => '>',
+              'quot' => '"', 'apos' => "'" );
+  $h =~ s/(&(\#)?([[:alpha:]\d]+);?)/
+    {
+     my ($o, $c) = ($1, $3);
+     if (! defined($2)) {
+       $c = $ent{$c};                     # named entity, e.g. &lt;
+     } else {
+       if ($c =~ m@^x([\dA-F]+)$@si) {    # hex, e.g. &#x41;
+         $c = chr(hex($1));
+       } elsif ($c =~ m@^\d+$@si) {       # decimal, e.g. &#65;
+         $c = chr($c);
+       } else {
+         $c = undef;                      # unrecognized: keep original
+       }
+     }
+     # Was ($c || $o), which broke entities that decode to a falsy
+     # string: "&#48;" decodes to "0", which is false in Perl, so the
+     # raw entity text was left in place.  Test definedness instead.
+     (defined($c) ? $c : $o);
+    }
+  /gexi;
+  return $h;
+}
+
+
+
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
+sub set_proxy($) {
+  my ($ua) = @_;
+
+  my $scutil = `scutil --proxy 2>/dev/null`;
+  foreach my $scheme ('http', 'https') {
+    my ($host)    = ($scutil =~ m/\b${scheme}Proxy\s*:\s*([^\s]+)/si);
+    my ($port)    = ($scutil =~ m/\b${scheme}Port\s*:\s*([^\s]+)/si);
+    my ($enabled) = ($scutil =~ m/\b${scheme}Enable\s*:\s*([^\s]+)/si);
+
+    next unless ($host && $enabled);
+
+    # Note: this ignores the "ExceptionsList".
+    # The proxy itself is always spoken to via http, even for https URLs.
+    my $u = "http://$host" . ($port ? ":$port" : "") . "/";
+    $ENV{"${scheme}_proxy"} = $u;
+    print STDERR "$progname: MacOS $scheme proxy: $u\n"
+      if ($verbose > 2);
+  }
+
+  # Pick up http_proxy/https_proxy, whether set above or pre-existing.
+  $ua->env_proxy();
+}
+
+
+# Sanity-check that LWP::Simple loaded usably, then configure its
+# UserAgent's proxy settings.
+#
+sub init_lwp() {
+  # LWP::Simple creates its UserAgent at load time; if that object is
+  # missing, the Perl installation is incomplete.
+  error ("\n\n\tPerl is broken. Do this to repair it:\n" .
+         "\n\tsudo cpan LWP::Simple LWP::Protocol::https Mozilla::CA\n")
+    unless defined ($LWP::Simple::ua);
+  set_proxy ($LWP::Simple::ua);
+}
+
+
+# Verify that LWP can actually fetch pages, and die with a repair hint
+# if it cannot.  A well-known site is fetched over HTTPS, and over plain
+# HTTP as a fallback, to distinguish "no networking at all" from
+# "SSL support is broken".
+#
+sub sanity_check_lwp() {
+  my $https_url = 'https://www.mozilla.org/';
+  my $http_url  = 'http://www.mozilla.org/';
+
+  # Anything shorter than 10K means the fetch effectively failed.
+  my $body = (LWP::Simple::get($https_url) || '');
+  return if (length($body) >= 10240);
+
+  $body = (LWP::Simple::get($http_url) || '');
+  my $err = (length($body) < 10240
+             ? "Perl is broken: neither HTTP nor HTTPS URLs work."
+             : "Perl is broken: HTTP URLs work but HTTPS URLs don't.");
+  $err .= "\nMaybe try: sudo cpan -f Mozilla::CA LWP::Protocol::https";
+  $err =~ s/^/\t/gm;
+  error ("\n\n$err\n");
+}
+
+
+# If the URL does not already end with an extension appropriate for the
+# content-type, add it after a "#" search.
+#
+# This is for when we know the content type of the URL, but the URL is
+# some crazy thing without an extension.  The files on disk need to have
+# proper extensions.
+#
+sub force_extension($$) {
+  my ($url, $ct) = @_;
+  return $url unless (defined($url) && defined($ct));
+
+  # Only image content-types contribute an extension.
+  return $url unless ($ct =~ m@^image/([-a-z\d]+)@si);
+  my $ext = lc($1);
+  $ext = 'jpg' if ($ext eq 'jpeg');   # canonical form
+
+  return ($url =~ m/\.$ext$/si
+          ? $url                      # already ends with the extension
+          : "$url#.$ext");            # tack it on as a harmless anchor
+}
+
+
+# Returns a list of the image enclosures in the RSS or Atom feed.
+# Elements of the list are references, [ "url", "guid" ].
+#
+# Forward declaration: parse_feed calls itself recursively when it
+# follows an autodiscovered <link> from an HTML page.
+sub parse_feed($);
+sub parse_feed($) {
+ my ($url) = @_;
+
+ init_lwp();
+ $LWP::Simple::ua->agent ("$progname/$version");
+ $LWP::Simple::ua->timeout (10); # bail sooner than the default of 3 minutes
+
+
+ # Half the time, random Linux systems don't have Mozilla::CA installed,
+ # which results in "Can't verify SSL peers without knowing which
+ # Certificate Authorities to trust".
+ #
+ # In xscreensaver-text we just disabled certificate checks. However,
+ # malicious images really do exist, so for xscreensaver-getimage-file,
+ # let's actually require that SSL be installed properly.
+
+ print STDERR "$progname: loading $url\n" if ($verbose);
+ my $body = (LWP::Simple::get($url) || '');
+
+ if ($body !~ m@^\s*<(\?xml|rss)\b@si) {
+ # Not an RSS/Atom feed. Try RSS autodiscovery.
+
+ # (Great news, everybody: Flickr no longer provides RSS for "Sets",
+ # only for "Photostreams", and only the first 20 images of those.
+ # Thanks, assholes.)
+
+ if ($body =~ m/^\s*$/s) {
+ sanity_check_lwp();
+ error ("null response: $url");
+ }
+
+ error ("not an RSS or Atom feed, or HTML: $url")
+ unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si);
+
+ # Find the first <link> with RSS or Atom in it, and use that instead.
+
+ # Note: the "return" inside this s///e block returns from parse_feed
+ # itself, so the first usable alternate link wins and the rest of the
+ # substitution is abandoned.
+ $body =~ s@<LINK\s+([^<>]*)>@{
+ my $p = $1;
+ if ($p =~ m! \b REL \s* = \s* ['"]? alternate \b!six &&
+ $p =~ m! \b TYPE \s* = \s* ['"]? application/(atom|rss) !six &&
+ $p =~ m! \b HREF \s* = \s* ['"] ( [^<>'"]+ ) !six
+ ) {
+ my $u2 = html_unquote ($1);
+ if ($u2 =~ m!^/!s) {
+ # Root-relative href: prepend the scheme and host of the page URL.
+ my ($h) = ($url =~ m!^([a-z]+://[^/]+)!si);
+ $u2 = "$h$u2";
+ }
+ print STDERR "$progname: found feed: $u2\n"
+ if ($verbose);
+ return parse_feed ($u2);
+ }
+ '';
+ }@gsexi;
+
+ error ("no RSS or Atom feed for HTML page: $url");
+ }
+
+
+ # Split the feed into items by injecting a \001 sentinel before each
+ # <entry>/<item> tag; the first fragment is the feed preamble, which
+ # the shift below discards.
+ $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+ my @items = split(/\001/, $body);
+ shift @items;
+
+ my @imgs = ();
+ my %ids;
+
+ # Try each known way of embedding an image URL in an item, in order of
+ # preference, stopping at the first that yields one.
+ foreach my $item (@items) {
+ my $iurl = undef;
+ my $id = undef;
+
+ # First look for <link rel="enclosure" href="...">
+ #
+ if (! $iurl) {
+ foreach my $link ($item =~ m@<LINK[^<>]*>@gsi) {
+ last if $iurl;
+ my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ $href = undef unless (lc($rel || '') eq 'enclosure');
+ $href = undef if ($type && $type !~ m@^image/@si); # omit videos
+ $iurl = html_unquote($href) if $href;
+ $iurl = force_extension ($iurl, $type);
+ }
+ }
+
+ # Then look for <media:content url="...">
+ #
+ if (! $iurl) {
+ foreach my $link ($item =~ m@<MEDIA:CONTENT[^<>]*>@gsi) {
+ last if $iurl;
+ my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($med) = ($link =~ m/\bMEDIUM\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ # medium="image" with no type attribute: assume JPEG.
+ $type = 'image/jpeg' if (!$type && lc($med || '') eq 'image');
+ $href = undef if ($type && $type !~ m@^image/@si); # omit videos
+ $iurl = html_unquote($href) if $href;
+ $iurl = force_extension ($iurl, $type);
+ }
+ }
+
+ # Then look for <enclosure url="..."/>
+ #
+ if (! $iurl) {
+ foreach my $link ($item =~ m@<ENCLOSURE[^<>]*>@gsi) {
+ last if $iurl;
+ my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ $href = undef if ($type && $type !~ m@^image/@si); # omit videos
+ $iurl = html_unquote($href) if ($href);
+ $iurl = force_extension ($iurl, $type);
+ }
+ }
+
+ # Ok, maybe there's an image in the <url> field?
+ #
+ if (! $iurl) {
+ foreach my $link ($item =~ m@<URL\b[^<>]*>([^<>]*)@gsi) {
+ last if $iurl;
+ my $u2 = $1;
+ $iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io);
+ if (! $iurl) {
+ # Retry with anchors and query arguments stripped, in case the
+ # image extension is hidden behind them.
+ my $u3 = $u2;
+ $u3 =~ s/#.*$//gs;
+ $u3 =~ s/[?&].*$//gs;
+ $iurl = html_unquote($u2) if ($u3 =~ m/$good_file_re/io);
+ }
+ }
+ }
+
+ # Then look for <content:encoded> or <description>... with an
+ # <img src="..."> inside. If more than one image, take the first.
+ #
+ foreach my $t ('content:encoded', 'description') {
+ last if $iurl;
+ foreach my $link ($item =~ m@<$t[^<>]*>(.*?)</$t>@gsi) {
+ last if $iurl;
+ my $desc = $1;
+ if ($desc =~ m@<!\[CDATA\[\s*(.*?)\s*\]\]>@gs) {
+ $desc = $1;
+ } else {
+ $desc = html_unquote($desc);
+ }
+ my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+ $iurl = html_unquote($href) if ($href);
+ # If IMG SRC has a bogus extension, pretend it's a JPEG.
+ $iurl = force_extension ($iurl, 'image/jpeg')
+ if ($iurl && $iurl !~ m/$good_file_re/io);
+ }
+ }
+
+ # Find a unique ID for this image, to defeat image farms.
+ # First look for <id>...</id>
+ ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+
+ # Then look for <guid isPermaLink=...> ... </guid>
+ ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+
+ # Then look for <link> ... </link>
+ ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+
+ # If we only have a GUID or LINK, but it's an image, use that.
+ $iurl = $id if (!$iurl && $id && $id =~ m/$good_file_re/io);
+
+ if ($iurl) {
+ # De-duplicate by ID so a feed can't flood us with copies of the
+ # same image under one GUID.
+ $id = $iurl unless $id;
+ my $o = $ids{$id};
+ if (! $o) {
+ $ids{$id} = $iurl;
+ my @P = ($iurl, $id);
+ push @imgs, \@P;
+ } elsif ($iurl ne $o) {
+ print STDERR "$progname: WARNING: dup ID \"$id\"" .
+ " for \"$o\" and \"$iurl\"\n";
+ }
+ }
+ }
+
+ return @imgs;
+}
+
+
+# Like md5_base64 but uses filename-safe characters.
+#
+sub md5_file($) {
+  my ($s) = @_;
+  my $hash = md5_base64($s);
+  # Base64 uses "/" and "+", both awkward in file names; transliterate
+  # them to "_" and "-" respectively.
+  $hash =~ tr@/+@_-@;
+  return $hash;
+}
+
+
+# expands the first URL relative to the second.
+# Returns the (possibly rewritten) absolute URL.  Uses the globals
+# $progname and $verbose for logging only.
+#
+sub expand_url($$) {
+ my ($url, $base) = @_;
+
+ $url =~ s/^\s+//gs; # lose whitespace at front and back
+ $url =~ s/\s+$//gs;
+
+ # A leading "scheme:" (lowercase) means the URL is already absolute.
+ if (! ($url =~ m/^[a-z]+:/)) {
+
+ # Reduce the base to its directory: the order matters, since anchors
+ # and query args must go before the trailing file component does.
+ $base =~ s@(\#.*)$@@; # strip anchors
+ $base =~ s@(\?.*)$@@; # strip arguments
+ $base =~ s@/[^/]*$@/@; # take off trailing file component
+
+ # Detach the relative URL's own "?args#anchor" so that "." and ".."
+ # expansion below can't touch them; re-attached at the end.
+ my $tail = '';
+ if ($url =~ s@(\#.*)$@@) { $tail = $1; } # save anchors
+ if ($url =~ s@(\?.*)$@@) { $tail = "$1$tail"; } # save arguments
+
+ my $base2 = $base;
+
+ # An absolute path ("/foo") resolves against scheme://host only.
+ $base2 =~ s@^([a-z]+:/+[^/]+)/.*@$1@ # if url is an absolute path
+ if ($url =~ m@^/@);
+
+ my $ourl = $url;
+
+ $url = $base2 . $url;
+ $url =~ s@/\./@/@g; # expand "."
+ # Repeat until no "dir/../" remains, handling nested ".." segments.
+ 1 while ($url =~ s@/[^/]+/\.\./@/@s); # expand ".."
+
+ $url .= $tail; # put anchors/args back
+
+ print STDERR "$progname: relative URL: $ourl --> $url\n"
+ if ($verbose > 1);
+
+ } else {
+ print STDERR "$progname: absolute URL: $url\n"
+ if ($verbose > 2);
+ }
+
+ return $url;
+}
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+sub download_image($$$) {
+ my ($url, $uid, $dir) = @_;
+
+ my $url2 = $url;
+ $url2 =~ s/\#.*$//s; # Omit search terms after file extension
+ $url2 =~ s/\?.*$//s;
+ my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+ ($ext) = ($url2 =~ m@\.([a-z\d]+)$@si) unless $ext;